filename | text
---|---|
the-stack_0_8989 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
import os
import pytest
import llnl.util.filesystem as fs
import spack.environment
import spack.package
import spack.paths
import spack.repo
from spack.build_environment import ChildError, get_std_cmake_args, setup_package
from spack.spec import Spec
from spack.util.executable import which
DATA_PATH = os.path.join(spack.paths.test_path, 'data')
@pytest.mark.parametrize(
'directory',
glob.iglob(os.path.join(DATA_PATH, 'make', 'affirmative', '*'))
)
def test_affirmative_make_check(directory, config, mock_packages, working_env):
"""Tests that Spack correctly detects targets in a Makefile."""
# Get a fake package
s = Spec('mpich')
s.concretize()
pkg = spack.repo.get(s)
setup_package(pkg, False)
with fs.working_dir(directory):
assert pkg._has_make_target('check')
pkg._if_make_target_execute('check')
@pytest.mark.parametrize(
'directory',
glob.iglob(os.path.join(DATA_PATH, 'make', 'negative', '*'))
)
@pytest.mark.regression('9067')
def test_negative_make_check(directory, config, mock_packages, working_env):
"""Tests that Spack correctly ignores false positives in a Makefile."""
# Get a fake package
s = Spec('mpich')
s.concretize()
pkg = spack.repo.get(s)
setup_package(pkg, False)
with fs.working_dir(directory):
assert not pkg._has_make_target('check')
pkg._if_make_target_execute('check')
@pytest.mark.skipif(not which('ninja'), reason='ninja is not installed')
@pytest.mark.parametrize(
'directory',
glob.iglob(os.path.join(DATA_PATH, 'ninja', 'affirmative', '*'))
)
def test_affirmative_ninja_check(
directory, config, mock_packages, working_env):
"""Tests that Spack correctly detects targets in a Ninja build script."""
# Get a fake package
s = Spec('mpich')
s.concretize()
pkg = spack.repo.get(s)
setup_package(pkg, False)
with fs.working_dir(directory):
assert pkg._has_ninja_target('check')
pkg._if_ninja_target_execute('check')
# Clean up Ninja files
for filename in glob.iglob('.ninja_*'):
os.remove(filename)
@pytest.mark.skipif(not which('ninja'), reason='ninja is not installed')
@pytest.mark.parametrize(
'directory',
glob.iglob(os.path.join(DATA_PATH, 'ninja', 'negative', '*'))
)
def test_negative_ninja_check(directory, config, mock_packages, working_env):
"""Tests that Spack correctly ignores false positives in a Ninja
build script."""
# Get a fake package
s = Spec('mpich')
s.concretize()
pkg = spack.repo.get(s)
setup_package(pkg, False)
with fs.working_dir(directory):
assert not pkg._has_ninja_target('check')
pkg._if_ninja_target_execute('check')
def test_cmake_std_args(config, mock_packages):
# Call the function on a CMakePackage instance
s = Spec('cmake-client')
s.concretize()
pkg = spack.repo.get(s)
assert pkg.std_cmake_args == get_std_cmake_args(pkg)
# Call it on another kind of package
s = Spec('mpich')
s.concretize()
pkg = spack.repo.get(s)
assert get_std_cmake_args(pkg)
def test_cmake_bad_generator(config, mock_packages):
s = Spec('cmake-client')
s.concretize()
pkg = spack.repo.get(s)
pkg.generator = 'Yellow Sticky Notes'
with pytest.raises(spack.package.InstallError):
get_std_cmake_args(pkg)
def test_cmake_secondary_generator(config, mock_packages):
s = Spec('cmake-client')
s.concretize()
pkg = spack.repo.get(s)
pkg.generator = 'CodeBlocks - Unix Makefiles'
assert get_std_cmake_args(pkg)
@pytest.mark.usefixtures('config', 'mock_packages')
class TestAutotoolsPackage(object):
def test_with_or_without(self):
s = Spec('a')
s.concretize()
pkg = spack.repo.get(s)
options = pkg.with_or_without('foo')
# Ensure that values that are not representing a feature
# are not used by with_or_without
assert '--without-none' not in options
assert '--with-bar' in options
assert '--without-baz' in options
assert '--no-fee' in options
def activate(value):
return 'something'
options = pkg.with_or_without('foo', activation_value=activate)
assert '--without-none' not in options
assert '--with-bar=something' in options
assert '--without-baz' in options
assert '--no-fee' in options
options = pkg.enable_or_disable('foo')
assert '--disable-none' not in options
assert '--enable-bar' in options
assert '--disable-baz' in options
assert '--disable-fee' in options
options = pkg.with_or_without('bvv')
assert '--with-bvv' in options
options = pkg.with_or_without('lorem-ipsum', variant='lorem_ipsum')
assert '--without-lorem-ipsum' in options
def test_none_is_allowed(self):
s = Spec('a foo=none')
s.concretize()
pkg = spack.repo.get(s)
options = pkg.with_or_without('foo')
# Ensure that values that are not representing a feature
# are not used by with_or_without
assert '--with-none' not in options
assert '--without-bar' in options
assert '--without-baz' in options
assert '--no-fee' in options
def test_libtool_archive_files_are_deleted_by_default(
self, mutable_database
):
# Install a package that creates a mock libtool archive
s = Spec('libtool-deletion')
s.concretize()
s.package.do_install(explicit=True)
# Assert the libtool archive is not there and we have
# a log of removed files
assert not os.path.exists(s.package.libtool_archive_file)
search_directory = os.path.join(s.prefix, '.spack')
libtool_deletion_log = fs.find(
search_directory, 'removed_la_files.txt', recursive=True
)
assert libtool_deletion_log
def test_libtool_archive_files_might_be_installed_on_demand(
self, mutable_database, monkeypatch
):
# Install a package that creates a mock libtool archive,
# patch its package to preserve the installation
s = Spec('libtool-deletion')
s.concretize()
monkeypatch.setattr(s.package, 'install_libtool_archives', True)
s.package.do_install(explicit=True)
# Assert libtool archives are installed
assert os.path.exists(s.package.libtool_archive_file)
def test_autotools_gnuconfig_replacement(self, mutable_database):
"""
Tests that only the broken config.sub and config.guess are replaced
with working alternatives from the gnuconfig package.
"""
s = Spec('autotools-config-replacement +patch_config_files +gnuconfig')
s.concretize()
s.package.do_install()
with open(os.path.join(s.prefix.broken, 'config.sub')) as f:
assert "gnuconfig version of config.sub" in f.read()
with open(os.path.join(s.prefix.broken, 'config.guess')) as f:
assert "gnuconfig version of config.guess" in f.read()
with open(os.path.join(s.prefix.working, 'config.sub')) as f:
assert "gnuconfig version of config.sub" not in f.read()
with open(os.path.join(s.prefix.working, 'config.guess')) as f:
assert "gnuconfig version of config.guess" not in f.read()
def test_autotools_gnuconfig_replacement_disabled(self, mutable_database):
"""
Tests that disabling patch_config_files leaves both broken and working
config.sub and config.guess files untouched.
"""
s = Spec('autotools-config-replacement ~patch_config_files +gnuconfig')
s.concretize()
s.package.do_install()
with open(os.path.join(s.prefix.broken, 'config.sub')) as f:
assert "gnuconfig version of config.sub" not in f.read()
with open(os.path.join(s.prefix.broken, 'config.guess')) as f:
assert "gnuconfig version of config.guess" not in f.read()
with open(os.path.join(s.prefix.working, 'config.sub')) as f:
assert "gnuconfig version of config.sub" not in f.read()
with open(os.path.join(s.prefix.working, 'config.guess')) as f:
assert "gnuconfig version of config.guess" not in f.read()
@pytest.mark.disable_clean_stage_check
def test_autotools_gnuconfig_replacement_no_gnuconfig(self, mutable_database):
"""
Tests whether a useful error message is shown when patch_config_files is
enabled, but gnuconfig is not listed as a direct build dependency.
"""
s = Spec('autotools-config-replacement +patch_config_files ~gnuconfig')
s.concretize()
msg = "Cannot patch config files: missing dependencies: gnuconfig"
with pytest.raises(ChildError, match=msg):
s.package.do_install()
@pytest.mark.disable_clean_stage_check
def test_broken_external_gnuconfig(self, mutable_database, tmpdir):
"""
Tests whether we get a useful error message when gnuconfig is marked
external, but the install prefix is misconfigured and no config.guess
and config.sub substitute files are found in the provided prefix.
"""
env_dir = str(tmpdir.ensure('env', dir=True))
gnuconfig_dir = str(tmpdir.ensure('gnuconfig', dir=True)) # empty dir
with open(os.path.join(env_dir, 'spack.yaml'), 'w') as f:
f.write("""\
spack:
specs:
- 'autotools-config-replacement +patch_config_files +gnuconfig'
packages:
gnuconfig:
buildable: false
externals:
- spec: [email protected]
prefix: {0}
""".format(gnuconfig_dir))
msg = ("Spack could not find `config.guess`.*misconfigured as an "
"external package")
with spack.environment.Environment(env_dir) as e:
e.concretize()
with pytest.raises(ChildError, match=msg):
e.install_all()
@pytest.mark.usefixtures('config', 'mock_packages')
class TestCMakePackage(object):
def test_define(self):
s = Spec('cmake-client')
s.concretize()
pkg = spack.repo.get(s)
for cls in (list, tuple):
arg = pkg.define('MULTI', cls(['right', 'up']))
assert arg == '-DMULTI:STRING=right;up'
arg = pkg.define('ENABLE_TRUTH', False)
assert arg == '-DENABLE_TRUTH:BOOL=OFF'
arg = pkg.define('ENABLE_TRUTH', True)
assert arg == '-DENABLE_TRUTH:BOOL=ON'
arg = pkg.define('SINGLE', 'red')
assert arg == '-DSINGLE:STRING=red'
def test_define_from_variant(self):
s = Spec('cmake-client multi=up,right ~truthy single=red')
s.concretize()
pkg = spack.repo.get(s)
arg = pkg.define_from_variant('MULTI')
assert arg == '-DMULTI:STRING=right;up'
arg = pkg.define_from_variant('ENABLE_TRUTH', 'truthy')
assert arg == '-DENABLE_TRUTH:BOOL=OFF'
arg = pkg.define_from_variant('SINGLE')
assert arg == '-DSINGLE:STRING=red'
with pytest.raises(KeyError, match="not a variant"):
pkg.define_from_variant('NONEXISTENT')
@pytest.mark.usefixtures('config', 'mock_packages')
class TestGNUMirrorPackage(object):
def test_define(self):
s = Spec('mirror-gnu')
s.concretize()
pkg = spack.repo.get(s)
s = Spec('mirror-gnu-broken')
s.concretize()
pkg_broken = spack.repo.get(s)
cls_name = type(pkg_broken).__name__
with pytest.raises(AttributeError,
match=r'{0} must define a `gnu_mirror_path` '
r'attribute \[none defined\]'
.format(cls_name)):
pkg_broken.urls
assert pkg.urls[0] == 'https://ftpmirror.gnu.org/' \
'make/make-4.2.1.tar.gz'
@pytest.mark.usefixtures('config', 'mock_packages')
class TestSourceforgePackage(object):
def test_define(self):
s = Spec('mirror-sourceforge')
s.concretize()
pkg = spack.repo.get(s)
s = Spec('mirror-sourceforge-broken')
s.concretize()
pkg_broken = spack.repo.get(s)
cls_name = type(pkg_broken).__name__
with pytest.raises(AttributeError,
match=r'{0} must define a `sourceforge_mirror_path`'
r' attribute \[none defined\]'
.format(cls_name)):
pkg_broken.urls
assert pkg.urls[0] == 'https://prdownloads.sourceforge.net/' \
'tcl/tcl8.6.5-src.tar.gz'
@pytest.mark.usefixtures('config', 'mock_packages')
class TestSourcewarePackage(object):
def test_define(self):
s = Spec('mirror-sourceware')
s.concretize()
pkg = spack.repo.get(s)
s = Spec('mirror-sourceware-broken')
s.concretize()
pkg_broken = spack.repo.get(s)
cls_name = type(pkg_broken).__name__
with pytest.raises(AttributeError,
match=r'{0} must define a `sourceware_mirror_path` '
r'attribute \[none defined\]'
.format(cls_name)):
pkg_broken.urls
assert pkg.urls[0] == 'https://sourceware.org/pub/' \
'bzip2/bzip2-1.0.8.tar.gz'
@pytest.mark.usefixtures('config', 'mock_packages')
class TestXorgPackage(object):
def test_define(self):
s = Spec('mirror-xorg')
s.concretize()
pkg = spack.repo.get(s)
s = Spec('mirror-xorg-broken')
s.concretize()
pkg_broken = spack.repo.get(s)
cls_name = type(pkg_broken).__name__
with pytest.raises(AttributeError,
match=r'{0} must define a `xorg_mirror_path` '
r'attribute \[none defined\]'
.format(cls_name)):
pkg_broken.urls
assert pkg.urls[0] == 'https://www.x.org/archive/individual/' \
'util/util-macros-1.19.1.tar.bz2'
def test_cmake_define_from_variant_conditional(config, mock_packages):
"""Test that define_from_variant returns empty string when a condition on a variant
is not met. When this is the case, the variant is not set in the spec."""
s = Spec('cmake-conditional-variants-test').concretized()
assert 'example' not in s.variants
assert s.package.define_from_variant('EXAMPLE', 'example') == ''
|
the-stack_0_8991 | from sys import stdin, stdout
freq = {}
num_women = int(input())
for i in range(num_women):
line = stdin.readline().strip().split()
country = line[0]
if country not in freq:
freq[country] = 1
else:
freq[country] += 1
for pair in sorted(freq.items()):
stdout.write("{} {}\n".format(pair[0], pair[1])) |
the-stack_0_8992 | """Snapcast group."""
import asyncio
import logging
_LOGGER = logging.getLogger(__name__)
# pylint: disable=too-many-public-methods
class Snapgroup(object):
"""Represents a snapcast group."""
def __init__(self, server, data):
"""Initialize."""
self._server = server
self._snapshot = None
self._callback_func = None
self.update(data)
def update(self, data):
"""Update group."""
self._group = data
@property
def identifier(self):
"""Get group identifier."""
return self._group.get('id')
@property
def name(self):
"""Get group name."""
return self._group.get('name')
@property
def stream(self):
"""Get stream identifier."""
return self._group.get('stream_id')
@asyncio.coroutine
def set_stream(self, stream_id):
"""Set group stream."""
self._group['stream_id'] = stream_id
yield from self._server.group_stream(self.identifier, stream_id)
_LOGGER.info('set stream to %s on %s', stream_id, self.friendly_name)
@property
def stream_status(self):
"""Get stream status."""
return self._server.stream(self.stream).status
@property
def muted(self):
"""Get mute status."""
return self._group.get('muted')
@asyncio.coroutine
def set_muted(self, status):
"""Set group mute status."""
self._group['muted'] = status
yield from self._server.group_mute(self.identifier, status)
_LOGGER.info('set muted to %s on %s', status, self.friendly_name)
@property
def volume(self):
"""Get volume."""
volume_sum = 0
for client in self._group.get('clients'):
volume_sum += self._server.client(client.get('id')).volume
return int(volume_sum / len(self._group.get('clients')))
@asyncio.coroutine
def set_volume(self, volume):
"""Set volume."""
if volume not in range(0, 101):
raise ValueError('Volume out of range')
current_volume = self.volume
if volume == current_volume:
_LOGGER.info('left volume at %s on group %s', volume, self.friendly_name)
return
delta = volume - current_volume
if delta < 0:
ratio = (current_volume - volume) / current_volume
else:
ratio = (volume - current_volume) / (100 - current_volume)
for data in self._group.get('clients'):
client = self._server.client(data.get('id'))
client_volume = client.volume
if delta < 0:
client_volume -= ratio * client_volume
else:
client_volume += ratio * (100 - client_volume)
client_volume = round(client_volume)
yield from client.set_volume(client_volume, update_group=False)
client.update_volume({
'volume': {
'percent': client_volume,
'muted': client.muted
}
})
_LOGGER.info('set volume to %s on group %s', volume, self.friendly_name)
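# Illustrative walk-through of the scaling above (numbers are assumed, not from
# the original code): with two clients at 60% and 20% the group volume is 40%;
# asking for 70% gives delta = +30 and ratio = (70 - 40) / (100 - 40) = 0.5, so
# the clients move to 60 + 0.5 * (100 - 60) = 80 and 20 + 0.5 * (100 - 20) = 60,
# whose mean is the requested 70% while the relative spread is preserved.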
@property
def friendly_name(self):
"""Get friendly name."""
return self.name if self.name != '' else self.stream
@property
def clients(self):
"""Get client identifiers."""
return [client.get('id') for client in self._group.get('clients')]
@asyncio.coroutine
def add_client(self, client_identifier):
"""Add a client."""
if client_identifier in self.clients:
_LOGGER.error('%s already in group %s', client_identifier, self.identifier)
return
new_clients = self.clients
new_clients.append(client_identifier)
yield from self._server.group_clients(self.identifier, new_clients)
_LOGGER.info('added %s to %s', client_identifier, self.identifier)
status = yield from self._server.status()
self._server.synchronize(status)
self._server.client(client_identifier).callback()
self.callback()
@asyncio.coroutine
def remove_client(self, client_identifier):
"""Remove a client."""
new_clients = self.clients
new_clients.remove(client_identifier)
yield from self._server.group_clients(self.identifier, new_clients)
_LOGGER.info('removed %s from %s', client_identifier, self.identifier)
status = yield from self._server.status()
self._server.synchronize(status)
self._server.client(client_identifier).callback()
self.callback()
def streams_by_name(self):
"""Get available stream objects by name."""
return {stream.friendly_name: stream for stream in self._server.streams}
def update_mute(self, data):
"""Update mute."""
self._group['muted'] = data['mute']
self.callback()
_LOGGER.info('updated mute on %s', self.friendly_name)
def update_stream(self, data):
"""Update stream."""
self._group['stream_id'] = data['stream_id']
self.callback()
_LOGGER.info('updated stream to %s on %s', self.stream, self.friendly_name)
def snapshot(self):
"""Snapshot current state."""
self._snapshot = {
'muted': self.muted,
'volume': self.volume,
'stream': self.stream
}
_LOGGER.info('took snapshot of current state of %s', self.friendly_name)
@asyncio.coroutine
def restore(self):
"""Restore snapshotted state."""
if not self._snapshot:
return
yield from self.set_muted(self._snapshot['muted'])
yield from self.set_volume(self._snapshot['volume'])
yield from self.set_stream(self._snapshot['stream'])
self.callback()
_LOGGER.info('restored snapshot of state of %s', self.friendly_name)
def callback(self):
"""Run callback."""
if self._callback_func and callable(self._callback_func):
self._callback_func(self)
def set_callback(self, func):
"""Set callback."""
self._callback_func = func
def __repr__(self):
"""String representation."""
return 'Snapgroup ({}, {})'.format(self.friendly_name, self.identifier)
|
the-stack_0_8996 | import Adafruit_DHT
DHT_SENSOR = Adafruit_DHT.DHT22
DHT_PIN = 4
while True:
humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
if humidity is not None and temperature is not None:
print("Temp={0:0.1f}*C Humidity={1:0.1f}%".format(temperature, humidity))
else:
print("DATA RETRIEVAL ERROR") |
the-stack_0_8997 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import time
import oneflow.unittest
import oneflow as flow
import oneflow.nn as nn
import oneflow.utils.vision.transforms as transforms
# reference: http://tangshusen.me/Dive-into-DL-PyTorch/#/chapter03_DL-basics/3.10_mlp-pytorch
def load_data_fashion_mnist(
batch_size, resize=None, root="./data/fashion-mnist", download=True, source_url=None
):
"""Download the Fashion-MNIST dataset and then load into memory."""
root = os.path.expanduser(root)
transformer = []
if resize:
transformer += [transforms.Resize(resize)]
transformer += [transforms.ToTensor()]
transformer = transforms.Compose(transformer)
mnist_train = flow.utils.vision.datasets.FashionMNIST(
root=root,
train=True,
transform=transformer,
download=download,
source_url=source_url,
)
mnist_test = flow.utils.vision.datasets.FashionMNIST(
root=root,
train=False,
transform=transformer,
download=download,
source_url=source_url,
)
num_workers = 0
train_iter = flow.utils.data.DataLoader(
mnist_train, batch_size, shuffle=True, num_workers=num_workers
)
test_iter = flow.utils.data.DataLoader(
mnist_test, batch_size, shuffle=False, num_workers=num_workers
)
return train_iter, test_iter
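# A minimal usage sketch (hedged; the paths and batch size below are
# assumptions, not part of the original test):
#
#     train_iter, test_iter = load_data_fashion_mnist(
#         batch_size=256, root="./data/fashion-mnist", download=True
#     )
#     for images, labels in train_iter:
#         print(images.shape, get_fashion_mnist_labels(labels.numpy()[:3]))
#         break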
def get_fashion_mnist_labels(labels):
"""Get text labels for Fashion-MNIST."""
text_labels = [
"t-shirt",
"trouser",
"pullover",
"dress",
"coat",
"sandal",
"shirt",
"sneaker",
"bag",
"ankle boot",
]
return [text_labels[int(i)] for i in labels]
class FlattenLayer(nn.Module):
def __init__(self):
super(FlattenLayer, self).__init__()
def forward(self, x): # x shape: (batch, *, *, ...)
res = x.reshape(shape=[x.shape[0], -1])
return res
def evaluate_accuracy(data_iter, net, device=None):
if device is None and isinstance(net, nn.Module):
# using net device if not specified
device = list(net.parameters())[0].device
acc_sum, n = 0.0, 0
net.eval()
with flow.no_grad():
for X, y in data_iter:
X = X.to(device=device)
y = y.to(device=device)
acc_sum += (
net(X.to(device)).argmax(dim=1).numpy() == y.to(device).numpy()
).sum()
n += y.shape[0]
net.train()
return acc_sum / n
def test(test_case):
num_inputs, num_outputs, num_hiddens = 784, 10, 256
net = nn.Sequential(
FlattenLayer(),
nn.Linear(num_inputs, num_hiddens),
nn.ReLU(),
nn.Linear(num_hiddens, num_outputs),
)
if os.getenv("ONEFLOW_TEST_CPU_ONLY"):
device = flow.device("cpu")
else:
device = flow.device("cuda")
net.to(device)
batch_size = 256
num_epochs = 1
data_dir = os.path.join(
os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data-test"), "fashion-mnist"
)
source_url = "https://oneflow-public.oss-cn-beijing.aliyuncs.com/datasets/mnist/Fashion-MNIST/"
train_iter, test_iter = load_data_fashion_mnist(
batch_size, root=data_dir, download=True, source_url=source_url
)
loss = nn.CrossEntropyLoss()
loss.to(device)
optimizer = flow.optim.SGD(net.parameters(), lr=0.1)
final_accuracy = 0
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
start = time.time()
for X, y in train_iter:
X = X.to(device=device)
y = y.to(device=device)
y_hat = net(X)
l = loss(y_hat, y).sum()
optimizer.zero_grad()
l.backward()
optimizer.step()
train_l_sum += l.numpy()
train_acc_sum += (y_hat.argmax(dim=1).numpy() == y.numpy()).sum()
n += y.shape[0]
test_acc = evaluate_accuracy(test_iter, net)
final_accuracy = train_acc_sum / n
print(
"epoch %d, loss %.4f, train acc %.3f, test acc %.3f, cost >>>>>>> %s(s)"
% (
epoch + 1,
train_l_sum / n,
final_accuracy,
test_acc,
str(time.time() - start),
)
)
final_accuracy = train_acc_sum / n
test_case.assertLess(0.70, final_accuracy)
@flow.unittest.skip_unless_1n1d()
class TestFashionMnistDataset(flow.unittest.TestCase):
def test_fashion_mnist_dataset(test_case):
test(test_case)
if __name__ == "__main__":
unittest.main()
# 1 epoch training log
# epoch 1, loss 0.0034, train acc 0.718, test acc 0.771, cost >>>>>>> 158.32699990272522(s)
# epoch 2, loss 0.0022, train acc 0.807, test acc 0.726, cost >>>>>>> 159.64465260505676(s)
|
the-stack_0_8998 | """
This file offers the methods to automatically retrieve the graph Sphingomonas soli NBRC 100801.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def SphingomonasSoliNbrc100801(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Sphingomonas soli NBRC 100801 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of Sphingomonas soli NBRC 100801 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="SphingomonasSoliNbrc100801",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
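# A minimal usage sketch (illustrative, not part of the original module):
#
#     graph = SphingomonasSoliNbrc100801(directed=False, version="links.v11.5")
#
# which retrieves and caches the STRING data under `cache_path` and returns an
# ensmallen Graph object ready for use.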
|
the-stack_0_8999 | import base64
import requests
from constants import URL, IMAGE_EXTENSION
"""
http://<URL>/image/all (GET)
"""
def recover_images():
resource = f"{URL}image"
response = requests.get(url=resource)
for data in response.json():
image_64_encode = data['encoded_image']
image_64_encode = image_64_encode.encode("utf-8")
image_64_decode = base64.decodebytes(image_64_encode)
file_name = data['id'] + IMAGE_EXTENSION
with open(file_name, 'wb') as image_result:
image_result.write(image_64_decode)
print(response)
if __name__ == '__main__':
recover_images()
|
the-stack_0_9000 | from StringIO import StringIO
from mimetypes import guess_all_extensions, guess_type
import zipfile
import logging
import os
from django.contrib.auth.decorators import login_required
import json
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views.generic import View, TemplateView
from couchdbkit.exceptions import ResourceNotFound
from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponseServerError, HttpResponseBadRequest
from django.shortcuts import render
from corehq.apps.app_manager.decorators import safe_download, require_can_edit_apps
from corehq.apps.app_manager.view_helpers import ApplicationViewMixin
from corehq.apps.app_manager.models import get_app, RemoteApp
from corehq.apps.hqmedia.cache import BulkMultimediaStatusCache
from corehq.apps.hqmedia.controller import MultimediaBulkUploadController, MultimediaImageUploadController, MultimediaAudioUploadController, MultimediaVideoUploadController
from corehq.apps.hqmedia.decorators import login_with_permission_from_post
from corehq.apps.hqmedia.models import CommCareImage, CommCareAudio, CommCareMultimedia, MULTIMEDIA_PREFIX, CommCareVideo
from corehq.apps.hqmedia.tasks import process_bulk_upload_zip
from corehq.apps.users.decorators import require_permission
from corehq.apps.users.models import Permissions
from corehq.util.zip_utils import DownloadZip
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.django.cached_object import CachedObject
from soil.util import expose_download
from django.utils.translation import ugettext as _
class BaseMultimediaView(ApplicationViewMixin, View):
@method_decorator(require_permission(Permissions.edit_apps, login_decorator=login_with_permission_from_post()))
def dispatch(self, request, *args, **kwargs):
return super(BaseMultimediaView, self).dispatch(request, *args, **kwargs)
class BaseMultimediaTemplateView(BaseMultimediaView, TemplateView):
"""
The base view for all the multimedia templates.
"""
@property
def page_context(self):
return {}
def get_context_data(self, **kwargs):
context = {
"domain": self.domain,
"app": self.app,
}
context.update(self.page_context)
return context
def render_to_response(self, context, **response_kwargs):
return render(self.request, self.template_name, context)
@require_can_edit_apps
def search_for_media(request, domain, app_id):
media_type = request.GET['t']
if media_type == 'Image':
files = CommCareImage.search(request.GET['q'])
elif media_type == 'Audio':
files = CommCareAudio.search(request.GET['q'])
else:
raise Http404()
return HttpResponse(json.dumps([
{'url': i.url(),
'licenses': [license.display_name for license in i.licenses],
'tags': [tag for tags in i.tags.values() for tag in tags],
'm_id': i._id} for i in files]))
@require_can_edit_apps
def choose_media(request, domain, app_id):
# TODO: Add error handling
app = get_app(domain, app_id)
media_type = request.POST['media_type']
media_id = request.POST['id']
if media_type == 'Image':
file = CommCareImage.get(media_id)
elif media_type == 'Audio':
file = CommCareAudio.get(media_id)
else:
raise Http404()
if file is None or not file.is_shared:
return HttpResponse(json.dumps({
'match_found': False
}))
file.add_domain(domain)
app.create_mapping(file, request.POST['path'])
if media_type == 'Image':
return HttpResponse(json.dumps({
'match_found': True,
'image': {'m_id': file._id, 'url': file.url()},
'file': True
}))
elif media_type == 'Audio':
return HttpResponse(json.dumps({'match_found': True, 'audio': {'m_id': file._id, 'url': file.url()}}))
else:
raise Http404()
@require_can_edit_apps
def media_urls(request, domain, app_id):
# IS THIS USED?????
# I rewrote it so it actually produces _something_, but is it useful???
app = get_app(domain, app_id)
# todo remove get_media_references
multimedia = app.get_media_references()
pathUrls = {}
for section, types in multimedia['references'].items():
for media_type, info in types.items():
for m in info['maps']:
if m.get('path'):
pathUrls[m['path']] = m
return HttpResponse(json.dumps(pathUrls))
def media_from_path(request, domain, app_id, file_path):
# Not sure what the intentions were for this. I didn't see it getting used anywhere.
# Rewrote it to use new media refs.
# Yedi, care to comment?
app = get_app(domain, app_id)
if isinstance(app, RemoteApp):
raise Http404('Media not yet available for remote apps')
# todo remove get_media_references
multimedia = app.get_media_references()
for section, types in multimedia['references'].items():
for media_type, info in types.items():
for media_map in info['maps']:
# [10:] is to remove the 'jr://file/'
if media_map['path'][10:] == file_path and media_map.get('url'):
return HttpResponseRedirect(media_map['url'])
raise Http404('No Media Found')
class BaseMultimediaUploaderView(BaseMultimediaTemplateView):
@property
def page_context(self):
return {
'uploaders': self.upload_controllers,
"sessionid": self.request.COOKIES.get('sessionid'),
}
@property
def upload_controllers(self):
"""
Return a list of Upload Controllers
"""
raise NotImplementedError("You must specify a list of upload controllers")
class MultimediaReferencesView(BaseMultimediaUploaderView):
name = "hqmedia_references"
template_name = "hqmedia/references.html"
@property
def page_context(self):
context = super(MultimediaReferencesView, self).page_context
if self.app is None:
raise Http404(self)
context.update({
"references": self.app.get_references(),
"object_map": self.app.get_object_map(),
"totals": self.app.get_reference_totals(),
"sessionid": self.request.COOKIES.get('sessionid'),
})
return context
@property
def upload_controllers(self):
return [
MultimediaImageUploadController("hqimage", reverse(ProcessImageFileUploadView.name,
args=[self.domain, self.app_id])),
MultimediaAudioUploadController("hqaudio", reverse(ProcessAudioFileUploadView.name,
args=[self.domain, self.app_id])),
MultimediaVideoUploadController("hqvideo", reverse(ProcessVideoFileUploadView.name,
args=[self.domain, self.app_id])),
]
class BulkUploadMultimediaView(BaseMultimediaUploaderView):
name = "hqmedia_bulk_upload"
template_name = "hqmedia/bulk_upload.html"
@property
def upload_controllers(self):
return [MultimediaBulkUploadController("hqmedia_bulk", reverse(ProcessBulkUploadView.name,
args=[self.domain, self.app_id]))]
class BadMediaFileException(Exception):
pass
class BaseProcessUploadedView(BaseMultimediaView):
@property
def username(self):
return self.request.couch_user.username if self.request.couch_user else None
@property
def share_media(self):
return self.request.POST.get('shared') == 't'
@property
def license_used(self):
return self.request.POST.get('license', '')
@property
def author(self):
return self.request.POST.get('author', '')
@property
def attribution_notes(self):
return self.request.POST.get('attribution-notes', '')
@property
@memoized
def uploaded_file(self):
return self.request.FILES.get('Filedata')
@property
@memoized
def mime_type(self):
try:
data = self.uploaded_file.file.read()
return CommCareMultimedia.get_mime_type(data, filename=self.uploaded_file.name)
except Exception as e:
raise BadMediaFileException("There was an error fetching the MIME type of your file. Error: %s" % e)
def get(self, request, *args, **kwargs):
return HttpResponseBadRequest("You may only post to this URL.")
def post(self, request, *args, **kwargs):
self.errors = []
response = {}
try:
self.validate_file()
response.update(self.process_upload())
except BadMediaFileException as e:
self.errors.append(e.message)
response.update({
'errors': self.errors,
})
return HttpResponse(json.dumps(response))
def validate_file(self):
raise NotImplementedError("You must validate your uploaded file!")
def process_upload(self):
raise NotImplementedError("You definitely need to implement this guy.")
class ProcessBulkUploadView(BaseProcessUploadedView):
name = "hqmedia_uploader_bulk"
@property
@memoized
def uploaded_zip(self):
try:
self.uploaded_file.file.seek(0)
return zipfile.ZipFile(self.uploaded_file)
except Exception as e:
raise BadMediaFileException("There was an issue processing the zip file you provided. Error: %s" % e)
def validate_file(self):
if self.mime_type not in self.valid_mime_types():
raise BadMediaFileException("Your zip file doesn't have a valid mimetype.")
if not self.uploaded_zip:
raise BadMediaFileException("There is no ZIP file.")
if self.uploaded_zip.testzip():
raise BadMediaFileException("The ZIP file provided was bad.")
def process_upload(self):
# save the file w/ soil
self.uploaded_file.file.seek(0)
saved_file = expose_download(self.uploaded_file.file.read(), expiry=BulkMultimediaStatusCache.cache_expiry)
processing_id = saved_file.download_id
status = BulkMultimediaStatusCache(processing_id)
status.save()
process_bulk_upload_zip.delay(processing_id, self.domain, self.app_id,
username=self.username,
share_media=self.share_media,
license_name=self.license_used,
author=self.author,
attribution_notes=self.attribution_notes)
return status.get_response()
@classmethod
def valid_mime_types(cls):
return [
'application/zip',
'application/x-zip',
'application/octet-stream',
'application/x-zip-compressed',
]
class BaseProcessFileUploadView(BaseProcessUploadedView):
media_class = None
@property
def form_path(self):
return self.request.POST.get('path', '')
def validate_file(self):
def file_ext(filename):
_, extension = os.path.splitext(filename)
return extension
def possible_extensions(filename):
possible_type = guess_type(filename)[0]
if not possible_type:
return []
return guess_all_extensions(guess_type(filename)[0])
if not self.mime_type:
raise BadMediaFileException("Did not process a mime type!")
base_type = self.mime_type.split('/')[0]
if base_type not in self.valid_base_types():
raise BadMediaFileException("Not a valid %s file." % self.media_class.get_nice_name().lower())
ext = file_ext(self.uploaded_file.name)
if ext.lower() not in possible_extensions(self.form_path):
raise BadMediaFileException("File %s has an incorrect file type (%s)." % (self.uploaded_file.name, ext))
def process_upload(self):
self.uploaded_file.file.seek(0)
data = self.uploaded_file.file.read()
multimedia = self.media_class.get_by_data(data)
multimedia.attach_data(data,
original_filename=self.uploaded_file.name,
username=self.username)
multimedia.add_domain(self.domain, owner=True)
if self.share_media:
multimedia.update_or_add_license(self.domain,
type=self.license_used,
author=self.author,
attribution_notes=self.attribution_notes)
self.app.create_mapping(multimedia, self.form_path)
return {
'ref': multimedia.get_media_info(self.form_path),
}
@classmethod
def valid_base_types(cls):
raise NotImplementedError("You need to specify a list of valid base mime types!")
class ProcessImageFileUploadView(BaseProcessFileUploadView):
media_class = CommCareImage
name = "hqmedia_uploader_image"
@classmethod
def valid_base_types(cls):
return ['image']
class ProcessAudioFileUploadView(BaseProcessFileUploadView):
media_class = CommCareAudio
name = "hqmedia_uploader_audio"
@classmethod
def valid_base_types(cls):
return ['audio']
class ProcessVideoFileUploadView(BaseProcessFileUploadView):
media_class = CommCareVideo
name = "hqmedia_uploader_video"
@classmethod
def valid_base_types(cls):
return ['video']
class CheckOnProcessingFile(BaseMultimediaView):
name = "hqmedia_check_processing"
def get(self, request, *args, **kwargs):
return HttpResponse("workin on it")
def _iter_media_files(media_objects):
"""
take as input the output of get_media_objects
and return an iterator of (path, data) tuples for the media files
as they should show up in the .zip
as well as a list of error messages
as a side effect of implementation,
errors will not include all error messages until the iterator is exhausted
"""
errors = []
def _media_files():
for path, media in media_objects:
try:
data, _ = media.get_display_file()
folder = path.replace(MULTIMEDIA_PREFIX, "")
if not isinstance(data, unicode):
yield os.path.join(folder), data
except NameError as e:
errors.append("%(path)s produced an ERROR: %(error)s" % {
'path': path,
'error': e,
})
return _media_files(), errors
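# Hedged sketch of how the (files, errors) pair is meant to be consumed (names
# below are assumptions): the error list only fills in as the generator is
# exhausted, so it should be inspected after iteration.
#
#     files, errors = _iter_media_files(app.get_media_objects())
#     for path, data in files:
#         zip_file.writestr(path, data)
#     if errors:
#         log_errors(errors)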
class DownloadMultimediaZip(DownloadZip, ApplicationViewMixin):
"""
This is where the Multimedia for an application gets generated.
Expects domain and app_id to be in its args
"""
name = "download_multimedia_zip"
compress_zip = False
zip_name = 'commcare.zip'
def iter_files(self):
self.app.remove_unused_mappings()
return _iter_media_files(self.app.get_media_objects())
def check_before_zipping(self):
if not self.app.multimedia_map:
return HttpResponse("You have no multimedia to download.")
def log_errors(self, errors):
logging.error(
"Error downloading multimedia ZIP "
"for domain %s and application %s." % (
self.domain, self.app_id)
)
return HttpResponseServerError(
"Errors were encountered while "
"retrieving media for this application.<br /> %s" % (
"<br />".join(errors))
)
@method_decorator(safe_download)
def dispatch(self, request, *args, **kwargs):
return super(DownloadMultimediaZip, self).dispatch(request, *args, **kwargs)
class MultimediaUploadStatusView(View):
name = "hqmedia_upload_status"
@property
@memoized
def processing_id(self):
return self.request.POST.get('processing_id')
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(MultimediaUploadStatusView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponseBadRequest("Please post to this.")
def post(self, request, *args, **kwargs):
if not self.processing_id:
return HttpResponseBadRequest("A processing_id is required.")
status = BulkMultimediaStatusCache.get(self.processing_id)
if status is None:
# No status could be retrieved from the cache
fake_status = BulkMultimediaStatusCache(self.processing_id)
fake_status.complete = True
fake_status.errors.append(_('There was an issue retrieving the status from the cache. '
'We are looking into it. Please try uploading again.'))
logging.error("[Multimedia Bulk Upload] Process ID #%s encountered an issue while retrieving "
"a status from the cache." % self.processing_id)
response = fake_status.get_response()
else:
response = status.get_response()
return HttpResponse(json.dumps(response))
class ViewMultimediaFile(View):
name = "hqmedia_download"
@property
@memoized
def media_class(self):
media_type = self.kwargs.get('media_type')
try:
return CommCareMultimedia.get_doc_class(media_type)
except KeyError:
raise Http404("Could not find media of that type.")
@property
@memoized
def doc_id(self):
return self.kwargs.get('doc_id')
@property
@memoized
def multimedia(self):
try:
return self.media_class.get(self.doc_id)
except ResourceNotFound:
raise Http404("Media not found.")
@property
@memoized
def thumb(self):
thumb = self.request.GET.get('thumb')
try:
return int(thumb), int(thumb)
except Exception:
return None
def get(self, request, *args, **kwargs):
obj = CachedObject(str(self.doc_id)
+ ':' + self.kwargs.get('media_type')
+ ':' + str(self.thumb))
if not obj.is_cached():
data, content_type = self.multimedia.get_display_file()
if self.thumb:
data = CommCareImage.get_thumbnail_data(data, self.thumb)
buffer = StringIO(data)
metadata = {'content_type': content_type}
obj.cache_put(buffer, metadata, timeout=0)
else:
metadata, buffer = obj.get()
data = buffer.getvalue()
content_type = metadata['content_type']
return HttpResponse(data, mimetype=content_type)
|
the-stack_0_9001 | import sys
import os
SUMMARYSTUFF = """
## Contents
{:.no_toc}
*
{: toc}
"""
filetoread = sys.argv[1]
fdtoread = open(filetoread)
fileprefix = ".".join(filetoread.split('.')[:-1])
filetowrite = fileprefix+".newmd"
buffer = ""
for line in fdtoread:
if line[0:2] == '# ':  # assume title
title = line.strip()[2:]
else:
buffer = buffer + line
fdtoread.close()
preamble = "title: {}\nnotebook: {}\n".format(title, fileprefix+".ipynb" )
preamble = "---\n"+preamble+"---\n"
fdtowrite=open(filetowrite, "w")
summarystuff = SUMMARYSTUFF
fdtowrite.write(preamble+summarystuff+buffer)
fdtowrite.close()
os.rename(filetowrite, filetoread)
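# Illustrative result (the input file name is an assumption): for "lab1.md"
# whose first heading line is "# My Lab", the rewritten file begins with
#
#     ---
#     title: My Lab
#     notebook: lab1.ipynb
#     ---
#
# followed by the table-of-contents stub in SUMMARYSTUFF and the remaining body.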
|
the-stack_0_9002 | """Worker pool executor base classes."""
import numbers
import os
import threading
import time
import datetime
import pprint
import traceback
from schema import Or, And
from testplan.common.config import ConfigOption, validate_func
from testplan.common import entity
from testplan.common.utils.thread import interruptible_join
from testplan.common.utils.strings import Color
from testplan.common.utils.timing import wait_until_predicate
from testplan.runners.base import Executor, ExecutorConfig
from .communication import Message
from .connection import QueueClient, QueueServer
from .tasks import Task, TaskResult
from testplan.common.entity import ResourceStatus
class WorkerConfig(entity.ResourceConfig):
"""
Configuration object for
:py:class:`~testplan.runners.pools.base.Worker` resource entity.
"""
@classmethod
def get_options(cls):
"""
Schema for options validation and assignment of default values.
"""
return {
"index": Or(int, str),
ConfigOption("transport", default=QueueClient): object,
ConfigOption("restart_count", default=3): int,
}
class Worker(entity.Resource):
"""
Worker resource that pulls tasks from the transport provided, executes them
and sends back task results.
:param index: Worker index id.
:type index: ``int`` or ``str``
:param transport: Transport class for pool/worker communication.
:type transport: :py:class:`~testplan.runners.pools.connection.Client`
:param restart_count: How many times the worker had restarted.
:type restart_count: ``int``
Also inherits all :py:class:`~testplan.common.entity.base.Resource`
options.
"""
CONFIG = WorkerConfig
def __init__(self, **options):
super(Worker, self).__init__(**options)
self._metadata = None
self._transport = self.cfg.transport()
self._handler = None
self.last_heartbeat = None
self.assigned = set()
self.requesting = 0
self.restart_count = self.cfg.restart_count
@property
def handler(self):
return self._handler
@property
def transport(self):
"""Pool/Worker communication transport."""
return self._transport
@property
def metadata(self):
"""Worker metadata information."""
if not self._metadata:
self._metadata = {
"thread": threading.current_thread(),
"index": self.cfg.index,
}
return self._metadata
@property
def outfile(self):
"""Stdout file."""
return os.path.join(
self.parent.runpath, "{}_startup".format(self.cfg.index)
)
def uid(self):
"""Worker unique index."""
return self.cfg.index
def starting(self):
"""Starts the daemonic worker loop."""
self.make_runpath_dirs()
self._handler = threading.Thread(
target=self._loop, args=(self._transport,)
)
self._handler.daemon = True
self._handler.start()
self.status.change(self.STATUS.STARTED)
def stopping(self):
"""Stops the worker."""
if self._handler:
interruptible_join(self._handler)
self._handler = None
self.status.change(self.STATUS.STOPPED)
def aborting(self):
"""Aborting logic, will not wait running tasks."""
self._transport.disconnect()
@property
def is_alive(self):
"""Poll the loop handler thread to check it is running as expected."""
return self._handler.is_alive()
def _loop(self, transport):
message = Message(**self.metadata)
while self.active and self.status.tag not in (
self.status.STOPPING,
self.status.STOPPED,
):
received = transport.send_and_receive(
message.make(message.TaskPullRequest, data=1)
)
if received is None or received.cmd == Message.Stop:
break
elif received.cmd == Message.TaskSending:
results = []
for item in received.data:
results.append(self.execute(item))
transport.send_and_receive(
message.make(message.TaskResults, data=results),
expect=message.Ack,
)
elif received.cmd == Message.Ack:
pass
time.sleep(self.cfg.active_loop_sleep)
def execute(self, task):
"""
Executes a task and return the associated task result.
:param task: Task that worker pulled for execution.
:type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
:return: Task result.
:rtype: :py:class:`~testplan.runners.pools.tasks.base.TaskResult`
"""
try:
target = task.materialize()
if isinstance(target, entity.Runnable):
if not target.parent:
target.parent = self
if not target.cfg.parent:
target.cfg.parent = self.cfg
result = target.run()
elif callable(target):
result = target()
else:
result = target.run()
except BaseException:
task_result = TaskResult(
task=task,
result=None,
status=False,
reason=traceback.format_exc(),
)
else:
task_result = TaskResult(task=task, result=result, status=True)
return task_result
def respond(self, msg):
"""
Method that the pool uses to respond with a message to the worker.
:param msg: Response message.
:type msg: :py:class:`~testplan.runners.pools.communication.Message`
"""
self._transport.respond(msg)
def __repr__(self):
return "{}[{}]".format(self.__class__.__name__, self.cfg.index)
def default_check_reschedule(pool, task_result):
"""
Determines if a task should be rescheduled based on the task result info.
"""
return False
class PoolConfig(ExecutorConfig):
"""
Configuration object for
:py:class:`~testplan.runners.pools.base.Pool` executor resource entity.
"""
@classmethod
def get_options(cls):
"""
Schema for options validation and assignment of default values.
"""
return {
"name": str,
ConfigOption("size", default=4): And(int, lambda x: x > 0),
ConfigOption("worker_type", default=Worker): object,
ConfigOption("worker_heartbeat", default=None): Or(
int, float, None
),
ConfigOption("heartbeats_miss_limit", default=3): int,
ConfigOption("task_retries_limit", default=3): int,
ConfigOption("max_active_loop_sleep", default=5): numbers.Number,
ConfigOption("restart_count", default=3): int,
}
class Pool(Executor):
"""
Pool task executor object that initializes workers and dispatches tasks.
:param name: Pool name.
:type name: ``str``
:param size: Pool workers size. Default: 4
:type size: ``int``
:param worker_type: Type of worker to be initialized.
:type worker_type: :py:class:`~testplan.runners.pools.base.Worker`
:param worker_heartbeat: Worker heartbeat period.
:type worker_heartbeat: ``int`` or ``float`` or ``NoneType``
:param heartbeats_miss_limit: Maximum times a heartbeat is missed.
:type heartbeats_miss_limit: ``int``
:param task_retries_limit: Maximum times a task can be re-assigned to pool.
:type task_retries_limit: ``int``
:param max_active_loop_sleep: Maximum value for delay logic in active sleep.
:type max_active_loop_sleep: ``int`` or ``float``
:param restart_count: How many times the pool had restarted.
:type restart_count: ``int``
Also inherits all :py:class:`~testplan.runners.base.Executor` options.
"""
CONFIG = PoolConfig
CONN_MANAGER = QueueServer
def __init__(
self,
name,
size=4,
worker_type=Worker,
worker_heartbeat=None,
heartbeats_miss_limit=3,
task_retries_limit=3,
max_active_loop_sleep=5,
restart_count=3,
**options
):
options.update(self.filter_locals(locals()))
super(Pool, self).__init__(**options)
self.unassigned = [] # unassigned tasks
self.task_assign_cnt = {} # uid: times_assigned
self.should_reschedule = default_check_reschedule
self._workers = entity.Environment(parent=self)
self._workers_last_result = {}
self._conn = self.CONN_MANAGER()
self._conn.parent = self
self._pool_lock = threading.Lock()
self._metadata = None # Set when Pool is started.
self._exit_loop = False
self._start_monitor_thread = True
# Methods for handling different Message types. These are expected to
# take the worker, request and response objects as the only required
# positional args.
self._request_handlers = {
Message.ConfigRequest: self._handle_cfg_request,
Message.TaskPullRequest: self._handle_taskpull_request,
Message.TaskResults: self._handle_taskresults,
Message.Heartbeat: self._handle_heartbeat,
Message.SetupFailed: self._handle_setupfailed,
}
def uid(self):
"""Pool name."""
return self.cfg.name
def add(self, task, uid):
"""
Add a task for execution.
:param task: Task to be scheduled to workers.
:type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
:param uid: Task uid.
:type uid: ``str``
"""
if not isinstance(task, Task):
raise ValueError(
"Task was expected, got {} instead.".format(type(task))
)
super(Pool, self).add(task, uid)
self.unassigned.append(uid)
def set_reschedule_check(self, check_reschedule):
"""
Sets callable with custom rules to determine if a task should be
rescheduled. It must accept the pool object and the task result,
and based on these it returns if the task should be rescheduled
(i.e due to a known rare system error).
:param check_reschedule: Custom callable for task reschedule.
:type check_reschedule: ``callable`` that takes
``pool``, ``task_result`` arguments.
:return: True if Task should be rescheduled else False.
:rtype: ``bool``
"""
validate_func("pool", "task_result")(check_reschedule)
self.should_reschedule = check_reschedule
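# A hedged sketch of a custom reschedule rule (the function below is an
# assumption, not part of Testplan): reschedule only failed tasks whose failure
# reason mentions a known transient error. The argument names must match the
# validate_func("pool", "task_result") check above.
#
#     def reschedule_on_transient_error(pool, task_result):
#         return (not task_result.status
#                 and "Connection reset" in (task_result.reason or ""))
#
#     pool.set_reschedule_check(reschedule_on_transient_error)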
def _loop(self):
"""
Main executor work loop - runs in a separate thread when the Pool is
started.
"""
if self._start_monitor_thread:
self.logger.debug("Starting worker monitor thread.")
self._worker_monitor = threading.Thread(
target=self._workers_monitoring
)
self._worker_monitor.daemon = True
self._worker_monitor.start()
while self.active and not self._exit_loop:
msg = self._conn.accept()
if msg:
try:
self.logger.debug("Received message from worker: %s.", msg)
self.handle_request(msg)
except Exception:
self.logger.error(traceback.format_exc())
time.sleep(self.cfg.active_loop_sleep)
def handle_request(self, request):
"""
Handles a worker request, e.g. TaskPullRequest, TaskResults, Heartbeat.
:param request: Worker request.
:type request: :py:class:`~testplan.runners.pools.communication.Message`
"""
sender_index = request.sender_metadata["index"]
worker = self._workers[sender_index]
if not worker.active:
self.logger.warning(
"Message {} - {} from inactive worker {}".format(
request.cmd, request.data, worker
)
)
self.logger.debug(
"Pool {} request received by {} - {}, {}".format(
self.cfg.name, worker, request.cmd, request.data
)
)
response = Message(**self._metadata)
if not self.active or self.status.tag == self.STATUS.STOPPING:
worker.respond(response.make(Message.Stop))
elif request.cmd in self._request_handlers:
self._request_handlers[request.cmd](worker, request, response)
else:
self.logger.error(
"Unknown request: {} {} {} {}".format(
request, dir(request), request.cmd, request.data
)
)
worker.respond(response.make(Message.Ack))
def _handle_cfg_request(self, worker, _, response):
"""Handle a ConfigRequest from a worker."""
options = []
cfg = self.cfg
while cfg:
options.append(cfg.denormalize())
cfg = cfg.parent
worker.respond(response.make(Message.ConfigSending, data=options))
def _handle_taskpull_request(self, worker, request, response):
"""Handle a TaskPullRequest from a worker."""
tasks = []
if self.status.tag == self.status.STARTED:
for _ in range(request.data):
try:
uid = self.unassigned.pop(0)
except IndexError:
break
if uid not in self.task_assign_cnt:
self.task_assign_cnt[uid] = 0
if self.task_assign_cnt[uid] >= self.cfg.task_retries_limit:
self._discard_task(
uid,
"{} already reached max retries: {}".format(
self._input[uid], self.cfg.task_retries_limit
),
)
continue
else:
self.task_assign_cnt[uid] += 1
task = self._input[uid]
self.logger.test_info(
"Scheduling {} to {}".format(task, worker)
)
worker.assigned.add(uid)
tasks.append(task)
if tasks:
worker.respond(response.make(Message.TaskSending, data=tasks))
worker.requesting = request.data - len(tasks)
return
worker.requesting = request.data
worker.respond(response.make(Message.Ack))
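# Illustrative note (numbers taken from the defaults above): with
# task_retries_limit=3 a uid that has already been handed out three times is
# routed through _discard_task() rather than being scheduled to a worker again.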
def _handle_taskresults(self, worker, request, response):
"""Handle a TaskResults message from a worker."""
worker.respond(response.make(Message.Ack))
for task_result in request.data:
uid = task_result.task.uid()
worker.assigned.remove(uid)
if worker not in self._workers_last_result:
self._workers_last_result[worker] = time.time()
self.logger.test_info(
"De-assign {} from {}".format(task_result.task, worker)
)
if self.should_reschedule(self, task_result):
if self.task_assign_cnt[uid] >= self.cfg.task_retries_limit:
self.logger.test_info(
"Will not reschedule %(input)s again as it "
"reached max retries %(retries)d",
{
"input": self._input[uid],
"retries": self.cfg.task_retries_limit,
},
)
else:
self.logger.test_info(
"Rescheduling {} due to "
"should_reschedule() cfg option of {}".format(
task_result.task, self
)
)
self.unassigned.append(uid)
continue
self._print_test_result(task_result)
self._results[uid] = task_result
self.ongoing.remove(uid)
def _handle_heartbeat(self, worker, request, response):
"""Handle a Heartbeat message received from a worker."""
worker.last_heartbeat = time.time()
self.logger.debug(
"Received heartbeat from {} at {} after {}s.".format(
worker, request.data, time.time() - request.data
)
)
worker.respond(response.make(Message.Ack, data=worker.last_heartbeat))
def _handle_setupfailed(self, worker, request, response):
"""Handle a SetupFailed message received from a worker."""
self.logger.test_info(
"Worker {} setup failed:{}{}".format(
worker, os.linesep, request.data
)
)
worker.respond(response.make(Message.Ack))
self._deco_worker(worker, "Aborting {}, setup failed.")
def _deco_worker(self, worker, message):
"""
Decommission a worker by move all assigned task back to pool
"""
self.logger.critical(message.format(worker))
if os.path.exists(worker.outfile):
self.logger.critical("\tlogfile: {}".format(worker.outfile))
while worker.assigned:
uid = worker.assigned.pop()
self.logger.test_info(
"Re-assigning {} from {} to {}.".format(
self._input[uid], worker, self
)
)
self.unassigned.append(uid)
def _workers_monitoring(self):
"""
Worker fault tolerance logic. Check is based on:
1) handler status
2) heartbeat if available
"""
previous_status = {"active": [], "inactive": [], "initializing": []}
if self.cfg.worker_heartbeat:
loop_interval = self.cfg.worker_heartbeat
else:
loop_interval = 5 # seconds
break_outer_loop = False
while self.active:
hosts_status = {"active": [], "inactive": [], "initializing": []}
for worker in self._workers:
status, reason = self._query_worker_status(worker)
if status == "inactive":
with self._pool_lock:
if self.active and self.status.tag not in (
self.status.STOPPING,
self.status.STOPPED,
):
if self._handle_inactive(worker, reason):
status = "active"
else:
# if pool is aborting/stopping, exit monitor
break_outer_loop = True
break
hosts_status[status].append(worker)
if break_outer_loop:
break
if hosts_status != previous_status:
self.logger.info(
"%s Hosts status update", datetime.datetime.now()
)
self.logger.info(pprint.pformat(hosts_status))
previous_status = hosts_status
if (
not hosts_status["active"]
and not hosts_status["initializing"]
and hosts_status["inactive"]
):
self.logger.critical(
"All workers of {} are inactive.".format(self)
)
self.abort()
break
try:
# For early finish of worker monitoring thread.
wait_until_predicate(
lambda: not self.is_alive,
timeout=loop_interval,
interval=0.05,
)
except RuntimeError:
break
def _query_worker_status(self, worker):
"""
Query the current status of a worker. If heartbeat monitoring is
enabled, check that the last heartbeat time is within the allowed threshold.
:param worker: Pool worker to query
:return: worker status string - one of 'initializing', 'inactive' or
'active', and an optional reason string
"""
if not worker.active or worker.status.tag in (
worker.status.STOPPING,
worker.status.STOPPED,
):
return "inactive", "Worker in stop/abort status"
if worker.status.tag in (worker.status.NONE, worker.status.STARTING):
return "initializing", None
# else: worker must be in state STARTED
if worker.status.tag != worker.status.STARTED:
raise RuntimeError(
"Worker in unexpected state {}".format(worker.status.tag)
)
if not worker.is_alive: # handler based monitoring
return (
"inactive",
"Deco {}, handler no longer alive".format(worker),
)
# If no heartbeat is configured, we treat the worker as "active"
# since it is in state STARTED and its handler is alive.
if not self.cfg.worker_heartbeat:
return "active", None
# else: do heartbeat based monitoring
lag = time.time() - worker.last_heartbeat
if lag > self.cfg.worker_heartbeat * self.cfg.heartbeats_miss_limit:
return (
"inactive",
"Has not been receiving heartbeat from {} for {} "
"sec".format(worker, lag),
)
return "active", None
def _handle_inactive(self, worker, reason):
"""
Handle an inactive worker.
:param worker: worker object
:param reason: why worker is considered inactive
:return: True if worker restarted, else False
"""
if worker.status.tag != worker.status.STARTED:
return False
self._deco_worker(worker, reason)
if worker.restart_count:
worker.restart_count -= 1
try:
worker.restart()
return True
except Exception as exc:
self.logger.critical(
"Worker {} failed to restart: {}".format(worker, exc)
)
else:
worker.abort()
return False
def _discard_task(self, uid, reason):
self.logger.critical(
"Discard task {} of {} - {}.".format(
self._input[uid], self, reason
)
)
self._results[uid] = TaskResult(
task=self._input[uid],
status=False,
reason="Task discarded by {} - {}.".format(self, reason),
)
self.ongoing.remove(uid)
def _discard_pending_tasks(self):
self.logger.critical("Discard pending tasks of {}.".format(self))
while self.ongoing:
uid = self.ongoing[0]
self._results[uid] = TaskResult(
task=self._input[uid],
status=False,
reason="Task [{}] discarding due to {} abort.".format(
self._input[uid]._target, self
),
)
self.ongoing.pop(0)
def _print_test_result(self, task_result):
if (not isinstance(task_result.result, entity.RunnableResult)) or (
not hasattr(task_result.result, "report")
):
return
# Currently prints report top level result and not details.
name = task_result.result.report.name
if task_result.result.report.passed is True:
self.logger.test_info("{} -> {}".format(name, Color.green("Pass")))
else:
self.logger.test_info("{} -> {}".format(name, Color.red("Fail")))
def _add_workers(self):
"""Initialise worker instances."""
for idx in (str(i) for i in range(self.cfg.size)):
worker = self.cfg.worker_type(
index=idx, restart_count=self.cfg.restart_count
)
worker.parent = self
worker.cfg.parent = self.cfg
self._workers.add(worker, uid=idx)
self.logger.debug(
"Added worker %(index)s (outfile = %(outfile)s)",
{"index": idx, "outfile": worker.outfile},
)
def _start_workers(self):
"""Start all workers of the pool"""
for worker in self._workers:
self._conn.register(worker)
self._workers.start()
def starting(self):
"""Starting the pool and workers."""
# TODO do we need a lock here?
self.make_runpath_dirs()
if self.runpath is None:
raise RuntimeError("runpath was not set correctly")
self._metadata = {"runpath": self.runpath}
self._conn.start()
for worker in self._workers:
# reset worker (if any) status
worker.status.change(ResourceStatus.STARTING)
self._exit_loop = False
super(Pool, self).starting() # start the loop & monitor
if not self._workers:
self._add_workers()
self._start_workers()
if self._workers.start_exceptions:
for msg in self._workers.start_exceptions.values():
self.logger.error(msg)
self.abort()
raise RuntimeError(
"All workers of {} failed to start.".format(self)
)
self.status.change(self.status.STARTED)
self.logger.debug("%s started.", self.__class__.__name__)
def workers_requests(self):
"""Count how many tasks workers are requesting."""
return sum(worker.requesting for worker in self._workers)
def _stop_workers(self):
self._workers.stop()
def stopping(self):
"""Stop connections and workers."""
with self._pool_lock:
self._stop_workers()
for worker in self._workers:
worker.transport.disconnect()
self._exit_loop = True
super(Pool, self).stopping() # stop the loop and the monitor
self._conn.stop()
self.status.change(self.status.STOPPED)
self.logger.debug("Stopped %s", self.__class__.__name__)
def abort_dependencies(self):
"""Empty generator to override parent implementation."""
return
yield
def aborting(self):
"""Aborting logic."""
self.logger.debug("Aborting pool {}".format(self))
for worker in self._workers:
worker.abort()
super(Pool, self).stopping() # stop the loop and the monitor
self._conn.abort()
self._discard_pending_tasks()
self.logger.debug("Aborted pool {}".format(self))
|
the-stack_0_9004 | import os
from typing import Optional
import time
from fastapi import FastAPI
from transformers import pipeline
from pydantic import BaseModel, PositiveInt, constr
import ray
from ray import serve
app = FastAPI()
class Request(BaseModel):
text: constr(min_length=1, strip_whitespace=True)
min_length: Optional[PositiveInt]
max_length: Optional[PositiveInt]
@serve.deployment
@serve.ingress(app)
class Summarizer:
def __init__(self):
self.summarize = pipeline("summarization", model="t5-small")
@app.post("/")
def get_summary(self, payload: Request):
summary_list = self.summarize(
payload.text,
min_length=payload.min_length or 0,
max_length=payload.max_length or 256,
)
summary = summary_list[0]["summary_text"]
return summary
ray.init(_node_ip_address="0.0.0.0") # needed for gcloud container compatibility
serve.start(
http_options={"host": "0.0.0.0", "port": int(os.environ.get("PORT", "8000"))}
)
Summarizer.deploy()
# Block the container process from exit
while True:
time.sleep(5)
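# Example client request (a sketch; assumes the service listens on the default
# PORT=8000 and that Ray Serve exposes the deployment under /Summarizer):
#
#     import requests
#     resp = requests.post(
#         "http://localhost:8000/Summarizer/",
#         json={"text": "Some long article text ...", "max_length": 64},
#     )
#     print(resp.json())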
|
the-stack_0_9005 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ChildProduct(msrest.serialization.Model):
"""The product documentation.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar const_property: Required. Constant string. Default value: "constant".
:vartype const_property: str
:param count: Count.
:type count: int
"""
_validation = {
'const_property': {'required': True, 'constant': True},
}
_attribute_map = {
'const_property': {'key': 'constProperty', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
}
const_property = "constant"
def __init__(
self,
*,
count: Optional[int] = None,
**kwargs
):
super(ChildProduct, self).__init__(**kwargs)
self.count = count
class ConstantProduct(msrest.serialization.Model):
"""The product documentation.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar const_property: Required. Constant string. Default value: "constant".
:vartype const_property: str
:ivar const_property2: Required. Constant string2. Default value: "constant2".
:vartype const_property2: str
"""
_validation = {
'const_property': {'required': True, 'constant': True},
'const_property2': {'required': True, 'constant': True},
}
_attribute_map = {
'const_property': {'key': 'constProperty', 'type': 'str'},
'const_property2': {'key': 'constProperty2', 'type': 'str'},
}
const_property = "constant"
const_property2 = "constant2"
def __init__(
self,
**kwargs
):
super(ConstantProduct, self).__init__(**kwargs)
class Error(msrest.serialization.Model):
"""Error.
:param code:
:type code: int
:param message:
:type message: str
:param fields:
:type fields: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'},
'fields': {'key': 'fields', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[int] = None,
message: Optional[str] = None,
fields: Optional[str] = None,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.code = code
self.message = message
self.fields = fields
class Product(msrest.serialization.Model):
"""The product documentation.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param display_names: Non required array of unique items from 0 to 6 elements.
:type display_names: list[str]
:param capacity: Non required int between 0 and 100 exclusive.
:type capacity: int
:param image: Image URL representing the product.
:type image: str
:param child: Required. The product documentation.
:type child: ~validation.models.ChildProduct
:param const_child: Required. The product documentation.
:type const_child: ~validation.models.ConstantProduct
:ivar const_int: Required. Constant int. Default value: "0".
:vartype const_int: int
:ivar const_string: Required. Constant string. Default value: "constant".
:vartype const_string: str
:ivar const_string_as_enum: Constant string as Enum. Default value: "constant_string_as_enum".
:vartype const_string_as_enum: str
"""
_validation = {
'display_names': {'max_items': 6, 'min_items': 0, 'unique': True},
'capacity': {'maximum_ex': 100, 'minimum_ex': 0},
'image': {'pattern': r'http://\w+'},
'child': {'required': True},
'const_child': {'required': True},
'const_int': {'required': True, 'constant': True},
'const_string': {'required': True, 'constant': True},
'const_string_as_enum': {'constant': True},
}
_attribute_map = {
'display_names': {'key': 'display_names', 'type': '[str]'},
'capacity': {'key': 'capacity', 'type': 'int'},
'image': {'key': 'image', 'type': 'str'},
'child': {'key': 'child', 'type': 'ChildProduct'},
'const_child': {'key': 'constChild', 'type': 'ConstantProduct'},
'const_int': {'key': 'constInt', 'type': 'int'},
'const_string': {'key': 'constString', 'type': 'str'},
'const_string_as_enum': {'key': 'constStringAsEnum', 'type': 'str'},
}
const_int = 0
const_string = "constant"
const_string_as_enum = "constant_string_as_enum"
def __init__(
self,
*,
child: "ChildProduct",
const_child: "ConstantProduct",
display_names: Optional[List[str]] = None,
capacity: Optional[int] = None,
image: Optional[str] = None,
**kwargs
):
super(Product, self).__init__(**kwargs)
self.display_names = display_names
self.capacity = capacity
self.image = image
self.child = child
self.const_child = const_child
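# Illustrative construction that satisfies the validation rules above
# (a sketch; all values are made up):
#
#     product = Product(
#         child=ChildProduct(count=1),
#         const_child=ConstantProduct(),
#         display_names=["a", "b"],
#         capacity=50,
#         image="http://contoso/image",
#     )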
|
the-stack_0_9011 | from fairseq.models.roberta import RobertaModel
from fairseq.data.data_utils import collate_tokens
import nltk
import random
# DOWNLOAD: wget https://storage.googleapis.com/poloma-models/airbnb_model.tar.gz
# EXTRACT: tar -xvzf airbnb_model.tar.gz
# MAKE SURE model directory points to where you downloaded the model
MODEL_DIR = './airbnb_train/'
# DEPENDENCIES:
# pip install fairseq
# pip install nltk
# import nltk
# nltk.download('punkt')
## USAGE:
# from run_inf import Roberta
# model = Roberta(use_gpu=False, model_dir='./airbnb_train/')
# label = model.classify(review)
CHECKPOINT_FILE = 'checkpoint_best.pt'
CLASSES = ['NOT_GREAT', 'GREAT']
# how many sentences to run through at the same time. Tweak if running out of memory
CHUNK_SIZE=4
# set bias based on excel spreadsheet
BIAS = 10
class Roberta (object):
def __init__(self,model_dir=MODEL_DIR,ckpt_file=CHECKPOINT_FILE,
use_gpu=False):
self.model = RobertaModel.from_pretrained(model_dir, checkpoint_file=ckpt_file)
self.model.eval() # disable dropout
if use_gpu: self.model.cuda()
def classify(self, review, logits=False):
reviews = self.batch_review(review)
roberta = self.model
tokens = map(lambda x: x if len(x) < 512 else x[:511], [roberta.encode(r) for r in reviews])
batch = collate_tokens(list(tokens), pad_idx=1)
label = roberta.predict('sentence_classification_head', batch)
if logits:
return label.sum(dim=0).tolist()
else:
logits = label.sum(dim=0).tolist()
return CLASSES[0] if logits[0] > logits[1] + BIAS else CLASSES[1]
def batch_review(self, review):
sents = nltk.sent_tokenize(review)
buffer = []
chunks = []
for sent in sents:
buffer.append(sent)
if (len(buffer)) % CHUNK_SIZE == 0:
chunks.append(" ".join(buffer))
buffer = [buffer[random.randint(0,CHUNK_SIZE-1)]]
chunks.append(" ".join(buffer))
return chunks
|
the-stack_0_9014 | import asyncio
from typing import Any
from app.recorder.recorder import Recorder
from ib_insync import IB
class MarketRecorder(object):
def __init__(self, ib: IB, recorder: Recorder) -> None:
self._ib = ib
self._recorder = recorder
self._ib.pendingTickersEvent += self.on_market_data
self._queue: asyncio.Queue = asyncio.Queue()
self._task: asyncio.Task = asyncio.create_task(self._process())
async def _process(self):
while True:
file_name, *data = await self._queue.get()
await self._recorder.consume(file_name, data)
def on_market_data(self, tickers: Any) -> None:
for ticker in tickers:
depth = min(len(ticker.domBids), len(ticker.domAsks))
if depth == 0:
continue
symbol = ticker.contract.symbol
file_name = f'{symbol}'
data = [file_name, ticker.time.timestamp()]
for i in range(0, depth):
data.append(ticker.domBids[i].price)
data.append(ticker.domBids[i].size)
data.append(ticker.domAsks[i].price)
data.append(ticker.domAsks[i].size)
self._queue.put_nowait(tuple(data))
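# Minimal usage sketch (assumptions: TWS/IB Gateway reachable on
# 127.0.0.1:7497, an application-specific Recorder instance, and a running
# event loop -- the constructor calls asyncio.create_task(), so it must be
# created inside an async context):
#
#     async def main(recorder: Recorder, contract) -> None:
#         ib = IB()
#         await ib.connectAsync("127.0.0.1", 7497, clientId=1)
#         market_recorder = MarketRecorder(ib, recorder)
#         ib.reqMktDepth(contract)  # start depth-of-book updates
#         while True:
#             await asyncio.sleep(1)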
|
the-stack_0_9015 | # -*- coding: utf-8 -*-
from collections import OrderedDict
import pykintone
import numpy as np
import pandas as pd
from karura.core.dataframe_extension import DataFrameExtension, FType
from karura.env import get_kintone_env
class Field():
def __init__(self, code, f_type, label, is_unique):
self.code = code
self.f_type = f_type
self.label = label
self.is_unique = is_unique
@classmethod
def create(cls, f_code, f_dict):
f_type = f_dict["type"]
f_label = f_dict["label"]
is_unique = False
if "unique" in f_dict:
is_unique = f_dict["unique"]
f = Field(f_code, f_type, f_label, is_unique)
if f.get_feature_type() is not None:
return f
else:
return None
def get_feature_type(self):
ftype = None
if self.f_type in ["CREATED_TIME", "CREATOR", "MODIFIER", "UPDATED_TIME", "RECORD_NUMBER", "作成日時", "作成者", "更新者", "更新日時", "レコード番号"]:
pass
elif self.f_type in ["FILE", "LINK", "RICH_TEXT", "STATUS_ASSIGNEE", "SUBTABLE"]:
pass
elif self.is_unique:
ftype = FType.unique
elif self.f_type in ["RADIO_BUTTON", "DROP_DOWN", "CHECK_BOX", "MULTI_SELECT", "CATEGORY", "STATUS", "カテゴリー", "ステータス", "USER_SELECT"]:
# todo: think about multiselect
ftype = FType.categorical
elif self.f_type in ["DATE", "DATETIME"]:
ftype = FType.datetime
elif self.f_type in ["NUMBER", "CALC"]:
ftype = FType.numerical
elif self.f_type in ["MULTI_LINE_TEXT", "SINGLE_LINE_TEXT"]:
# optional inferring
if self.code.endswith("_type") or self.code.endswith("_category"):
ftype = FType.categorical
elif not self.is_unique and (self.code.endswith("_id") or self.code.endswith("_cd")):
ftype = FType.categorical
elif self.code.endswith("_value"):
ftype = FType.numerical
else:
ftype = FType.text
return ftype
def __repr__(self):
repr = "<{}:{}->{} {}>".format(self.__class__, self.f_type, self.get_feature_type(), self.label)
return repr
class Application():
def __init__(self, env=None):
self.env = env if env is not None else get_kintone_env()
self.max_count = 10000
self._kintone_limit = 500
def get_app_id(self, app_name):
kintone = pykintone.login(self.env.domain, self.env.login_id, self.env.password)
result = kintone.administration().select_app_info()
if result.ok:
matched = [a for a in result.infos if a.name == app_name]
if len(matched) > 0:
return matched[0].app_id
else:
return ""
else:
raise Exception("Error occurred when getting the app_id")
def load(self, app_id, query="", fields=(), target=""):
app = pykintone.login(self.env.domain, self.env.login_id, self.env.password).app(app_id)
fields_d = self.get_fields(app_id)
if len(fields) > 0:
d = OrderedDict()
for f in fields:
if f in fields_d:
d[f] = fields_d[f]
fields_d = d
q = query + " " if query else ""
records = []
_fields = list(fields_d.keys())
selected = app.select(query=q + "limit {}".format(self._kintone_limit), fields=_fields)
records = selected.records
if selected.total_count > self._kintone_limit:
repeat = np.floor(min(self.max_count, selected.total_count) / self._kintone_limit)
for i in range(int(repeat)):
selected = app.select(query=q + "limit {} offset {}".format(self._kintone_limit, (i + 1) * self._kintone_limit), fields=_fields)
if len(selected.records) > 0:
records += selected.records
data = []
columns = []
for i, r in enumerate(records):
row = []
if i == 0:
columns = [f for f in _fields if f in r]
for f in columns:
v = r[f]["value"]
row.append(v)
if len(row) > 0:
data.append(row)
fs = [fields_d[c] for c in columns]
df = pd.DataFrame(np.array(data), columns=[f.label for f in fs])
categoricals = [f.label for f in fs if f.get_feature_type() == FType.categorical]
numericals = [f.label for f in fs if f.get_feature_type() == FType.numerical]
datetimes = [f.label for f in fs if f.get_feature_type() == FType.datetime]
texts = [f.label for f in fs if f.get_feature_type() == FType.text]
uniques = [f.label for f in fs if f.get_feature_type() == FType.unique]
dfe = DataFrameExtension(df, categoricals, numericals, datetimes, texts, uniques)
if target:
dfe.target = fields_d[target].label
return dfe
def get_fields(self, app_id):
# todo: if app_id exists on karura app, get field definition from it.
app = pykintone.login(self.env.domain, self.env.login_id, self.env.password).app(app_id)
fields = app.administration().form().get()
if not fields.ok:
raise Exception("Error occurred when getting the form information from kintone.")
fs = fields.raw
d = OrderedDict()
for f_code in fs:
f = Field.create(f_code, fs[f_code])
if f:
d[f_code] = f
return d
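# Illustrative usage (a sketch; assumes kintone credentials resolved by
# get_kintone_env() and that the app name and field code below exist):
#
#     app = Application()
#     app_id = app.get_app_id("sales records")
#     dfe = app.load(app_id, target="deal_value")  # -> DataFrameExtension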
|
the-stack_0_9018 | #!/usr/bin/env python
from __future__ import print_function
from collections import OrderedDict
import re
regexes = {
'hybrid-assembly': ['v_pipeline.txt', r"(\S+)"],
'Nextflow': ['v_nextflow.txt', r"(\S+)"],
'FastQC': ['v_fastqc.txt', r"FastQC v(\S+)"],
'MultiQC': ['v_multiqc.txt', r"multiqc, version (\S+)"],
'QUAST': ['v_quast.txt', r"WARNING: Python locale settings can't be changed\nQUAST v(\S+)"],
'Canu': ['v_canu.txt', r"Canu (\S+)"],
'SPAdes': ['v_spades.txt', r"SPAdes v(\S+)"],
'minimap2': ['v_minimap.txt', r"(\S+)"],
'pilon': ['v_pilon.txt', r"Pilon version (\S+)"]
}
results = OrderedDict()
results['hybrid-assembly'] = '<span style="color:#999999;\">N/A</span>'
results['Nextflow'] = '<span style="color:#999999;\">N/A</span>'
results['FastQC'] = '<span style="color:#999999;\">N/A</span>'
results['MultiQC'] = '<span style="color:#999999;\">N/A</span>'
results['QUAST'] = '<span style="color:#999999;\">N/A</span>'
results['Canu'] = '<span style="color:#999999;\">N/A</span>'
results['SPAdes'] = '<span style="color:#999999;\">N/A</span>'
results['minimap2'] = '<span style="color:#999999;\">N/A</span>'
results['pilon'] = '<span style="color:#999999;\">N/A</span>'
# Search each file using its regex
for k, v in regexes.items():
with open(v[0]) as x:
versions = x.read()
match = re.search(v[1], versions)
if match:
results[k] = "v{}".format(match.group(1))
# Dump to YAML
print ('''
id: 'hybrid-assembly-software-versions'
section_name: 'hybrid-assembly Software Versions'
section_href: 'https://github.com/kevinmenden/hybrid-assembly'
plot_type: 'html'
description: 'are collected at run time from the software output.'
data: |
<dl class="dl-horizontal">
''')
for k,v in results.items():
print(" <dt>{}</dt><dd>{}</dd>".format(k,v))
print (" </dl>")
|
the-stack_0_9019 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import test
from cinder import units
from cinder.volume import configuration as conf
from cinder.volume.drivers.solidfire import SolidFireDriver
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.san_is_local = False
configuration.append_config_values(mox.IgnoreArg())
return configuration
class SolidFireVolumeTestCase(test.TestCase):
def setUp(self):
self.ctxt = context.get_admin_context()
self._mox = mox.Mox()
self.configuration = mox.MockObject(conf.Configuration)
self.configuration.sf_allow_tenant_qos = True
self.configuration.san_is_local = True
self.configuration.sf_emulate_512 = True
self.configuration.sf_account_prefix = 'cinder'
super(SolidFireVolumeTestCase, self).setUp()
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
self.expected_qos_results = {'minIOPS': 1000,
'maxIOPS': 10000,
'burstIOPS': 20000}
def fake_issue_api_request(obj, method, params, version='1.0'):
if method == 'GetClusterCapacity' and version == '1.0':
LOG.info('Called Fake GetClusterCapacity...')
data = {'result':
{'clusterCapacity': {'maxProvisionedSpace': 99999999,
'usedSpace': 999,
'compressionPercent': 100,
'deDuplicationPercent': 100,
'thinProvisioningPercent': 100}}}
return data
elif method == 'GetClusterInfo' and version == '1.0':
LOG.info('Called Fake GetClusterInfo...')
results = {'result': {'clusterInfo':
{'name': 'fake-cluster',
'mvip': '1.1.1.1',
'svip': '1.1.1.1',
'uniqueID': 'unqid',
'repCount': 2,
'attributes': {}}}}
return results
elif method == 'AddAccount' and version == '1.0':
LOG.info('Called Fake AddAccount...')
return {'result': {'accountID': 25}, 'id': 1}
elif method == 'GetAccountByName' and version == '1.0':
LOG.info('Called Fake GetAccountByName...')
results = {'result': {'account':
{'accountID': 25,
'username': params['username'],
'status': 'active',
'initiatorSecret': '123456789012',
'targetSecret': '123456789012',
'attributes': {},
'volumes': [6, 7, 20]}},
"id": 1}
return results
elif method == 'CreateVolume' and version == '1.0':
LOG.info('Called Fake CreateVolume...')
return {'result': {'volumeID': 5}, 'id': 1}
elif method == 'DeleteVolume' and version == '1.0':
LOG.info('Called Fake DeleteVolume...')
return {'result': {}, 'id': 1}
elif method == 'ModifyVolume' and version == '5.0':
LOG.info('Called Fake ModifyVolume...')
return {'result': {}, 'id': 1}
elif method == 'CloneVolume':
return {'result': {'volumeID': 6}, 'id': 2}
elif method == 'ModifyVolume':
return
elif method == 'ListVolumesForAccount' and version == '1.0':
test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66'
LOG.info('Called Fake ListVolumesForAccount...')
result = {'result': {
'volumes': [{'volumeID': 5,
'name': test_name,
'accountID': 25,
'sliceCount': 1,
'totalSize': 1 * units.GiB,
'enable512e': True,
'access': "readWrite",
'status': "active",
'attributes': None,
'qos': None,
'iqn': test_name}]}}
return result
else:
LOG.error('Crap, unimplemented API call in Fake:%s' % method)
def fake_issue_api_request_fails(obj, method, params, version='1.0'):
return {'error': {'code': 000,
'name': 'DummyError',
'message': 'This is a fake error response'},
'id': 1}
def fake_set_qos_by_volume_type(self, type_id, ctxt):
return {'minIOPS': 500,
'maxIOPS': 1000,
'burstIOPS': 1000}
def fake_volume_get(obj, key, default=None):
return {'qos': 'fast'}
def fake_update_cluster_status(self):
return
def fake_get_model_info(self, account, vid):
return {'fake': 'fake-model'}
def test_create_with_qos_type(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(SolidFireDriver, '_set_qos_by_volume_type',
self.fake_set_qos_by_volume_type)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': 'fast',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertIsNotNone(model_update)
def test_create_volume(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertIsNotNone(model_update)
self.assertIsNone(model_update.get('provider_geometry', None))
def test_create_volume_non_512(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
self.configuration.sf_emulate_512 = False
sfv = SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertEqual(model_update.get('provider_geometry', None),
'4096 4096')
self.configuration.sf_emulate_512 = True
def test_create_snapshot(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(SolidFireDriver, '_get_model_info',
self.fake_get_model_info)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
testsnap = {'project_id': 'testprjid',
'name': 'testvol',
'volume_size': 1,
'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66',
'volume_id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
sfv.create_snapshot(testsnap)
def test_create_clone(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(SolidFireDriver, '_get_model_info',
self.fake_get_model_info)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
testvol_b = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv.create_cloned_volume(testvol_b, testvol)
def test_initialize_connector_with_blocksizes(self):
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': None,
'provider_location': '10.10.7.1:3260 iqn.2010-01.com.'
'solidfire:87hg.uuid-2cc06226-cc'
'74-4cb7-bd55-14aed659a0cc.4060 0',
'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
'c76370d66b 2FE0CQ8J196R',
'provider_geometry': '4096 4096',
'created_at': timeutils.utcnow(),
}
sfv = SolidFireDriver(configuration=self.configuration)
properties = sfv.initialize_connection(testvol, connector)
self.assertEqual(properties['data']['physical_block_size'], '4096')
self.assertEqual(properties['data']['logical_block_size'], '4096')
def test_create_volume_with_qos(self):
preset_qos = {}
preset_qos['qos'] = 'fast'
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'metadata': [preset_qos],
'volume_type_id': None,
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertIsNotNone(model_update)
def test_create_volume_fails(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is intentional for this test
self.stubs.Set(SolidFireDriver, '_update_cluster_status',
self.fake_update_cluster_status)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
try:
sfv.create_volume(testvol)
self.fail("Should have thrown Error")
except Exception:
pass
def test_create_sfaccount(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
account = sfv._create_sfaccount('project-id')
self.assertIsNotNone(account)
def test_create_sfaccount_fails(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request_fails)
account = sfv._create_sfaccount('project-id')
self.assertIsNone(account)
def test_get_sfaccount_by_name(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
account = sfv._get_sfaccount_by_name('some-name')
self.assertIsNotNone(account)
def test_get_sfaccount_by_name_fails(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request_fails)
account = sfv._get_sfaccount_by_name('some-name')
self.assertIsNone(account)
def test_delete_volume(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv.delete_volume(testvol)
def test_delete_volume_fails_no_volume(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
try:
sfv.delete_volume(testvol)
self.fail("Should have thrown Error")
except Exception:
pass
def test_delete_volume_fails_account_lookup(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is intentional for this test
self.stubs.Set(SolidFireDriver, '_update_cluster_status',
self.fake_update_cluster_status)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.SolidFireAccountNotFound,
sfv.delete_volume,
testvol)
def test_get_cluster_info(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
sfv = SolidFireDriver(configuration=self.configuration)
sfv._get_cluster_info()
def test_get_cluster_info_fail(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is intentional for this test
self.stubs.Set(SolidFireDriver, '_update_cluster_status',
self.fake_update_cluster_status)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request_fails)
sfv = SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.SolidFireAPIException,
sfv._get_cluster_info)
def test_extend_volume(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv.extend_volume(testvol, 2)
def test_extend_volume_fails_no_volume(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
'size': 1,
'id': 'not-found'}
sfv = SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.VolumeNotFound,
sfv.extend_volume,
testvol, 2)
def test_extend_volume_fails_account_lookup(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is intentional for this test
self.stubs.Set(SolidFireDriver, '_update_cluster_status',
self.fake_update_cluster_status)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.SolidFireAccountNotFound,
sfv.extend_volume,
testvol, 2)
def test_set_by_qos_spec_with_scoping(self):
sfv = SolidFireDriver(configuration=self.configuration)
qos_ref = qos_specs.create(self.ctxt,
'qos-specs-1', {'qos:minIOPS': '1000',
'qos:maxIOPS': '10000',
'qos:burstIOPS': '20000'})
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:minIOPS": "100",
"qos:burstIOPS": "300",
"qos:maxIOPS": "200"})
qos_specs.associate_qos_with_type(self.ctxt,
qos_ref['id'],
type_ref['id'])
qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id'])
self.assertEqual(qos, self.expected_qos_results)
def test_set_by_qos_spec(self):
sfv = SolidFireDriver(configuration=self.configuration)
qos_ref = qos_specs.create(self.ctxt,
'qos-specs-1', {'minIOPS': '1000',
'maxIOPS': '10000',
'burstIOPS': '20000'})
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:minIOPS": "100",
"qos:burstIOPS": "300",
"qos:maxIOPS": "200"})
qos_specs.associate_qos_with_type(self.ctxt,
qos_ref['id'],
type_ref['id'])
qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id'])
self.assertEqual(qos, self.expected_qos_results)
def test_set_by_qos_by_type_only(self):
sfv = SolidFireDriver(configuration=self.configuration)
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:minIOPS": "100",
"qos:burstIOPS": "300",
"qos:maxIOPS": "200"})
qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id'])
self.assertEqual(qos, {'minIOPS': 100,
'maxIOPS': 200,
'burstIOPS': 300})
|
the-stack_0_9020 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for multi-worker training tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import re
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class MultiWorkerTutorialTest(parameterized.TestCase, test.TestCase):
"""Test multi-worker training flow demo'ed in go/multi-worker-with-keras."""
@contextlib.contextmanager
def skip_fetch_failure_exception(self):
try:
yield
except Exception as e: # pylint: disable=broad-except
if 'URL fetch failure' in str(e):
self.skipTest('URL fetch error not considered failure of the test.')
else:
raise
@combinations.generate(
combinations.combine(
mode=['eager'],
shard_policy=[None] + list(distribute_options.AutoShardPolicy)))
def testMultiWorkerTutorial(self, mode, shard_policy):
"""Test multi-worker training flow demo'ed in go/multi-worker-with-keras.
This test should be kept in sync with the code samples in
go/multi-worker-with-keras.
Args:
mode: Runtime mode.
shard_policy: None or any of tf.data.experimental.AutoShardPolicy for
testing.
"""
if shard_policy is distribute_options.AutoShardPolicy.FILE:
self.skipTest('TensorSliceDataset is not shardable with FILE policy.')
def mnist_dataset(batch_size):
with self.skip_fetch_failure_exception():
(x_train, y_train), _ = mnist.load_data()
# The `x` arrays are in uint8 and have values in the range [0, 255].
# We need to convert them to float32 with values in the range [0, 1]
x_train = x_train / np.float32(255)
y_train = y_train.astype(np.int64)
train_dataset = dataset_ops.DatasetV2.from_tensor_slices(
(x_train, y_train)).shuffle(60000).repeat().batch(batch_size)
return train_dataset
def build_and_compile_cnn_model():
model = keras.Sequential([
keras.layers.Input(shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(32, 3, activation='relu'),
keras.layers.Flatten(),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10)
])
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=gradient_descent.SGD(learning_rate=0.001),
metrics=['accuracy'])
return model
per_worker_batch_size = 64
single_worker_dataset = mnist_dataset(per_worker_batch_size)
single_worker_model = build_and_compile_cnn_model()
single_worker_model.fit(single_worker_dataset, epochs=3, steps_per_epoch=70)
num_workers = 4
def proc_func():
global_batch_size = per_worker_batch_size * num_workers
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
with strategy.scope():
multi_worker_model = build_and_compile_cnn_model()
callbacks = [
keras.callbacks.ModelCheckpoint(
filepath=os.path.join(self.get_temp_dir(), 'checkpoint'))
]
multi_worker_dataset = mnist_dataset(global_batch_size)
if shard_policy:
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = shard_policy
multi_worker_dataset = multi_worker_dataset.with_options(options)
multi_worker_model.fit(
multi_worker_dataset,
epochs=3,
steps_per_epoch=70,
callbacks=callbacks)
with test_util.skip_if_error(self, errors_impl.UnavailableError):
mpr_result = multi_process_runner.run(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=num_workers),
list_stdout=True)
def extract_accuracy(worker_id, input_string):
match = re.match(
r'\[worker\-{}\].*accuracy: (\d+\.\d+).*'.format(worker_id),
input_string)
return None if match is None else float(match.group(1))
for worker_id in range(num_workers):
accu_result = nest.map_structure(
lambda x: extract_accuracy(worker_id, x), # pylint: disable=cell-var-from-loop
mpr_result.stdout)
self.assertTrue(
any(accu_result), 'Every worker is supposed to have accuracy result.')
if __name__ == '__main__':
multi_process_runner.test_main()
|
the-stack_0_9025 | """
setup for vmnlcli package
"""
from setuptools import setup, find_packages
with open('README.md') as f:
long_description = f.read()
# remove header, but have one \n before first headline
start = long_description.find('# vmnlcli')
assert start >= 0
long_description = '\n' + long_description[start:]
setup(
name='vmnlcli',
version='0.2.0',
url='http://github.com/thomaswaldmann/velomobielnl/',
license='MIT',
author='Thomas Waldmann',
author_email='[email protected]',
description='command line interface for some dutch velomobile websites',
long_description=long_description,
long_description_content_type='text/markdown',
keywords="velomobile odometer cli velomobiel.nl intercitybike.nl welmers.net",
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
entry_points={
'console_scripts': [
'vmnlcli = vmnlcli:main',
]
},
platforms='any',
setup_requires=[],
install_requires=[
'requests',
],
python_requires='>=3.5',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Utilities',
'Topic :: Database :: Front-Ends',
'Topic :: Internet :: WWW/HTTP :: Site Management',
],
)
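# Typical local workflows (illustrative):
#     pip install -e .                    # editable install for development
#     python setup.py sdist bdist_wheel   # build source and wheel distributions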
|
the-stack_0_9028 | """
This file offers the methods to automatically retrieve the graph Streptomyces cyaneogriseus subsp. noncyanogenus.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def StreptomycesCyaneogriseusSubspNoncyanogenus(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Streptomyces cyaneogriseus subsp. noncyanogenus graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of the Streptomyces cyaneogriseus subsp. noncyanogenus graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="StreptomycesCyaneogriseusSubspNoncyanogenus",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
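# Example retrieval (a sketch; assumes this package and its dependencies are
# installed and that the STRING files can be downloaded or found in the cache):
#
#     graph = StreptomycesCyaneogriseusSubspNoncyanogenus(version="links.v11.5")
#     print(graph)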
|
the-stack_0_9030 | from enum import Enum
from typing import List, NamedTuple, Optional # Callable
import random
# from math import sqrt
from generic_search import dfs, node_to_path, Node, bfs # astar
class Cell(str, Enum):
EMPTY = " "
BLOCKED = "X"
START = "S"
GOAL = "G"
PATH = "*"
class MazeLocation(NamedTuple):
row: int
column: int
class Maze:
def __init__(
self,
rows: int = 10,
columns: int = 10,
sparseness: float = 0.2,
start: MazeLocation = MazeLocation(0, 0),
goal: MazeLocation = MazeLocation(9, 9),
) -> None:
self._rows: int = rows
self._columns: int = columns
self.start: MazeLocation = start
self.goal: MazeLocation = goal
# fill the grid with empty cells
self._grid: List[List[Cell]] = [
[Cell.EMPTY for c in range(columns)] for r in range(rows)
]
# fill the grid with blocked cells
self._randomly_fill(rows, columns, sparseness)
# fill in the start and goal positions
self._grid[start.row][start.column] = Cell.START
self._grid[goal.row][goal.column] = Cell.GOAL
def _randomly_fill(self, rows: int, columns: int, sparseness: float):
for row in range(rows):
for column in range(columns):
if random.uniform(0, 1.0) < sparseness:
self._grid[row][column] = Cell.BLOCKED
def __str__(self) -> str:
output: str = "------------\n"
for row in self._grid:
output += "|" + "".join([c.value for c in row]) + "|\n"
output += "------------\n"
return output
def goal_test(self, ml: MazeLocation) -> bool:
return ml == self.goal
# flake8: noqa
def successors(self, ml: MazeLocation) -> List[MazeLocation]:
locations: List[MazeLocation] = []
if (
ml.row + 1 < self._rows
and self._grid[ml.row + 1][ml.column] != Cell.BLOCKED
):
locations.append(MazeLocation(ml.row + 1, ml.column))
if ml.row - 1 >= 0 and self._grid[ml.row - 1][ml.column] != Cell.BLOCKED:
locations.append(MazeLocation(ml.row - 1, ml.column))
if (
ml.column + 1 < self._columns
and self._grid[ml.row][ml.column + 1] != Cell.BLOCKED
):
locations.append(MazeLocation(ml.row, ml.column + 1))
if ml.column - 1 >= 0 and self._grid[ml.row][ml.column - 1] != Cell.BLOCKED:
locations.append(MazeLocation(ml.row, ml.column - 1))
return locations
def mark(self, path: List[MazeLocation]):
for maze_location in path:
self._grid[maze_location.row][maze_location.column] = Cell.PATH
self._grid[self.start.row][self.start.column] = Cell.START
self._grid[self.goal.row][self.goal.column] = Cell.GOAL
def clear(self, path: List[MazeLocation]):
for maze_location in path:
self._grid[maze_location.row][maze_location.column] = Cell.EMPTY
self._grid[self.start.row][self.start.column] = Cell.START
self._grid[self.goal.row][self.goal.column] = Cell.GOAL
if __name__ == "__main__":
# DFS - depth-first search
maze: Maze = Maze()
print(maze)
solution_one: Optional[Node[MazeLocation]] = dfs(
maze.start, maze.goal_test, maze.successors
)
if solution_one is None:
print("No solution found using depth-first search!")
else:
path_one: List[MazeLocation] = node_to_path(solution_one)
maze.mark(path_one)
print(maze)
maze.clear(path_one)
solution_two: Optional[Node[MazeLocation]] = bfs(
maze.start, maze.goal_test, maze.successors
)
if solution_two is None:
print("No solution found using breadth-first search!")
else:
path_two: List[MazeLocation] = node_to_path(solution_two)
maze.mark(path_two)
print(maze)
maze.clear(path_two)
|
the-stack_0_9031 | """
"vendors" notary into docker and runs integration tests - then builds the
docker client binary with an API version compatible with the existing
daemon
Usage:
python docker-integration-test.py
This assumes that your docker directory is in $GOPATH/src/github.com/docker/docker
and your notary directory, irrespective of where this script is located, is
at $GOPATH/src/github.com/docker/notary.
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
def from_gopath(gopkg):
"""
Gets the location of the go source given go package, based on the $GOPATH.
"""
gopaths = os.getenv("GOPATH")
for path in gopaths.split(":"):
maybe_path = os.path.abspath(os.path.expanduser(os.path.join(
path, "src", *gopkg.split("/"))))
if os.path.isdir(maybe_path):
return maybe_path
return ""
DOCKER_DIR = from_gopath("github.com/docker/docker")
NOTARY_DIR = from_gopath("github.com/docker/notary")
def fake_vendor():
"""
"vendors" notary into docker by copying all of notary into the docker
vendor directory - also appending several lines into the Dockerfile because
it pulls down notary from github and builds the binaries
"""
docker_notary_relpath = "vendor/src/github.com/docker/notary"
docker_notary_abspath = os.path.join(DOCKER_DIR, docker_notary_relpath)
print("copying notary ({0}) into {1}".format(NOTARY_DIR, docker_notary_abspath))
def ignore_dirs(walked_dir, _):
"""
Don't vendor everything, particularly not the docker directory
recursively, if it happened to be in the notary directory
"""
if walked_dir == NOTARY_DIR:
return [".git", ".cover", "docs", "bin"]
elif walked_dir == os.path.join(NOTARY_DIR, "fixtures"):
return ["compatibility"]
return []
if os.path.exists(docker_notary_abspath):
shutil.rmtree(docker_notary_abspath)
shutil.copytree(
NOTARY_DIR, docker_notary_abspath, symlinks=True, ignore=ignore_dirs)
# hack this because docker/docker's Dockerfile checks out a particular version of notary
# based on a tag or SHA, and we want to build based on what was vendored in
dockerfile_addition = ("\n"
"RUN set -x && "
"export GO15VENDOREXPERIMENT=1 && "
"go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server &&"
"go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary")
with open(os.path.join(DOCKER_DIR, "Dockerfile")) as dockerfile:
text = dockerfile.read()
if not text.endswith(dockerfile_addition):
with open(os.path.join(DOCKER_DIR, "Dockerfile"), 'a+') as dockerfile:
dockerfile.write(dockerfile_addition)
# hack the makefile so that we tag the built image as something else so we
# don't interfere with any other docker test builds
with open(os.path.join(DOCKER_DIR, "Makefile"), 'r') as makefile:
makefiletext = makefile.read()
with open(os.path.join(DOCKER_DIR, "Makefile"), 'wb') as makefile:
image_name = os.getenv("DOCKER_TEST_IMAGE_NAME", "notary-docker-vendor-test")
text = re.sub("^DOCKER_IMAGE := .+$", "DOCKER_IMAGE := {0}".format(image_name),
makefiletext, 1, flags=re.M)
makefile.write(text)
def run_integration_test():
"""
Presumes that the fake vendoring has already happened - this runs the
integration tests.
"""
env = os.environ.copy()
env["TESTFLAGS"] = '-check.f DockerTrustSuite*'
subprocess.check_call(
"make test-integration-cli".split(), cwd=DOCKER_DIR, env=env)
if __name__ == "__main__":
if len(sys.argv) > 1:
print("\nWarning: Ignoring all extra arguments: {0}".format(" ".join(sys.argv[1:])))
print("\nUsage: python {0}\n\n".format(sys.argv[0]))
if DOCKER_DIR == "":
print("ERROR: Could not find github.com/docker/docker in your GOPATH='{0}'"
.format(os.getenv("GOPATH")))
sys.exit(1)
if NOTARY_DIR == "":
print("ERROR: Could not find github.com/docker/notary in your GOPATH='{0}'"
.format(os.getenv("GOPATH")))
sys.exit(1)
fake_vendor()
run_integration_test()
|
the-stack_0_9034 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BgpPeerStatus(Model):
"""BGP peer status details.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar local_address: The virtual network gateway's local address
:vartype local_address: str
:ivar neighbor: The remote BGP peer
:vartype neighbor: str
:ivar asn: The autonomous system number of the remote BGP peer
:vartype asn: int
:ivar state: The BGP peer state. Possible values include: 'Unknown',
'Stopped', 'Idle', 'Connecting', 'Connected'
:vartype state: str or ~azure.mgmt.network.v2018_01_01.models.BgpPeerState
:ivar connected_duration: For how long the peering has been up
:vartype connected_duration: str
:ivar routes_received: The number of routes learned from this peer
:vartype routes_received: long
:ivar messages_sent: The number of BGP messages sent
:vartype messages_sent: long
:ivar messages_received: The number of BGP messages received
:vartype messages_received: long
"""
_validation = {
'local_address': {'readonly': True},
'neighbor': {'readonly': True},
'asn': {'readonly': True},
'state': {'readonly': True},
'connected_duration': {'readonly': True},
'routes_received': {'readonly': True},
'messages_sent': {'readonly': True},
'messages_received': {'readonly': True},
}
_attribute_map = {
'local_address': {'key': 'localAddress', 'type': 'str'},
'neighbor': {'key': 'neighbor', 'type': 'str'},
'asn': {'key': 'asn', 'type': 'int'},
'state': {'key': 'state', 'type': 'str'},
'connected_duration': {'key': 'connectedDuration', 'type': 'str'},
'routes_received': {'key': 'routesReceived', 'type': 'long'},
'messages_sent': {'key': 'messagesSent', 'type': 'long'},
'messages_received': {'key': 'messagesReceived', 'type': 'long'},
}
def __init__(self, **kwargs) -> None:
super(BgpPeerStatus, self).__init__(**kwargs)
self.local_address = None
self.neighbor = None
self.asn = None
self.state = None
self.connected_duration = None
self.routes_received = None
self.messages_sent = None
self.messages_received = None
|
the-stack_0_9035 | # wiki/tests.py
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from wiki.models import Page
class PageListViewTests(TestCase):
def test_multiple_pages(self):
# Make some test data to be displayed on the page.
user = User.objects.create()
Page.objects.create(title="My Test Page", content="test", author=user)
Page.objects.create(title="Another Test Page", content="test",
author=user)
# Issue a GET request to the MakeWiki homepage.
# When we make a request, we get a response back.
response = self.client.get('/')
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
# Check that the number of pages passed to the template
# matches the number of pages we have in the database.
responses = response.context['pages']
self.assertEqual(len(responses), 2)
self.assertQuerysetEqual(
responses,
['<Page: My Test Page>', '<Page: Another Test Page>'],
ordered=False
)
class PageDetailViewTests(TestCase):
def test_single_page(self):
# Create a test page
user = User.objects.create()
page = Page(title="Test Page", content="Test", author=user)
page.save()
response = self.client.get(reverse('wiki-details-page',
args=[page.slug]))
self.assertEqual(response.status_code, 200)
def test_new_page_form(self):
response = self.client.get(reverse('wiki-new-page'))
self.assertIn(b'Title of your page', response.content)
def test_create_page(self):
user = User.objects.create()
args = {'title': "Test Page", 'content': 'TEST', 'author': user.id}
response = self.client.post(reverse('wiki-new-page'), args)
self.assertEqual(response.status_code, 302)
response = self.client.get('/')
responses = response.context['pages']
self.assertQuerysetEqual(responses, ['<Page: Test Page>'])
class WikiTestCase(TestCase):
def test_true_is_true(self):
""" Tests if True is equal to True. Should always pass. """
self.assertEqual(True, True)
def test_page_slugify_on_save(self):
""" Tests the slug generated when saving a Page. """
# Author is a required field in our model.
# Create a user for this test and save it to the test database.
user = User()
user.save()
# Create and save a new page to the test database.
page = Page(title="My Test Page", content="test", author=user)
page.save()
# Make sure the slug that was generated in Page.save()
# matches what we think it should be.
self.assertEqual(page.slug, "my-test-page")
|
the-stack_0_9036 | #!/usr/bin/env python3
from contextlib import ExitStack
from time import sleep
from urllib.request import urlopen
import argparse
import json
import random
import sys
import yaml
from plumbum import local
from dyno_cluster import DynoCluster, DynoSpec
from func_test import comparison_test
from utils import generate_ips, setup_temp_dir, sleep_with_animation
from redis_node import RedisNode
REDIS_PORT = 1212
STATS_PORT = 22222
SETTLE_TIME = 3
def main():
parser = argparse.ArgumentParser(
description='Autogenerates a Dynomite cluster and runs functional ' +
'tests against it')
parser.add_argument('dynospec_file', default='test/dyno_spec_file.yaml',
help='YAML file describing desired cluster', nargs='?')
args = parser.parse_args()
# Setup a temporary directory to store logs and configs for this cluster.
temp = setup_temp_dir()
specs = parse_dynospec_file(args.dynospec_file)
# Create a standalone Redis node.
standalone_redis_ip = redis_ip(len(specs))
standalone_redis = RedisNode(standalone_redis_ip, REDIS_PORT)
# Create a Dynomite cluster.
dynomite_cluster = DynoCluster.fromDynomiteSpecs(specs, launch_nodes=False)
with ExitStack() as stack:
# Make sure to change the working directory to the temp dir before running the
# tests.
stack.enter_context(local.cwd(temp))
# Launch the standalone Redis node and the dynomite cluster.
stack.enter_context(standalone_redis)
stack.enter_context(dynomite_cluster)
# Wait for a while for the above nodes to start.
sleep_with_animation(SETTLE_TIME, "Waiting for cluster to start")
# Run all the functional comparison tests.
comparison_test(standalone_redis, dynomite_cluster, False)
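# Query the /info stats endpoint of a random node and make sure it serves valid JSON.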
random_node = random.choice(dynomite_cluster.nodes)
stats_url = 'http://{}:{}/info'.format(random_node.ip, STATS_PORT)
json.loads(urlopen(stats_url).read().decode('ascii'))
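# The standalone Redis node gets the last address in the 127.0.0.x range so it cannot
# collide with the generated Dynomite node IPs (hence the < 254 assertion).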
def redis_ip(dyno_node_count):
assert dyno_node_count < 254
return "127.0.0.254"
def parse_dynospec_file(filename):
with open(filename, 'r') as f:
specs = yaml.safe_load(f)
return [DynoSpec(**dct) for dct in specs]
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_9037 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import threading
import time
import sys
import math
import signal
import configparser
import audioop
import subprocess as sp
import argparse
import os
import os.path
import pymumble_py3 as pymumble
import pymumble_py3.constants
import variables as var
import logging
import logging.handlers
import traceback
import struct
from packaging import version
import util
import command
import constants
import media.playlist
from constants import tr_cli as tr
from database import SettingsDatabase, MusicDatabase, DatabaseMigration
from media.item import ValidationFailedError, PreparationFailedError
from media.cache import MusicCache
class MumbleBot:
version = 'git'
def __init__(self, args):
self.log = logging.getLogger("bot")
self.log.info(f"bot: botamusique version {self.get_version()}, starting...")
signal.signal(signal.SIGINT, self.ctrl_caught)
self.cmd_handle = {}
self.stereo = var.config.getboolean('bot', 'stereo', fallback=True)
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel", fallback=None)
var.user = args.user
var.is_proxified = var.config.getboolean(
"webinterface", "is_web_proxified")
# Flags to indicate the bot is exiting (Ctrl-C, or !kill)
self.exit = False
self.nb_exit = 0
# Related to ffmpeg thread
self.thread = None
self.thread_stderr = None
self.read_pcm_size = 0
self.pcm_buffer_size = 0
self.last_ffmpeg_err = ""
# Play/pause status
self.is_pause = False
self.pause_at_id = ""
self.playhead = -1 # current position in a song.
self.song_start_at = -1
self.wait_for_ready = False # flag telling the loop to wait for the download to complete in the other thread
#
self.on_interrupting = False
if args.host:
host = args.host
else:
host = var.config.get("server", "host")
if args.port:
port = args.port
else:
port = var.config.getint("server", "port")
if args.password:
password = args.password
else:
password = var.config.get("server", "password")
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel")
if args.certificate:
certificate = args.certificate
else:
certificate = util.solve_filepath(var.config.get("server", "certificate"))
if args.tokens:
tokens = args.tokens
else:
tokens = var.config.get("server", "tokens")
tokens = tokens.split(',')
if args.user:
self.username = args.user
else:
self.username = var.config.get("bot", "username")
if args.bandwidth:
self.bandwidth = args.bandwidth
else:
self.bandwidth = var.config.getint("bot", "bandwidth")
self.mumble = pymumble.Mumble(host, user=self.username, port=port, password=password, tokens=tokens,
stereo=self.stereo,
debug=var.config.getboolean('debug', 'mumbleConnection'),
certfile=certificate)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_TEXTMESSAGERECEIVED, self.message_received)
self.mumble.set_codec_profile("audio")
self.mumble.start() # start the mumble thread
self.mumble.is_ready() # wait for the connection
if self.mumble.connected >= pymumble.constants.PYMUMBLE_CONN_STATE_FAILED:
exit()
self.set_comment()
self.mumble.users.myself.unmute() # make sure the bot is not muted
self.join_channel()
self.mumble.set_bandwidth(self.bandwidth)
# ====== Volume ======
self.volume_helper = util.VolumeHelper()
_volume = var.config.getfloat('bot', 'volume', fallback=0.8)
if var.db.has_option('bot', 'volume'):
_volume = var.db.getfloat('bot', 'volume')
self.volume_helper.set_volume(_volume)
self.is_ducking = False
self.on_ducking = False
self.ducking_release = time.time()
self.last_volume_cycle_time = time.time()
self._ducking_volume = 0
_ducking_volume = var.config.getfloat("bot", "ducking_volume", fallback=0.50)
_ducking_volume = var.db.getfloat("bot", "ducking_volume", fallback=_ducking_volume)
self.volume_helper.set_ducking_volume(_ducking_volume)
self.ducking_threshold = var.config.getfloat("bot", "ducking_threshold", fallback=5000)
self.ducking_threshold = var.db.getfloat("bot", "ducking_threshold", fallback=self.ducking_threshold)
if not var.db.has_option("bot", "ducking") and var.config.getboolean("bot", "ducking", fallback=False) \
or var.config.getboolean("bot", "ducking"):
self.is_ducking = True
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_SOUNDRECEIVED,
self.ducking_sound_received)
self.mumble.set_receive_sound(True)
assert var.config.get("bot", "when_nobody_in_channel") in ['pause', 'pause_resume', 'stop', 'nothing', ''], \
"Unknown action for when_nobody_in_channel"
if var.config.get("bot", "when_nobody_in_channel", fallback='') in ['pause', 'pause_resume', 'stop']:
user_change_callback = \
lambda user, action: threading.Thread(target=self.users_changed,
args=(user, action), daemon=True).start()
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERREMOVED, user_change_callback)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERUPDATED, user_change_callback)
# Debug use
self._loop_status = 'Idle'
self._display_rms = False
self._max_rms = 0
self.redirect_ffmpeg_log = var.config.getboolean('debug', 'redirect_ffmpeg_log', fallback=True)
if var.config.getboolean("bot", "auto_check_update"):
def check_update():
nonlocal self
new_version, changelog = util.check_update(self.get_version())
if new_version:
self.send_channel_msg(tr('new_version_found', new_version=new_version, changelog=changelog))
th = threading.Thread(target=check_update, name="UpdateThread")
th.daemon = True
th.start()
last_startup_version = var.db.get("bot", "version", fallback=None)
if not last_startup_version or version.parse(last_startup_version) < version.parse(self.version):
var.db.set("bot", "version", self.version)
changelog = util.fetch_changelog()
self.send_channel_msg(tr("update_successful", version=self.version, changelog=changelog))
# Set the CTRL+C shortcut
def ctrl_caught(self, signal, frame):
self.log.info(
"\nSIGINT caught, quitting, {} more to kill".format(2 - self.nb_exit))
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
if self.nb_exit > 1:
self.log.info("Forced Quit")
sys.exit(0)
self.nb_exit += 1
self.exit = True
def get_version(self):
if self.version != "git":
return self.version
else:
return util.get_snapshot_version()
def register_command(self, cmd, handle, no_partial_match=False, access_outside_channel=False, admin=False):
cmds = cmd.split(",")
for command in cmds:
command = command.strip()
if command:
self.cmd_handle[command] = {'handle': handle,
'partial_match': not no_partial_match,
'access_outside_channel': access_outside_channel,
'admin': admin}
self.log.debug("bot: command added: " + command)
def set_comment(self):
self.mumble.users.myself.comment(var.config.get('bot', 'comment'))
def join_channel(self):
if self.channel:
if '/' in self.channel:
self.mumble.channels.find_by_tree(self.channel.split('/')).move_in()
else:
self.mumble.channels.find_by_name(self.channel).move_in()
# =======================
# Message
# =======================
# All text send to the chat is analysed by this function
def message_received(self, text):
raw_message = text.message.strip()
message = re.sub(r'<.*?>', '', raw_message)
user = self.mumble.users[text.actor]['name']
if var.config.getboolean('commands', 'split_username_at_space'):
# if you use https://github.com/Natenom/mumblemoderator-module-collection/tree/master/os-suffixes,
# you may want to split the username at the first space
user = user.split()[0]
if message[0] in var.config.get('commands', 'command_symbol'):
# remove the symbol from the message
message = message[1:].split(' ', 1)
# use the first word as a command, the others one as parameters
if len(message) > 0:
command = message[0].lower()
parameter = ''
if len(message) > 1:
parameter = message[1].rstrip()
else:
return
self.log.info('bot: received command ' + command + ' - ' + parameter + ' by ' + user)
# Anti stupid guy function
if not self.is_admin(user) and not var.config.getboolean('bot', 'allow_private_message') and text.session:
self.mumble.users[text.actor].send_text_message(
tr('pm_not_allowed'))
return
for i in var.db.items("user_ban"):
if user.lower() == i[0]:
self.mumble.users[text.actor].send_text_message(
tr('user_ban'))
return
if not self.is_admin(user) and parameter:
input_url = util.get_url_from_input(parameter)
if input_url and var.db.has_option('url_ban', input_url):
self.mumble.users[text.actor].send_text_message(
tr('url_ban'))
return
command_exc = ""
try:
if command in self.cmd_handle:
command_exc = command
else:
# try partial match
cmds = self.cmd_handle.keys()
matches = []
for cmd in cmds:
if cmd.startswith(command) and self.cmd_handle[cmd]['partial_match']:
matches.append(cmd)
if len(matches) == 1:
self.log.info("bot: {:s} matches {:s}".format(command, matches[0]))
command_exc = matches[0]
elif len(matches) > 1:
self.mumble.users[text.actor].send_text_message(
tr('which_command', commands="<br>".join(matches)))
return
else:
self.mumble.users[text.actor].send_text_message(
tr('bad_command', command=command))
return
if self.cmd_handle[command_exc]['admin'] and not self.is_admin(user):
self.mumble.users[text.actor].send_text_message(tr('not_admin'))
return
if not self.cmd_handle[command_exc]['access_outside_channel'] \
and not self.is_admin(user) \
and not var.config.getboolean('bot', 'allow_other_channel_message') \
and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself['channel_id']:
self.mumble.users[text.actor].send_text_message(
tr('not_in_my_channel'))
return
self.cmd_handle[command_exc]['handle'](self, user, text, command_exc, parameter)
except:
error_traceback = traceback.format_exc()
error = error_traceback.rstrip().split("\n")[-1]
self.log.error(f"bot: command {command_exc} failed with error: {error_traceback}\n")
self.send_msg(tr('error_executing_command', command=command_exc, error=error), text)
def send_msg(self, msg, text):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
# text if the object message, contain information if direct message or channel message
self.mumble.users[text.actor].send_text_message(msg)
def send_channel_msg(self, msg):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
own_channel.send_text_message(msg)
@staticmethod
def is_admin(user):
list_admin = var.config.get('bot', 'admin').rstrip().split(';')
if user in list_admin:
return True
else:
return False
# =======================
# Users changed
# =======================
def users_changed(self, user, message):
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
# only check if there is one more user currently in the channel
# else when the music is paused and somebody joins, music would start playing again
if len(own_channel.get_users()) == 2:
if var.config.get("bot", "when_nobody_in_channel") == "pause_resume":
self.resume()
elif var.config.get("bot", "when_nobody_in_channel") == "pause" and self.is_pause:
self.send_channel_msg(tr("auto_paused"))
elif len(own_channel.get_users()) == 1 and len(var.playlist) != 0:
# if the bot is the only user left in the channel and the playlist isn't empty
self.log.info('bot: Other users in the channel left. Stopping music now.')
if var.config.get("bot", "when_nobody_in_channel") == "stop":
self.clear()
else:
self.pause()
# =======================
# Launch and Download
# =======================
def launch_music(self, music_wrapper, start_from=0):
assert music_wrapper.is_ready()
uri = music_wrapper.uri()
self.log.info("bot: play music " + music_wrapper.format_debug_string())
if var.config.getboolean('bot', 'announce_current_music'):
self.send_channel_msg(music_wrapper.format_current_playing())
if var.config.getboolean('debug', 'ffmpeg'):
ffmpeg_debug = "debug"
else:
ffmpeg_debug = "warning"
channels = 2 if self.stereo else 1
self.pcm_buffer_size = 960 * channels
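# Decode the source with ffmpeg to raw signed 16-bit PCM at 48 kHz, seeking to start_from seconds.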
command = ("ffmpeg", '-v', ffmpeg_debug, '-nostdin', '-i',
uri, '-ss', f"{start_from:f}", '-ac', str(channels), '-f', 's16le', '-ar', '48000', '-')
self.log.debug("bot: execute ffmpeg command: " + " ".join(command))
# The ffmpeg process is a thread
# prepare pipe for catching stderr of ffmpeg
if self.redirect_ffmpeg_log:
pipe_rd, pipe_wd = util.pipe_no_wait() # Let the pipe work in non-blocking mode
self.thread_stderr = os.fdopen(pipe_rd)
else:
pipe_rd, pipe_wd = None, None
self.thread = sp.Popen(command, stdout=sp.PIPE, stderr=pipe_wd, bufsize=self.pcm_buffer_size)
def async_download_next(self):
# Function start if the next music isn't ready
# Do nothing in case the next music is already downloaded
self.log.debug("bot: Async download next asked ")
while var.playlist.next_item():
# Usually, all validation is done when an item is added to the list.
# However, for performance reasons, YouTube playlists are not validated when added,
# so the validation has to be done here.
next = var.playlist.next_item()
try:
if not next.is_ready():
self.async_download(next)
break
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(next.id)
var.cache.free_and_delete(next.id)
def async_download(self, item):
th = threading.Thread(
target=self._download, name="Prepare-" + item.id[:7], args=(item,))
self.log.info(f"bot: start preparing item in thread: {item.format_debug_string()}")
th.daemon = True
th.start()
return th
def start_download(self, item):
if not item.is_ready():
self.log.info("bot: current music isn't ready, start downloading.")
self.async_download(item)
self.send_channel_msg(
tr('download_in_progress', item=item.format_title()))
def _download(self, item):
ver = item.version
try:
item.validate()
if item.is_ready():
return True
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(item.id)
var.cache.free_and_delete(item.id)
return False
try:
item.prepare()
if item.version > ver:
var.playlist.version += 1
return True
except PreparationFailedError as e:
self.send_channel_msg(e.msg)
return False
# =======================
# Loop
# =======================
# Main loop of the Bot
def loop(self):
while not self.exit and self.mumble.is_alive():
while self.thread and self.mumble.sound_output.get_buffer_size() > 0.5 and not self.exit:
# If the buffer isn't empty, I cannot send new music part, so I wait
self._loop_status = f'Wait for buffer {self.mumble.sound_output.get_buffer_size():.3f}'
time.sleep(0.01)
raw_music = None
if self.thread:
# I get raw from ffmpeg thread
# move playhead forward
self._loop_status = 'Reading raw'
if self.song_start_at == -1:
self.song_start_at = time.time() - self.playhead
self.playhead = time.time() - self.song_start_at
raw_music = self.thread.stdout.read(self.pcm_buffer_size)
self.read_pcm_size += len(raw_music)
if self.redirect_ffmpeg_log:
try:
self.last_ffmpeg_err = self.thread_stderr.readline()
if self.last_ffmpeg_err:
self.log.debug("ffmpeg: " + self.last_ffmpeg_err.strip("\n"))
except:
pass
if raw_music:
# Adjust the volume and send it to mumble
self.volume_cycle()
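# Full-sized chunks are volume-scaled and queued as-is; the first chunk of a stream is
# faded in, and short or interrupted chunks are faded out before the ffmpeg thread is killed.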
if not self.on_interrupting and len(raw_music) == self.pcm_buffer_size:
self.mumble.sound_output.add_sound(
audioop.mul(raw_music, 2, self.volume_helper.real_volume))
elif self.read_pcm_size == 0:
self.mumble.sound_output.add_sound(
audioop.mul(self._fadeout(raw_music, self.stereo, fadein=True), 2, self.volume_helper.real_volume))
elif self.on_interrupting or len(raw_music) < self.pcm_buffer_size:
self.mumble.sound_output.add_sound(
audioop.mul(self._fadeout(raw_music, self.stereo, fadein=False), 2, self.volume_helper.real_volume))
self.thread.kill()
self.thread = None
time.sleep(0.1)
self.on_interrupting = False
else:
time.sleep(0.1)
else:
time.sleep(0.1)
if not self.is_pause and not raw_music:
self.thread = None
# The bot is not paused, but the ffmpeg thread has gone:
# either the last song finished, the bot just resumed from pause, or something went wrong.
if self.read_pcm_size < self.pcm_buffer_size \
and var.playlist.current_index != -1 \
and self.last_ffmpeg_err:
current = var.playlist.current_item()
self.log.error("bot: cannot play music %s", current.format_debug_string())
self.log.error("bot: with ffmpeg error: %s", self.last_ffmpeg_err)
self.last_ffmpeg_err = ""
self.send_channel_msg(tr('unable_play', item=current.format_title()))
var.playlist.remove_by_id(current.id)
var.cache.free_and_delete(current.id)
# move to the next song.
if not self.wait_for_ready: # if wait_for_ready flag is not true, move to the next song.
if var.playlist.next():
current = var.playlist.current_item()
self.log.debug(f"bot: next into the song: {current.format_debug_string()}")
try:
self.start_download(current)
self.wait_for_ready = True
self.song_start_at = -1
self.playhead = 0
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(current.id)
var.cache.free_and_delete(current.id)
else:
self._loop_status = 'Empty queue'
else:
# if wait_for_ready flag is true, means the pointer is already
# pointing to target song. start playing
current = var.playlist.current_item()
if current:
if current.is_ready():
self.wait_for_ready = False
self.read_pcm_size = 0
self.launch_music(current, self.playhead)
self.last_volume_cycle_time = time.time()
self.async_download_next()
elif current.is_failed():
var.playlist.remove_by_id(current.id)
self.wait_for_ready = False
else:
self._loop_status = 'Wait for the next item to be ready'
else:
self.wait_for_ready = False
while self.mumble.sound_output.get_buffer_size() > 0 and self.mumble.is_alive():
# Empty the buffer before exit
time.sleep(0.01)
time.sleep(0.5)
if self.exit:
self._loop_status = "exited"
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
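# Exponentially smooth the real output volume toward the target volume (or the ducking
# volume while ducking), based on the time elapsed since the last cycle.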
def volume_cycle(self):
delta = time.time() - self.last_volume_cycle_time
if self.on_ducking and self.ducking_release < time.time():
self.on_ducking = False
self._max_rms = 0
if delta > 0.001:
if self.is_ducking and self.on_ducking:
self.volume_helper.real_volume = \
(self.volume_helper.real_volume - self.volume_helper.ducking_volume_set) * math.exp(- delta / 0.2) \
+ self.volume_helper.ducking_volume_set
else:
self.volume_helper.real_volume = self.volume_helper.volume_set - \
(self.volume_helper.volume_set - self.volume_helper.real_volume) * math.exp(- delta / 0.5)
self.last_volume_cycle_time = time.time()
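# Called for every audio packet received from other users: whenever the incoming RMS level
# exceeds the threshold, ducking is (re)enabled for one second.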
def ducking_sound_received(self, user, sound):
rms = audioop.rms(sound.pcm, 2)
self._max_rms = max(rms, self._max_rms)
if self._display_rms:
if rms < self.ducking_threshold:
print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(rms / 200), end='\r')
else:
print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(self.ducking_threshold / 200)
+ '+' * int((rms - self.ducking_threshold) / 200), end='\r')
if rms > self.ducking_threshold:
if self.on_ducking is False:
self.log.debug("bot: ducking triggered")
self.on_ducking = True
self.ducking_release = time.time() + 1 # ducking release after 1s
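# Apply an exponential fade-out (or fade-in) envelope to a chunk of 16-bit little-endian PCM,
# handling interleaved samples when running in stereo.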
def _fadeout(self, _pcm_data, stereo=False, fadein=False):
pcm_data = bytearray(_pcm_data)
if stereo:
if not fadein:
mask = [math.exp(-x / 60) for x in range(0, int(len(pcm_data) / 4))]
else:
mask = [math.exp(-x / 60) for x in reversed(range(0, int(len(pcm_data) / 4)))]
for i in range(int(len(pcm_data) / 4)):
pcm_data[4 * i:4 * i + 2] = struct.pack("<h",
round(struct.unpack("<h", pcm_data[4 * i:4 * i + 2])[0] * mask[i]))
pcm_data[4 * i + 2:4 * i + 4] = struct.pack("<h", round(
struct.unpack("<h", pcm_data[4 * i + 2:4 * i + 4])[0] * mask[i]))
else:
mask = [math.exp(-x / 60) for x in range(0, int(len(pcm_data) / 2))]
for i in range(int(len(pcm_data) / 2)):
pcm_data[2 * i:2 * i + 2] = struct.pack("<h",
round(struct.unpack("<h", pcm_data[2 * i:2 * i + 2])[0] * mask[i]))
return bytes(pcm_data) + bytes(len(pcm_data))
# =======================
# Play Control
# =======================
def play(self, index=-1, start_at=0):
if not self.is_pause:
self.interrupt()
if index != -1:
var.playlist.point_to(index)
current = var.playlist.current_item()
self.start_download(current)
self.is_pause = False
self.wait_for_ready = True
self.song_start_at = -1
self.playhead = start_at
def clear(self):
# Kill the ffmpeg thread and empty the playlist
self.interrupt()
var.playlist.clear()
self.wait_for_ready = False
self.log.info("bot: music stopped. playlist trashed.")
def stop(self):
self.interrupt()
self.is_pause = True
if len(var.playlist) > 0:
self.wait_for_ready = True
else:
self.wait_for_ready = False
self.log.info("bot: music stopped.")
def interrupt(self):
# Kill the ffmpeg thread
if self.thread:
self.on_interrupting = True
time.sleep(0.1)
self.song_start_at = -1
self.read_pcm_size = 0
def pause(self):
# Kill the ffmpeg thread
self.interrupt()
self.is_pause = True
self.song_start_at = -1
if len(var.playlist) > 0:
self.pause_at_id = var.playlist.current_item().id
self.log.info(f"bot: music paused at {self.playhead:.2f} seconds.")
def resume(self):
self.is_pause = False
if var.playlist.current_index == -1:
var.playlist.next()
self.playhead = 0
return
music_wrapper = var.playlist.current_item()
if not music_wrapper or not music_wrapper.id == self.pause_at_id or not music_wrapper.is_ready():
self.playhead = 0
return
self.wait_for_ready = True
self.pause_at_id = ""
def start_web_interface(addr, port):
global formatter
import interface
# setup logger
werkzeug_logger = logging.getLogger('werkzeug')
logfile = util.solve_filepath(var.config.get('webinterface', 'web_logfile'))
if logfile:
handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB
else:
handler = logging.StreamHandler()
werkzeug_logger.addHandler(handler)
interface.init_proxy()
interface.web.env = 'development'
interface.web.secret_key = var.config.get('webinterface', 'flask_secret')
interface.web.run(port=port, host=addr)
if __name__ == '__main__':
supported_languages = util.get_supported_language()
parser = argparse.ArgumentParser(
description='Bot for playing music on Mumble')
# General arguments
parser.add_argument("--config", dest='config', type=str, default='configuration.ini',
help='Load configuration from this file. Default: configuration.ini')
parser.add_argument("--db", dest='db', type=str,
default=None, help='Settings database file')
parser.add_argument("--music-db", dest='music_db', type=str,
default=None, help='Music library database file')
parser.add_argument("--lang", dest='lang', type=str, default=None,
help='Preferred language. Support ' + ", ".join(supported_languages))
parser.add_argument("-q", "--quiet", dest="quiet",
action="store_true", help="Only Error logs")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", help="Show debug log")
# Mumble arguments
parser.add_argument("-s", "--server", dest="host",
type=str, help="Hostname of the Mumble server")
parser.add_argument("-u", "--user", dest="user",
type=str, help="Username for the bot")
parser.add_argument("-P", "--password", dest="password",
type=str, help="Server password, if required")
parser.add_argument("-T", "--tokens", dest="tokens",
type=str, help="Server tokens to enter a channel, if required (multiple entries separated with comma ','")
parser.add_argument("-p", "--port", dest="port",
type=int, help="Port for the Mumble server")
parser.add_argument("-c", "--channel", dest="channel",
type=str, help="Default channel for the bot")
parser.add_argument("-C", "--cert", dest="certificate",
type=str, default=None, help="Certificate file")
parser.add_argument("-b", "--bandwidth", dest="bandwidth",
type=int, help="Bandwidth used by the bot")
args = parser.parse_args()
# ======================
# Load Config
# ======================
config = configparser.ConfigParser(interpolation=None, allow_no_value=True)
var.config = config
parsed_configs = config.read([util.solve_filepath('configuration.default.ini'), util.solve_filepath(args.config)],
encoding='utf-8')
if len(parsed_configs) == 0:
logging.error('Could not read configuration from file \"{}\"'.format(args.config))
sys.exit()
# ======================
# Setup Logger
# ======================
bot_logger = logging.getLogger("bot")
bot_logger.setLevel(logging.INFO)
if args.verbose:
bot_logger.setLevel(logging.DEBUG)
bot_logger.debug("Starting in DEBUG loglevel")
elif args.quiet:
bot_logger.setLevel(logging.ERROR)
bot_logger.error("Starting in ERROR loglevel")
logfile = util.solve_filepath(var.config.get('bot', 'logfile').strip())
handler = None
if logfile:
print(f"Redirecting stdout and stderr to log file: {logfile}")
handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB
sys.stdout = util.LoggerIOWrapper(bot_logger, logging.INFO, fallback_io_buffer=sys.stdout.buffer)
sys.stderr = util.LoggerIOWrapper(bot_logger, logging.INFO, fallback_io_buffer=sys.stderr.buffer)
else:
handler = logging.StreamHandler()
util.set_logging_formatter(handler, bot_logger.level)
bot_logger.addHandler(handler)
logging.getLogger("root").addHandler(handler)
var.bot_logger = bot_logger
# ======================
# Load Database
# ======================
if args.user:
username = args.user
else:
username = var.config.get("bot", "username")
sanitized_username = "".join([x if x.isalnum() else "_" for x in username])
var.settings_db_path = args.db if args.db is not None else util.solve_filepath(
config.get("bot", "database_path", fallback=f"settings-{sanitized_username}.db"))
var.music_db_path = args.music_db if args.music_db is not None else util.solve_filepath(
config.get("bot", "music_database_path", fallback="music.db"))
var.db = SettingsDatabase(var.settings_db_path)
if var.config.get("bot", "save_music_library", fallback=True):
var.music_db = MusicDatabase(var.music_db_path)
else:
var.music_db = MusicDatabase(":memory:")
DatabaseMigration(var.db, var.music_db).migrate()
var.music_folder = util.solve_filepath(var.config.get('bot', 'music_folder'))
if not var.music_folder.endswith(os.sep):
# The file searching logic assumes that the music folder ends in a /
var.music_folder = var.music_folder + os.sep
var.tmp_folder = util.solve_filepath(var.config.get('bot', 'tmp_folder'))
# ======================
# Translation
# ======================
lang = ""
if args.lang:
lang = args.lang
else:
lang = var.config.get('bot', 'language', fallback='en_US')
if lang not in supported_languages:
raise KeyError(f"Unsupported language {lang}")
var.language = lang
constants.load_lang(lang)
# ======================
# Prepare Cache
# ======================
var.cache = MusicCache(var.music_db)
if var.config.getboolean("bot", "refresh_cache_on_startup", fallback=True):
var.cache.build_dir_cache()
# ======================
# Load playback mode
# ======================
playback_mode = None
if var.db.has_option("playlist", "playback_mode"):
playback_mode = var.db.get('playlist', 'playback_mode')
else:
playback_mode = var.config.get('bot', 'playback_mode', fallback="one-shot")
if playback_mode in ["one-shot", "repeat", "random", "autoplay"]:
var.playlist = media.playlist.get_playlist(playback_mode)
else:
raise KeyError(f"Unknown playback mode '{playback_mode}'")
# ======================
# Create bot instance
# ======================
var.bot = MumbleBot(args)
command.register_all_commands(var.bot)
# load playlist
if var.config.getboolean('bot', 'save_playlist', fallback=True):
var.bot_logger.info("bot: load playlist from previous session")
var.playlist.load()
# ============================
# Start the web interface
# ============================
if var.config.getboolean("webinterface", "enabled"):
wi_addr = var.config.get("webinterface", "listening_addr")
wi_port = var.config.getint("webinterface", "listening_port")
tt = threading.Thread(
target=start_web_interface, name="WebThread", args=(wi_addr, wi_port))
tt.daemon = True
bot_logger.info('Starting web interface on {}:{}'.format(wi_addr, wi_port))
tt.start()
# Start the main loop.
var.bot.loop()
|
the-stack_0_9040 | import os
import importlib.util
from setuptools import setup
# Boilerplate to load commonalities
spec = importlib.util.spec_from_file_location(
"setup_common", os.path.join(os.path.dirname(__file__), "setup_common.py")
)
common = importlib.util.module_from_spec(spec)
spec.loader.exec_module(common)
common.KWARGS["install_requires"] += [
"aiohttp>=3.5.4",
"bandit>=1.6.2",
"safety>=1.8.5",
]
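# Register the shouldi console script and expose each operation as a dffml.operation entry point.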
common.KWARGS["entry_points"] = {
"console_scripts": ["shouldi = shouldi.cli:ShouldI.main"],
"dffml.operation": [
"run_bandit = shouldi.python.bandit:run_bandit",
"safety_check = shouldi.python.safety:safety_check",
"pypi_package_json = shouldi.python.pypi:pypi_package_json",
"pypi_package_contents = shouldi.python.pypi:pypi_package_contents",
"cleanup_pypi_package = shouldi.python.pypi:cleanup_pypi_package",
],
}
# Hidden down here, away from the operations tutorial
common.KWARGS["install_requires"] += [
"PyYAML>=5.1.2",
]
common.KWARGS["entry_points"].update(
{
"shouldi.project.bom.db": [
"yaml = shouldi.project.bom.db.yaml:YAMLDB",
"pypi = shouldi.project.bom.db.pypi:PyPiDB",
]
}
)
setup(**common.KWARGS)
|
the-stack_0_9041 | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The YEP developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# -*- coding: utf-8 -*-
from time import sleep
from test_framework.test_framework import YepTestFramework
from test_framework.util import set_node_times, assert_equal
class YEP_RPCSporkTest(YepTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[]] * self.num_nodes
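# Node 0 is given the spork private key so it is authorized to update spork values.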
self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')
def setup_chain(self):
# Start with clean chain
self._initialize_chain_clean()
self.enable_mocktime()
def log_title(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
description = "Performs tests on the Spork RPC"
self.log.info("\n\n%s\n%s\n%s\n", title, underline, description)
def run_test(self):
self.log_title()
set_node_times(self.nodes, self.mocktime)
sporkName = "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT"
# 0 - check SPORK 8 status from node 1 (must be inactive)
assert_equal(False, self.is_spork_active(1, sporkName))
# 1 - activate SPORK 8 with nodes[0]
assert_equal("success", self.activate_spork(0, sporkName))
sleep(1)
# check SPORK 8 status from nodes[1] (must be active)
assert_equal(True, self.is_spork_active(1, sporkName))
# 2 - Adjust time to 1 sec in the future and deactivate SPORK 8 with node[0]
self.mocktime += 1
set_node_times(self.nodes, self.mocktime)
assert_equal("success", self.deactivate_spork(0, sporkName))
sleep(1)
# check SPORK 8 value from nodes[1] (must be inactive again)
assert_equal(False, self.is_spork_active(1, sporkName))
# 3 - Adjust time to 1 sec in the future and set new value (mocktime) for SPORK 8 with node[0]
self.mocktime += 1
set_node_times(self.nodes, self.mocktime)
assert_equal("success", self.set_spork(0, sporkName, self.mocktime))
sleep(1)
# check SPORK 8 value from nodes[1] (must be equal to mocktime)
assert_equal(self.mocktime, self.get_spork(1, sporkName))
# 4 - Stop nodes and check value again after restart
self.log.info("Stopping nodes...")
self.stop_nodes()
self.log.info("Restarting node 1...")
self.start_node(1, [])
assert_equal(self.mocktime, self.get_spork(1, sporkName))
self.log.info("%s: TEST PASSED" % self.__class__.__name__)
if __name__ == '__main__':
YEP_RPCSporkTest().main()
|
the-stack_0_9042 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# - "This product uses parts of foxBMS®"
# - "This product includes parts of foxBMS®"
# - "This product is derived from foxBMS®"
# pylint: disable=invalid-name
"""Testing 'python-dateutil' package"""
import logging
import argparse
# packages needed for tests
import datetime
# package to test
import dateutil
import dateutil.relativedelta
def main():
"""Testing 'python-dateutil' package"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbosity",
dest="verbosity",
action="count",
default=0,
help="set verbosity level",
)
args = parser.parse_args()
if args.verbosity == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbosity > 1:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
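# Exercise python-dateutil by computing the date one month from now.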
logging.debug(
datetime.datetime.now() + dateutil.relativedelta.relativedelta(months=+1)
)
if __name__ == "__main__":
main()
|
the-stack_0_9043 | #!/usr/bin/env python3
"""
Main functions of the betting assistant
"""
import copy
import datetime
import requests
import socket
import sys
import traceback
import urllib
import urllib.error
import urllib.request
from itertools import combinations, permutations, product
from multiprocessing.pool import ThreadPool
from pprint import pprint
import numpy as np
import selenium
import selenium.common
import tabulate
import sportsbetting as sb
from sportsbetting import selenium_init
from sportsbetting.database_functions import (get_id_from_competition_name, get_competition_by_id, import_teams_by_url,
import_teams_by_sport, import_teams_by_competition_id_thesportsdb)
from sportsbetting.parser_functions import parse
from sportsbetting.auxiliary_functions import (valid_odds, format_team_names, merge_dict_odds, afficher_mises_combine,
cotes_combine_all_sites, defined_bets, binomial, best_match_base,
filter_dict_dates, get_nb_outcomes, best_combine_reduit,
filter_dict_minimum_odd, cotes_combine_reduit_all_sites, copy_to_clipboard)
from sportsbetting.basic_functions import (gain2, mises2, gain, mises, mises_freebet, cotes_freebet,
gain_pari_rembourse_si_perdant, gain_freebet2, mises_freebet2,
mises_pari_rembourse_si_perdant, gain_promo_gain_cote, mises_promo_gain_cote,
gain_gains_nets_boostes, mises_gains_nets_boostes, gain3, mises3, cotes_combine_optimise,
gain_defi_rembourse_ou_gagnant, mises_defi_rembourse_ou_gagnant)
from sportsbetting.lambda_functions import get_best_odds, get_profit
def parse_competition(competition, sport, *sites):
"""
Returns the odds of a given competition for one or more betting sites.
If no site is chosen, the parsing is done on every bookmaker recognised
by the ARJEL
"""
if sb.ABORT:
raise sb.AbortException
try:
_id = get_id_from_competition_name(competition, sport)
except TypeError:
print("Competition inconnue")
return
print(competition, *sites)
if not sites:
sites = sb.BOOKMAKERS
res_parsing = {}
for site in sites:
if len(sites) > 1:
print(site)
database_site = site if site not in ["barrierebet", "vbet"] else "pasinobet"
url = get_competition_by_id(_id, database_site)
try:
if url:
res_parsing[site] = parse(site, url)
else:
print("Pas d'url en base pour {} sur {}".format(competition, site))
except urllib.error.URLError:
print("{} non accessible sur {} (délai écoulé)".format(competition, site))
except KeyboardInterrupt:
res_parsing[site] = {}
except selenium.common.exceptions.TimeoutException:
print("Element non trouvé par selenium ({} sur {})".format(competition, site))
except sb.UnavailableCompetitionException:
print("{} non disponible sur {}".format(competition, site))
except socket.timeout:
print("{} non accessible sur {} (timeout socket)".format(competition, site))
except selenium.common.exceptions.StaleElementReferenceException:
print("StaleElement non trouvé par selenium ({} sur {})".format(competition, site))
except selenium.common.exceptions.WebDriverException:
print("Connection closed ({} sur {})".format(competition, site))
except requests.exceptions.SSLError:
print("Max retries ({} sur {})".format(competition, site))
res = format_team_names(res_parsing, sport, competition)
out = valid_odds(merge_dict_odds(res), sport)
return out
def parse_competitions_site(competitions, sport, site):
list_odds = []
if len(competitions) > 40 and site == "winamax": # to avoid being blocked by winamax
competitions = competitions[:40]
sb.SITE_PROGRESS[site] = 0
try:
for competition in competitions:
list_odds.append(parse_competition(competition, sport, site))
sb.PROGRESS += 100 / (len(competitions) * sb.SUB_PROGRESS_LIMIT)
sb.SITE_PROGRESS[site] += 100 / len(competitions)
except sb.UnavailableSiteException:
print("{} non accessible".format(site))
sb.SITE_PROGRESS[site] = 100
except sb.AbortException:
print("Interruption", site)
return merge_dict_odds(list_odds)
def parse_competitions(competitions, sport, *sites):
sites_order = ['betfair', 'joa', 'betway', 'pmu', 'barrierebet', 'pasinobet', 'vbet', 'france_pari', 'netbet', 'zebet',
'winamax', 'pinnacle', 'betclic', 'pokerstars', 'unibet', 'unibet_boost', 'bwin', 'parionssport']
if not sites:
sites = sites_order
sb.EXPECTED_TIME = 28 + len(competitions) * 12.5
sites = [site for site in sites_order if site in sites]
sb.PROGRESS = 0
selenium_sites = sb.SELENIUM_SITES.intersection(sites)
for site in selenium_sites:
selenium_init.start_driver(site)
sb.PROGRESS += 100/len(selenium_sites)
sb.PROGRESS = 0
sb.SUB_PROGRESS_LIMIT = len(sites)
if sb.DB_MANAGEMENT:
for competition in competitions:
if competition == sport or "Tout le" in competition:
import_teams_by_sport(sport)
else:
id_competition = get_id_from_competition_name(competition, sport)
if id_competition < 0:
import_teams_by_competition_id_thesportsdb(id_competition)
else:
import_teams_by_url("http://www.comparateur-de-cotes.fr/comparateur/" + sport
+ "/a-ed" + str(id_competition))
list_odds = []
try:
sb.IS_PARSING = True
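# Parse every competition on each bookmaker, three sites in parallel, then merge the per-site odds.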
list_odds = ThreadPool(3).map(lambda x: parse_competitions_site(competitions, sport, x), sites)
sb.ODDS[sport] = merge_dict_odds(list_odds)
except Exception:
print(traceback.format_exc(), file=sys.stderr)
sb.IS_PARSING = False
sb.ABORT = False
sb.SEEN_SUREBET[sport] = False
print("Dernière récupération des cotes à", datetime.datetime.today().strftime("%H:%M"))
def odds_match(match, sport="football"):
"""
Retourne les cotes d'un match donné sur tous les sites de l'ARJEL
"""
odds_match = sb.ODDS[sport].get(match)
if odds_match:
return match, copy.deepcopy(odds_match)
return None, None
def best_stakes_match(match, site, bet, minimum_odd, sport="football"):
"""
For a given match, bookmaker, amount to stake on that bookmaker and
minimum odd, returns the best combination of bets to place
"""
best_match, all_odds = odds_match(match, sport)
if not all_odds:
print("No match found")
return
print(best_match)
pprint(all_odds)
odds_site = all_odds['odds'][site]
best_odds = copy.deepcopy(odds_site)
best_profit = -float("inf")
n = len(all_odds['odds'][site])
best_sites = [site for _ in range(n)]
best_i = 0
best_overall_odds = None
bets = None
sites = None
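# For every outcome, find the best odd available across all bookmakers (unibet_boost excluded).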
for odds in all_odds['odds'].items():
if odds[0] == "unibet_boost":
continue
for i in range(n):
if odds[1][i] > best_odds[i] and (odds[1][i] >= 1.1 or odds[0] == "pmu"):
best_odds[i] = odds[1][i]
best_sites[i] = odds[0]
for i in range(n):
if odds_site[i] >= minimum_odd:
odds_to_check = (best_odds[:i] + [odds_site[i]] + best_odds[i + 1:])
profit = gain2(odds_to_check, i, bet)
if profit > best_profit:
best_profit = profit
best_overall_odds = odds_to_check
sites = best_sites[:i] + [site] + best_sites[i + 1:]
bets = mises2(odds_to_check, bet, i)
best_i = i
if best_overall_odds:
mises2(best_overall_odds, bet, best_i, True)
afficher_mises_combine(best_match.split(" / "), [sites], [bets], all_odds["odds"], sport, profit=best_profit)
else:
print("No match found")
def best_match_under_conditions(site, minimum_odd, bet, sport="football", date_max=None,
time_max=None, date_min=None, time_min=None, one_site=False):
"""
Returns the best match to bet on when a given amount must be staked at a
given odd. This amount can be placed on a single outcome (one_site=False)
or spread over several outcomes of the same match (one_site=True), in
which case every odd of the match must meet the minimum-odd criterion.
"""
odds_function = get_best_odds(one_site)
profit_function = get_profit(bet, one_site)
criteria = lambda odds_to_check, i: ((not one_site and odds_to_check[i] >= minimum_odd)
or (one_site and all(odd >= minimum_odd
for odd in odds_to_check)))
display_function = lambda best_overall_odds, best_rank: (mises2(best_overall_odds, bet,
best_rank, True) if not one_site
else mises(best_overall_odds, bet,
True))
result_function = lambda best_overall_odds, best_rank: (mises2(best_overall_odds, bet,
best_rank, False) if not one_site
else mises(best_overall_odds, bet,
False))
best_match_base(odds_function, profit_function, criteria, display_function,
result_function, site, sport, date_max, time_max, date_min,
time_min, one_site=one_site)
def best_match_under_conditions2(site, minimum_odd, stake, sport="football", date_max=None,
time_max=None, date_min=None, time_min=None, miles=False, rate_eur_miles=0, multiplicator=1):
all_odds = filter_dict_dates(sb.ODDS[sport], date_max, time_max, date_min, time_min)
best_profit = -float("inf")
best_match = None
sites = None
nb_matches = len(all_odds)
n = get_nb_outcomes(sport)
for match in all_odds:
sb.PROGRESS += 100 / nb_matches
if site in all_odds[match]['odds']:
odds_site = all_odds[match]['odds'][site]
best_odds = copy.deepcopy(odds_site)
best_sites = [site for _ in range(n)]
for odds in all_odds[match]['odds'].items():
if odds[0] == "unibet_boost":
continue
for i in range(n):
if odds[1][i] > best_odds[i] and (odds[1][i] >= 1.1 or odds[0] == "pmu"):
best_odds[i] = odds[1][i]
best_sites[i] = odds[0]
for odd_i, site_i in zip(best_odds, best_sites):
if odd_i < 1.1 and site_i != "pmu":
break
else:
profit = gain3(odds_site, best_odds, stake, minimum_odd, miles, rate_eur_miles, multiplicator)
if profit > best_profit:
best_profit = profit
best_odds_site = copy.deepcopy(odds_site)
best_best_odds = copy.deepcopy(best_odds)
best_match = match
stakes, best_indices = mises3(odds_site, best_odds, stake, minimum_odd, False, miles, rate_eur_miles, multiplicator)
sites = [site if i in best_indices else best_sites[i] for i in range(n)]
if best_match:
print(best_match)
pprint(all_odds[best_match])
mises3(best_odds_site, best_best_odds, stake, minimum_odd, True, miles, rate_eur_miles, multiplicator)
afficher_mises_combine([best_match], [sites], [stakes],
all_odds[best_match]["odds"], sport, profit=best_profit)
else:
print("No match found")
def best_match_pari_gagnant(site, minimum_odd, bet, sport="football",
date_max=None, time_max=None, date_min=None,
time_min=None, nb_matches_combine=1):
"""
Returns the best match to bet on when a bet must be won at a given odd
on a given site.
"""
stakes = []
n = get_nb_outcomes(sport)
for _ in range(n**nb_matches_combine):
stakes.append([bet, site, minimum_odd])
best_match_stakes_to_bet(stakes, nb_matches_combine, sport, date_max, time_max, True)
def best_match_freebet(site, freebet, sport="football", live=False, date_max=None, time_max=None,
date_min=None, time_min=None):
"""
Returns the match that generates the best profit for a single freebet,
covered with real money.
"""
fact_live = 1 - 0.2 * live
odds_function = lambda best_odds, odds_site, i: (best_odds[:i] + [odds_site[i] * fact_live - 1]
+ best_odds[i + 1:])
profit_function = lambda odds_to_check, i: gain2(odds_to_check, i) + 1
criteria = lambda odds_to_check, i: True
display_function = lambda x, i: mises_freebet(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i, True)
result_function = lambda x, i: mises_freebet(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i, False)
best_match_base(odds_function, profit_function, criteria, display_function,
result_function, site, sport, date_max, time_max, date_min,
time_min, freebet=True)
def best_match_freebet2(site, freebet, sport="football", live=False, date_max=None, time_max=None,
date_min=None, time_min=None):
"""
Returns the match that generates the best profit for a single freebet,
covered with real money.
"""
fact_live = 1 - 0.2 * live
odds_function = lambda best_odds, odds_site, i: (best_odds[:i] + [odds_site[i] * fact_live - 1]
+ best_odds[i + 1:])
profit_function = lambda x, i: gain_freebet2(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i)
criteria = lambda odds_to_check, i: True
display_function = lambda x, i: mises_freebet2(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i, True)
result_function = lambda x, i: mises_freebet2(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i, False)
best_match_base(odds_function, profit_function, criteria, display_function,
result_function, site, sport, date_max, time_max, date_min,
time_min, freebet=True)
def best_match_cashback(site, minimum_odd, bet, sport="football", freebet=True,
combi_max=0, combi_odd=1, rate_cashback=1, date_max=None,
time_max=None, date_min=None, time_min=None):
"""
Returns the match that generates the best profit for a promotion of the
"bet refunded if it loses" type. The combi-max bonus, the odds of the
selections assumed to be safe (for promotions on accumulators) and the
cashback rate can also be configured
"""
odds_function = lambda best_odds, odds_site, i: (best_odds[:i]
+ [combi_odd * odds_site[i]
* (1 + combi_max) - combi_max]
+ best_odds[i + 1:])
profit_function = lambda odds_to_check, i: gain_pari_rembourse_si_perdant(odds_to_check, bet, i,
freebet,
rate_cashback)
criteria = lambda odds_to_check, i: (odds_to_check[i] + combi_max) / (1 + combi_max) >= minimum_odd
display_function = lambda x, i: mises_pari_rembourse_si_perdant(x, bet, i, freebet,
rate_cashback, True)
result_function = lambda x, i: mises_pari_rembourse_si_perdant(x, bet, i, freebet,
rate_cashback, False)
best_match_base(odds_function, profit_function, criteria, display_function,
result_function, site, sport, date_max, time_max, date_min,
time_min)
def best_matches_combine(site, minimum_odd, bet, sport="football", nb_matches=2, one_site=False,
date_max=None, time_max=None, date_min=None, time_min=None,
minimum_odd_selection=1.01):
"""
Returns the best matches to bet on when a given amount must be staked
at a given odd on an accumulator
"""
all_odds = filter_dict_dates(sb.ODDS[sport], date_max, time_max, date_min, time_min)
all_odds = filter_dict_minimum_odd(all_odds, minimum_odd_selection, site)
sb.ALL_ODDS_COMBINE = {}
nb_combine = binomial(len(all_odds), nb_matches)
sb.PROGRESS = 0
def compute_all_odds_combine(nb_combine, combine):
sb.PROGRESS += 100/nb_combine
try:
sb.ALL_ODDS_COMBINE[" / ".join([match[0] for match in combine])] = cotes_combine_all_sites(
*[match[1] for match in combine]
)
except KeyError:
pass
ThreadPool(4).map(lambda x: compute_all_odds_combine(nb_combine, x),
combinations(all_odds.items(), nb_matches))
sb.PROGRESS = 0
odds_function = get_best_odds(one_site)
profit_function = get_profit(bet, one_site)
criteria = lambda odds_to_check, i: ((not one_site and odds_to_check[i] >= minimum_odd)
or (one_site and all(odd >= minimum_odd for
odd in odds_to_check)))
display_function = lambda best_overall_odds, best_rank: (mises2(best_overall_odds, bet,
best_rank, True) if not one_site
else mises(best_overall_odds, bet,
True))
result_function = lambda best_overall_odds, best_rank: (mises2(best_overall_odds, bet,
best_rank, False) if not one_site
else mises(best_overall_odds, bet,
False))
best_match_base(odds_function, profit_function, criteria, display_function,
result_function, site, sport, date_max, time_max, date_min,
time_min, True, nb_matches, one_site=one_site, combine_opt=True)
def best_matches_combine_cashback_une_selection_perdante(site, cote_minimale_selection, combi_max=0,
nb_matches=2, date_max=None, time_max=None,
date_min=None, time_min=None):
"""
Computes the best combination of matches and the stakes to place for a promotion of the type
"accumulator refunded if exactly one selection loses, with no limit on the number of refunded bets"
"""
sport = "football"
bet = 10000
all_odds = sb.ODDS[sport]
sb.ALL_ODDS_COMBINE = {}
for combine in combinations(all_odds.items(), nb_matches):
try:
if all([odd >= cote_minimale_selection for odds in list(all_odds[match[0]]["odds"][site]
for match in combine)
for odd in odds]):
sb.ALL_ODDS_COMBINE[" / ".join([match[0] for match in combine])] = cotes_combine_all_sites(
*[match[1] for match in combine]
)
except KeyError:
pass
odds_function = lambda best_odds, odds_site, i: list(
map(lambda x: x * (1 + combi_max) - combi_max,
odds_site))
profit_function = lambda odds_to_check, i: gain(odds_to_check, bet) - bet
criteria = lambda odds_to_check, i: (odds_to_check[i] + combi_max) / (1 + combi_max) >= 1.1
display_function = lambda x, i: mises(x, bet, True)
return_function = lambda x, i: mises(x, bet, False)
best_match_base(odds_function, profit_function, criteria, display_function,
return_function, site, sport, date_max, time_max, date_min,
time_min, True, nb_matches, one_site=True, recalcul=True)
def best_matches_combine_cashback(site, minimum_odd, bet, sport="football",
freebet=True, combi_max=0, rate_cashback=1,
nb_matches=2, date_max=None, time_max=None,
date_min=None, time_min=None):
"""
Computes the stake distribution when a single accumulator bet is refunded if it loses
"""
all_odds = sb.ODDS[sport]
sb.ALL_ODDS_COMBINE = {}
for combine in combinations(all_odds.items(), nb_matches):
sb.ALL_ODDS_COMBINE[" / ".join([match[0] for match in combine])] = cotes_combine_all_sites(
*[match[1] for match in combine]
)
odds_function = lambda best_odds, odds_site, i: (best_odds[:i]
+ [odds_site[i] * (1 + combi_max) - combi_max]
+ best_odds[i + 1:])
profit_function = lambda odds_to_check, i: gain_pari_rembourse_si_perdant(odds_to_check, bet, i,
freebet,
rate_cashback)
criteria = lambda odds_to_check, i: (odds_to_check[i] + combi_max) / (1 + combi_max) >= minimum_odd
display_function = lambda x, i: mises_pari_rembourse_si_perdant(x, bet, i, freebet,
rate_cashback, True)
return_function = lambda x, i: mises_pari_rembourse_si_perdant(x, bet, i, freebet,
rate_cashback, False)
best_match_base(odds_function, profit_function, criteria, display_function,
return_function, site, sport, date_max, time_max, date_min,
time_min, True, nb_matches, combine_opt=True, taux_cashback=rate_cashback, cashback_freebet=freebet)
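# stakes is a list of [amount, bookmaker, minimum_odd] constraints; the function looks for the
# combination of matches and outcomes that maximizes profit once those stakes are placed and covered.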
def best_match_stakes_to_bet(stakes, nb_matches=1, sport="football", date_max=None, time_max=None, identical_stakes=False):
second_sites = {stake[1] for stake in stakes}
main_sites = sb.BOOKMAKERS
all_odds = filter_dict_dates(sb.ODDS[sport], date_max, time_max)
best_profit = -sum(stake[0] for stake in stakes)
n = get_nb_outcomes(sport) ** nb_matches
nb_stakes = len(stakes)
all_odds_combine = {}
combis = list(combinations(all_odds.items(), nb_matches))
nb_combis = len(combis)
best_combine = None
best_bets = None
main_site_odds = []
main_sites_distribution = []
sb.PROGRESS = 0
for i, combine in enumerate(combis):
sb.PROGRESS += 100 / nb_combis
match_combine = " / ".join([match[0] for match in combine])
all_odds_combine[match_combine] = cotes_combine_all_sites(*[match[1] for match in combine])
for main0 in main_sites:
try:
main_sites_distribution = [main0 for _ in range(n)]
main_site_odds = copy.deepcopy(all_odds_combine[match_combine]["odds"][main0])
break
except KeyError:
pass
for main in main_sites:
try:
potential_odds = all_odds_combine[match_combine]["odds"][main]
for j, odd in enumerate(potential_odds):
if odd > main_site_odds[j]:
main_site_odds[j] = odd
main_sites_distribution[j] = main
except KeyError:
pass
second_odds = {second_site: all_odds_combine[match_combine]["odds"][second_site]
for second_site in second_sites if second_site in all_odds_combine[match_combine]["odds"]}
if not second_odds:
continue
dict_combine_odds = copy.deepcopy(second_odds)
for perm in permutations(range(n), nb_stakes):
valid_perm = True
defined_second_sites = [[perm[j], stake[0], stake[1]]
for j, stake in enumerate(stakes)]
for j, stake in enumerate(stakes):
if dict_combine_odds[defined_second_sites[j][2]][defined_second_sites[j][0]] < stake[2]:
valid_perm = False
break
if not valid_perm:
if identical_stakes:
break
continue
defined_bets_temp = defined_bets(main_site_odds, dict_combine_odds,
main_sites_distribution,
defined_second_sites)
profit = defined_bets_temp[0] - np.sum(defined_bets_temp[1])
if profit > best_profit:
best_profit = profit
best_combine = combine
best_bets = defined_bets_temp
if identical_stakes:
break
if best_combine:
best_match_combine = " / ".join([match[0] for match in best_combine])
odds_best_match = copy.deepcopy(all_odds_combine[best_match_combine])
all_sites = main_sites + list(second_sites)
for site in all_odds_combine[best_match_combine]["odds"]:
if site not in all_sites:
del odds_best_match["odds"][site]
print(best_match_combine)
pprint(odds_best_match, compact=1)
print("Plus-value =", round(best_profit, 2))
print("Gain référence =", round(best_bets[0], 2))
print("Somme des mises =", round(np.sum(best_bets[1]), 2))
afficher_mises_combine([x[0] for x in best_combine], best_bets[2], best_bets[1],
all_odds_combine[best_match_combine]["odds"], sport, profit=best_profit)
else:
print("No match found")
def best_matches_freebet(main_sites, freebets, sport, *matches):
"""
Compute of the best way to bet freebets following the model
[[bet, bookmaker], ...]
:param main_sites:
:type freebets: List[List[List[str] or str]]
"""
second_sites = {freebet[1] for freebet in freebets}
if not second_sites:
print("Veuillez sélectionner des freebets secondaires")
return
if matches:
new_odds = {}
for match in matches:
match_name, odds = odds_match(match)
new_odds[match_name] = odds
else:
new_odds = sb.ODDS[sport]
all_odds = {}
for match in new_odds:
if (not (any([site not in new_odds[match]["odds"].keys() for site in main_sites])
or any([site not in new_odds[match]["odds"].keys() for site in second_sites]))):
if new_odds[match]["odds"]:
all_odds[match] = new_odds[match]
best_rate = 0
nb_matches = 2
n = 3 ** nb_matches
nb_freebets = len(freebets)
all_odds_combine = {}
combis = list(combinations(all_odds.items(), nb_matches))
best_combine = None
real_odds = {}
for combine in combis:
match_combine = " / ".join([match[0] for match in combine])
all_odds_combine[match_combine] = cotes_combine_all_sites(*[match[1] for match in combine],
freebet=True)
real_odds[match_combine] = cotes_combine_all_sites(*[match[1] for match in combine])
main_sites_distribution = [main_sites[0] for _ in range(n)]
main_site_odds = copy.deepcopy(all_odds_combine[match_combine]["odds"][main_sites[0]])
for main in main_sites[1:]:
potential_odds = all_odds_combine[match_combine]["odds"][main]
for j, odd in enumerate(potential_odds):
if odd > main_site_odds[j]:
main_site_odds[j] = odd
main_sites_distribution[j] = main
second_odds = {second_site: all_odds_combine[match_combine]["odds"][second_site]
for second_site in second_sites}
dict_combine_odds = copy.deepcopy(second_odds)
for perm in permutations(range(n), nb_freebets):
defined_second_sites = [[perm[i], freebet[0], freebet[1]]
for i, freebet in enumerate(freebets)]
defined_bets_temp = defined_bets(main_site_odds, dict_combine_odds,
main_sites_distribution,
defined_second_sites)
if defined_bets_temp[0] / np.sum(defined_bets_temp[1]) > best_rate:
best_rate = defined_bets_temp[0] / np.sum(defined_bets_temp[1])
best_combine = combine
best_bets = defined_bets_temp
if best_combine:
best_match_combine = " / ".join([match[0] for match in best_combine])
odds_best_match = copy.deepcopy(all_odds_combine[best_match_combine])
all_sites = main_sites + list(second_sites)
for site in all_odds_combine[best_match_combine]["odds"]:
if site not in all_sites:
del odds_best_match["odds"][site]
print(best_match_combine)
pprint(odds_best_match, compact=1)
print("Taux =", best_rate)
print("Gain référence =", best_bets[0])
print("Somme des mises =", np.sum(best_bets[1]))
afficher_mises_combine([x[0] for x in best_combine], best_bets[2], best_bets[1],
real_odds[best_match_combine]["odds"], "football",
uniquement_freebet=True, profit=best_rate)
def best_matches_freebet_one_site(site, freebet, sport="football", nb_matches=2,
minimum_odd=1.1, date_max=None, time_max=None,
date_min=None, time_min=None):
"""
    Computes the distribution of free bets on a single bookmaker site.
"""
all_odds = sb.ODDS[sport]
sb.ALL_ODDS_COMBINE = {}
for combine in combinations(all_odds.items(), nb_matches):
sb.ALL_ODDS_COMBINE[" / ".join([match[0] for match in combine])] = cotes_combine_all_sites(
*[match[1] for match in combine]
)
odds_function = lambda best_odds, odds_site, i: cotes_freebet(odds_site)
profit_function = lambda odds_to_check, i: gain(odds_to_check, freebet) - freebet
criteria = lambda odds_to_check, i: all(odd >= minimum_odd for odd in odds_to_check)
display_function = lambda best_overall_odds, best_rank: mises(best_overall_odds, freebet, True, True)
result_function = lambda best_overall_odds, best_rank: mises(best_overall_odds, freebet, False)
best_match_base(odds_function, profit_function, criteria, display_function,
result_function, site, sport, date_max, time_max, date_min,
time_min, True, nb_matches, True, one_site=True)
def best_match_gain_cote(site, bet, sport="football", date_max=None, time_max=None, date_min=None,
time_min=None):
"""
    Returns the match to bet on to optimise a promotion of the "winning odds credited" type ("gain de la cote gagnée").
"""
odds_function = get_best_odds(False)
profit_function = lambda odds_to_check, i: gain_promo_gain_cote(odds_to_check, bet, i)
criteria = lambda odds_to_check, i: True
display_function = lambda best_overall_odds, best_rank: mises_promo_gain_cote(best_overall_odds,
bet, best_rank,
True)
result_function = lambda best_overall_odds, best_rank: mises_promo_gain_cote(best_overall_odds,
bet, best_rank,
False)
best_match_base(odds_function, profit_function, criteria, display_function, result_function,
site, sport, date_max, time_max, date_min, time_min)
def best_match_cotes_boostees(site, gain_max, sport="football", date_max=None, time_max=None,
date_min=None, time_min=None):
odds_function = get_best_odds(True)
profit_function = lambda odds_to_check, i: gain_gains_nets_boostes(odds_to_check, gain_max,
False)
criteria = lambda odds_to_check, i: odds_to_check[i] >= 1.5
display_function = lambda odds_to_check, i: mises_gains_nets_boostes(odds_to_check, gain_max,
False, True)
result_function = lambda odds_to_check, i: mises_gains_nets_boostes(odds_to_check, gain_max,
False, False)
best_match_base(odds_function, profit_function, criteria, display_function, result_function,
site, sport, date_max, time_max, date_min, time_min)
def best_combine_booste(matches, combinaison_boostee, site_combinaison, mise, sport, cote_boostee):
best_combine_reduit(matches, combinaison_boostee, site_combinaison, mise, sport, cote_boostee)
def best_combine_booste_progressif(matches, combinaison_boostee, site_combinaison, mise, sport, cote_boostee):
outcomes = []
odds = []
bookmakers = []
stakes = []
simulated_odds = []
outcome_boost = []
matches.sort(key=lambda x: sb.ODDS[sport][x]["date"], reverse=True)
time_intervals = [sb.ODDS[sport][x]["date"] - sb.ODDS[sport][y]["date"] for x, y in zip(matches[:-1], matches[1:])]
print("Répartition des mises (les totaux affichés prennent en compte les éventuels freebets):")
if time_intervals and min(time_intervals) < datetime.timedelta(hours=2):
print("Methode impossible (pas assez de temps entre 2 matches)")
return
reference_gain = round(mise * cote_boostee, 2)
sum_stakes = 0
for j, match in enumerate(matches):
sum_stakes_match = 0
teams = match.split(" - ")
if get_nb_outcomes(sport) == 3:
teams.insert(1, "Nul ({} - {})".format(*teams))
_, bookmakers_match, odds_match = trj_match(sb.ODDS[sport][match])
for i, team in enumerate(teams):
if combinaison_boostee[j] == i:
outcome_boost.append(team)
continue
outcomes.append(team)
odds.append(odds_match[i])
bookmakers.append(bookmakers_match[i])
stake = round((reference_gain - sum_stakes) / odds_match[i], 2)
stakes.append(stake)
sum_stakes_match += stake
simulated_odds.append(reference_gain / stake)
sum_stakes += sum_stakes_match
outcomes.append(" / ".join(outcome_boost))
odds.append(cote_boostee)
bookmakers.append(site_combinaison)
stakes.append(mise)
simulated_odds.append(cote_boostee)
totals = [round(stake * odd, 2) for (stake, odd) in zip(stakes, odds)]
table = {"Issue": reversed(outcomes), "Bookmaker": reversed(bookmakers), "Cote": reversed(odds), "Mise": reversed(stakes), "Total": reversed(totals), "TRJ":[round(100*gain(simulated_odds), 3), "Bénéfice", round(reference_gain-sum(stakes), 2)]}
text = tabulate.tabulate(table, headers='keys', tablefmt='fancy_grid')
print(text)
print("Ne couvrir un match qu'une fois le résultat du match précédent connu")
if sys.platform.startswith("win"):
copy_to_clipboard(text)
def trj_match(match_odds):
odds = []
bookmakers = []
for bookmaker in match_odds["odds"]:
if bookmaker == "unibet_boost":
continue
tmp_odds = match_odds["odds"][bookmaker]
tmp_bookmakers = [bookmaker for _ in tmp_odds]
if not odds:
odds = copy.deepcopy(tmp_odds)
bookmakers = copy.deepcopy(tmp_bookmakers)
continue
for i, tmp_odd in enumerate(tmp_odds):
if not odds[i]:
odds[i] = 1.01
if not tmp_odd:
continue
try:
if tmp_odd > odds[i]:
odds[i] = tmp_odd
bookmakers[i] = bookmaker
except TypeError:
print(match_odds, tmp_odd, odds[i])
if not odds or 1.01 in odds:
return 0, bookmakers, odds
return gain(odds), bookmakers, odds
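# Worked example of the payout rate returned above, assuming gain(odds) computes
# 1 / sum(1 / odd) over the best available odds (the odds below are invented for
# illustration only). For best odds [2.1, 3.4, 3.9]:
#   1 / (1/2.1 + 1/3.4 + 1/3.9) ~= 0.974, i.e. a ~97.4% payout rate;
# a value >= 1 would indicate a surebet across bookmakers.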
def get_values(match_odds, rate):
odds = []
bookmakers = []
sums = []
for bookmaker in match_odds["odds"]:
if bookmaker == "unibet_boost":
continue
tmp_odds = match_odds["odds"][bookmaker]
tmp_bookmakers = [bookmaker for _ in tmp_odds]
if not odds:
odds = copy.deepcopy(tmp_odds)
sums = copy.deepcopy(tmp_odds)
bookmakers = copy.deepcopy(tmp_bookmakers)
continue
for i, tmp_odd in enumerate(tmp_odds):
sums[i] += tmp_odd
if tmp_odd > odds[i]:
odds[i] = tmp_odd
bookmakers[i] = bookmaker
values = []
best_rate = rate-1
n = len(match_odds["odds"])
i = 0
has_pinnacle = "pinnacle" in match_odds["odds"]
    for odd, odds_sum, bookmaker in zip(odds, sums, bookmakers):
        if odd < 1.1:
            return 0, []
        ref = odds_sum / n if not has_pinnacle else match_odds["odds"]["pinnacle"][i]
        if ref < 1.1:
            return 0, []
        rate_tmp = odd / ref - 1
if rate_tmp >= rate:
best_rate = max(best_rate, rate_tmp)
value = [odd, rate_tmp, bookmaker]
values.append(value)
i += 1
return best_rate, values
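# Small numeric sketch of the value test above (figures invented): with n = 4 bookmakers
# and a summed odd of 8.4, the reference is 8.4 / 4 = 2.1 (unless a "pinnacle" price is
# available, which is then used as the reference). A best odd of 2.4 gives
# rate_tmp = 2.4 / 2.1 - 1 ~= 0.143, so the outcome counts as a value when rate <= 0.143.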
def best_matches_combine2(site, minimum_odd, bet, sport, minimum_odd_selection, date_max=None, time_max=None,
date_min=None, time_min=None):
nb_matches = 2
all_odds = filter_dict_dates(sb.ODDS[sport], date_max, time_max, date_min, time_min)
all_odds = filter_dict_minimum_odd(all_odds, minimum_odd_selection, site)
odds_combine_opt = [{} for _ in range(6)]
nb_combine = binomial(len(all_odds), nb_matches)
sb.PROGRESS = 0
combis = cotes_combine_optimise([[1 for _ in range(3)] for i in range(nb_matches)])[1]
print(combis)
def compute_all_odds_combine_optimise(nb_combine, combine, odds_combine_opt):
sb.PROGRESS += 100/nb_combine
try:
cotes_combination = cotes_combine_reduit_all_sites(
*[match[1] for match in combine]
)
for i in range(6):
odds_combine_opt[i][" / ".join([match[0] for match in combine])] = cotes_combination[i]
# combis[i] = cotes_combination[i][1]
except KeyError:
pass
ThreadPool(4).map(lambda x: compute_all_odds_combine_optimise(nb_combine, x, odds_combine_opt),
combinations(all_odds.items(), nb_matches))
sb.PROGRESS = 0
odds_function = get_best_odds(False)
profit_function = get_profit(bet, False)
criteria = lambda odds_to_check, i: all(odd >= minimum_odd for odd in odds_to_check)
for i, combination in enumerate(combis):
sb.ALL_ODDS_COMBINE = odds_combine_opt[i]
# display_function = lambda odds_to_check, i: mises_combine_optimise(odds_to_check, combination, bet, minimum_odd, True)
# result_function = lambda odds_to_check, i: mises_combine_optimise(odds_to_check, combination, bet, minimum_odd, False)
display_function = lambda best_overall_odds, best_rank: mises2(best_overall_odds, bet, best_rank, True)
result_function = lambda best_overall_odds, best_rank: mises2(best_overall_odds, bet, best_rank, False)
best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min,
time_min, True, nb_matches, combine_opt=True)
def best_matches_combine3(site, minimum_odd, bet, sport="football",
date_max=None, time_max=None, date_min=None,
time_min=None, nb_matches_combine=2):
stakes = []
for _ in range(5):
stakes.append([bet, site, minimum_odd])
best_match_stakes_to_bet2(stakes, nb_matches_combine, sport, date_max, time_max, True)
def convert_indices_to_opponents(combination_indices, matches, sport):
combination_opponents = []
matches_outcomes = [match.split(" - ") for match in matches]
if get_nb_outcomes(sport) == 3:
for match in matches_outcomes:
match.insert(1, "Nul")
for indices in combination_indices:
opponents = []
for i, index in enumerate(indices):
if index == float("inf"):
continue
opponents.append(matches_outcomes[i][index])
combination_opponents.append(tuple(opponents))
return combination_opponents
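# Illustrative call (match names invented): for a 3-outcome sport the draw ("Nul") is
# inserted at index 1, so each index maps to [home, "Nul", away]:
#
#   convert_indices_to_opponents([(0, 2)], ["Lens - Paris", "Lyon - Nice"], "football")
#   # -> [("Lens", "Nice")]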
def best_match_stakes_to_bet2(stakes, nb_matches=2, sport="football", date_max=None, time_max=None, identical_stakes=False):
second_sites = {stake[1] for stake in stakes if stake[1] != "unibet_boost"}
main_sites = sb.BOOKMAKERS
all_odds = get_matches_with_best_trj(sport, 20)
all_odds = filter_dict_dates(all_odds, date_max, time_max)
best_profit = -sum(stake[0] for stake in stakes)
    n = 5  # get_nb_outcomes(sport) ** nb_matches
nb_stakes = len(stakes)
all_odds_combine = [{} for _ in range(6)]
combis = list(combinations(all_odds.items(), nb_matches))
nb_combis = len(combis)
best_combine = None
best_bets = None
main_site_odds = []
main_sites_distribution = []
sb.PROGRESS = 0
list_combinations = cotes_combine_optimise([[1 for _ in range(3)] for i in range(nb_matches)])[1]
for k in range(6):
for i, combine in enumerate(combis):
sb.PROGRESS += 100 / nb_combis
match_combine = " / ".join([match[0] for match in combine])
cotes_combination = cotes_combine_reduit_all_sites(
*[match[1] for match in combine]
)
# print(cotes_combination[k])
all_odds_combine[k][match_combine] = cotes_combination[k]
for main0 in main_sites:
try:
main_sites_distribution = [main0 for _ in range(n)]
main_site_odds = copy.deepcopy(all_odds_combine[k][match_combine]["odds"][main0])
break
except KeyError:
pass
            # Look for better odds on every other main bookmaker.
            for main in main_sites:
                if main == main0:
                    continue
try:
potential_odds = all_odds_combine[k][match_combine]["odds"][main]
for j, odd in enumerate(potential_odds):
if odd > main_site_odds[j]:
main_site_odds[j] = odd
main_sites_distribution[j] = main
except KeyError:
pass
second_odds = {second_site: all_odds_combine[k][match_combine]["odds"][second_site]
for second_site in second_sites if second_site in all_odds_combine[k][match_combine]["odds"]}
if not second_odds:
continue
dict_combine_odds = copy.deepcopy(second_odds)
for perm in permutations(range(n), nb_stakes):
valid_perm = True
defined_second_sites = [[perm[j], stake[0], stake[1]]
for j, stake in enumerate(stakes)]
for j, stake in enumerate(stakes):
if dict_combine_odds[defined_second_sites[j][2]][defined_second_sites[j][0]] < stake[2]:
valid_perm = False
break
if not valid_perm:
if identical_stakes:
break
continue
defined_bets_temp = defined_bets(main_site_odds, dict_combine_odds,
main_sites_distribution,
defined_second_sites)
profit = defined_bets_temp[0] - np.sum(defined_bets_temp[1])
if profit > best_profit:
best_profit = profit
best_combine = combine
best_bets = defined_bets_temp
best_combination = k
if identical_stakes:
break
if best_combine:
best_match_combine = " / ".join([match[0] for match in best_combine])
odds_best_match = copy.deepcopy(all_odds_combine[best_combination][best_match_combine])
all_sites = main_sites + list(second_sites)
for site in all_odds_combine[best_combination][best_match_combine]["odds"]:
if site not in all_sites:
del odds_best_match["odds"][site]
print(best_match_combine)
pprint(odds_best_match, compact=1)
print("Plus-value =", round(best_profit, 2))
print("Gain référence =", round(best_bets[0], 2))
print("Somme des mises =", round(np.sum(best_bets[1]), 2))
afficher_mises_combine([x[0] for x in best_combine], best_bets[2], best_bets[1],
all_odds_combine[best_combination][best_match_combine]["odds"], sport,
combinaisons=convert_indices_to_opponents(list_combinations[best_combination], [x[0] for x in best_combine], sport), profit=best_profit)
else:
print("No match found")
def best_matches_freebet2(site, freebet, sport, nb_matches=2):
# all_odds = sb.ODDS[sport]
all_odds = get_matches_with_best_trj(sport, 10, site)
best_profit = float("-inf")
combis = list(combinations(all_odds.items(), nb_matches))
if not combis:
print("No match found")
return
nb_combis = len(combis)
best_combine = None
best_bets = None
best_matches = []
best_choice = [0 for _ in range(nb_matches)]
best_odd = 1.01
choices = list(product(*[range(get_nb_outcomes(sport)) for _ in range(nb_matches)]))
for combi in combis:
if any([site not in x[1]["odds"] for x in combi]):
continue
matches = [x[0] for x in combi]
for choice in choices:
choice_list = list(choice)
odd = 1
for match, outcome in zip(combi, choice_list):
odd *= match[1]["odds"][site][outcome]
profit = best_combine_reduit(matches, choice_list, site, freebet, sport, odd-1, output=False)
if profit < best_profit:
continue
best_profit = profit
best_matches = matches
best_choice = choice_list
best_odd = odd
best_combine_reduit(best_matches, best_choice, site, freebet, sport, best_odd-1, freebet=True)
def get_matches_with_best_trj(sport, nb_matches, site=None):
    matches = sorted(
        filter(lambda x: not site or site in x[1]["odds"], sb.ODDS[sport].items()),
        key=lambda x: trj_match(x[1])[0],
        reverse=True,
    )[:nb_matches]
    return {match: odds for match, odds in matches}
def best_match_defi_rembourse_ou_gagnant(site, minimum_odd, stake, sport, date_max=None,
time_max=None, date_min=None, time_min=None):
odds_function = get_best_odds(False)
    profit_function = lambda odds_to_check, i: gain_defi_rembourse_ou_gagnant(odds_to_check, stake, i)
criteria = lambda odds_to_check, i: odds_to_check[i] >= minimum_odd
display_function = lambda best_overall_odds, best_rank: mises_defi_rembourse_ou_gagnant(best_overall_odds, stake, best_rank, True)
result_function = lambda best_overall_odds, best_rank: mises_defi_rembourse_ou_gagnant(best_overall_odds, stake, best_rank, False)
best_match_base(odds_function, profit_function, criteria, display_function,
result_function, site, sport, date_max, time_max, date_min,
time_min)
def get_sports_with_surebet():
sports_with_surebet = []
for sport in sb.SPORTS:
if sb.SEEN_SUREBET[sport]:
continue
if sport not in sb.ODDS:
continue
for match in sb.ODDS[sport]:
if trj_match(sb.ODDS[sport][match])[0]>=1:
sports_with_surebet.append(sport)
break
return sports_with_surebet
|
the-stack_0_9044 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Setup.py for the Provider packages of Airflow project."""
import collections
import difflib
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import warnings
from contextlib import contextmanager
from copy import deepcopy
from datetime import datetime, timedelta
from enum import Enum
from functools import lru_cache
from os.path import dirname, relpath
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Set, Tuple, Type, Union
import jsonschema
import rich_click as click
from github import Github, Issue, PullRequest, UnknownObjectException
from packaging.version import Version
from rich.console import Console
from rich.progress import Progress
from rich.syntax import Syntax
from airflow.utils.yaml import safe_load
ALL_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"]
INITIAL_CHANGELOG_CONTENT = """
.. Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
.. http://www.apache.org/licenses/LICENSE-2.0
.. Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
Changelog
---------
1.0.0
.....
Initial version of the provider.
"""
HTTPS_REMOTE = "apache-https-for-providers"
HEAD_OF_HTTPS_REMOTE = f"{HTTPS_REMOTE}/main"
MY_DIR_PATH = Path(__file__).parent
SOURCE_DIR_PATH = MY_DIR_PATH.parent.parent
AIRFLOW_PATH = SOURCE_DIR_PATH / "airflow"
DIST_PATH = SOURCE_DIR_PATH / "dist"
PROVIDERS_PATH = AIRFLOW_PATH / "providers"
DOCUMENTATION_PATH = SOURCE_DIR_PATH / "docs"
TARGET_PROVIDER_PACKAGES_PATH = SOURCE_DIR_PATH / "provider_packages"
GENERATED_AIRFLOW_PATH = TARGET_PROVIDER_PACKAGES_PATH / "airflow"
GENERATED_PROVIDERS_PATH = GENERATED_AIRFLOW_PATH / "providers"
PROVIDER_RUNTIME_DATA_SCHEMA_PATH = SOURCE_DIR_PATH / "airflow" / "provider_info.schema.json"
sys.path.insert(0, str(SOURCE_DIR_PATH))
# those imports need to come after the above sys.path.insert to make sure that Airflow
# sources are importable without having to add the airflow sources to the PYTHONPATH before
# running the script
from dev.import_all_classes import import_all_classes # noqa # isort:skip
from setup import PROVIDERS_REQUIREMENTS # type: ignore[attr-defined] # isort:skip # noqa
from setup import PREINSTALLED_PROVIDERS # type: ignore[attr-defined] # isort:skip # noqa
# Note - we do not test protocols as they are not really part of the official API of
# Apache Airflow
logger = logging.getLogger(__name__)
PY3 = sys.version_info[0] == 3
console = Console(width=400, color_system="standard")
@click.group(context_settings={'help_option_names': ['-h', '--help'], 'max_content_width': 500})
def cli():
...
option_skip_tag_check = click.option(
"--skip-tag-check/--no-skip-tag-check",
default=False,
is_flag=True,
help="Skip checking if the tag already exists in the remote repository",
)
option_git_update = click.option(
'--git-update/--no-git-update',
default=True,
is_flag=True,
help=f"If the git remote {HTTPS_REMOTE} already exists, don't try to update it",
)
option_version_suffix = click.option(
"--version-suffix",
metavar="suffix",
help=textwrap.dedent(
"""
adds version suffix to version of the packages.
only useful when generating rc candidates for pypi."""
),
)
option_verbose = click.option(
"--verbose",
is_flag=True,
help="Print verbose information about performed steps",
)
option_force = click.option(
"--force",
is_flag=True,
help="Forces regeneration of already generated documentation",
)
argument_package_id = click.argument('package_id')
argument_changelog_files = click.argument('changelog_files', nargs=-1)
argument_package_ids = click.argument('package_ids', nargs=-1)
@contextmanager
def with_group(title):
"""
If used in GitHub Action, creates an expandable group in the GitHub Action log.
Otherwise, display simple text groups.
For more information, see:
https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#grouping-log-lines
"""
if os.environ.get('GITHUB_ACTIONS', 'false') != "true":
console.print("[blue]" + "#" * 10 + ' ' + title + ' ' + "#" * 10 + "[/]")
yield
return
console.print(f"::group::{title}")
yield
console.print("::endgroup::")
class EntityType(Enum):
Operators = "Operators"
Transfers = "Transfers"
Sensors = "Sensors"
Hooks = "Hooks"
Secrets = "Secrets"
class EntityTypeSummary(NamedTuple):
entities: List[str]
new_entities_table: str
wrong_entities: List[Tuple[type, str]]
class VerifiedEntities(NamedTuple):
all_entities: Set[str]
wrong_entities: List[Tuple[type, str]]
class ProviderPackageDetails(NamedTuple):
provider_package_id: str
full_package_name: str
pypi_package_name: str
source_provider_package_path: str
documentation_provider_package_path: str
provider_description: str
versions: List[str]
excluded_python_versions: List[str]
ENTITY_NAMES = {
EntityType.Operators: "Operators",
EntityType.Transfers: "Transfer Operators",
EntityType.Sensors: "Sensors",
EntityType.Hooks: "Hooks",
EntityType.Secrets: "Secrets",
}
TOTALS: Dict[EntityType, int] = {
EntityType.Operators: 0,
EntityType.Hooks: 0,
EntityType.Sensors: 0,
EntityType.Transfers: 0,
EntityType.Secrets: 0,
}
OPERATORS_PATTERN = r".*Operator$"
SENSORS_PATTERN = r".*Sensor$"
HOOKS_PATTERN = r".*Hook$"
SECRETS_PATTERN = r".*Backend$"
TRANSFERS_PATTERN = r".*To[A-Z0-9].*Operator$"
WRONG_TRANSFERS_PATTERN = r".*Transfer$|.*TransferOperator$"
ALL_PATTERNS = {
OPERATORS_PATTERN,
SENSORS_PATTERN,
HOOKS_PATTERN,
SECRETS_PATTERN,
TRANSFERS_PATTERN,
WRONG_TRANSFERS_PATTERN,
}
EXPECTED_SUFFIXES: Dict[EntityType, str] = {
EntityType.Operators: "Operator",
EntityType.Hooks: "Hook",
EntityType.Sensors: "Sensor",
EntityType.Secrets: "Backend",
EntityType.Transfers: "Operator",
}
def get_source_airflow_folder() -> str:
"""
Returns source directory for whole airflow (from the main airflow project).
:return: the folder path
"""
return os.path.abspath(SOURCE_DIR_PATH)
def get_source_providers_folder() -> str:
"""
Returns source directory for providers (from the main airflow project).
:return: the folder path
"""
return os.path.join(get_source_airflow_folder(), "airflow", "providers")
def get_target_folder() -> str:
"""
Returns target directory for providers (in the provider_packages folder)
:return: the folder path
"""
return os.path.abspath(os.path.join(dirname(__file__), os.pardir, os.pardir, "provider_packages"))
def get_target_providers_folder() -> str:
"""
Returns target directory for providers (in the provider_packages folder)
:return: the folder path
"""
return os.path.abspath(os.path.join(get_target_folder(), "airflow", "providers"))
def get_target_providers_package_folder(provider_package_id: str) -> str:
"""
Returns target package folder based on package_id
:return: the folder path
"""
return os.path.join(get_target_providers_folder(), *provider_package_id.split("."))
DEPENDENCIES_JSON_FILE = os.path.join(PROVIDERS_PATH, "dependencies.json")
def get_pip_package_name(provider_package_id: str) -> str:
"""
Returns PIP package name for the package id.
:param provider_package_id: id of the package
:return: the name of pip package
"""
return "apache-airflow-providers-" + provider_package_id.replace(".", "-")
def get_wheel_package_name(provider_package_id: str) -> str:
"""
Returns PIP package name for the package id.
:param provider_package_id: id of the package
:return: the name of pip package
"""
return "apache_airflow_providers_" + provider_package_id.replace(".", "_")
def get_long_description(provider_package_id: str) -> str:
"""
Gets long description of the package.
:param provider_package_id: package id
:return: content of the description: README file
"""
package_folder = get_target_providers_package_folder(provider_package_id)
readme_file = os.path.join(package_folder, "README.md")
if not os.path.exists(readme_file):
return ""
with open(readme_file, encoding='utf-8') as file:
readme_contents = file.read()
copying = True
long_description = ""
for line in readme_contents.splitlines(keepends=True):
if line.startswith("**Table of contents**"):
copying = False
continue
header_line = "## Provider package"
if line.startswith(header_line):
copying = True
if copying:
long_description += line
return long_description
def get_install_requirements(provider_package_id: str, version_suffix: str) -> str:
"""
Returns install requirements for the package.
:param provider_package_id: id of the provider package
:param version_suffix: optional version suffix for packages
:return: install requirements of the package
"""
dependencies = PROVIDERS_REQUIREMENTS[provider_package_id]
provider_yaml = get_provider_yaml(provider_package_id)
install_requires = []
if "additional-dependencies" in provider_yaml:
additional_dependencies = provider_yaml['additional-dependencies']
if version_suffix:
# In case we are preparing "rc" or dev0 packages, we should also
# make sure that cross-dependency with Airflow or Airflow Providers will
# contain the version suffix, otherwise we will have conflicting dependencies.
# For example if (in sftp) we have ssh>=2.0.1 and release ssh==2.0.1
# we want to turn this into ssh>=2.0.1.dev0 if we build dev0 version of the packages
# or >=2.0.1rc1 if we build rc1 version of the packages.
for dependency in additional_dependencies:
if dependency.startswith("apache-airflow") and ">=" in dependency:
dependency = (
dependency + ("." if not version_suffix.startswith(".") else "") + version_suffix
)
install_requires.append(dependency)
else:
install_requires.extend(additional_dependencies)
install_requires.extend(dependencies)
prefix = "\n "
return prefix + prefix.join(install_requires)
def get_setup_requirements() -> str:
"""
Returns setup requirements (common for all package for now).
:return: setup requirements
"""
return """
setuptools
wheel
"""
def get_package_extras(provider_package_id: str) -> Dict[str, List[str]]:
"""
Finds extras for the package specified.
:param provider_package_id: id of the package
"""
if provider_package_id == 'providers':
return {}
with open(DEPENDENCIES_JSON_FILE) as dependencies_file:
cross_provider_dependencies: Dict[str, List[str]] = json.load(dependencies_file)
extras_dict = (
{
module: [get_pip_package_name(module)]
for module in cross_provider_dependencies[provider_package_id]
}
if cross_provider_dependencies.get(provider_package_id)
else {}
)
provider_yaml_dict = get_provider_yaml(provider_package_id)
additional_extras = provider_yaml_dict.get('additional-extras')
if additional_extras:
for key in additional_extras:
if key in extras_dict:
extras_dict[key].append(additional_extras[key])
else:
extras_dict[key] = additional_extras[key]
return extras_dict
def get_provider_packages() -> List[str]:
"""
Returns all provider packages.
"""
return list(PROVIDERS_REQUIREMENTS.keys())
def is_imported_from_same_module(the_class: str, imported_name: str) -> bool:
"""
Is the class imported from another module?
:param the_class: the class object itself
:param imported_name: name of the imported class
:return: true if the class was imported from another module
"""
return ".".join(imported_name.split(".")[:-1]) == the_class.__module__
def is_example_dag(imported_name: str) -> bool:
"""
Is the class an example_dag class?
:param imported_name: name where the class is imported from
:return: true if it is an example_dags class
"""
return ".example_dags." in imported_name
def is_from_the_expected_base_package(the_class: Type, expected_package: str) -> bool:
"""
Returns true if the class is from the package expected.
:param the_class: the class object
:param expected_package: package expected for the class
:return:
"""
return the_class.__module__.startswith(expected_package)
def inherits_from(the_class: Type, expected_ancestor: Optional[Type] = None) -> bool:
"""
Returns true if the class inherits (directly or indirectly) from the class specified.
:param the_class: The class to check
:param expected_ancestor: expected class to inherit from
:return: true is the class inherits from the class expected
"""
if expected_ancestor is None:
return False
import inspect
mro = inspect.getmro(the_class)
return the_class is not expected_ancestor and expected_ancestor in mro
def is_class(the_class: Type) -> bool:
"""
Returns true if the object passed is a class
:param the_class: the class to pass
:return: true if it is a class
"""
import inspect
return inspect.isclass(the_class)
def package_name_matches(the_class: Type, expected_pattern: Optional[str] = None) -> bool:
"""
In case expected_pattern is set, it checks if the package name matches the pattern.
:param the_class: imported class
:param expected_pattern: the pattern that should match the package
:return: true if the expected_pattern is None or the pattern matches the package
"""
return expected_pattern is None or re.match(expected_pattern, the_class.__module__) is not None
def find_all_entities(
imported_classes: List[str],
base_package: str,
ancestor_match: Type,
sub_package_pattern_match: str,
expected_class_name_pattern: str,
unexpected_class_name_patterns: Set[str],
exclude_class_type: Optional[Type] = None,
false_positive_class_names: Optional[Set[str]] = None,
) -> VerifiedEntities:
"""
Returns set of entities containing all subclasses in package specified.
:param imported_classes: entities imported from providers
:param base_package: base package name where to start looking for the entities
:param sub_package_pattern_match: this string is expected to appear in the sub-package name
:param ancestor_match: type of the object the method looks for
:param expected_class_name_pattern: regexp of class name pattern to expect
:param unexpected_class_name_patterns: set of regexp of class name pattern that are not expected
:param exclude_class_type: exclude class of this type (Sensor are also Operators, so
they should be excluded from the list)
:param false_positive_class_names: set of class names that are wrongly recognised as badly named
"""
found_entities: Set[str] = set()
wrong_entities: List[Tuple[type, str]] = []
for imported_name in imported_classes:
module, class_name = imported_name.rsplit(".", maxsplit=1)
the_class = getattr(importlib.import_module(module), class_name)
if (
is_class(the_class=the_class)
and not is_example_dag(imported_name=imported_name)
and is_from_the_expected_base_package(the_class=the_class, expected_package=base_package)
and is_imported_from_same_module(the_class=the_class, imported_name=imported_name)
and inherits_from(the_class=the_class, expected_ancestor=ancestor_match)
and not inherits_from(the_class=the_class, expected_ancestor=exclude_class_type)
and package_name_matches(the_class=the_class, expected_pattern=sub_package_pattern_match)
):
if not false_positive_class_names or class_name not in false_positive_class_names:
if not re.match(expected_class_name_pattern, class_name):
wrong_entities.append(
(
the_class,
f"The class name {class_name} is wrong. "
f"It should match {expected_class_name_pattern}",
)
)
continue
if unexpected_class_name_patterns:
for unexpected_class_name_pattern in unexpected_class_name_patterns:
if re.match(unexpected_class_name_pattern, class_name):
wrong_entities.append(
(
the_class,
f"The class name {class_name} is wrong. "
f"It should not match {unexpected_class_name_pattern}",
)
)
continue
found_entities.add(imported_name)
return VerifiedEntities(all_entities=found_entities, wrong_entities=wrong_entities)
def convert_classes_to_table(entity_type: EntityType, entities: List[str], full_package_name: str) -> str:
"""
Converts new entities to a Markdown table.
:param entity_type: entity type to convert to markup
:param entities: list of entities
:param full_package_name: name of the provider package
:return: table of new classes
"""
from tabulate import tabulate
headers = [f"New Airflow 2.0 {entity_type.value.lower()}: `{full_package_name}` package"]
table = [(get_class_code_link(full_package_name, class_name, "main"),) for class_name in entities]
return tabulate(table, headers=headers, tablefmt="pipe")
def get_details_about_classes(
entity_type: EntityType,
entities: Set[str],
wrong_entities: List[Tuple[type, str]],
full_package_name: str,
) -> EntityTypeSummary:
"""
Get details about entities.
:param entity_type: type of entity (Operators, Hooks etc.)
:param entities: set of entities found
:param wrong_entities: wrong entities found for that type
:param full_package_name: full package name
:return:
"""
all_entities = list(entities)
all_entities.sort()
TOTALS[entity_type] += len(all_entities)
return EntityTypeSummary(
entities=all_entities,
new_entities_table=convert_classes_to_table(
entity_type=entity_type,
entities=all_entities,
full_package_name=full_package_name,
),
wrong_entities=wrong_entities,
)
def strip_package_from_class(base_package: str, class_name: str) -> str:
"""
Strips base package name from the class (if it starts with the package name).
"""
if class_name.startswith(base_package):
return class_name[len(base_package) + 1 :]
else:
return class_name
def convert_class_name_to_url(base_url: str, class_name) -> str:
"""
Converts the class name to URL that the class can be reached
:param base_url: base URL to use
:param class_name: name of the class
:return: URL to the class
"""
return base_url + os.path.sep.join(class_name.split(".")[:-1]) + ".py"
def get_class_code_link(base_package: str, class_name: str, git_tag: str) -> str:
"""
Provides a Markdown link for the class passed as parameter.
:param base_package: base package to strip from most names
:param class_name: name of the class
:param git_tag: tag to use for the URL link
:return: URL to the class
"""
url_prefix = f'https://github.com/apache/airflow/blob/{git_tag}/'
return (
f'[{strip_package_from_class(base_package, class_name)}]'
f'({convert_class_name_to_url(url_prefix, class_name)})'
)
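# Illustrative output (class name and tag chosen for the example, POSIX path separator
# assumed): for base package "airflow.providers.sftp", class
# "airflow.providers.sftp.hooks.sftp.SFTPHook" and git tag "main", the link is:
#   [hooks.sftp.SFTPHook](https://github.com/apache/airflow/blob/main/airflow/providers/sftp/hooks/sftp.py)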
def print_wrong_naming(entity_type: EntityType, wrong_classes: List[Tuple[type, str]]):
"""
Prints wrong entities of a given entity type if there are any
:param entity_type: type of the class to print
:param wrong_classes: list of wrong entities
"""
if wrong_classes:
console.print(f"\n[red]There are wrongly named entities of type {entity_type}:[/]\n")
for wrong_entity_type, message in wrong_classes:
console.print(f"{wrong_entity_type}: {message}")
def get_package_class_summary(
full_package_name: str, imported_classes: List[str]
) -> Dict[EntityType, EntityTypeSummary]:
"""
Gets summary of the package in the form of dictionary containing all types of entities
:param full_package_name: full package name
:param imported_classes: entities imported_from providers
:return: dictionary of objects usable as context for JINJA2 templates - or None if there are some errors
"""
from airflow.hooks.base import BaseHook
from airflow.models.baseoperator import BaseOperator
from airflow.secrets import BaseSecretsBackend
from airflow.sensors.base import BaseSensorOperator
all_verified_entities: Dict[EntityType, VerifiedEntities] = {
EntityType.Operators: find_all_entities(
imported_classes=imported_classes,
base_package=full_package_name,
sub_package_pattern_match=r".*\.operators\..*",
ancestor_match=BaseOperator,
expected_class_name_pattern=OPERATORS_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN},
exclude_class_type=BaseSensorOperator,
false_positive_class_names={
'CloudVisionAddProductToProductSetOperator',
'CloudDataTransferServiceGCSToGCSOperator',
'CloudDataTransferServiceS3ToGCSOperator',
'BigQueryCreateDataTransferOperator',
'CloudTextToSpeechSynthesizeOperator',
'CloudSpeechToTextRecognizeSpeechOperator',
},
),
EntityType.Sensors: find_all_entities(
imported_classes=imported_classes,
base_package=full_package_name,
sub_package_pattern_match=r".*\.sensors\..*",
ancestor_match=BaseSensorOperator,
expected_class_name_pattern=SENSORS_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN, SENSORS_PATTERN},
),
EntityType.Hooks: find_all_entities(
imported_classes=imported_classes,
base_package=full_package_name,
sub_package_pattern_match=r".*\.hooks\..*",
ancestor_match=BaseHook,
expected_class_name_pattern=HOOKS_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {HOOKS_PATTERN},
),
EntityType.Secrets: find_all_entities(
imported_classes=imported_classes,
sub_package_pattern_match=r".*\.secrets\..*",
base_package=full_package_name,
ancestor_match=BaseSecretsBackend,
expected_class_name_pattern=SECRETS_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {SECRETS_PATTERN},
),
EntityType.Transfers: find_all_entities(
imported_classes=imported_classes,
base_package=full_package_name,
sub_package_pattern_match=r".*\.transfers\..*",
ancestor_match=BaseOperator,
expected_class_name_pattern=TRANSFERS_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN, TRANSFERS_PATTERN},
),
}
for entity in EntityType:
print_wrong_naming(entity, all_verified_entities[entity].wrong_entities)
entities_summary: Dict[EntityType, EntityTypeSummary] = {}
for entity_type in EntityType:
entities_summary[entity_type] = get_details_about_classes(
entity_type,
all_verified_entities[entity_type].all_entities,
all_verified_entities[entity_type].wrong_entities,
full_package_name,
)
return entities_summary
def render_template(
template_name: str,
context: Dict[str, Any],
extension: str,
autoescape: bool = True,
keep_trailing_newline: bool = False,
) -> str:
"""
Renders template based on its name. Reads the template from <name>_TEMPLATE.md.jinja2 in current dir.
:param template_name: name of the template to use
:param context: Jinja2 context
:param extension: Target file extension
:param autoescape: Whether to autoescape HTML
:param keep_trailing_newline: Whether to keep the newline in rendered output
:return: rendered template
"""
import jinja2
template_loader = jinja2.FileSystemLoader(searchpath=MY_DIR_PATH)
template_env = jinja2.Environment(
loader=template_loader,
undefined=jinja2.StrictUndefined,
autoescape=autoescape,
keep_trailing_newline=keep_trailing_newline,
)
template = template_env.get_template(f"{template_name}_TEMPLATE{extension}.jinja2")
content: str = template.render(context)
return content
PR_PATTERN = re.compile(r".*\(#([0-9]+)\)")
class Change(NamedTuple):
"""Stores details about commits"""
full_hash: str
short_hash: str
date: str
version: str
message: str
message_without_backticks: str
pr: Optional[str]
def get_change_from_line(line: str, version: str):
split_line = line.split(" ", maxsplit=3)
message = split_line[3]
pr = None
pr_match = PR_PATTERN.match(message)
if pr_match:
pr = pr_match.group(1)
return Change(
full_hash=split_line[0],
short_hash=split_line[1],
date=split_line[2],
version=version,
message=message,
        message_without_backticks=message.replace("`", "'").replace("&#39;", "'"),
pr=pr,
)
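# Parsing sketch (hash, date and PR number below are made up): the line matches the
# "--pretty=format:%H %h %cd %s" / "--date=short" output used elsewhere in this file,
# and the subject keeps its spaces because the split is limited to 3:
#
#   change = get_change_from_line(
#       "0123456789abcdef0123456789abcdef01234567 0123456 2021-06-01 Improve docs (#12345)",
#       version="1.0.0",
#   )
#   # change.short_hash == "0123456", change.pr == "12345"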
def convert_git_changes_to_table(
version: str, changes: str, base_url: str, markdown: bool = True
) -> Tuple[str, List[Change]]:
"""
Converts list of changes from its string form to markdown/RST table and array of change information
The changes are in the form of multiple lines where each line consists of:
FULL_COMMIT_HASH SHORT_COMMIT_HASH COMMIT_DATE COMMIT_SUBJECT
The subject can contain spaces but one of the preceding values can, so we can make split
3 times on spaces to break it up.
:param version: Version from which the changes are
:param changes: list of changes in a form of multiple-line string
:param base_url: base url for the commit URL
:param markdown: if True, Markdown format is used else rst
:return: formatted table + list of changes (starting from the latest)
"""
from tabulate import tabulate
lines = changes.split("\n")
headers = ["Commit", "Committed", "Subject"]
table_data = []
changes_list: List[Change] = []
for line in lines:
if line == "":
continue
change = get_change_from_line(line, version)
table_data.append(
(
f"[{change.short_hash}]({base_url}{change.full_hash})"
if markdown
else f"`{change.short_hash} <{base_url}{change.full_hash}>`_",
change.date,
f"`{change.message_without_backticks}`"
if markdown
else f"``{change.message_without_backticks}``",
)
)
changes_list.append(change)
header = ""
if not table_data:
return header, []
table = tabulate(table_data, headers=headers, tablefmt="pipe" if markdown else "rst")
if not markdown:
header += f"\n\n{version}\n" + "." * len(version) + "\n\n"
release_date = table_data[0][1]
header += f"Latest change: {release_date}\n\n"
return header + table, changes_list
def convert_pip_requirements_to_table(requirements: Iterable[str], markdown: bool = True) -> str:
"""
Converts PIP requirement list to a Markdown table.
:param requirements: requirements list
:param markdown: if True, Markdown format is used else rst
:return: formatted table
"""
from tabulate import tabulate
headers = ["PIP package", "Version required"]
table_data = []
for dependency in requirements:
found = re.match(r"(^[^<=>~]*)([^<=>~]?.*)$", dependency)
if found:
package = found.group(1)
version_required = found.group(2)
if version_required != "":
version_required = f"`{version_required}`" if markdown else f'``{version_required}``'
table_data.append((f"`{package}`" if markdown else f"``{package}``", version_required))
else:
table_data.append((dependency, ""))
return tabulate(table_data, headers=headers, tablefmt="pipe" if markdown else "rst")
def convert_cross_package_dependencies_to_table(
cross_package_dependencies: List[str],
markdown: bool = True,
) -> str:
"""
Converts cross-package dependencies to a Markdown table
:param cross_package_dependencies: list of cross-package dependencies
:param markdown: if True, Markdown format is used else rst
:return: formatted table
"""
from tabulate import tabulate
headers = ["Dependent package", "Extra"]
table_data = []
prefix = "apache-airflow-providers-"
base_url = "https://airflow.apache.org/docs/"
for dependency in cross_package_dependencies:
pip_package_name = f"{prefix}{dependency.replace('.','-')}"
url_suffix = f"{dependency.replace('.','-')}"
if markdown:
url = f"[{pip_package_name}]({base_url}{url_suffix})"
else:
url = f"`{pip_package_name} <{base_url}{prefix}{url_suffix}>`_"
table_data.append((url, f"`{dependency}`" if markdown else f"``{dependency}``"))
return tabulate(table_data, headers=headers, tablefmt="pipe" if markdown else "rst")
LICENCE = """<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
"""
LICENCE_RST = """
.. Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
.. http://www.apache.org/licenses/LICENSE-2.0
.. Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
"""
Keeps information about historical releases.
"""
ReleaseInfo = collections.namedtuple(
"ReleaseInfo", "release_version release_version_no_leading_zeros last_commit_hash content file_name"
)
def strip_leading_zeros(version: str) -> str:
"""
Strips leading zeros from version number.
This converts 1974.04.03 to 1974.4.3 as the format with leading month and day zeros is not accepted
by PIP versioning.
:param version: version number in CALVER format (potentially with leading 0s in date and month)
:return: string with leading 0s after dot replaced.
"""
return ".".join(str(int(i)) for i in version.split("."))
def get_previous_release_info(
previous_release_version: Optional[str], past_releases: List[ReleaseInfo], current_release_version: str
) -> Optional[str]:
"""
Find previous release. In case we are re-running current release we assume that last release was
the previous one. This is needed so that we can generate list of changes since the previous release.
:param previous_release_version: known last release version
:param past_releases: list of past releases
:param current_release_version: release that we are working on currently
:return:
"""
previous_release = None
if previous_release_version == current_release_version:
# Re-running for current release - use previous release as base for git log
if len(past_releases) > 1:
previous_release = past_releases[1].last_commit_hash
else:
previous_release = past_releases[0].last_commit_hash if past_releases else None
return previous_release
def check_if_release_version_ok(
past_releases: List[ReleaseInfo],
current_release_version: str,
) -> Tuple[str, Optional[str]]:
"""
Check if the release version passed is not later than the last release version
:param past_releases: all past releases (if there are any)
:param current_release_version: release version to check
:return: Tuple of current/previous_release (previous might be None if there are no releases)
"""
previous_release_version = past_releases[0].release_version if past_releases else None
if current_release_version == '':
if previous_release_version:
current_release_version = previous_release_version
else:
current_release_version = (datetime.today() + timedelta(days=5)).strftime('%Y.%m.%d')
if previous_release_version:
if Version(current_release_version) < Version(previous_release_version):
console.print(
f"[red]The release {current_release_version} must be not less than "
f"{previous_release_version} - last release for the package[/]"
)
raise Exception("Bad release version")
return current_release_version, previous_release_version
def get_cross_provider_dependent_packages(provider_package_id: str) -> List[str]:
"""
Returns cross-provider dependencies for the package.
:param provider_package_id: package id
:return: list of cross-provider dependencies
"""
with open(os.path.join(PROVIDERS_PATH, "dependencies.json")) as dependencies_file:
dependent_packages = json.load(dependencies_file).get(provider_package_id) or []
return dependent_packages
def make_sure_remote_apache_exists_and_fetch(git_update: bool, verbose: bool):
"""
Make sure that apache remote exist in git. We need to take a log from the apache
repository - not locally.
Also, the local repo might be shallow, so we need to un-shallow it.
This will:
* check if the remote exists and add if it does not
* check if the local repo is shallow, mark it to un-shallow in this case
* fetch from the remote including all tags and overriding local tags in case they are set differently
:param git_update: If the git remote already exists, should we try to update it
:param verbose: print verbose messages while fetching
"""
try:
check_remote_command = ["git", "remote", "get-url", HTTPS_REMOTE]
if verbose:
console.print(f"Running command: '{' '.join(check_remote_command)}'")
subprocess.check_call(
check_remote_command,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
# Remote already exists, don't update it again!
if not git_update:
return
except subprocess.CalledProcessError as ex:
if ex.returncode == 128 or ex.returncode == 2:
remote_add_command = [
"git",
"remote",
"add",
HTTPS_REMOTE,
"https://github.com/apache/airflow.git",
]
if verbose:
console.print(f"Running command: '{' '.join(remote_add_command)}'")
try:
subprocess.check_output(
remote_add_command,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as ex:
console.print("[red]Error: when adding remote:[/]", ex)
sys.exit(128)
else:
raise
if verbose:
console.print("Fetching full history and tags from remote. ")
console.print("This might override your local tags!")
    is_shallow_repo = (
        subprocess.check_output(["git", "rev-parse", "--is-shallow-repository"], stderr=subprocess.DEVNULL)
        .decode()
        .strip()
        == 'true'
    )
fetch_command = ["git", "fetch", "--tags", "--force", HTTPS_REMOTE]
if is_shallow_repo:
if verbose:
console.print(
"This will also un-shallow the repository, "
"making all history available and increasing storage!"
)
fetch_command.append("--unshallow")
if verbose:
console.print(f"Running command: '{' '.join(fetch_command)}'")
subprocess.check_call(
fetch_command,
stderr=subprocess.DEVNULL,
)
def get_git_log_command(
verbose: bool, from_commit: Optional[str] = None, to_commit: Optional[str] = None
) -> List[str]:
"""
Get git command to run for the current repo from the current folder (which is the package folder).
:param verbose: whether to print verbose info while getting the command
:param from_commit: if present - base commit from which to start the log from
:param to_commit: if present - final commit which should be the start of the log
:return: git command to run
"""
git_cmd = [
"git",
"log",
"--pretty=format:%H %h %cd %s",
"--date=short",
]
if from_commit and to_commit:
git_cmd.append(f"{from_commit}...{to_commit}")
elif from_commit:
git_cmd.append(from_commit)
git_cmd.extend(['--', '.'])
if verbose:
console.print(f"Command to run: '{' '.join(git_cmd)}'")
return git_cmd
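# Example of the command built above (the commit range is hypothetical):
#   get_git_log_command(False, "apache-https-for-providers/main", "providers-sftp/2.0.0")
# returns:
#   ['git', 'log', '--pretty=format:%H %h %cd %s', '--date=short',
#    'apache-https-for-providers/main...providers-sftp/2.0.0', '--', '.']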
def get_git_tag_check_command(tag: str) -> List[str]:
"""
Get git command to check if tag exits.
:param tag: Tag to check
:return: git command to run
"""
return [
"git",
"rev-parse",
tag,
]
def get_source_package_path(provider_package_id: str) -> str:
"""
Retrieves source package path from package id.
:param provider_package_id: id of the package
:return: path of the providers folder
"""
return os.path.join(PROVIDERS_PATH, *provider_package_id.split("."))
def get_documentation_package_path(provider_package_id: str) -> str:
"""
Retrieves documentation package path from package id.
:param provider_package_id: id of the package
:return: path of the documentation folder
"""
return os.path.join(
DOCUMENTATION_PATH, f"apache-airflow-providers-{provider_package_id.replace('.','-')}"
)
def get_generated_package_path(provider_package_id: str) -> str:
"""
Retrieves generated package path from package id.
:param provider_package_id: id of the package
:return: path of the providers folder
"""
provider_package_path = os.path.join(GENERATED_PROVIDERS_PATH, *provider_package_id.split("."))
return provider_package_path
def get_additional_package_info(provider_package_path: str) -> str:
"""
Returns additional info for the package.
:param provider_package_path: path for the package
:return: additional information for the path (empty string if missing)
"""
additional_info_file_path = os.path.join(provider_package_path, "ADDITIONAL_INFO.md")
if os.path.isfile(additional_info_file_path):
with open(additional_info_file_path) as additional_info_file:
additional_info = additional_info_file.read()
additional_info_lines = additional_info.splitlines(keepends=True)
result = ""
skip_comment = True
for line in additional_info_lines:
if line.startswith(" -->"):
skip_comment = False
continue
if not skip_comment:
result += line
return result
return ""
def is_camel_case_with_acronyms(s: str):
"""
Checks if the string passed is Camel Case (with capitalised acronyms allowed).
:param s: string to check
:return: true if the name looks cool as Class name.
"""
return s != s.lower() and s != s.upper() and "_" not in s and s[0].upper() == s[0]
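# A few examples of the naming check above:
#   is_camel_case_with_acronyms("GCSToS3Operator")  -> True
#   is_camel_case_with_acronyms("SFTPHook")         -> True
#   is_camel_case_with_acronyms("Sftp_Hook")        -> False (contains an underscore)
#   is_camel_case_with_acronyms("sftpHook")         -> False (does not start with a capital)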
def check_if_classes_are_properly_named(
entity_summary: Dict[EntityType, EntityTypeSummary]
) -> Tuple[int, int]:
"""
Check if all entities in the dictionary are named properly. It prints names at the output
and returns the status of class names.
:param entity_summary: dictionary of class names to check, grouped by types.
:return: Tuple of 2 ints = total number of entities and number of badly named entities
"""
total_class_number = 0
badly_named_class_number = 0
for entity_type, class_suffix in EXPECTED_SUFFIXES.items():
for class_full_name in entity_summary[entity_type].entities:
_, class_name = class_full_name.rsplit(".", maxsplit=1)
error_encountered = False
if not is_camel_case_with_acronyms(class_name):
console.print(
f"[red]The class {class_full_name} is wrongly named. The "
f"class name should be CamelCaseWithACRONYMS ![/]"
)
error_encountered = True
if not class_name.endswith(class_suffix):
console.print(
f"[red]The class {class_full_name} is wrongly named. It is one of the {entity_type.value}"
f" so it should end with {class_suffix}[/]"
)
error_encountered = True
total_class_number += 1
if error_encountered:
badly_named_class_number += 1
return total_class_number, badly_named_class_number
def get_package_pip_name(provider_package_id: str):
return f"apache-airflow-providers-{provider_package_id.replace('.', '-')}"
def validate_provider_info_with_runtime_schema(provider_info: Dict[str, Any]) -> None:
"""
Validates provider info against the runtime schema. This way we check if the provider info in the
packages is future-compatible. The Runtime Schema should only change when there is a major version
change.
:param provider_info: provider info to validate
"""
with open(PROVIDER_RUNTIME_DATA_SCHEMA_PATH) as schema_file:
schema = json.load(schema_file)
try:
jsonschema.validate(provider_info, schema=schema)
except jsonschema.ValidationError as ex:
console.print("[red]Provider info not validated against runtime schema[/]")
raise Exception(
"Error when validating schema. The schema must be compatible with "
"airflow/provider_info.schema.json.",
ex,
)
def get_provider_yaml(provider_package_id: str) -> Dict[str, Any]:
"""
    Retrieves the full provider yaml dictionary from the provider.yaml file. The provider yaml file
    contains more information than the provider_info that is used at runtime; validation against the
    runtime schema is performed separately in get_provider_info_from_provider_yaml.
:param provider_package_id: package id to retrieve provider.yaml from
:return: provider_info dictionary
"""
provider_yaml_file_name = os.path.join(get_source_package_path(provider_package_id), "provider.yaml")
if not os.path.exists(provider_yaml_file_name):
raise Exception(f"The provider.yaml file is missing: {provider_yaml_file_name}")
with open(provider_yaml_file_name) as provider_file:
provider_yaml_dict = safe_load(provider_file)
return provider_yaml_dict
def get_provider_info_from_provider_yaml(provider_package_id: str) -> Dict[str, Any]:
"""
Retrieves provider info from the provider yaml file.
:param provider_package_id: package id to retrieve provider.yaml from
:return: provider_info dictionary
"""
provider_yaml_dict = get_provider_yaml(provider_package_id=provider_package_id)
validate_provider_info_with_runtime_schema(provider_yaml_dict)
return provider_yaml_dict
def get_version_tag(version: str, provider_package_id: str, version_suffix: str = ''):
if version_suffix is None:
version_suffix = ''
return f"providers-{provider_package_id.replace('.','-')}/{version}{version_suffix}"
def print_changes_table(changes_table):
syntax = Syntax(changes_table, "rst", theme="ansi_dark")
console.print(syntax)
def get_all_changes_for_package(
versions: List[str],
provider_package_id: str,
source_provider_package_path: str,
verbose: bool,
) -> Tuple[bool, Optional[Union[List[List[Change]], Change]], str]:
"""
Retrieves all changes for the package.
:param versions: list of versions
:param provider_package_id: provider package id
:param source_provider_package_path: path where package is located
:param verbose: whether to print verbose messages
"""
current_version = versions[0]
current_tag_no_suffix = get_version_tag(current_version, provider_package_id)
if verbose:
console.print(f"Checking if tag '{current_tag_no_suffix}' exist.")
if not subprocess.call(
get_git_tag_check_command(current_tag_no_suffix),
cwd=source_provider_package_path,
stderr=subprocess.DEVNULL,
):
if verbose:
console.print(f"The tag {current_tag_no_suffix} exists.")
# The tag already exists
changes = subprocess.check_output(
get_git_log_command(verbose, HEAD_OF_HTTPS_REMOTE, current_tag_no_suffix),
cwd=source_provider_package_path,
universal_newlines=True,
)
if changes:
provider_details = get_provider_details(provider_package_id)
doc_only_change_file = os.path.join(
provider_details.source_provider_package_path, ".latest-doc-only-change.txt"
)
if os.path.exists(doc_only_change_file):
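                # The marker file stores the hash of the last commit that was classified as doc-only;
                # compare the changes since that hash to decide whether only documentation changed.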
with open(doc_only_change_file) as f:
last_doc_only_hash = f.read().strip()
try:
changes_since_last_doc_only_check = subprocess.check_output(
get_git_log_command(verbose, HEAD_OF_HTTPS_REMOTE, last_doc_only_hash),
cwd=source_provider_package_path,
universal_newlines=True,
)
if not changes_since_last_doc_only_check:
console.print()
console.print(
"[yellow]The provider has doc-only changes since the last release. Skipping[/]"
)
# Returns 66 in case of doc-only changes
sys.exit(66)
if len(changes) > len(changes_since_last_doc_only_check):
# if doc-only was released after previous release - use it as starting point
# but if before - stay with the releases from last tag.
changes = changes_since_last_doc_only_check
except subprocess.CalledProcessError:
# ignore when the commit mentioned as last doc-only change is obsolete
pass
console.print(f"[yellow]The provider {provider_package_id} has changes since last release[/]")
console.print()
            console.print(
                "[yellow]Please update version in "
                f"'airflow/providers/{provider_package_id.replace('.','/')}/provider.yaml'[/]\n"
            )
console.print("[yellow]Or mark the changes as doc-only[/]")
changes_table, array_of_changes = convert_git_changes_to_table(
"UNKNOWN",
changes,
base_url="https://github.com/apache/airflow/commit/",
markdown=False,
)
print_changes_table(changes_table)
return False, array_of_changes[0], changes_table
else:
console.print(f"No changes for {provider_package_id}")
return False, None, ""
if verbose:
console.print("The tag does not exist. ")
if len(versions) == 1:
console.print(
f"The provider '{provider_package_id}' has never been released but it is ready to release!\n"
)
else:
console.print(f"New version of the '{provider_package_id}' package is ready to be released!\n")
next_version_tag = HEAD_OF_HTTPS_REMOTE
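    # Walk the released versions from newest to oldest, collecting the commits between consecutive
    # version tags; the final subprocess call after the loop gathers the history before the oldest tag.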
changes_table = ''
current_version = versions[0]
list_of_list_of_changes: List[List[Change]] = []
for version in versions[1:]:
version_tag = get_version_tag(version, provider_package_id)
changes = subprocess.check_output(
get_git_log_command(verbose, next_version_tag, version_tag),
cwd=source_provider_package_path,
universal_newlines=True,
)
changes_table_for_version, array_of_changes_for_version = convert_git_changes_to_table(
current_version, changes, base_url="https://github.com/apache/airflow/commit/", markdown=False
)
changes_table += changes_table_for_version
list_of_list_of_changes.append(array_of_changes_for_version)
next_version_tag = version_tag
current_version = version
changes = subprocess.check_output(
get_git_log_command(verbose, next_version_tag),
cwd=source_provider_package_path,
universal_newlines=True,
)
changes_table_for_version, array_of_changes_for_version = convert_git_changes_to_table(
current_version, changes, base_url="https://github.com/apache/airflow/commit/", markdown=False
)
changes_table += changes_table_for_version
if verbose:
print_changes_table(changes_table)
return True, list_of_list_of_changes if len(list_of_list_of_changes) > 0 else None, changes_table
def get_provider_details(provider_package_id: str) -> ProviderPackageDetails:
provider_info = get_provider_info_from_provider_yaml(provider_package_id)
return ProviderPackageDetails(
provider_package_id=provider_package_id,
full_package_name=f"airflow.providers.{provider_package_id}",
pypi_package_name=f"apache-airflow-providers-{provider_package_id.replace('.', '-')}",
source_provider_package_path=get_source_package_path(provider_package_id),
documentation_provider_package_path=get_documentation_package_path(provider_package_id),
provider_description=provider_info['description'],
versions=provider_info['versions'],
excluded_python_versions=provider_info.get("excluded-python-versions") or [],
)
def get_provider_requirements(provider_package_id: str) -> List[str]:
provider_yaml = get_provider_yaml(provider_package_id)
requirements = (
provider_yaml['additional-dependencies'].copy() if 'additional-dependencies' in provider_yaml else []
)
requirements.extend(PROVIDERS_REQUIREMENTS[provider_package_id])
return requirements
def get_provider_jinja_context(
provider_info: Dict[str, Any],
provider_details: ProviderPackageDetails,
current_release_version: str,
version_suffix: str,
):
verify_provider_package(provider_details.provider_package_id)
changelog_path = verify_changelog_exists(provider_details.provider_package_id)
cross_providers_dependencies = get_cross_provider_dependent_packages(
provider_package_id=provider_details.provider_package_id
)
release_version_no_leading_zeros = strip_leading_zeros(current_release_version)
pip_requirements_table = convert_pip_requirements_to_table(
get_provider_requirements(provider_details.provider_package_id)
)
pip_requirements_table_rst = convert_pip_requirements_to_table(
get_provider_requirements(provider_details.provider_package_id), markdown=False
)
cross_providers_dependencies_table = convert_cross_package_dependencies_to_table(
cross_providers_dependencies
)
cross_providers_dependencies_table_rst = convert_cross_package_dependencies_to_table(
cross_providers_dependencies, markdown=False
)
with open(changelog_path) as changelog_file:
changelog = changelog_file.read()
supported_python_versions = [
p for p in ALL_PYTHON_VERSIONS if p not in provider_details.excluded_python_versions
]
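    # Build the python_requires specifier: require Python 3.6+ and explicitly exclude the versions
    # that the provider marks as unsupported in its provider.yaml.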
python_requires = "~=3.6"
for p in provider_details.excluded_python_versions:
python_requires += f", !={p}"
context: Dict[str, Any] = {
"ENTITY_TYPES": list(EntityType),
"README_FILE": "README.rst",
"PROVIDER_PACKAGE_ID": provider_details.provider_package_id,
"PACKAGE_PIP_NAME": get_pip_package_name(provider_details.provider_package_id),
"PACKAGE_WHEEL_NAME": get_wheel_package_name(provider_details.provider_package_id),
"FULL_PACKAGE_NAME": provider_details.full_package_name,
"PROVIDER_PATH": provider_details.full_package_name.replace(".", "/"),
"RELEASE": current_release_version,
"RELEASE_NO_LEADING_ZEROS": release_version_no_leading_zeros,
"VERSION_SUFFIX": version_suffix or '',
"ADDITIONAL_INFO": get_additional_package_info(
provider_package_path=provider_details.source_provider_package_path
),
"CROSS_PROVIDERS_DEPENDENCIES": cross_providers_dependencies,
"PIP_REQUIREMENTS": PROVIDERS_REQUIREMENTS[provider_details.provider_package_id],
"PROVIDER_TYPE": "Provider",
"PROVIDERS_FOLDER": "providers",
"PROVIDER_DESCRIPTION": provider_details.provider_description,
"INSTALL_REQUIREMENTS": get_install_requirements(
provider_package_id=provider_details.provider_package_id, version_suffix=version_suffix
),
"SETUP_REQUIREMENTS": get_setup_requirements(),
"EXTRAS_REQUIREMENTS": get_package_extras(provider_package_id=provider_details.provider_package_id),
"CROSS_PROVIDERS_DEPENDENCIES_TABLE": cross_providers_dependencies_table,
"CROSS_PROVIDERS_DEPENDENCIES_TABLE_RST": cross_providers_dependencies_table_rst,
"PIP_REQUIREMENTS_TABLE": pip_requirements_table,
"PIP_REQUIREMENTS_TABLE_RST": pip_requirements_table_rst,
"PROVIDER_INFO": provider_info,
"CHANGELOG_RELATIVE_PATH": relpath(
provider_details.source_provider_package_path,
provider_details.documentation_provider_package_path,
),
"CHANGELOG": changelog,
"SUPPORTED_PYTHON_VERSIONS": supported_python_versions,
"PYTHON_REQUIRES": python_requires,
}
return context
def prepare_readme_file(context):
readme_content = LICENCE_RST + render_template(
template_name="PROVIDER_README", context=context, extension=".rst"
)
readme_file_path = os.path.join(TARGET_PROVIDER_PACKAGES_PATH, "README.rst")
with open(readme_file_path, "wt") as readme_file:
readme_file.write(readme_content)
def confirm(message: str, answer: Optional[str] = None) -> bool:
"""
Ask user to confirm (case-insensitive).
:param message: message to display
:param answer: force answer if set
:return: True if the answer is any form of y/yes. Exits with 65 exit code if any form of q/quit is chosen.
"""
given_answer = answer.lower() if answer is not None else ""
while given_answer not in ["y", "n", "q", "yes", "no", "quit"]:
console.print(f"[yellow]{message}[y/n/q]?[/] ", end='')
given_answer = input("").lower()
if given_answer.lower() in ["q", "quit"]:
# Returns 65 in case user decided to quit
sys.exit(65)
return given_answer in ["y", "yes"]
def mark_latest_changes_as_documentation_only(
provider_details: ProviderPackageDetails, latest_change: Change
):
console.print(
f"Marking last change: {latest_change.short_hash} and all above changes since the last release "
"as doc-only changes!"
)
with open(
os.path.join(provider_details.source_provider_package_path, ".latest-doc-only-change.txt"), "tw"
) as f:
f.write(latest_change.full_hash + "\n")
# exit code 66 marks doc-only change marked
sys.exit(66)
def update_release_notes(
provider_package_id: str,
version_suffix: str,
force: bool,
verbose: bool,
answer: Optional[str],
) -> bool:
"""
Updates generated files (readme, changes and/or setup.cfg/setup.py/manifest.in/provider_info)
:param provider_package_id: id of the package
:param version_suffix: version suffix corresponding to the version in the code
:param force: regenerate already released documentation
:param verbose: whether to print verbose messages
:param answer: force answer to questions if set.
:returns False if the package should be skipped, True if everything generated properly
"""
verify_provider_package(provider_package_id)
provider_details = get_provider_details(provider_package_id)
provider_info = get_provider_info_from_provider_yaml(provider_package_id)
current_release_version = provider_details.versions[0]
jinja_context = get_provider_jinja_context(
provider_info=provider_info,
provider_details=provider_details,
current_release_version=current_release_version,
version_suffix=version_suffix,
)
proceed, latest_change, changes = get_all_changes_for_package(
provider_details.versions,
provider_package_id,
provider_details.source_provider_package_path,
verbose,
)
if not force:
if proceed:
if not confirm("Provider marked for release. Proceed", answer=answer):
return False
elif not latest_change:
console.print()
console.print(
f"[yellow]Provider: {provider_package_id} - skipping documentation generation. No changes![/]"
)
console.print()
return False
else:
if confirm("Are those changes documentation-only?", answer=answer):
if isinstance(latest_change, Change):
mark_latest_changes_as_documentation_only(provider_details, latest_change)
else:
raise ValueError(
"Expected only one change to be present to mark changes "
f"in provider {provider_package_id} as docs-only. "
f"Received {len(latest_change)}."
)
return False
jinja_context["DETAILED_CHANGES_RST"] = changes
jinja_context["DETAILED_CHANGES_PRESENT"] = len(changes) > 0
update_commits_rst(
jinja_context, provider_package_id, provider_details.documentation_provider_package_path
)
return True
def update_setup_files(
provider_package_id: str,
version_suffix: str,
):
"""
Updates generated setup.cfg/setup.py/manifest.in/provider_info for packages
:param provider_package_id: id of the package
:param version_suffix: version suffix corresponding to the version in the code
:returns False if the package should be skipped, True if everything generated properly
"""
verify_provider_package(provider_package_id)
provider_details = get_provider_details(provider_package_id)
provider_info = get_provider_info_from_provider_yaml(provider_package_id)
current_release_version = provider_details.versions[0]
jinja_context = get_provider_jinja_context(
provider_info=provider_info,
provider_details=provider_details,
current_release_version=current_release_version,
version_suffix=version_suffix,
)
console.print()
console.print(f"Generating setup files for {provider_package_id}")
console.print()
prepare_setup_py_file(jinja_context)
prepare_setup_cfg_file(jinja_context)
prepare_get_provider_info_py_file(jinja_context, provider_package_id)
prepare_manifest_in_file(jinja_context)
prepare_readme_file(jinja_context)
return True
def replace_content(file_path, old_text, new_text, provider_package_id):
if new_text != old_text:
_, temp_file_path = tempfile.mkstemp()
try:
if os.path.isfile(file_path):
copyfile(file_path, temp_file_path)
with open(file_path, "wt") as readme_file:
readme_file.write(new_text)
console.print()
console.print(f"Generated {file_path} file for the {provider_package_id} provider")
console.print()
if old_text != "":
subprocess.call(["diff", "--color=always", temp_file_path, file_path])
finally:
os.remove(temp_file_path)
AUTOMATICALLY_GENERATED_MARKER = "AUTOMATICALLY GENERATED"
AUTOMATICALLY_GENERATED_CONTENT = (
f".. THE REMAINDER OF THE FILE IS {AUTOMATICALLY_GENERATED_MARKER}. "
f"IT WILL BE OVERWRITTEN AT RELEASE TIME!"
)
def update_index_rst(
context,
provider_package_id,
target_path,
):
index_update = render_template(
template_name="PROVIDER_INDEX", context=context, extension='.rst', keep_trailing_newline=True
)
index_file_path = os.path.join(target_path, "index.rst")
old_text = ""
if os.path.isfile(index_file_path):
with open(index_file_path) as readme_file_read:
old_text = readme_file_read.read()
new_text = deepcopy(old_text)
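    # Keep the hand-written part above the AUTOMATICALLY GENERATED marker and regenerate everything
    # from the marker line down from the rendered template.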
lines = old_text.splitlines(keepends=False)
for index, line in enumerate(lines):
if AUTOMATICALLY_GENERATED_MARKER in line:
new_text = "\n".join(lines[:index])
new_text += "\n" + AUTOMATICALLY_GENERATED_CONTENT + "\n"
new_text += index_update
replace_content(index_file_path, old_text, new_text, provider_package_id)
def update_commits_rst(
context,
provider_package_id,
target_path,
):
new_text = render_template(
template_name="PROVIDER_COMMITS", context=context, extension='.rst', keep_trailing_newline=True
)
index_file_path = os.path.join(target_path, "commits.rst")
old_text = ""
if os.path.isfile(index_file_path):
with open(index_file_path) as readme_file_read:
old_text = readme_file_read.read()
replace_content(index_file_path, old_text, new_text, provider_package_id)
@lru_cache(maxsize=None)
def black_mode():
from black import Mode, parse_pyproject_toml, target_version_option_callback
config = parse_pyproject_toml(os.path.join(SOURCE_DIR_PATH, "pyproject.toml"))
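    # Read Black settings from pyproject.toml (under SOURCE_DIR_PATH, presumably the repository root)
    # so the generated files are formatted consistently with the rest of the sources.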
target_versions = set(
target_version_option_callback(None, None, tuple(config.get('target_version', ()))),
)
return Mode(
target_versions=target_versions,
line_length=config.get('line_length', Mode.line_length),
is_pyi=bool(config.get('is_pyi', Mode.is_pyi)),
string_normalization=not bool(config.get('skip_string_normalization', not Mode.string_normalization)),
experimental_string_processing=bool(
config.get('experimental_string_processing', Mode.experimental_string_processing)
),
)
def black_format(content) -> str:
from black import format_str
return format_str(content, mode=black_mode())
def prepare_setup_py_file(context):
setup_py_template_name = "SETUP"
setup_py_file_path = os.path.abspath(os.path.join(get_target_folder(), "setup.py"))
setup_py_content = render_template(
template_name=setup_py_template_name, context=context, extension='.py', autoescape=False
)
with open(setup_py_file_path, "wt") as setup_py_file:
setup_py_file.write(black_format(setup_py_content))
def prepare_setup_cfg_file(context):
setup_cfg_template_name = "SETUP"
setup_cfg_file_path = os.path.abspath(os.path.join(get_target_folder(), "setup.cfg"))
setup_cfg_content = render_template(
template_name=setup_cfg_template_name,
context=context,
extension='.cfg',
autoescape=False,
keep_trailing_newline=True,
)
with open(setup_cfg_file_path, "wt") as setup_cfg_file:
setup_cfg_file.write(setup_cfg_content)
def prepare_get_provider_info_py_file(context, provider_package_id: str):
get_provider_template_name = "get_provider_info"
get_provider_file_path = os.path.abspath(
os.path.join(
get_target_providers_package_folder(provider_package_id),
"get_provider_info.py",
)
)
get_provider_content = render_template(
template_name=get_provider_template_name,
context=context,
extension='.py',
autoescape=False,
keep_trailing_newline=True,
)
with open(get_provider_file_path, "wt") as get_provider_file:
get_provider_file.write(black_format(get_provider_content))
def prepare_manifest_in_file(context):
target = os.path.abspath(os.path.join(get_target_folder(), "MANIFEST.in"))
content = render_template(
template_name="MANIFEST",
context=context,
extension='.in',
autoescape=False,
keep_trailing_newline=True,
)
with open(target, "wt") as fh:
fh.write(content)
def get_all_providers() -> List[str]:
"""
Returns all providers for regular packages.
:return: list of providers that are considered for provider packages
"""
return list(PROVIDERS_REQUIREMENTS.keys())
def verify_provider_package(provider_package_id: str) -> None:
"""
    Verifies that the given provider package id refers to a valid provider package.
:param provider_package_id: package id to verify
:return: None
"""
if provider_package_id not in get_provider_packages():
console.print(f"[red]Wrong package name: {provider_package_id}[/]")
console.print("Use one of:")
console.print(get_provider_packages())
raise Exception(f"The package {provider_package_id} is not a provider package.")
def verify_changelog_exists(package: str) -> str:
provider_details = get_provider_details(package)
changelog_path = os.path.join(provider_details.source_provider_package_path, "CHANGELOG.rst")
if not os.path.isfile(changelog_path):
console.print(f"[red]ERROR: Missing ${changelog_path}[/]")
console.print("Please add the file with initial content:")
console.print()
syntax = Syntax(
INITIAL_CHANGELOG_CONTENT,
"rst",
theme="ansi_dark",
)
console.print(syntax)
console.print()
raise Exception(f"Missing {changelog_path}")
return changelog_path
@cli.command()
def list_providers_packages():
"""List all provider packages."""
providers = get_all_providers()
for provider in providers:
console.print(provider)
@cli.command()
@option_version_suffix
@option_git_update
@argument_package_id
@option_force
@option_verbose
@click.option(
"-a",
"--answer",
type=click.Choice(['y', 'n', 'q', 'yes', 'no', 'quit']),
help="Force answer to questions.",
envvar='ANSWER',
)
def update_package_documentation(
version_suffix: str,
git_update: bool,
answer: Optional[str],
package_id: str,
force: bool,
verbose: bool,
):
"""
Updates package documentation.
See `list-providers-packages` subcommand for the possible PACKAGE_ID values
"""
provider_package_id = package_id
verify_provider_package(provider_package_id)
with with_group(f"Update release notes for package '{provider_package_id}' "):
console.print("Updating documentation for the latest release version.")
make_sure_remote_apache_exists_and_fetch(git_update, verbose)
if not update_release_notes(
provider_package_id, version_suffix, force=force, verbose=verbose, answer=answer
):
# Returns 64 in case of skipped package
sys.exit(64)
def tag_exists_for_version(provider_package_id: str, current_tag: str, verbose: bool):
provider_details = get_provider_details(provider_package_id)
if verbose:
console.print(f"Checking if tag `{current_tag}` exists.")
if not subprocess.call(
get_git_tag_check_command(current_tag),
cwd=provider_details.source_provider_package_path,
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
):
if verbose:
console.print(f"Tag `{current_tag}` exists.")
return True
if verbose:
console.print(f"Tag `{current_tag}` does not exist.")
return False
@cli.command()
@option_version_suffix
@option_git_update
@argument_package_id
@option_verbose
@option_skip_tag_check
def generate_setup_files(
version_suffix: str, git_update: bool, package_id: str, verbose: bool, skip_tag_check: bool
):
"""
Generates setup files for the package.
See `list-providers-packages` subcommand for the possible PACKAGE_ID values
"""
provider_package_id = package_id
with with_group(f"Generate setup files for '{provider_package_id}'"):
if not skip_tag_check:
current_tag = get_current_tag(provider_package_id, version_suffix, git_update, verbose)
if tag_exists_for_version(provider_package_id, current_tag, verbose):
console.print(f"[yellow]The tag {current_tag} exists. Not preparing the package.[/]")
# Returns 1 in case of skipped package
sys.exit(1)
if update_setup_files(provider_package_id, version_suffix):
console.print(f"[green]Generated regular package setup files for {provider_package_id}[/]")
else:
# Returns 64 in case of skipped package
sys.exit(64)
def get_current_tag(provider_package_id: str, suffix: str, git_update: bool, verbose: bool):
verify_provider_package(provider_package_id)
make_sure_remote_apache_exists_and_fetch(git_update, verbose)
provider_info = get_provider_info_from_provider_yaml(provider_package_id)
versions: List[str] = provider_info['versions']
current_version = versions[0]
current_tag = get_version_tag(current_version, provider_package_id, suffix)
return current_tag
def cleanup_remnants(verbose: bool):
if verbose:
console.print("Cleaning remnants")
files = glob.glob("*.egg-info")
for file in files:
shutil.rmtree(file, ignore_errors=True)
files = glob.glob("build")
for file in files:
shutil.rmtree(file, ignore_errors=True)
def verify_setup_cfg_prepared(provider_package):
with open("setup.cfg") as f:
setup_content = f.read()
search_for = f"providers-{provider_package.replace('.','-')} for Apache Airflow"
if search_for not in setup_content:
console.print(
f"[red]The setup.py is probably prepared for another package. "
f"It does not contain [bold]{search_for}[/bold]![/]"
)
console.print(
f"\nRun:\n\n[bold]./dev/provider_packages/prepare_provider_packages.py "
f"generate-setup-files {provider_package}[/bold]\n"
)
raise Exception("Wrong setup!")
@cli.command()
@click.option(
'--package-format',
type=click.Choice(['sdist', 'wheel', 'both']),
default='wheel',
help='Optional format - only used in case of building packages (default: wheel)',
)
@option_git_update
@option_version_suffix
@argument_package_id
@option_verbose
@option_skip_tag_check
def build_provider_packages(
package_format: str,
git_update: bool,
version_suffix: str,
package_id: str,
verbose: bool,
skip_tag_check: bool,
):
"""
Builds provider package.
See `list-providers-packages` subcommand for the possible PACKAGE_ID values
"""
import tempfile
# we cannot use context managers because if the directory gets deleted (which bdist_wheel does),
# the context manager will throw an exception when trying to delete it again
tmp_build_dir = tempfile.TemporaryDirectory().name
tmp_dist_dir = tempfile.TemporaryDirectory().name
try:
provider_package_id = package_id
with with_group(f"Prepare provider package for '{provider_package_id}'"):
if not skip_tag_check and (version_suffix.startswith("rc") or version_suffix == ""):
# For RC and official releases we check if the "officially released" version exists
                # and skip the release if it already happened. This allows skipping packages that have not been
# marked for release. For "dev" suffixes, we always build all packages
released_tag = get_current_tag(provider_package_id, "", git_update, verbose)
if tag_exists_for_version(provider_package_id, released_tag, verbose):
console.print(f"[yellow]The tag {released_tag} exists. Skipping the package.[/]")
return False
console.print(f"Changing directory to {TARGET_PROVIDER_PACKAGES_PATH}")
os.chdir(TARGET_PROVIDER_PACKAGES_PATH)
cleanup_remnants(verbose)
provider_package = package_id
verify_setup_cfg_prepared(provider_package)
console.print(f"Building provider package: {provider_package} in format {package_format}")
command = ["python3", "setup.py", "build", "--build-temp", tmp_build_dir]
if version_suffix is not None:
command.extend(['egg_info', '--tag-build', version_suffix])
if package_format in ['sdist', 'both']:
command.append("sdist")
if package_format in ['wheel', 'both']:
command.extend(["bdist_wheel", "--bdist-dir", tmp_dist_dir])
console.print(f"Executing command: '{' '.join(command)}'")
try:
subprocess.check_call(command, stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError as ex:
                if ex.output:
                    console.print(ex.output.decode())
                raise Exception(f"The command {command} returned an error")
console.print(
f"[green]Prepared provider package {provider_package} in format {package_format}[/]"
)
finally:
shutil.rmtree(tmp_build_dir, ignore_errors=True)
shutil.rmtree(tmp_dist_dir, ignore_errors=True)
def verify_provider_classes_for_single_provider(imported_classes: List[str], provider_package_id: str):
"""Verify naming of provider classes for single provider."""
full_package_name = f"airflow.providers.{provider_package_id}"
entity_summaries = get_package_class_summary(full_package_name, imported_classes)
total, bad = check_if_classes_are_properly_named(entity_summaries)
bad += sum(len(entity_summary.wrong_entities) for entity_summary in entity_summaries.values())
if bad != 0:
console.print()
console.print(f"[red]There are {bad} errors of {total} entities for {provider_package_id}[/]")
console.print()
return total, bad
def summarise_total_vs_bad_and_warnings(total: int, bad: int, warns: List[warnings.WarningMessage]) -> bool:
"""Summarises Bad/Good class names for providers and warnings"""
raise_error = False
if bad == 0:
console.print()
console.print(f"[green]OK: All {total} entities are properly named[/]")
console.print()
console.print("Totals:")
console.print()
for entity in EntityType:
console.print(f"{entity.value}: {TOTALS[entity]}")
console.print()
else:
console.print()
console.print(
f"[red]ERROR! There are in total: {bad} entities badly named out of {total} entities[/]"
)
console.print()
raise_error = True
if warns:
if os.environ.get('GITHUB_ACTIONS'):
# Ends group in GitHub Actions so that the errors are immediately visible in CI log
console.print("::endgroup::")
console.print()
console.print("[red]Unknown warnings generated:[/]")
console.print()
for w in warns:
one_line_message = str(w.message).replace('\n', ' ')
console.print(f"{w.filename}:{w.lineno}:[yellow]{one_line_message}[/]")
console.print()
console.print(f"[red]ERROR! There were {len(warns)} warnings generated during the import[/]")
console.print()
console.print("[yellow]Ideally, fix it, so that no warnings are generated during import.[/]")
console.print("[yellow]There are three cases that are legitimate deprecation warnings though:[/]")
console.print("[yellow] 1) when you deprecate whole module or class and replace it in provider[/]")
console.print("[yellow] 2) when 3rd-party module generates Deprecation and you cannot upgrade it[/]")
console.print(
"[yellow] 3) when many 3rd-party module generates same Deprecation warning that "
"comes from another common library[/]"
)
console.print()
console.print(
"[yellow]In case 1), add the deprecation message to "
"the KNOWN_DEPRECATED_DIRECT_IMPORTS in prepare_provider_packages.py[/]"
)
console.print(
"[yellow]In case 2), add the deprecation message together with module it generates to "
"the KNOWN_DEPRECATED_MESSAGES in prepare_provider_packages.py[/]"
)
console.print(
"[yellow]In case 3), add the deprecation message to "
"the KNOWN_COMMON_DEPRECATED_MESSAGES in prepare_provider_packages.py[/]"
)
console.print()
raise_error = True
else:
console.print()
console.print("[green]OK: No warnings generated[/]")
console.print()
if raise_error:
console.print("[red]Please fix the problems listed above [/]")
return False
return True
# The set of known deprecation messages that we know about.
# It contains tuples of "message" and the module that generates the warning - so when the
# same warning is generated by a different module, it is not treated as a "known" warning.
KNOWN_DEPRECATED_MESSAGES: Set[Tuple[str, str]] = {
(
'This version of Apache Beam has not been sufficiently tested on Python 3.9. '
'You may encounter bugs or missing features.',
"apache_beam",
),
(
'This version of Apache Beam has not been sufficiently tested on Python 3.10. '
'You may encounter bugs or missing features.',
"apache_beam",
),
(
"Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since"
" Python 3.3,and in 3.9 it will stop working",
"apache_beam",
),
(
'pyarrow.HadoopFileSystem is deprecated as of 2.0.0, please use pyarrow.fs.HadoopFileSystem instead.',
"papermill",
),
(
"You have an incompatible version of 'pyarrow' installed (4.0.1), please install a version that "
"adheres to: 'pyarrow<3.1.0,>=3.0.0; extra == \"pandas\"'",
"apache_beam",
),
(
"You have an incompatible version of 'pyarrow' installed (4.0.1), please install a version that "
"adheres to: 'pyarrow<5.1.0,>=5.0.0; extra == \"pandas\"'",
"snowflake",
),
("dns.hash module will be removed in future versions. Please use hashlib instead.", "dns"),
("PKCS#7 support in pyOpenSSL is deprecated. You should use the APIs in cryptography.", "eventlet"),
("PKCS#12 support in pyOpenSSL is deprecated. You should use the APIs in cryptography.", "eventlet"),
(
"the imp module is deprecated in favour of importlib; see the module's documentation"
" for alternative uses",
"hdfs",
),
("This operator is deprecated. Please use `airflow.providers.tableau.operators.tableau`.", "salesforce"),
(
"You have an incompatible version of 'pyarrow' installed (4.0.1), please install a version that"
" adheres to: 'pyarrow<3.1.0,>=3.0.0; extra == \"pandas\"'",
"snowflake",
),
(
"You have an incompatible version of 'pyarrow' installed (6.0.1), please install a version that"
" adheres to: 'pyarrow<5.1.0,>=5.0.0; extra == \"pandas\"'",
"snowflake",
),
("SelectableGroups dict interface is deprecated. Use select.", "kombu"),
("The module cloudant is now deprecated. The replacement is ibmcloudant.", "cloudant"),
("This module is deprecated. Please use `airflow.operators.empty`.", "dbt"),
("This module is deprecated. Please use `airflow.operators.empty`.", "jdbc"),
("This module is deprecated. Please use `airflow.operators.empty`.", "azure"),
("This module is deprecated. Please use `airflow.operators.empty`.", "qubole"),
("This module is deprecated. Please use `airflow.operators.empty`.", "winrm"),
("This class is deprecated. Please use `airflow.operators.empty.EmptyOperator`.", "dbt"),
("This class is deprecated. Please use `airflow.operators.empty.EmptyOperator`.", "jdbc"),
("This class is deprecated. Please use `airflow.operators.empty.EmptyOperator`.", "azure"),
("This class is deprecated. Please use `airflow.operators.empty.EmptyOperator`.", "qubole"),
("This class is deprecated. Please use `airflow.operators.empty.EmptyOperator`.", "winrm"),
}
KNOWN_COMMON_DEPRECATED_MESSAGES: Set[str] = {
"distutils Version classes are deprecated. Use packaging.version instead."
}
# The set of warning messages generated by direct importing of some deprecated modules. We should only
# ignore those messages when the warnings are generated directly by importlib - which means that
# we imported it directly during module walk by the importlib library
KNOWN_DEPRECATED_DIRECT_IMPORTS: Set[str] = {
"This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.batch`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.container_instance`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.container_registry`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.container_volume`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.cosmos`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.data_factory`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.data_lake`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.fileshare`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.operators.batch`.",
"This module is deprecated. "
"Please use `airflow.providers.microsoft.azure.operators.container_instances`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.operators.cosmos`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.secrets.key_vault`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.sensors.cosmos`.",
"This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.dynamodb`.",
"This module is deprecated. Please use `airflow.providers.microsoft.azure.transfers.local_to_wasb`.",
"This module is deprecated. Please use `airflow.providers.tableau.operators.tableau_refresh_workbook`.",
"This module is deprecated. Please use `airflow.providers.tableau.sensors.tableau_job_status`.",
"This module is deprecated. Please use `airflow.providers.tableau.hooks.tableau`.",
"This module is deprecated. Please use `kubernetes.client.models.V1Volume`.",
"This module is deprecated. Please use `kubernetes.client.models.V1VolumeMount`.",
(
"This module is deprecated. Please use `kubernetes.client.models.V1ResourceRequirements`"
" and `kubernetes.client.models.V1ContainerPort`."
),
"This module is deprecated. Please use `kubernetes.client.models.V1EnvVar`.",
'numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header,'
' got 216 from PyObject',
"This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.step_function`.",
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.step_function`.",
'This module is deprecated. Please use `airflow.providers.amazon.aws.operators.ec2`.',
'This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.ec2`.',
"This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.s3`.",
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.s3`.",
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.dms`.",
"This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.dms`.",
'This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr`.',
'This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.emr`.',
"This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.redshift_cluster` "
"or `airflow.providers.amazon.aws.hooks.redshift_sql` as appropriate.",
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.redshift_sql` "
"or `airflow.providers.amazon.aws.operators.redshift_cluster` as appropriate.",
"This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.redshift_cluster`.",
'This module is deprecated. Please use `airflow.providers.amazon.aws.operators.sagemaker`.',
'This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.sagemaker`.',
'This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.emr`.',
'This module is deprecated. Please use `airflow.providers.opsgenie.hooks.opsgenie`.',
'This module is deprecated. Please use `airflow.providers.opsgenie.operators.opsgenie`.',
'This module is deprecated. Please use `airflow.hooks.redshift_sql` '
'or `airflow.hooks.redshift_cluster` as appropriate.',
'This module is deprecated. Please use `airflow.providers.amazon.aws.operators.redshift_sql` or '
'`airflow.providers.amazon.aws.operators.redshift_cluster` as appropriate.',
'This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.redshift_cluster`.',
"This module is deprecated. Please use airflow.providers.amazon.aws.transfers.sql_to_s3`.",
}
def filter_known_warnings(warn: warnings.WarningMessage) -> bool:
msg_string = str(warn.message).replace("\n", " ")
for m in KNOWN_DEPRECATED_MESSAGES:
expected_package_string = "/" + m[1] + "/"
if msg_string == m[0] and warn.filename.find(expected_package_string) != -1:
return False
return True
def filter_direct_importlib_warning(warn: warnings.WarningMessage) -> bool:
msg_string = str(warn.message).replace("\n", " ")
for m in KNOWN_DEPRECATED_DIRECT_IMPORTS:
if msg_string == m and warn.filename.find("/importlib/") != -1:
return False
return True
def filter_known_common_deprecated_messages(warn: warnings.WarningMessage) -> bool:
msg_string = str(warn.message).replace("\n", " ")
for m in KNOWN_COMMON_DEPRECATED_MESSAGES:
if msg_string == m:
return False
return True
@cli.command()
def verify_provider_classes():
"""Verifies names for all provider classes."""
with with_group("Verifies names for all provider classes"):
provider_ids = get_all_providers()
imported_classes, warns = import_all_classes(
provider_ids=provider_ids,
print_imports=True,
paths=[str(PROVIDERS_PATH)],
prefix="airflow.providers.",
)
total = 0
bad = 0
for provider_package_id in provider_ids:
inc_total, inc_bad = verify_provider_classes_for_single_provider(
imported_classes, provider_package_id
)
total += inc_total
bad += inc_bad
warns = list(filter(filter_known_warnings, warns))
warns = list(filter(filter_direct_importlib_warning, warns))
warns = list(filter(filter_known_common_deprecated_messages, warns))
if not summarise_total_vs_bad_and_warnings(total, bad, warns):
sys.exit(1)
def find_insertion_index_for_version(content: List[str], version: str) -> Tuple[int, bool]:
"""
Finds insertion index for the specified version from the .rst changelog content.
:param content: changelog split into separate lines
:param version: version to look for
    :return: Tuple of (insertion_index, append) - append is True when a section for the version already exists
"""
changelog_found = False
skip_next_line = False
index = 0
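    # Each version section heading in the RST changelog is the version string followed by an underline
    # made of dots; return the index two lines above the next underline found after the requested version.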
for index, line in enumerate(content):
if not changelog_found and line.strip() == version:
changelog_found = True
skip_next_line = True
elif not skip_next_line and line and all(char == '.' for char in line):
return index - 2, changelog_found
else:
skip_next_line = False
return index, changelog_found
class ClassifiedChanges(NamedTuple):
"""Stores lists of changes classified automatically"""
fixes: List[Change] = []
features: List[Change] = []
breaking_changes: List[Change] = []
other: List[Change] = []
def get_changes_classified(changes: List[Change]) -> ClassifiedChanges:
"""
    Pre-classifies changes based on the commit message. It is wild guessing for now, but if we switch
    to semantic commits it could be automated. The resulting list is supposed to be manually reviewed
    and re-classified by the release manager anyway.
:param changes: list of changes
:return: list of changes classified semi-automatically to the fix/feature/breaking/other buckets
"""
classified_changes = ClassifiedChanges()
for change in changes:
if "fix" in change.message.lower():
classified_changes.fixes.append(change)
elif "add" in change.message.lower():
classified_changes.features.append(change)
elif "breaking" in change.message.lower():
classified_changes.breaking_changes.append(change)
else:
classified_changes.other.append(change)
return classified_changes
@cli.command()
@argument_package_id
@option_verbose
def update_changelog(package_id: str, verbose: bool):
"""Updates changelog for the provider."""
if _update_changelog(package_id, verbose):
sys.exit(64)
def _update_changelog(package_id: str, verbose: bool) -> bool:
"""
Internal update changelog method
:param package_id: package id
:param verbose: verbose flag
:return: true if package is skipped
"""
with with_group("Updates changelog for last release"):
verify_provider_package(package_id)
provider_details = get_provider_details(package_id)
provider_info = get_provider_info_from_provider_yaml(package_id)
current_release_version = provider_details.versions[0]
jinja_context = get_provider_jinja_context(
provider_info=provider_info,
provider_details=provider_details,
current_release_version=current_release_version,
version_suffix='',
)
changelog_path = os.path.join(provider_details.source_provider_package_path, "CHANGELOG.rst")
proceed, changes, _ = get_all_changes_for_package(
provider_details.versions,
package_id,
provider_details.source_provider_package_path,
verbose,
)
if not proceed:
console.print(
f"[yellow]The provider {package_id} is not being released. Skipping the package.[/]"
)
return True
generate_new_changelog(package_id, provider_details, changelog_path, changes)
console.print()
console.print(f"Update index.rst for {package_id}")
console.print()
update_index_rst(jinja_context, package_id, provider_details.documentation_provider_package_path)
return False
def generate_new_changelog(package_id, provider_details, changelog_path, changes):
latest_version = provider_details.versions[0]
with open(changelog_path) as changelog:
current_changelog = changelog.read()
current_changelog_lines = current_changelog.splitlines()
insertion_index, append = find_insertion_index_for_version(current_changelog_lines, latest_version)
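    # `append` is True when a section for the latest version already exists in the changelog; then only
    # PRs not yet mentioned are appended, otherwise a whole new version section is inserted.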
if append:
if not changes:
console.print(
f"[green]The provider {package_id} changelog for `{latest_version}` "
"has first release. Not updating the changelog.[/]"
)
return
new_changes = [
change for change in changes[0] if change.pr and "(#" + change.pr + ")" not in current_changelog
]
if not new_changes:
console.print(
f"[green]The provider {package_id} changelog for `{latest_version}` "
"has no new changes. Not updating the changelog.[/]"
)
return
context = {"new_changes": new_changes}
generated_new_changelog = render_template(
template_name='UPDATE_CHANGELOG', context=context, extension=".rst"
)
else:
classified_changes = get_changes_classified(changes[0])
context = {
"version": latest_version,
"version_header": "." * len(latest_version),
"classified_changes": classified_changes,
}
generated_new_changelog = render_template(
template_name='CHANGELOG', context=context, extension=".rst"
)
new_changelog_lines = current_changelog_lines[0:insertion_index]
new_changelog_lines.extend(generated_new_changelog.splitlines())
new_changelog_lines.extend(current_changelog_lines[insertion_index:])
diff = "\n".join(difflib.context_diff(current_changelog_lines, new_changelog_lines, n=5))
syntax = Syntax(diff, "diff")
console.print(syntax)
if not append:
console.print(
f"[green]The provider {package_id} changelog for `{latest_version}` "
"version is missing. Generating fresh changelog.[/]"
)
else:
console.print(
f"[green]Appending the provider {package_id} changelog for" f"`{latest_version}` version.[/]"
)
with open(changelog_path, "wt") as changelog:
changelog.write("\n".join(new_changelog_lines))
changelog.write("\n")
def get_package_from_changelog(changelog_path: str):
folder = Path(changelog_path).parent
package = ''
separator = ''
while not os.path.basename(folder) == 'providers':
package = os.path.basename(folder) + separator + package
separator = '.'
folder = Path(folder).parent
return package
@cli.command()
@argument_changelog_files
@option_git_update
@option_verbose
def update_changelogs(changelog_files: List[str], git_update: bool, verbose: bool):
"""Updates changelogs for multiple packages."""
if git_update:
make_sure_remote_apache_exists_and_fetch(git_update, verbose)
for changelog_file in changelog_files:
package_id = get_package_from_changelog(changelog_file)
_update_changelog(package_id=package_id, verbose=verbose)
def get_prs_for_package(package_id: str) -> List[int]:
pr_matcher = re.compile(r".*\(#([0-9]*)\)``$")
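    # Changelog lines end with the PR number in the form ``... (#12345)``; capture the digits.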
verify_provider_package(package_id)
changelog_path = verify_changelog_exists(package_id)
provider_details = get_provider_details(package_id)
current_release_version = provider_details.versions[0]
prs = []
with open(changelog_path) as changelog_file:
changelog_lines = changelog_file.readlines()
extract_prs = False
skip_line = False
for line in changelog_lines:
if skip_line:
# Skip first "....." header
skip_line = False
continue
if line.strip() == current_release_version:
extract_prs = True
skip_line = True
continue
if extract_prs:
if len(line) > 1 and all(c == '.' for c in line.strip()):
# Header for next version reached
break
if line.startswith('.. Below changes are excluded from the changelog'):
                    # The remaining PRs are excluded from the changelog, so stop here
break
match_result = pr_matcher.match(line.strip())
if match_result:
prs.append(int(match_result.group(1)))
return prs
PullRequestOrIssue = Union[PullRequest.PullRequest, Issue.Issue]
class ProviderPRInfo(NamedTuple):
provider_details: ProviderPackageDetails
pr_list: List[PullRequestOrIssue]
def is_package_in_dist(dist_files: List[str], package: str) -> bool:
"""Check if package has been prepared in dist folder."""
for file in dist_files:
if file.startswith(f'apache_airflow_providers_{package.replace(".","_")}') or file.startswith(
f'apache-airflow-providers-{package.replace(".","-")}'
):
return True
return False
@cli.command()
@click.option(
'--github-token',
envvar='GITHUB_TOKEN',
help=textwrap.dedent(
"""
Github token used to authenticate.
        You can omit it if you have the GITHUB_TOKEN env variable set.
Can be generated with:
https://github.com/settings/tokens/new?description=Read%20sssues&scopes=repo:status"""
),
)
@click.option('--suffix', default='rc1')
@click.option(
'--only-available-in-dist',
is_flag=True,
help='Only consider package ids with packages prepared in the dist folder',
)
@click.option('--excluded-pr-list', type=str, help="Comma-separated list of PRs to exclude from the issue.")
@argument_package_ids
def generate_issue_content(
package_ids: List[str],
github_token: str,
suffix: str,
only_available_in_dist: bool,
excluded_pr_list: str,
):
    """Generates content for issue to test the release."""
    if not package_ids:
        package_ids = get_all_providers()
with with_group("Generates GitHub issue content with people who can test it"):
if excluded_pr_list:
excluded_prs = [int(pr) for pr in excluded_pr_list.split(",")]
else:
excluded_prs = []
all_prs: Set[int] = set()
provider_prs: Dict[str, List[int]] = {}
if only_available_in_dist:
files_in_dist = os.listdir(str(DIST_PATH))
prepared_package_ids = []
for package_id in package_ids:
if not only_available_in_dist or is_package_in_dist(files_in_dist, package_id):
console.print(f"Extracting PRs for provider {package_id}")
prepared_package_ids.append(package_id)
else:
console.print(f"Skipping extracting PRs for provider {package_id} as it is missing in dist")
continue
prs = get_prs_for_package(package_id)
provider_prs[package_id] = list(filter(lambda pr: pr not in excluded_prs, prs))
all_prs.update(provider_prs[package_id])
g = Github(github_token)
repo = g.get_repo("apache/airflow")
pull_requests: Dict[int, PullRequestOrIssue] = {}
with Progress(console=console) as progress:
task = progress.add_task(f"Retrieving {len(all_prs)} PRs ", total=len(all_prs))
pr_list = list(all_prs)
for i in range(len(pr_list)):
pr_number = pr_list[i]
progress.console.print(
f"Retrieving PR#{pr_number}: " f"https://github.com/apache/airflow/pull/{pr_number}"
)
try:
pull_requests[pr_number] = repo.get_pull(pr_number)
except UnknownObjectException:
# Fallback to issue if PR not found
try:
pull_requests[pr_number] = repo.get_issue(pr_number) # (same fields as PR)
except UnknownObjectException:
console.print(f"[red]The PR #{pr_number} could not be found[/]")
progress.advance(task)
interesting_providers: Dict[str, ProviderPRInfo] = {}
non_interesting_providers: Dict[str, ProviderPRInfo] = {}
for package_id in prepared_package_ids:
pull_request_list = [pull_requests[pr] for pr in provider_prs[package_id] if pr in pull_requests]
provider_details = get_provider_details(package_id)
if pull_request_list:
interesting_providers[package_id] = ProviderPRInfo(provider_details, pull_request_list)
else:
non_interesting_providers[package_id] = ProviderPRInfo(provider_details, pull_request_list)
context = {
'interesting_providers': interesting_providers,
'date': datetime.now(),
'suffix': suffix,
'non_interesting_providers': non_interesting_providers,
}
issue_content = render_template(template_name="PROVIDER_ISSUE", context=context, extension=".md")
console.print()
console.print(
"[green]Below you can find the issue content that you can use "
"to ask contributor to test providers![/]"
)
console.print()
console.print()
console.print(
"Issue title: [yellow]Status of testing Providers that were "
f"prepared on { datetime.now().strftime('%B %d, %Y') }[/]"
)
console.print()
syntax = Syntax(issue_content, "markdown", theme="ansi_dark")
console.print(syntax)
console.print()
users: Set[str] = set()
for provider_info in interesting_providers.values():
for pr in provider_info.pr_list:
users.add("@" + pr.user.login)
console.print("All users involved in the PRs:")
console.print(" ".join(users))
if __name__ == "__main__":
# The cli exit code is:
# * 0 in case of success
# * 1 in case of error
# * 64 in case of skipped package
# * 65 in case user decided to quit
# * 66 in case package has doc-only changes
try:
cli()
except KeyboardInterrupt:
print('Interrupted')
try:
sys.exit(65)
except SystemExit:
os._exit(65)
|
the-stack_0_9045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: shufflenet.py
import argparse
import math
import numpy as np
import os
import cv2
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import imgaug
from tensorpack.tfutils import argscope, get_model_loader, model_utils
from tensorpack.tfutils.scope_utils import under_name_scope
from tensorpack.utils import logger
from tensorpack.utils.gpu import get_num_gpu
from imagenet_utils import ImageNetModel, eval_on_ILSVRC12, get_imagenet_dataflow
@layer_register(log_shape=True)
def DepthConv(x, out_channel, kernel_shape, padding='SAME', stride=1,
W_init=None, activation=tf.identity):
in_shape = x.get_shape().as_list()
in_channel = in_shape[1]
assert out_channel % in_channel == 0, (out_channel, in_channel)
channel_mult = out_channel // in_channel
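    # Depthwise convolution: each input channel is convolved with its own `channel_mult` filters.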
if W_init is None:
W_init = tf.variance_scaling_initializer(2.0)
kernel_shape = [kernel_shape, kernel_shape]
filter_shape = kernel_shape + [in_channel, channel_mult]
W = tf.get_variable('W', filter_shape, initializer=W_init)
conv = tf.nn.depthwise_conv2d(x, W, [1, 1, stride, stride], padding=padding, data_format='NCHW')
return activation(conv, name='output')
@under_name_scope()
def channel_shuffle(l, group):
in_shape = l.get_shape().as_list()
in_channel = in_shape[1]
assert in_channel % group == 0, in_channel
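    # Reshape to (N, C//g, g, H, W), swap the two channel axes, and flatten back so that
    # channels from different groups are interleaved.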
l = tf.reshape(l, [-1, in_channel // group, group] + in_shape[-2:])
l = tf.transpose(l, [0, 2, 1, 3, 4])
l = tf.reshape(l, [-1, in_channel] + in_shape[-2:])
return l
@layer_register()
def shufflenet_unit(l, out_channel, group, stride):
in_shape = l.get_shape().as_list()
in_channel = in_shape[1]
shortcut = l
# "We do not apply group convolution on the first pointwise layer
# because the number of input channels is relatively small."
first_split = group if in_channel > 24 else 1
l = Conv2D('conv1', l, out_channel // 4, 1, split=first_split, activation=BNReLU)
l = channel_shuffle(l, group)
l = DepthConv('dconv', l, out_channel // 4, 3, stride=stride)
l = BatchNorm('dconv_bn', l)
l = Conv2D('conv2', l,
out_channel if stride == 1 else out_channel - in_channel,
1, split=group)
l = BatchNorm('conv2_bn', l)
if stride == 1: # unit (b)
output = tf.nn.relu(shortcut + l)
else: # unit (c)
shortcut = AvgPooling('avgpool', shortcut, 3, 2, padding='SAME')
output = tf.concat([shortcut, tf.nn.relu(l)], axis=1)
return output
@layer_register()
def shufflenet_unit_v2(l, out_channel, stride):
if stride == 1:
shortcut, l = tf.split(l, 2, axis=1)
else:
shortcut, l = l, l
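    # With stride 1 the input was split in half (identity shortcut + conv branch); with stride 2
    # both branches process the full input and the spatial resolution is halved.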
shortcut_channel = int(shortcut.shape[1])
l = Conv2D('conv1', l, out_channel // 2, 1, activation=BNReLU)
l = DepthConv('dconv', l, out_channel // 2, 3, stride=stride)
l = BatchNorm('dconv_bn', l)
l = Conv2D('conv2', l, out_channel - shortcut_channel, 1, activation=BNReLU)
if stride == 2:
shortcut = DepthConv('shortcut_dconv', shortcut, shortcut_channel, 3, stride=2)
shortcut = BatchNorm('shortcut_dconv_bn', shortcut)
shortcut = Conv2D('shortcut_conv', shortcut, shortcut_channel, 1, activation=BNReLU)
output = tf.concat([shortcut, l], axis=1)
output = channel_shuffle(output, 2)
return output
@layer_register(log_shape=True)
def shufflenet_stage(input, channel, num_blocks, group):
l = input
for i in range(num_blocks):
name = 'block{}'.format(i)
if args.v2:
l = shufflenet_unit_v2(name, l, channel, 2 if i == 0 else 1)
else:
l = shufflenet_unit(name, l, channel, group, 2 if i == 0 else 1)
return l
class Model(ImageNetModel):
weight_decay = 4e-5
def get_logits(self, image):
with argscope([Conv2D, MaxPooling, AvgPooling, GlobalAvgPooling, BatchNorm], data_format='channels_first'), \
argscope(Conv2D, use_bias=False):
group = args.group
if not args.v2:
# Copied from the paper
channels = {
3: [240, 480, 960],
4: [272, 544, 1088],
8: [384, 768, 1536]
}
mul = group * 4 # #chan has to be a multiple of this number
channels = [int(math.ceil(x * args.ratio / mul) * mul)
for x in channels[group]]
# The first channel must be a multiple of group
first_chan = int(math.ceil(24 * args.ratio / group) * group)
else:
# Copied from the paper
channels = {
0.5: [48, 96, 192],
1.: [116, 232, 464]
}[args.ratio]
first_chan = 24
logger.info("#Channels: " + str([first_chan] + channels))
l = Conv2D('conv1', image, first_chan, 3, strides=2, activation=BNReLU)
l = MaxPooling('pool1', l, 3, 2, padding='SAME')
l = shufflenet_stage('stage2', l, channels[0], 4, group)
l = shufflenet_stage('stage3', l, channels[1], 8, group)
l = shufflenet_stage('stage4', l, channels[2], 4, group)
if args.v2:
l = Conv2D('conv5', l, 1024, 1, activation=BNReLU)
l = GlobalAvgPooling('gap', l)
logits = FullyConnected('linear', l, 1000)
return logits
def get_data(name, batch):
isTrain = name == 'train'
if isTrain:
augmentors = [
# use lighter augs if model is too small
imgaug.GoogleNetRandomCropAndResize(crop_area_fraction=(0.49 if args.ratio < 1 else 0.08, 1.)),
imgaug.RandomOrderAug(
[imgaug.BrightnessScale((0.6, 1.4), clip=False),
imgaug.Contrast((0.6, 1.4), clip=False),
imgaug.Saturation(0.4, rgb=False),
# rgb-bgr conversion for the constants copied from fb.resnet.torch
imgaug.Lighting(0.1,
eigval=np.asarray(
[0.2175, 0.0188, 0.0045][::-1]) * 255.0,
eigvec=np.array(
[[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]],
dtype='float32')[::-1, ::-1]
)]),
imgaug.Flip(horiz=True),
]
else:
augmentors = [
imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
imgaug.CenterCrop((224, 224)),
]
return get_imagenet_dataflow(
args.data, name, batch, augmentors)
def get_config(model, nr_tower):
batch = TOTAL_BATCH_SIZE // nr_tower
logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))
dataset_train = get_data('train', batch)
dataset_val = get_data('val', batch)
step_size = 1280000 // TOTAL_BATCH_SIZE
max_iter = 3 * 10**5
max_epoch = (max_iter // step_size) + 1
callbacks = [
ModelSaver(),
ScheduledHyperParamSetter('learning_rate',
[(0, 0.5), (max_iter, 0)],
interp='linear', step_based=True),
EstimatedTimeLeft()
]
infs = [ClassificationError('wrong-top1', 'val-error-top1'),
ClassificationError('wrong-top5', 'val-error-top5')]
if nr_tower == 1:
# single-GPU inference with queue prefetch
callbacks.append(InferenceRunner(QueueInput(dataset_val), infs))
else:
# multi-GPU inference (with mandatory queue prefetch)
callbacks.append(DataParallelInferenceRunner(
dataset_val, infs, list(range(nr_tower))))
return TrainConfig(
model=model,
dataflow=dataset_train,
callbacks=callbacks,
steps_per_epoch=step_size,
max_epoch=max_epoch,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--data', help='ILSVRC dataset dir')
parser.add_argument('-r', '--ratio', type=float, default=0.5, choices=[1., 0.5])
parser.add_argument('--group', type=int, default=8, choices=[3, 4, 8],
help="Number of groups for ShuffleNetV1")
parser.add_argument('--v2', action='store_true', help='Use ShuffleNetV2')
parser.add_argument('--batch', type=int, default=1024, help='total batch size')
parser.add_argument('--load', help='path to load a model from')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--flops', action='store_true', help='print flops and exit')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.v2 and args.group != parser.get_default('group'):
logger.error("group= is not used in ShuffleNetV2!")
if args.batch != 1024:
logger.warn("Total batch size != 1024, you need to change other hyperparameters to get the same results.")
TOTAL_BATCH_SIZE = args.batch
model = Model()
if args.eval:
batch = 128 # something that can run on one gpu
ds = get_data('val', batch)
eval_on_ILSVRC12(model, get_model_loader(args.load), ds)
elif args.flops:
# manually build the graph with batch=1
with TowerContext('', is_training=False):
model.build_graph(
tf.placeholder(tf.float32, [1, 224, 224, 3], 'input'),
tf.placeholder(tf.int32, [1], 'label')
)
model_utils.describe_trainable_vars()
tf.profiler.profile(
tf.get_default_graph(),
cmd='op',
options=tf.profiler.ProfileOptionBuilder.float_operation())
logger.info("Note that TensorFlow counts flops in a different way from the paper.")
logger.info("TensorFlow counts multiply+add as two flops, however the paper counts them "
"as 1 flop because it can be executed in one instruction.")
else:
if args.v2:
name = "ShuffleNetV2-{}x".format(args.ratio)
else:
name = "ShuffleNetV1-{}x-g{}".format(args.ratio, args.group)
logger.set_logger_dir(os.path.join('train_log', name))
nr_tower = max(get_num_gpu(), 1)
config = get_config(model, nr_tower)
if args.load:
config.session_init = get_model_loader(args.load)
launch_train_with_config(config, SyncMultiGPUTrainerParameterServer(nr_tower))
|
the-stack_0_9046 | from elasticsearch import Elasticsearch
from rdflib import Graph
from constants import CLASS_INDEX, RELATION_INDEX
from constants import ENTITY_INDEX
from constants import LABEL_PRED_LOWER
es = Elasticsearch(['http://geo-qa.cs.upb.de:9200/'])
def indexClasses(filepath):
g = Graph()
g.parse(filepath)
for stmt in g:
if str(stmt[1]).lower() == LABEL_PRED_LOWER and stmt[2]._language.lower() == "en-gb": # Only english labels
addToIndexAlt(CLASS_INDEX, str(stmt[0]), stmt[2]._value)
def indexEntities(filepath):
g = Graph()
g.parse(filepath)
for stmt in g:
if str(stmt[1]).lower() == LABEL_PRED_LOWER: # Only labels
addToIndexAlt(ENTITY_INDEX, str(stmt[0]), stmt[2]._value)
def indexProperties(filepath):
g = Graph()
g.parse(filepath)
for stmt in g:
        if str(stmt[1]).lower() == LABEL_PRED_LOWER: # Only labels (no language filter here)
addToIndexAlt(RELATION_INDEX, str(stmt[0]), stmt[2]._value)
def addToIndexAlt(index_name, uri, label):
try:
es.index(index=index_name, body={"uri": uri, "label": label})
print(label)
return True
    except Exception:
return 'error'
if __name__ == "__main__":
# indexEntities("./cutomizations/bremen-entitiy-all-labels.nt")
# print("Entities done")
# print()
# indexClasses("./cutomizations/lgdo_2014-07-26.n3")
# print("Classes done")
indexProperties("/home/hardik/Projects/falcon2.0/customizations/props.nt")
print("Properties done")
|
the-stack_0_9047 | # -*- coding: utf-8 -*-
"""Manages custom event formatter helpers."""
class FormattersManager(object):
"""Custom event formatter helpers manager."""
_custom_formatter_helpers = {}
@classmethod
def GetEventFormatterHelper(cls, identifier):
"""Retrieves a custom event formatter helper.
Args:
identifier (str): identifier.
Returns:
CustomEventFormatterHelper: custom event formatter or None if not
available.
"""
identifier = identifier.lower()
return cls._custom_formatter_helpers.get(identifier)
@classmethod
def RegisterEventFormatterHelper(cls, formatter_helper_class):
"""Registers a custom event formatter helper.
The custom event formatter helpers are identified based on their lower
case identifier.
Args:
formatter_helper_class (type): class of the custom event formatter helper.
Raises:
KeyError: if a custom formatter helper is already set for the
corresponding identifier.
"""
identifier = formatter_helper_class.IDENTIFIER.lower()
if identifier in cls._custom_formatter_helpers:
raise KeyError((
'Custom event formatter helper already set for identifier: '
'{0:s}.').format(formatter_helper_class.IDENTIFIER))
cls._custom_formatter_helpers[identifier] = formatter_helper_class()
@classmethod
def RegisterEventFormatterHelpers(cls, formatter_helper_classes):
"""Registers custom event formatter helpers.
The formatter classes are identified based on their lower case data type.
Args:
formatter_helper_classes (list[type]): classes of the custom event
formatter helpers.
Raises:
KeyError: if a custom formatter helper is already set for the
corresponding data type.
"""
for formatter_helper_class in formatter_helper_classes:
cls.RegisterEventFormatterHelper(formatter_helper_class)
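# Illustrative usage sketch (added note, hypothetical names): any class that
# exposes an IDENTIFIER attribute can be registered and then looked up,
# case-insensitively, by that identifier.
#
#   class ExampleFormatterHelper(object):
#       IDENTIFIER = 'example'
#
#   FormattersManager.RegisterEventFormatterHelper(ExampleFormatterHelper)
#   helper = FormattersManager.GetEventFormatterHelper('EXAMPLE')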
|
the-stack_0_9048 | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for deleting firewall rules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.firewall_rules import flags
class Delete(base.DeleteCommand):
"""Delete Compute Engine firewall rules.
*{command}* deletes one or more Compute Engine firewall
rules.
"""
FIREWALL_ARG = None
@staticmethod
def Args(parser):
Delete.FIREWALL_ARG = flags.FirewallRuleArgument(plural=True)
Delete.FIREWALL_ARG.AddArgument(parser, operation_type='delete')
parser.display_info.AddCacheUpdater(flags.FirewallsCompleter)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
firewall_refs = Delete.FIREWALL_ARG.ResolveAsResource(
args,
holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(client))
utils.PromptForDeletion(firewall_refs)
requests = []
for firewall_ref in firewall_refs:
requests.append((client.apitools_client.firewalls, 'Delete',
client.messages.ComputeFirewallsDeleteRequest(
**firewall_ref.AsDict())))
return client.MakeRequests(requests)
|
the-stack_0_9050 | from flask import jsonify, request, g, url_for, current_app, abort
from . import api
from ..models import Post, Permission
from .decorators import permission_required
from .. import db
from .errors import forbidden
@api.route('/posts/')
def get_posts():
page = request.args.get('page', 1, type=int)
pagination = Post.query.paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_posts', page=page-1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_posts', page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
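# Illustrative note (added): the response body of get_posts looks like
#   {"posts": [...], "prev": <url or null>, "next": <url or null>, "count": <total>}
# where "prev"/"next" are absolute URLs of the neighbouring pages when they exist.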
@api.route('/posts/<int:id>')
def get_post(id):
post = Post.query.get_or_404(id)
return jsonify(post.to_json())
@api.route('/posts/', methods=['POST'])
@permission_required(Permission.WRITE_ARTICLES)
def new_post():
post = Post.from_json(request.json)
post.author = g.current_user
db.session.add(post)
db.session.commit()
return jsonify(post.to_json()), 201, \
{'Location': url_for('api.get_post', id=post.id, _external=True)}
@api.route('/posts/<int:id>', methods=['PUT'])
@permission_required(Permission.WRITE_ARTICLES)
def edit_post(id):
post = Post.query.get_or_404(id)
if g.current_user != post.author and \
not g.current_user.can(Permission.ADMINISTER):
return forbidden('Insufficient permissions')
post.name = request.json.get('name', post.name)
post.summary = request.json.get('summary', post.summary)
post.body = request.json.get('body', post.body)
db.session.add(post)
return jsonify(post.to_json()) |
the-stack_0_9051 | #!/usr/bin/env python3
#
# author: Michael Brockus
# contact: <mailto:[email protected]>
# license: Apache 2.0 :http://www.apache.org/licenses/LICENSE-2.0
#
# copyright 2020 The Meson-UI development team
#
import subprocess
import logging
color = {
'green': '\x1B[01;32m',
'blue': '\033[94m',
'bold': '\033[1m',
'reset': '\x1B[0m'
}
log_format = (
f'{color["bold"]} cat_log: {color["reset"]}'
f'{color["blue"]} %(funcName)s - {color["reset"]}'
f'{color["bold"]} %(levelname)s: {color["reset"]}'
f'{color["green"]} %(message)s {color["reset"]}'
)
logging.basicConfig(level=logging.INFO, format=log_format)
USER_DEPS: list = [
'git-all',
'libc6-dev',
'gcc',
'g++',
'gobjc',
'gobjc++',
'gfortran',
'ldc',
'rustc',
'default-jre',
'mono-complete'
]
PYPI_DEPS: list = [
'meson==0.53.2',
'cmake==3.16.3',
'ninja==1.9.0',
'pytest==5.3.2',
'pytest-cov==2.8.1',
'codecov==2.0.15',
'PyQt5==5.14.1'
]
def install_user_packages(deps: list, dry_run: bool = False):
for dep in deps:
logging.info(f'installing: {dep}')
subprocess.check_call([
'apt-get', 'install', dep, '--yes', '-qq'])
for dep in deps:
logging.info(f'user dep: {dep}')
def install_pypi_packages(deps: list, dry_run: bool = False):
for dep in deps:
logging.info(f'installing: {dep}')
subprocess.check_call([
'python3', '-m', 'pip', 'install', '--quiet', dep])
for dep in deps:
logging.info(f'pypi dep: {dep}')
def main():
logging.info('Running install commands for both "user" and "python3"')
install_user_packages(USER_DEPS)
install_pypi_packages(PYPI_DEPS)
logging.info('Process done.')
if __name__ == "__main__":
main()
|
the-stack_0_9053 | # qubit number=2
# total number=25
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(2) # number=22
prog += CZ(0,2) # number=23
prog += H(2) # number=24
prog += X(2) # number=12
prog += CNOT(0,2) # number=13
prog += H(1) # number=7
prog += CZ(2,1) # number=8
prog += H(1) # number=9
prog += H(1) # number=18
prog += CZ(2,1) # number=19
prog += H(1) # number=20
prog += Y(1) # number=14
prog += CNOT(2,1) # number=10
prog += Z(2) # number=3
prog += X(1) # number=17
prog += Y(2) # number=5
prog += X(2) # number=21
prog += CNOT(1,0) # number=15
prog += CNOT(1,0) # number=16
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
    qvm = get_qc('3q-qvm')  # the circuit touches qubits 0-2, so a 1-qubit QVM cannot hold it
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil147.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
the-stack_0_9054 | from __future__ import division
import operator
import numpy as np
from scipy import stats, interpolate
#==============================================================================
# This library module is full of functions and classes to compute the maximum
# mutual information (Capacity) between an input (x) (voltage) and output (y) (resistance)
# distribution transmitted through a noisy channel (Pyx) (device).
# Free to use and distribute and alter.
# Created by Jesse Engel (Stanford, UC Berkeley) Sept 2, 2014
#==============================================================================
#==============================================================================
# Discrete Distribution Functions
#==============================================================================
def h(p):
"""Shannon Information
"""
info = -1*np.log2(p)
if np.isscalar(info):
if np.isinf(info) or info == -0.:
info = 0
else:
info[np.where(info == -0.)] = 0
info[np.where(np.isinf(info))] = 0
return info
def H(p):
"""Entropy
"""
return p * h(p)
def H2(p):
"""Binary Entropy
"""
return H(p) + H(1-p)
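# Worked example (added note): H2(0.1) = -0.1*log2(0.1) - 0.9*log2(0.9),
# which is about 0.469 bits.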
def D_KL(p, q):
'''
Compute the KL Diveregence of two finite distributions p and q
Params
------
p
(array) [np]
q
(array) [nq]
Returns
-------
D_KL
(float) in Bits
'''
if p.ndim == 2:
#D_KL for each row of p
d = p * np.log2(p / np.r_[q])
d[np.logical_not(np.isfinite(d))] = 0
return np.sum(d,1)
else:
d = p * np.log2(p / q)
d[np.logical_not(np.isfinite(d))] = 0
return np.sum(d)
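# Worked example (added note): for p = np.array([0.5, 0.5]) and
# q = np.array([0.9, 0.1]), D_KL(p, q) = 0.5*log2(0.5/0.9) + 0.5*log2(0.5/0.1),
# which is roughly 0.74 bits.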
def I(Pyx, Px):
'''
Compute the mutual information of distribution Px traveling through
channel Pyx.
Params
------
Pyx
(array) [ny, nx]
Px
(array) [nx]
Returns
-------
I
(float) in Bits
'''
Pyx = Pyx.T
Py = np.dot(Px, Pyx)
I = np.dot(Px, D_KL(Pyx, Py))
return I
#==============================================================================
# Vectorized Blahut-Arimoto algorithm
#==============================================================================
def blahut_arimoto(Pyx, tolerance = 1e-2, iterations = 1000,
e=np.empty(0), s=0, debug=False, Px0=np.empty(0)):
'''
Blahut-Arimoto algorithm for computing the Capacity of a discrete input-output channel
Based on a matlab code by: Kenneth Shum
http://home.ie.cuhk.edu.hk/~wkshum/wordpress/?p=825
Adapted from Blahut 1972, IEEE Trans. on Info. Theory
Params
----
Pyx
Discrete conditional probability matrix.
(array) [ny, nx]
Keywords
-----
e
Vector of expenses for given x input states
s
        Lagrange multiplier. One-to-one mapping to an average expense constraint.
tolerance
End when IU - IL < tolerance
iterations
Max number of iterations
debug:
Print to console while running
Outputs
----
C
(float) Capacity in bits
Px
(array) [nx] Optimal input distribution
E
(float) Expense. Only output if 'e' is defined
'''
Pyx = Pyx.T # (inputs, outputs)
m, n = Pyx.shape # (m inputs, n outputs)
Px = [np.ones(m)/m, Px0][Px0.any()] # initial distribution for channel input
Py = np.ones(n)/n # initial distribution for channel output
c = np.zeros(m)
energy_constraint = e.any()
D = D_KL(Pyx, Py) #Vector
temp = Pyx / np.c_[np.sum(Pyx,1)]
ind = np.isfinite(temp)
Pyx[ind] = temp[ind]
#Optimizaiton
for i_iter in np.arange(iterations):
if energy_constraint:
c = np.exp( D - s*e )
else:
c = np.exp( D )
#Update
Px = Px * c
Px = Px/np.sum(Px)
Py = np.dot(Px, Pyx)
D = D_KL(Pyx, Py) #Vector
IL = np.log(np.dot(Px, c))
IU = np.log(max(c))
if debug:
if energy_constraint:
E = np.dot(Px, e)
print ('\nE: %.2e' % E)
print ('IL: %.2e IU: %.2e' % (IL, IU))
print ('Iter: %d' % (i_iter+1))
else:
print ('\nIL: %.2e IU: %.2e' % (IL, IU))
print ('Iter: %d' % (i_iter+1))
if tolerance:
if IU-IL < tolerance:
break
C = I(Pyx.T, Px)
if debug:
print ('iterations: %s' % (i_iter+1))
print ('C:', C)
if energy_constraint:
E = np.dot(Px, e)
return C, Px, E
else:
return C, Px
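# --- Illustrative usage sketch (added note, not part of the original library) ---
# A binary symmetric channel with crossover probability p has the textbook
# capacity 1 - H2(p); running blahut_arimoto on its transition matrix should
# recover that value with a roughly uniform input distribution.
def _example_bsc_capacity(p=0.1):
    '''Sketch: capacity of a binary symmetric channel via blahut_arimoto.'''
    Pyx_bsc = np.array([[1.0 - p, p],
                        [p, 1.0 - p]])  # rows: outputs y, columns: inputs x
    C, Px = blahut_arimoto(Pyx_bsc, tolerance=1e-6)
    # For p = 0.1 this should come out close to 1 - H2(0.1), about 0.531 bits,
    # with Px approximately [0.5, 0.5].
    return C, Px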
# def rate_distortion()
# Rate-Distortion is for SOURCE compression.
# It calculates the lower bound of required description length to compress and
# reconstruct a GIVEN source (Px (can be multidimensional and dependent, like in images)).
# It does NOT tell you how to achieve that compression 'codebook and code points', except
# for simple cases like independent (iid) gaussian sources. In that case it actually works out that
# doing multidimensional to single dimensional compression (vector quantization) is better than scalar quantization
# Problems of communication theory:
# ------------
# 1) WHAT information should be transmitted? (source coding)
# 2) HOW should it be transmitted? (channel coding)
# These two problems can be separated by Shannon's separation theorem, and the distortion
# will never exceed D(R) as long as R < C.
# But what of joint coding?
#==============================================================================
# Quantization of high-D quantized channel to low-D quantized channel
#==============================================================================
def find_closest(vector, value):
''' Find the closest index of a vector to a value. If value is a vector,
    returns an array of indices that are closest to the values. Rounds down.
'''
if isinstance(value, np.ndarray) or isinstance(value, list):
diff = np.outer(vector, np.ones(len(value))) - np.outer(np.ones(len(vector)), value)
inds = np.argmin( np.abs(diff), axis=0)
else:
inds = np.argmin( np.abs( vector - value) )
return inds
def calc_subsample_inds(x, y, xinputs=None, ydividers=None):
    ''' Find the closest indices for a discretization of x and y
'''
if np.any(xinputs):
xinds = find_closest(x, xinputs)
else:
xinds = np.arange(x.size)
if np.any(ydividers):
yinds = find_closest(y, ydividers)
else:
yinds = np.arange(y.size)
return xinds, yinds
def subsample(Pyx, xinds, yinds):
    ''' Subsample a density matrix at locations xinds (columns), and sum between
    dividers yinds (rows).
'''
Pyx_sub = np.zeros( [len(yinds)+1, len(xinds)])
bounds = np.r_[0, yinds, Pyx.shape[0]]
for i in np.arange(len(bounds)-1):
bl = bounds[i]
bu = bounds[i+1]
Pyx_sub[i,:] = np.sum(Pyx[bl:bu, xinds], axis = 0)
# Normalize
Pyx_sub = Pyx_sub / np.sum(Pyx_sub, axis=0)
return Pyx_sub
def quantize(Pyx, x, y, xinputs=None, ydividers=None):
    '''Chops up a matrix Pyx into xinputs columns and sums rows between
    y dividers.
'''
xinds, yinds = calc_subsample_inds(x, y, xinputs, ydividers)
Pyx_sub = subsample(Pyx, xinds, yinds)
x_sub = x[xinds]
y_sub = y[::-1][yinds]
return Pyx_sub, x_sub, y_sub
def trim(Pyx, cum_low=1e-1, cum_high=1e-1, index=False):
    '''Return only the rows of Pyx where cumsum(P) > cum_low and cumsum(P) < 1 - cum_high'''
low = min( np.where( np.cumsum(Pyx, axis=0) > cum_low )[0])
high = max( np.where( np.cumsum(Pyx, axis=0) < 1-cum_high )[0])
if not index:
return Pyx[low:high, :]
else:
return Pyx[low:high, :], np.arange(low, high+1)
#==============================================================================
# Gaussian Kernel Density Estimate from Data
#==============================================================================
def Q(Varray, Rarray, nx=2000, ny=2000,print_points=True):
'''
Take in all Voltage/Resistance Pairs and return the conditional PDF: Q= P(R|V)
Performs Gaussian Kernel Density Estimate and Linear Interpolation
Params
------
Varray, Rarray
ndarray, same size (n_examples,)
Returns
-------
Q
ndarray (__, __)
'''
V_list = np.sort(np.unique(Varray))
#Gaussian KDE
Pyx_func = []
for i, v in enumerate(V_list):
idx = (Varray == v)
data = Rarray[idx]
if print_points == True:
print ('%0.2f Volts, %d Points' % (v, sum(idx)))
Pyx_func.append(stats.gaussian_kde(data, bw_method='scott' )) #scott, silvermann, scalar
Pyx_func = FunctionList(Pyx_func)
x = np.linspace(V_list.min(), V_list.max(), nx)
y = np.linspace(Rarray.min()*0.7, Rarray.max()*1.3, ny)
# Bivariate Spline
Pyx = np.atleast_2d(Pyx_func(y))
Pyx_interp = interpolate.RectBivariateSpline( V_list, y, Pyx, kx=3, ky=3, s=0)
Pyx_new = np.rot90(Pyx_interp(x,y))
# Normalize (each input needs to end up in an output (column=1))
Pyx_new = Pyx_new / np.sum(Pyx_new, axis=0)
return Pyx_new, x, y
def moments(Varray, Rarray):
'''Returns mean, std of a R(V) dataset'''
V_list = np.sort(np.unique(Varray))
data_mean = np.zeros(V_list.size)
data_std = np.zeros(V_list.size)
Vs = np.zeros(V_list.size)
for i, v in enumerate(V_list):
idx = (Varray == v)
data = Rarray[idx]
data_mean[i] = np.mean(data)
data_std[i] = np.std(data)
Vs[i] = v
return data_mean, data_std, Vs
#==============================================================================
# Classes
#==============================================================================
class FunctionList(object):
def __init__(self, f_list):
"""
FunctionList is a list of function objects that can be
added, multiplied, summed, and dot producted with ints/floats,
functions, np.array()s, and other FunctionLists.
This is a bit of a hack to allow for making an array of functions.
Parameters
----------
f_list : list of functions
Examples
--------
>>> f = lambda x: x
>>> g = FunctionList([f, f])
>>> h=g.dot([1,2])
>>> g(2)
[2, 2]
>>> h(2)
6
"""
if type(f_list) is FunctionList:
self.f_list = f_list.f_list
elif hasattr(f_list, '__call__'):
self.f_list = [f_list]
else:
self.f_list = f_list
def __call__(self, x):
result = []
for f in self.f_list:
result.append( f(x) )
return result
def __add__(self, other):
""" Add the function list, elementwise: Returns a function list
"""
return self.__apply_op(other, op=operator.add)
def __sub__(self, other):
""" Add the function list, elementwise: Returns a function list
"""
return self.__apply_op(other, op=operator.sub)
def __mul__(self, other):
""" Multiply the function list, elementwise: Returns a function list
"""
return self.__apply_op(other, op=operator.mul)
    def __div__(self, other):
        """ Divide the function list, elementwise: Returns a function list
        """
        return self.__apply_op(other, op=operator.truediv)
    # Python 3 looks up __truediv__ rather than __div__
    __truediv__ = __div__
def __apply_op(self, other, op=operator.add):
result = []
if type(other) is FunctionList:
for i, f in enumerate(self.f_list):
g = other[i]
result.append( lambda x, f=f, g=g: op(f(x), g(x)) )
elif hasattr(other, '__call__'):
for f in self.f_list:
g = other
result.append( lambda x, f=f, g=g: op(f(x), g(x)) )
elif type(other) in (np.ndarray, list):
for i, f in enumerate(self.f_list):
g = other[i]
result.append( lambda x, f=f, g=g: op(f(x), g) )
elif type(other) in (int, float):
for f in self.f_list:
g = other
result.append( lambda x, f=f, g=g: op(f(x), other) )
else:
print ('Add FunctionList with: FunctionList, ndarray, int, or float')
pass
return FunctionList(result)
def sum(self):
result = self.f_list[0]
for i, g in enumerate(self.f_list[1:]):
f = result
result = lambda x, f=f, g=g: f(x) + g(x)
return result
def dot(self, other):
"""Take the dot product of a function vector and either another
function vector, or a normal vector.
"""
result = self.__mul__(other)
result = result.sum()
return result
def __getitem__(self,index):
return self.f_list[index]
def __setitem__(self,index,value):
self.f_list[index] = value
def __len__(self):
return len(self.f_list)
|
the-stack_0_9055 | """
Support for MQTT JSON lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.mqtt_json/
"""
import json
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
Light,
)
from homeassistant.components.mqtt import (
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
CONF_UNIQUE_ID,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from homeassistant.const import (
CONF_BRIGHTNESS,
CONF_COLOR_TEMP,
CONF_DEVICE,
CONF_EFFECT,
CONF_NAME,
CONF_OPTIMISTIC,
CONF_RGB,
CONF_WHITE_VALUE,
CONF_XY,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.color as color_util
from . import MQTT_LIGHT_SCHEMA_SCHEMA
from .schema_basic import CONF_BRIGHTNESS_SCALE
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt_json"
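# Illustrative note (added): the JSON payloads handled by this platform look
# roughly like
#   {"state": "ON", "brightness": 255, "color_temp": 155,
#    "color": {"r": 255, "g": 180, "b": 200, "x": 0.406, "y": 0.301,
#              "h": 344.0, "s": 29.0},
#    "effect": "colorloop", "white_value": 150, "transition": 10}
# Only the keys enabled by the configuration (brightness, rgb, xy, hs, ...)
# are expected; this is a sketch rather than an exhaustive schema.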
DEFAULT_BRIGHTNESS = False
DEFAULT_COLOR_TEMP = False
DEFAULT_EFFECT = False
DEFAULT_FLASH_TIME_LONG = 10
DEFAULT_FLASH_TIME_SHORT = 2
DEFAULT_NAME = "MQTT JSON Light"
DEFAULT_OPTIMISTIC = False
DEFAULT_RGB = False
DEFAULT_WHITE_VALUE = False
DEFAULT_XY = False
DEFAULT_HS = False
DEFAULT_BRIGHTNESS_SCALE = 255
CONF_EFFECT_LIST = "effect_list"
CONF_FLASH_TIME_LONG = "flash_time_long"
CONF_FLASH_TIME_SHORT = "flash_time_short"
CONF_HS = "hs"
# Stealing some of these from the base MQTT configs.
PLATFORM_SCHEMA_JSON = (
mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_BRIGHTNESS, default=DEFAULT_BRIGHTNESS): cv.boolean,
vol.Optional(
CONF_BRIGHTNESS_SCALE, default=DEFAULT_BRIGHTNESS_SCALE
): vol.All(vol.Coerce(int), vol.Range(min=1)),
vol.Optional(CONF_COLOR_TEMP, default=DEFAULT_COLOR_TEMP): cv.boolean,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_EFFECT, default=DEFAULT_EFFECT): cv.boolean,
vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(
CONF_FLASH_TIME_LONG, default=DEFAULT_FLASH_TIME_LONG
): cv.positive_int,
vol.Optional(
CONF_FLASH_TIME_SHORT, default=DEFAULT_FLASH_TIME_SHORT
): cv.positive_int,
vol.Optional(CONF_HS, default=DEFAULT_HS): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_QOS, default=mqtt.DEFAULT_QOS): vol.All(
vol.Coerce(int), vol.In([0, 1, 2])
),
vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
vol.Optional(CONF_RGB, default=DEFAULT_RGB): cv.boolean,
vol.Optional(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_WHITE_VALUE, default=DEFAULT_WHITE_VALUE): cv.boolean,
vol.Optional(CONF_XY, default=DEFAULT_XY): cv.boolean,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
.extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema)
)
async def async_setup_entity_json(
config: ConfigType, async_add_entities, config_entry, discovery_hash
):
"""Set up a MQTT JSON Light."""
async_add_entities([MqttLightJson(config, config_entry, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttLightJson(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
Light,
RestoreEntity,
):
"""Representation of a MQTT JSON light."""
def __init__(self, config, config_entry, discovery_hash):
"""Initialize MQTT JSON light."""
self._state = False
self._sub_state = None
self._supported_features = 0
self._topic = None
self._optimistic = False
self._brightness = None
self._color_temp = None
self._effect = None
self._hs = None
self._white_value = None
self._flash_times = None
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_hash, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA_JSON(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
self._topic = {
key: config.get(key) for key in (CONF_STATE_TOPIC, CONF_COMMAND_TOPIC)
}
optimistic = config[CONF_OPTIMISTIC]
self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
brightness = config[CONF_BRIGHTNESS]
if brightness:
self._brightness = 255
else:
self._brightness = None
color_temp = config[CONF_COLOR_TEMP]
if color_temp:
self._color_temp = 150
else:
self._color_temp = None
effect = config[CONF_EFFECT]
if effect:
self._effect = "none"
else:
self._effect = None
white_value = config[CONF_WHITE_VALUE]
if white_value:
self._white_value = 255
else:
self._white_value = None
if config[CONF_HS] or config[CONF_RGB] or config[CONF_XY]:
self._hs = [0, 0]
else:
self._hs = None
self._flash_times = {
key: config.get(key)
for key in (CONF_FLASH_TIME_SHORT, CONF_FLASH_TIME_LONG)
}
self._supported_features = SUPPORT_TRANSITION | SUPPORT_FLASH
self._supported_features |= config[CONF_RGB] and SUPPORT_COLOR
self._supported_features |= brightness and SUPPORT_BRIGHTNESS
self._supported_features |= color_temp and SUPPORT_COLOR_TEMP
self._supported_features |= effect and SUPPORT_EFFECT
self._supported_features |= white_value and SUPPORT_WHITE_VALUE
self._supported_features |= config[CONF_XY] and SUPPORT_COLOR
self._supported_features |= config[CONF_HS] and SUPPORT_COLOR
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
last_state = await self.async_get_last_state()
@callback
def state_received(msg):
"""Handle new MQTT messages."""
values = json.loads(msg.payload)
if values["state"] == "ON":
self._state = True
elif values["state"] == "OFF":
self._state = False
if self._hs is not None:
try:
red = int(values["color"]["r"])
green = int(values["color"]["g"])
blue = int(values["color"]["b"])
self._hs = color_util.color_RGB_to_hs(red, green, blue)
except KeyError:
pass
except ValueError:
_LOGGER.warning("Invalid RGB color value received")
try:
x_color = float(values["color"]["x"])
y_color = float(values["color"]["y"])
self._hs = color_util.color_xy_to_hs(x_color, y_color)
except KeyError:
pass
except ValueError:
_LOGGER.warning("Invalid XY color value received")
try:
hue = float(values["color"]["h"])
saturation = float(values["color"]["s"])
self._hs = (hue, saturation)
except KeyError:
pass
except ValueError:
_LOGGER.warning("Invalid HS color value received")
if self._brightness is not None:
try:
self._brightness = int(
values["brightness"]
/ float(self._config[CONF_BRIGHTNESS_SCALE])
* 255
)
except KeyError:
pass
except ValueError:
_LOGGER.warning("Invalid brightness value received")
if self._color_temp is not None:
try:
self._color_temp = int(values["color_temp"])
except KeyError:
pass
except ValueError:
_LOGGER.warning("Invalid color temp value received")
if self._effect is not None:
try:
self._effect = values["effect"]
except KeyError:
pass
except ValueError:
_LOGGER.warning("Invalid effect value received")
if self._white_value is not None:
try:
self._white_value = int(values["white_value"])
except KeyError:
pass
except ValueError:
_LOGGER.warning("Invalid white value received")
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._topic[CONF_STATE_TOPIC],
"msg_callback": state_received,
"qos": self._config[CONF_QOS],
}
},
)
if self._optimistic and last_state:
self._state = last_state.state == STATE_ON
if last_state.attributes.get(ATTR_BRIGHTNESS):
self._brightness = last_state.attributes.get(ATTR_BRIGHTNESS)
if last_state.attributes.get(ATTR_HS_COLOR):
self._hs = last_state.attributes.get(ATTR_HS_COLOR)
if last_state.attributes.get(ATTR_COLOR_TEMP):
self._color_temp = last_state.attributes.get(ATTR_COLOR_TEMP)
if last_state.attributes.get(ATTR_EFFECT):
self._effect = last_state.attributes.get(ATTR_EFFECT)
if last_state.attributes.get(ATTR_WHITE_VALUE):
self._white_value = last_state.attributes.get(ATTR_WHITE_VALUE)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def color_temp(self):
"""Return the color temperature in mired."""
return self._color_temp
@property
def effect(self):
"""Return the current effect."""
return self._effect
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._config.get(CONF_EFFECT_LIST)
@property
def hs_color(self):
"""Return the hs color value."""
return self._hs
@property
def white_value(self):
"""Return the white property."""
return self._white_value
@property
def should_poll(self):
"""No polling needed for a MQTT light."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._config[CONF_NAME]
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
async def async_turn_on(self, **kwargs):
"""Turn the device on.
This method is a coroutine.
"""
should_update = False
message = {"state": "ON"}
if ATTR_HS_COLOR in kwargs and (
self._config[CONF_HS] or self._config[CONF_RGB] or self._config[CONF_XY]
):
hs_color = kwargs[ATTR_HS_COLOR]
message["color"] = {}
if self._config[CONF_RGB]:
# If there's a brightness topic set, we don't want to scale the
# RGB values given using the brightness.
if self._brightness is not None:
brightness = 255
else:
brightness = kwargs.get(
ATTR_BRIGHTNESS, self._brightness if self._brightness else 255
)
rgb = color_util.color_hsv_to_RGB(
hs_color[0], hs_color[1], brightness / 255 * 100
)
message["color"]["r"] = rgb[0]
message["color"]["g"] = rgb[1]
message["color"]["b"] = rgb[2]
if self._config[CONF_XY]:
xy_color = color_util.color_hs_to_xy(*kwargs[ATTR_HS_COLOR])
message["color"]["x"] = xy_color[0]
message["color"]["y"] = xy_color[1]
if self._config[CONF_HS]:
message["color"]["h"] = hs_color[0]
message["color"]["s"] = hs_color[1]
if self._optimistic:
self._hs = kwargs[ATTR_HS_COLOR]
should_update = True
if ATTR_FLASH in kwargs:
flash = kwargs.get(ATTR_FLASH)
if flash == FLASH_LONG:
message["flash"] = self._flash_times[CONF_FLASH_TIME_LONG]
elif flash == FLASH_SHORT:
message["flash"] = self._flash_times[CONF_FLASH_TIME_SHORT]
if ATTR_TRANSITION in kwargs:
message["transition"] = int(kwargs[ATTR_TRANSITION])
if ATTR_BRIGHTNESS in kwargs and self._brightness is not None:
message["brightness"] = int(
kwargs[ATTR_BRIGHTNESS]
/ float(DEFAULT_BRIGHTNESS_SCALE)
* self._config[CONF_BRIGHTNESS_SCALE]
)
if self._optimistic:
self._brightness = kwargs[ATTR_BRIGHTNESS]
should_update = True
if ATTR_COLOR_TEMP in kwargs:
message["color_temp"] = int(kwargs[ATTR_COLOR_TEMP])
if self._optimistic:
self._color_temp = kwargs[ATTR_COLOR_TEMP]
should_update = True
if ATTR_EFFECT in kwargs:
message["effect"] = kwargs[ATTR_EFFECT]
if self._optimistic:
self._effect = kwargs[ATTR_EFFECT]
should_update = True
if ATTR_WHITE_VALUE in kwargs:
message["white_value"] = int(kwargs[ATTR_WHITE_VALUE])
if self._optimistic:
self._white_value = kwargs[ATTR_WHITE_VALUE]
should_update = True
mqtt.async_publish(
self.hass,
self._topic[CONF_COMMAND_TOPIC],
json.dumps(message),
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
# Optimistically assume that the light has changed state.
self._state = True
should_update = True
if should_update:
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the device off.
This method is a coroutine.
"""
message = {"state": "OFF"}
if ATTR_TRANSITION in kwargs:
message["transition"] = int(kwargs[ATTR_TRANSITION])
mqtt.async_publish(
self.hass,
self._topic[CONF_COMMAND_TOPIC],
json.dumps(message),
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
# Optimistically assume that the light has changed state.
self._state = False
self.async_write_ha_state()
|
the-stack_0_9058 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
import logging
import sys
from io import open
from os import path
from time import time
from glob import glob
from textblob import Blobber
from textblob_aptagger import PerceptronTagger
from collections import Counter, defaultdict
import numpy as np
import pandas as pd
__author__ = "Vivek Kulkarni"
__email__ = "[email protected]"
LOGFORMAT = "%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s"
def main(args):
f = open(args.filename)
D = {}
tag_set = set([])
tb = Blobber(pos_tagger=PerceptronTagger())
for i, line in enumerate(f):
b1 = tb(line)
for w, t in b1.tags:
tag_set.add(t)
if w not in D:
D[w] = Counter()
D[w][t] = float(D[w][t] + 1)
sorted_pos_tags = sorted(list(tag_set))
rows = []
for w in D.keys():
row = [w]
pos_counts_word = np.array([float(D[w][t]) for t in sorted_pos_tags])
pos_dist_word = pos_counts_word / float(np.sum(pos_counts_word))
assert(np.isclose(np.sum(pos_dist_word), 1.0))
row = row + list(pos_dist_word)
rows.append(row)
header = ['word'] + sorted_pos_tags
print("Set of POS tags in sorted order", header)
df = pd.DataFrame().from_records(rows, columns=header)
print("Dumping the POS distribution.")
df.to_csv(args.outputfile, index=None, encoding='utf-8')
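# Worked example (added note): if a word is tagged 'VB' three times and 'NN'
# once across the corpus, its row holds 0.75 in the 'VB' column, 0.25 in the
# 'NN' column and 0.0 everywhere else, so each row sums to 1.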
def debug(type_, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(type_, value, tb)
else:
import traceback
import pdb
# we are NOT in interactive mode, print the exception...
traceback.print_exception(type_, value, tb)
print("\n")
# ...then start the debugger in post-mortem mode.
pdb.pm()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-f", "--file", dest="filename", help="Input file")
parser.add_argument("-o", "--outputfile", dest="outputfile", help="Output file")
parser.add_argument("-l", "--log", dest="log", help="log verbosity level",
default="INFO")
args = parser.parse_args()
if args.log == 'DEBUG':
sys.excepthook = debug
numeric_level = getattr(logging, args.log.upper(), None)
logging.basicConfig(level=numeric_level, format=LOGFORMAT)
main(args)
|
the-stack_0_9060 | import telraam_data.query as query
import telraam_data.download as download
from .utils import get_data_keys
import datetime as dt
import shutil
import pandas as pd
import pathlib as pl
import random
import pytest
@pytest.fixture()
def one_segment():
all_segments = query.query_active_segments()
segment_idx = random.randrange(1, len(all_segments)) - 1
return all_segments["features"][segment_idx]
@pytest.fixture()
def tmp_path():
path = pl.Path('./tmp/data.csv')
yield path
shutil.rmtree('./tmp/')
def test_list_segments():
# As of April 2020 there were more than 900 active segments.
segments = download.list_segments()
assert len(segments) > 900
def test_list_segments_by_coordinates():
# As of April 2020 there are more than 30 active segments in Schaarbeek
segments = download.list_segments_by_coordinates(lon=4.373, lat=50.867, radius=2)
assert len(segments) > 30
# 1003073114 should be one of them
assert 1003073114 in segments
# 1003063473 should not be one of them
assert 1003063473 not in segments
def test_download_one_segment(one_segment, tmp_path):
segment_id = one_segment["properties"]["segment_id"]
segment_last_time = one_segment["properties"]["last_data_package"]
# Query that segment for the last live day
end_date = dt.datetime.fromisoformat(segment_last_time).date()
start_date = end_date - dt.timedelta(days=1)
df = download.download_one_segment(
segment_id=segment_id,
start_date=start_date,
end_date=end_date,
out_filepath=tmp_path)
required_keys = get_data_keys()
required_keys.remove('date') # 'date' has become the index
# 1. Check returned data
assert len(df) > 0
assert df.index.name == 'date'
assert (df.index >= str(start_date)).all()
assert (df.index <= str(end_date + dt.timedelta(days=1))).all()
assert set(required_keys) == set(required_keys).intersection(df.columns)
assert (df['segment_id'] == segment_id).all()
# 2. Check stored data
df_local = pd.read_csv(tmp_path, parse_dates=["date"], index_col="date")
from ast import literal_eval
df_local.car_speed_hist_0to70plus = df_local.car_speed_hist_0to70plus.apply(literal_eval)
df_local.car_speed_hist_0to120plus = df_local.car_speed_hist_0to120plus.apply(literal_eval)
assert (df_local == df).all().all()
|
the-stack_0_9062 | import time
import paho.mqtt.client as paho
import httplib2
from urllib import urlencode
import json
def call_get_arrivals(line):
h = httplib2.Http(disable_ssl_certificate_validation=True)
# h.add_credentials(intro_username, intro_password)
resp, content = h.request("https://api.tfl.gov.uk/Line/"+line+"/Arrivals")
# print resp
try:
response=json.loads(content)
for i in response:
line = i['lineName']
trainNumber = i['vehicleId']
stationId = i['destinationNaptanId']
stationName = i['destinationName']
expArrival = i['expectedArrival']
timestamp = i['timestamp']
ttl = i['timeToLive']
data = dict(line=line, trainNumber = trainNumber, stationId = stationId, stationName=stationName, timestamp=timestamp, expArrival = expArrival, ttl = ttl)
#print data
client.publish("/tfl/", payload=json.dumps(data),qos=0)
except Exception as inst:
pass
client.loop()
lines = ["victoria","circle","district","northern","jubilee","piccadilly","metropolitan","bakerloo","central" ]
client = paho.Client()
client.connect('mqtt')
while 1==1:
for line in lines:
call_get_arrivals(line)
time.sleep(1)
|
the-stack_0_9065 | import numpy as np
import matplotlib.pylab as plt
import multiprocessing as mp
from matplotlib import figure
from data import *
FIG = plt.figure()
def draw_coord(coord, name, lab=[1.0, 0.0]):
color = 1.0 if lab[0] > lab[1] else -1.0
ret = np.zeros(shape=[L,L,1])
coord_x, coord_y = coord
coord_x_idx = np.argmax(coord_x)
coord_y_idx = np.argmax(coord_y)
ret[coord_x_idx][coord_y_idx][0] = color
draw(ret, name)
def draw(m, name, extra=None):
FIG.clf()
matrix = m
orig_shape = np.shape(matrix)
# lose the channel shape in the end of orig_shape
new_shape = orig_shape[:-1]
matrix = np.reshape(matrix, new_shape)
ax = FIG.add_subplot(1,1,1)
ax.set_aspect('equal')
plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray)
# plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar()
if extra != None:
greens, reds = extra
grn_x, grn_y, = greens
red_x, red_y = reds
plt.scatter(x=grn_x, y=grn_y, c='g', s=40)
plt.scatter(x=red_x, y=red_y, c='r', s=40)
# # put a blue dot at (10, 20)
# plt.scatter([10], [20])
# # put a red dot, size 40, at 2 locations:
# plt.scatter(x=[3, 4], y=[5, 6], c='r', s=40)
# # plt.plot()
plt.savefig(name)
def draw_orig(img, name):
ret = np.reshape(img, [L,L,1])
draw(ret, name)
def draw_allob(img, name, ob_prefix):
ret = np.zeros([L,L,1])
for ii in range(L):
for jj in range(L):
labb = img[ii][jj][0] - img[ii][jj][1]
ret[ii][jj][0] = labb
grn_x = []
grn_y = []
red_x = []
red_y = []
for obob in ob_prefix:
ob_c, labb = obob
if labb[0] > labb[1]:
grn_x.append(ob_c[0])
grn_y.append(ob_c[1])
else:
red_x.append(ob_c[0])
red_y.append(ob_c[1])
draw(ret, name, ((grn_y, grn_x), (red_y, red_x)))
def draw_obs(obs, name):
ret_shape = [L, L, 1]
ret = np.zeros(shape=ret_shape)
for ob, lab in obs:
ii, jj = ob
labb = 1.0 if lab[0] > lab[1] else -1.0
# labb = lab[0]
ret[ii][jj][0] = labb
draw(ret, name)
def draw_annotate(x_cords, y_cords, anns, name):
FIG.clf()
y = x_cords
z = y_cords
n = anns
fig = FIG
ax = fig.add_subplot(1,1,1)
ax.set_xlim([0,L])
ax.set_ylim([0,L])
ax.set_ylim(ax.get_ylim()[::-1])
ax.scatter(z, y)
for i, txt in enumerate(n):
ax.annotate(txt, (z[i],y[i]))
fig.savefig(name)
def draw_obs_trace(obs, name):
x_coords = []
y_coords = []
anno = []
for i, ob in enumerate(obs):
ob_coord, ob_outcome = ob
x_coords.append(ob_coord[0])
y_coords.append(ob_coord[1])
anno.append("O"+str(i)+str(int(ob_outcome[0])))
draw_annotate(x_coords, y_coords, anno, name)
def draw_all_preds(all_preds, name):
ret_shape = [L, L, 1]
ret = np.zeros(shape=ret_shape)
for qq, labb in all_preds:
i, j = qq
# ret[i][j][0] = 1.0 if labb[0] > labb[1] else 0.0
# ret[i][j][0] = labb[0]
ret[i][j][0] = labb[0]
draw(ret, name)
|
the-stack_0_9066 | import os
import math
import torch
import numpy as np
from PIL import Image, ImageDraw
from torch.utils.data import random_split, DataLoader
from matplotlib import pyplot as plt
from data_utils import MyTestDataset, get_test_transforms, my_collate
from conf.settings import BASE_DIR
from faster_rcnn.predict import predict as faster_predict
from yolo_v3.predict import predict as yolo_predict
from unet.predict import predict as unet_predict
models_path = os.path.join(BASE_DIR, "models")
images_path = os.path.join(BASE_DIR, "images")
if __name__ == "__main__":
torch.manual_seed(0)
from faster_rcnn.models import model as faster
from yolo_v3.models import Darknet
from unet.models import UNet
faster_name = "faster_rcnn_7_30.pt"
yolo_name = "yolo_v3_4_20.pt"
unet_name = "unet_2_15.pt"
split = "stage1_test"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Running on {device}")
print(f"Loading {faster_name}")
faster.load_state_dict(torch.load(os.path.join(models_path, faster_name), map_location=device))
faster.to(device=device)
dataset = MyTestDataset(split=split, transforms=get_test_transforms(rescale_size=(256, 256)))
faster_loader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False)
print(f"Loading {yolo_name}")
yolo = Darknet(os.path.join(BASE_DIR, "yolo_v3/config/yolov3-custom.cfg"))
yolo.load_state_dict(torch.load(os.path.join(models_path, yolo_name), map_location=device))
yolo.to(device=device)
dataset = MyTestDataset(split=split, transforms=get_test_transforms(rescale_size=(416, 416)))
yolo_loader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False)
print(f"Loading {unet_name}")
unet = UNet(n_channels=1, n_classes=1)
unet.load_state_dict(torch.load(os.path.join(models_path, unet_name), map_location=device))
unet.to(device=device)
dataset = MyTestDataset(split=split, model="unet")
unet_loader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False)
for i, ((f_im, f_tar), (y_im, y_tar), (u_im, u_tar)) in enumerate(zip(faster_loader, yolo_loader, unet_loader)):
name = u_tar[0]
try:
f_image, f_x, f_y = faster_predict(faster, image=f_im)
y_image, y_x, y_y = yolo_predict(yolo, image=y_im)
u_image, u_x, u_y = unet_predict(unet, image=u_im)
        except Exception:
print(f"Skipping {name}")
continue
fig = plt.figure(dpi=300)
ax1 = fig.add_subplot(1, 3, 1)
ax1.imshow(f_image, cmap="gray")
ax1.plot(f_x, f_y, 'r+', linewidth=3, markersize=8)
ax1.set_title('Faster R-CNN')
ax1.axis('off')
ax2 = fig.add_subplot(1, 3, 2)
ax2.imshow(y_image, cmap="gray")
ax2.plot(y_x, y_y, 'r+', linewidth=3, markersize=8)
ax2.set_title('YOLOv3')
ax2.axis('off')
ax3 = fig.add_subplot(1, 3, 3)
ax3.imshow(u_image, cmap="gray")
ax3.plot(u_x, u_y, 'r+', linewidth=3, markersize=8)
ax3.set_title('U-Net')
ax3.axis('off')
plt.tight_layout()
plt.show()
# os.makedirs(os.path.join(images_path, f"all/{name}"), exist_ok=True)
#
# fig = plt.figure(dpi=200)
# ax1 = fig.add_subplot(1, 1, 1)
# ax1.imshow(f_image, cmap="gray")
# ax1.plot(f_x, f_y, 'r+', linewidth=3, markersize=12)
# ax1.axis('off')
# # plt.show()
# plt.savefig(os.path.join(images_path, f"all/{name}/f_det_{i}.png"), dpi=200)
#
# fig = plt.figure(dpi=200)
# ax1 = fig.add_subplot(1, 1, 1)
# ax1.imshow(y_image, cmap="gray")
# ax1.plot(y_x, y_y, 'r+', linewidth=3, markersize=12)
# ax1.axis('off')
# # plt.show()
# plt.savefig(os.path.join(images_path, f"all/{name}/y_det_{i}.png"), dpi=200)
#
# fig = plt.figure(dpi=200)
# ax1 = fig.add_subplot(1, 1, 1)
# ax1.imshow(u_image, cmap="gray")
# ax1.plot(u_x, u_y, 'r+', linewidth=3, markersize=12)
# ax1.axis('off')
# # plt.show()
# plt.savefig(os.path.join(images_path, f"all/{name}/u_det_{i}.png"), dpi=200)
#
# plt.close('all')
print(name)
break
|
the-stack_0_9067 | # -*- coding: utf-8 -*-
"""Redundancy."""
from proselint.tools import memoize, preferred_forms_check
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "redundancy.wallace"
msg = "Redundancy. Use '{}' instead of '{}'."
redundancies = [
["rectangular", ["rectangular in shape"]],
["audible", ["audible to the ear"]],
]
return preferred_forms_check(text, redundancies, err, msg)
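# Illustrative note (added): given the pairs above, a sentence such as
# "The box was rectangular in shape." would be flagged with a message like
# "Redundancy. Use 'rectangular' instead of 'rectangular in shape'."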
@memoize
def check_garner(text):
"""Suggest the preferred forms.
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
"""
err = "redundancy.garner"
msg = "Redundancy. Use '{}' instead of '{}'."
redundancies = [
["adequate", ["adequate enough"]],
["admitted", ["self-admitted"]],
["affidavit", ["sworn affidavit"]],
["agreement", ["mutual agreement"]],
["alumnus", ["former alumnus"]],
["antithetical", ["directly antithetical"]],
["approximately", ["approximately about"]],
["associate", ["associate together(?: in groups)?"]],
["bivouac", ["temporary bivouac", "bivouac camp"]],
["blend", ["blend together"]],
["but", ["but nevertheless"]],
["charged with...", ["accused of a charge"]],
["circumstances of", ["circumstances surrounding"]],
["circumstances", ["surrounding circumstances"]],
["close", ["close proximity"]],
["collaborate", ["collaborate together"]],
["collaborator", ["fellow collaborator"]],
["collaborators", ["fellow collaborators"]],
["collocated", ["collocated together"]],
["colleagues", ["fellow colleagues"]],
["combine", ["combine together"]],
["complacent", ["self-complacent"]],
["confessed", ["self-confessed"]],
["connect", ["connect together"]],
["consensus", ["(?:general )?consensus of opinion"]],
["consolidate", ["consolidate together"]],
["continues to", ["still continues to"]],
["contradictory", ["mutually contradictory"]],
["cooperation", ["mutual cooperation"]],
["couple", ["couple together"]],
["crisis", ["serious crisis"]],
["eliminate", ["entirely eliminate"]],
["especially", ["most especially"]],
["fact", ["actual fact"]],
["facts", ["true facts"]],
["forecast", ["future forecast"]],
["founding fathers", ["founding forefathers"]],
["free", ["free and gratis"]],
["free", ["free gratis"]],
["full", ["completely full"]],
["fundamentals", ["basic fundamentals"]],
["gift", ["free gift"]],
["innovation", ["new innovation"]],
["interact", ["interact with each other"]],
["large", ["large-size"]],
["meld", ["meld together"]],
["merge", ["merge together"]],
["mingle", ["mingle together"]],
["mix", ["mix together"]],
["mutual feelings", ["mutual feelings for eachother"]],
["mutual respect", ["mutual respect for each other"]],
["native citizen", ["native-born citizen"]],
["necessity", ["absolute necessity"]],
["obvious", ["blatantly obvious"]],
["pause", ["pause for a moment"]],
["planning", ["advance planning"]],
["plans", ["future plans"]],
["pooled", ["pooled together"]],
["potable water", ["potable drinking water"]],
["potable water", ["potable drinking water"]],
["recruit", ["new recruit"]],
["reelected", ["reelected for another term"]],
["refer", ["refer back"]],
["regress", ["regress back"]],
["repay them", ["repay them back"]],
["repay", ["repay back"]],
["repeat", ["repeat again"]],
["repeat", ["repeat back"]],
["repeat", ["repeat the same"]],
["repeated", ["repeated the same"]],
["reprieve", ["temporary reprieve"]],
["respite", ["brief respite"]],
["retirement", ["retiral", "retiracy"]],
["retreat", ["retreat back"]],
["return", ["return back"]],
["scrutinize", ["closely scrutinize"]],
["software", ["software program"]],
["surrounded", ["surrounded on all sides"]],
["the nation", ["the whole entire nation"]],
["throughout the", ["throughout the entire"]],
["timpani", ["timpani drum"]],
["twins", ["pair of twins"]],
["vacancy", ["unfilled vacancy"]],
["various", ["various different"]],
["veteran", ["former veteran"]],
["visible", ["visible to the eye"]],
["vocation", ["professional vocation"]],
["while", ["while at the same time"]],
]
return preferred_forms_check(text, redundancies, err, msg)
@memoize
def check_nordquist(text):
"""Suggest the preferred forms.
source: Richard Nordquist
source_url: http://grammar.about.com/bio/Richard-Nordquist-22176.htm
"""
err = "redundancy.nordquist"
msg = "Redundancy. Use '{}' instead of '{}'."
redundancies = [
["essential", ["absolutely essential"]],
["necessary", ["absolutely necessary"]],
["a.m.", ["a.m. in the morning"]],
["p.m.", ["p.m. at night"]],
]
return preferred_forms_check(text, redundancies, err, msg)
@memoize
def check_atd(text):
"""Check for redundancies from After the Deadline."""
err = "after_the_deadline.redundancy"
msg = "Redundancy. Use '{}' instead of '{}'."
redundancies = [
[u"Bō", ["Bo Staff"]],
["Challah", ["Challah bread"]],
["Hallah", ["Hallah bread"]],
["Challah", ["Challah bread"]],
["I", ["I myself", "I personally"]],
["Mount Fuji", ["Mount Fujiyama"]],
["Milky Way", ["Milky Way galaxy"]],
["Rio Grande", ["Rio Grande river"]],
["adage", ["old adage"]],
["add", ["add a further", "add an additional"]],
["advance", ["advance forward"]],
["alternative", ["alternative choice"]],
["amaretto", ["amaretto almond"]],
["annihilate", ["completely annihilate"]],
["anniversary", ["annual anniversary"]],
["anonymous", ["unnamed anonymous"]],
["as", ["equally as"]],
["ascend", ["ascend up"]],
["ask", ["ask the question"]],
["assemble", ["assemble together"]],
["at present the", ["at the present time the"]],
["at this point", ["at this point in time"]],
["attach", ["attach together"]],
["autumn", ["autumn season"]],
["bald", ["bald-headed"]],
["balsa", ["balsa wood"]],
["belongings", ["personal belongings"]],
["benefits", ["desirable benefits"]],
["bento", ["bento box"]],
["best", ["best ever"]],
["bit", ["tiny bit"]],
["blend", ["blend together"]],
["bond", ["common bond"]],
["bonus", ["added bonus", "extra bonus"]],
["bouquet", ["bouquet of flowers"]],
["breakthrough", ["major breakthrough"]],
["bride", ["new bride"]],
["brief", ["brief in duration"]],
["bruin", ["bruin bear"]],
["hot", ["burning hot"]],
["cacophony", ["cacophony of sound"]],
["cameo", ["brief cameo", "cameo appearance"]],
["cancel", ["cancel out"]],
["cash", ["cash money"]],
["chai", ["chai tea"]],
["chance", ["random chance"]],
["charm", ["personal charm"]],
["circle", ["circle around", "round circle"]],
["circulate", ["circulate around"]],
["classify", ["classify into groups"]],
["classmates", ["fellow classmates"]],
["cliche", ["old cliche", "overused cliche"]],
["climb", ["climb up"]],
["clock", ["time clock"]],
["collaborate", ["collaborate together"]],
["collaboration", ["joint collaboration"]],
["colleague", ["fellow colleague"]],
["combine", ["combine together"]],
["commute", ["commute back and forth"]],
["compete", ["compete with each other"]],
["comprise", ["comprise of"]],
["comprises", ["comprises of"]],
["conceived", ["first conceived"]],
["conclusion", ["final conclusion"]],
["confer", ["confer together"]],
["confrontation", ["direct confrontation"]],
# ["confused", ["confused state"]],
["connect", ["connect together", "connect up"]],
["consensus", ["consensus of opinion", "general consensus"]],
["consult", ["consult with"]],
["conversation", ["oral conversation"]],
["cool", ["cool down"]],
["cooperate", ["cooperate together"]],
["cooperation", ["mutual cooperation"]],
["copy", ["duplicate copy"]],
["core", ["inner core"]],
["cost", ["cost the sum of"]],
["could", ["could possibly"]],
["coupon", ["money-saving coupon"]],
["created", ["originally created"]],
["crisis", ["crisis situation"]],
["crouch", ["crouch down"]],
["currently", ["now currently"]],
["custom", ["old custom", "usual custom"]],
["danger", ["serious danger"]],
["dates", ["dates back"]],
["decision", ["definite decision"]],
["depreciate", ["depreciate in value"]],
["descend", ["descend down"]],
["destroy", ["totally destroy"]],
["destroyed", ["completely destroyed"]],
["destruction", ["total destruction"]],
["details", ["specific details"]],
["dilemma", ["difficult dilemma"]],
["disappear", ["disappear from sight"]],
["discovered", ["originally discovered"]],
["dive", ["dive down"]],
["done", ["over and done with"]],
["drawing", ["illustrated drawing"]],
["drop", ["drop down"]],
["dune", ["sand dune"]],
["during", ["during the course of"]],
["dwindle", ["dwindle down"]],
["dwindled", ["dwindled down"]],
["every", ["each and every"]],
["earlier", ["earlier in time"]],
["eliminate", ["completely eliminate", "eliminate altogether",
"entirely eliminate"]],
["ember", ["glowing ember"]],
["embers", ["burning embers"]],
["emergency", ["emergency situation", "unexpected emergency"]],
["empty", ["empty out"]],
["enclosed", ["enclosed herein"]],
["end", ["final end"]],
["engulfed", ["completely engulfed"]],
["enter", ["enter in", "enter into"]],
["equal", ["equal to one another"]],
["eradicate", ["eradicate completely"]],
["essential", ["absolutely essential"]],
["estimated at", ["estimated at about",
"estimated at approximately",
"estimated at around"]],
["etc.", ["and etc."]],
["evolve", ["evolve over time"]],
["exaggerate", ["over exaggerate"]],
["exited", ["exited from"]],
["experience", ["actual experience", "past experience"]],
["experts", ["knowledgeable experts"]],
["extradite", ["extradite back"]],
["face the consequences", ["face up to the consequences"]],
["face the fact", ["face up to the fact"]],
["face the challenge", ["face up to the challenge"]],
["face the problem", ["face up to the problem"]],
["facilitate", ["facilitate easier"]],
["fact", ["established fact"]],
["facts", ["actual facts", "hard facts", "true facts"]],
["fad", ["passing fad"]],
["fall", ["fall down"]],
["fall", ["fall season"]],
["feat", ["major feat"]],
["feel", ["feel inside"]],
["feelings", ["inner feelings"]],
["few", ["few in number"]],
["filled", ["completely filled", "filled to capacity"]],
["first", ["first of all"]],
["first time", ["first time ever"]],
["fist", ["closed fist"]],
["fly", ["fly through the air"]],
["focus", ["focus in", "main focus"]],
["follow", ["follow after"]],
["for example", ["as for example"]],
# ["foremost", ["first and foremost"]],
["forever", ["forever and ever"]],
["free", ["for free"]],
["friend", ["personal friend"]],
["friendship", ["personal friendship"]],
["full", ["full to capacity"]],
["fundamentals", ["basic fundamentals"]],
["fuse", ["fuse together"]],
["gather", ["gather together", "gather up"]],
["get up", ["get up on his feet", "get up on your feet"]],
["gift", ["free gift"]],
["gifts", ["free gifts"]],
["goal", ["ultimate goal"]],
# ["graduate", ["former graduate"]],
["grow", ["grow in size"]],
["guarantee", ["absolute guarantee"]],
["gunman", ["armed gunman"]],
["gunmen", ["armed gunmen"]],
["habitat", ["native habitat"]],
["had done", ["had done previously"]],
["halves", ["two equal halves"]],
# ["has", ["has got"]],
# ["have", ["have got"]],
["haven", ["safe haven"]],
# ["he", ["he himself"]],
["heat", ["heat up"]],
["history", ["past history"]],
["hoist", ["hoist up"]],
["hole", ["empty hole"]],
["honcho", ["head honcho"]],
["ice", ["frozen ice"]],
["ideal", ["perfect ideal"]],
["identical", ["same identical"]],
["identification", ["positive identification"]],
["imports", ["foreign imports"]],
["impulse", ["sudden impulse"]],
["in fact", ["in actual fact"]],
["in the yard", ["outside in the yard"]],
["inclusive", ["all inclusive"]],
["incredible", ["incredible to believe"]],
["incumbent", ["present incumbent"]],
# ["indicted", ["indicted on a charge"]],
["industry", ["private industry"]],
["injuries", ["harmful injuries"]],
["innovation", ["new innovation"]],
["innovative", ["innovative new", "new innovative"]],
# ["input", ["input into"]],
["instinct", ["natural instinct", "naturally instinct"]],
["integrate", ["integrate together",
"integrate with each other"]],
["interdependent", ["interdependent on each other",
"mutually interdependent"]],
["introduced", ["introduced for the first time"]],
["invention", ["new invention"]],
["kneel", ["kneel down"]],
["knots", ["knots per hour"]],
# ["last", ["last of all"]],
# ["later", ["later time"]],
["lift", ["lift up"]],
["lingers", ["still lingers"]],
["look to the future", ["look ahead to the future"]],
["love triangle", ["three-way love triangle"]],
["maintained", ["constantly maintained"]],
["manually", ["manually by hand"]],
["marina", ["boat marina"]],
["may", ["may possibly"]],
["meet", ["meet together", "meet with each other"]],
["memories", ["past memories"]],
["merge", ["merge together"]],
["merged", ["merged together"]],
["meshed", ["meshed together"]],
["midnight", ["twelve midnight"]],
["migraine", ["migraine headache"]],
["minestrone", ["minestrone soup"]],
["mix", ["mix together"]],
["moment", ["brief moment", "moment in time"]],
["monopoly", ["complete monopoly"]],
["mural", ["wall mural"]],
["mutual respect", ["mutual respect for each other"]],
["mutually dependent", ["mutually dependent on each other"]],
["mystery", ["unsolved mystery"]],
# ["naked", ["bare naked"]],
["nape", ["nape of her neck"]],
["necessary", ["absolutely necessary"]],
["never", ["never at any time"]],
["noon", ["12 noon", "12 o'clock noon", "high noon",
"twelve noon"]],
["nostalgia", ["nostalgia for the past"]],
["number of", ["number of different"]],
["opening", ["exposed opening"]],
["my opinion", ["my personal opinion"]],
["opposites", ["exact opposites", "polar opposites"]],
["opposite", ["exact opposite", "polar opposite"]],
["orbits", ["orbits around"]],
["outcome", ["final outcome"]],
["panacea", ["universal panacea"]],
["pending", ["now pending"]],
["penetrate", ["penetrate through"]],
["persists", ["still persists"]],
["pioneer", ["old pioneer"]],
["plan", ["plan ahead", "plan in advance",
"proposed plan"]],
["planning", ["advance planning", "forward planning"]],
["plans", ["future plans"]],
["plan", ["future plan"]],
["point", ["point in time"]],
["point", ["sharp point"]],
["postpone", ["postpone until later"]],
["pouring rain", ["pouring down rain"]],
["preview", ["advance preview"]],
["previously listed", ["previously listed above"]],
["probed", ["probed into"]],
["proceed", ["proceed ahead"]],
["prosthesis", ["artificial prosthesis"]],
# ["protrude", ["protrude out"]],
["proverb", ["old proverb"]],
# ["proximity", ["close proximity"]],
["put off", ["put off until later"]],
# ["raise", ["raise up"]],
["re-elect", ["re-elect for another term"]],
["reason is", ["reason is because"]],
["recur", ["recur again"]],
["recurrence", ["future recurrence"]],
["refer", ["refer back"]],
["reflect", ["reflect back"]],
# ["relevant", ["highly relevant"]],
["remain", ["continue to remain"]],
["remains", ["still remains"]],
["replica", ["exact replica"]],
["reply", ["reply back"]],
# ["requirements", ["necessary requirements"]],
["reservations", ["advance reservations"]],
["retreat", ["retreat back"]],
["revert", ["revert back"]],
["round", ["round in shape"]],
["rule of thumb", ["rough rule of thumb"]],
["rumor", ["unconfirmed rumor"]],
["rustic", ["rustic country"]],
["same", ["exact same", "precise same", "same exact"]],
["sanctuary", ["safe sanctuary"]],
["satisfaction", ["full satisfaction"]],
["scrutinize", ["scrutinize in detail"]],
["scrutiny", ["careful scrutiny", "close scrutiny"]],
["secret", ["secret that cannot be told"]],
["seek", ["seek to find"]],
["separated", ["separated apart from each other"]],
["share", ["share together"]],
["shiny", ["shiny in appearance"]],
["sincere", ["truly sincere"]],
["sink", ["sink down"]],
["skipped", ["skipped over"]],
# ["slow", ["slow speed"]],
# ["small", ["small size"]],
["soft", ["soft in texture", "soft to the touch"]],
["sole", ["sole of the foot"]],
["some time", ["some time to come"]],
["speck", ["small speck"]],
["speed", ["rate of speed"]],
["spell out", ["spell out in detail"]],
["spiked", ["spiked upward", "spiked upwards"]],
["spring", ["spring season"]],
["stranger", ["anonymous stranger"]],
["studio audience", ["live studio audience"]],
["subway", ["underground subway"]],
["sufficient", ["sufficient enough"]],
["summer", ["summer season"]],
["sure", ["absolutely sure"]],
["surprise", ["unexpected surprise"]],
["surround", ["completely surround"]],
["surrounded", ["surrounded on all sides"]],
["tall", ["tall in height", "tall in stature"]],
["telepathy", ["mental telepathy"]],
["ten", ["ten in number"]],
["these", ["these ones"]],
# ["they", ["they themselves"]],
["those", ["those ones"]],
["trench", ["open trench"]],
["truth", ["honest truth"]],
["tundra", ["frozen tundra"]],
["ultimatum", ["final ultimatum"]],
# ["undeniable", ["undeniable truth"]],
["undergraduate", ["undergraduate student"]],
# ["unintentional", ["unintentional mistake"]],
["vacillate", ["vacillate back and forth"]],
["veteran", ["former veteran"]],
["visible", ["visible to the eye"]],
["warn", ["warn in advance"]],
["warning", ["advance warning"]],
["water heater", ["hot water heater"]],
["in which we live", ["in which we live in"]],
["winter", ["winter season"]],
["witness", ["live witness"]],
["yakitori", ["yakitori chicken"]],
["yerba mate", ["yerba mate tea"]],
["yes", ["affirmative yes"]],
]
return preferred_forms_check(text, redundancies, err, msg)
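# Illustrative note (not part of the check itself): running
# check_atd("We met at twelve noon.") is expected to flag "twelve noon" and
# suggest plain "noon", since preferred_forms_check() maps each listed
# redundancy back to its preferred form.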
|
the-stack_0_9068 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPhilentropy(RPackage):
"""Similarity and Distance Quantification Between Probability Functions.
Computes 46 optimized distance and similarity measures for comparing
probability functions (Drost (2018) <doi:10.21105/joss.00765>). These
comparisons between probability functions have their foundations in a broad
range of scientific disciplines from mathematics to ecology. The aim of
this package is to provide a core framework for clustering, classification,
statistical inference, goodness-of-fit, non-parametric statistics,
information theory, and machine learning tasks that are based on comparing
univariate or multivariate probability functions."""
cran = "philentropy"
version('0.5.0', sha256='b39e9a825458f3377e23b2a133180566780e89019e9d22a6a5b7ca87c49c412f')
version('0.4.0', sha256='bfd30bf5635aab6a82716299a87d44cf96c7ab7f4ee069843869bcc85c357127')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'))
depends_on('r-dplyr', type=('build', 'run'))
depends_on('r-kernsmooth', type=('build', 'run'))
|
the-stack_0_9069 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""misc helper functions for pyLSV2"""
import struct
from datetime import datetime
def decode_system_parameters(result_set):
"""decode the result system parameter query
:param tuple result_set: bytes returned by the system parameter query command R_PR
:returns: dictionary with system parameter values
:rtype: dict
"""
message_length = len(result_set)
info_list = list()
# as per comment in eclipse plugin, there might be a difference between a programming station and a real machine
if message_length == 120:
info_list = struct.unpack('!14L8B8L2BH4B2L2HL', result_set)
elif message_length == 124:
info_list = struct.unpack('!14L8B8L2BH4B2L2HLL', result_set)
else:
raise ValueError('unexpected length {} of message content {}'.format(
message_length, result_set))
sys_par = dict()
sys_par['Marker_Start'] = info_list[0]
sys_par['Markers'] = info_list[1]
sys_par['Input_Start'] = info_list[2]
sys_par['Inputs'] = info_list[3]
sys_par['Output_Start'] = info_list[4]
sys_par['Outputs'] = info_list[5]
sys_par['Counter_Start'] = info_list[6]
sys_par['Counters'] = info_list[7]
sys_par['Timer_Start'] = info_list[8]
sys_par['Timers'] = info_list[9]
sys_par['Word_Start'] = info_list[10]
sys_par['Words'] = info_list[11]
sys_par['String_Start'] = info_list[12]
sys_par['Strings'] = info_list[13]
sys_par['String_Length'] = info_list[14]
sys_par['Input_Word_Start'] = info_list[22]
sys_par['Input Words'] = info_list[23]
sys_par['Output_Word_Start'] = info_list[24]
sys_par['Output_Words'] = info_list[25]
sys_par['LSV2_Version'] = info_list[30]
sys_par['LSV2_Version_Flags'] = info_list[31]
sys_par['Max_Block_Length'] = info_list[32]
sys_par['HDH_Bin_Version'] = info_list[33]
sys_par['HDH_Bin_Revision'] = info_list[34]
sys_par['ISO_Bin_Version'] = info_list[35]
sys_par['ISO_Bin_Revision'] = info_list[36]
sys_par['HardwareVersion'] = info_list[37]
sys_par['LSV2_Version_Flags_Ex'] = info_list[38]
sys_par['Max_Trace_Line'] = info_list[39]
sys_par['Scope_Channels'] = info_list[40]
sys_par['PW_Encryption_Key'] = info_list[41]
return sys_par
def decode_file_system_info(data_set):
"""decode result from file system entry
    :param bytes data_set: bytes returned by the system parameter query command R_FI or CR_DR
:returns: dictionary with file system entry parameters
:rtype: dict
"""
file_info = dict()
file_info['Size'] = struct.unpack('!L', data_set[:4])[0]
file_info['Timestamp'] = datetime.fromtimestamp(struct.unpack('!L', data_set[4:8])[0])
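    # attribute bit flags as interpreted below: 0x10 -> drive, 0x20 -> directory,
    # 0x40 -> write protected; entries with neither 0x10 nor 0x20 set are files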
    attributes = struct.unpack('!L', data_set[8:12])[0]
    file_info['Attributs'] = attributes
    file_info['is_file'] = False
    file_info['is_directory'] = False
    file_info['is_drive'] = False
    if attributes > 0:
        if bool(attributes & 0x10):
            file_info['is_drive'] = True
        elif bool(attributes & 0x20):
            file_info['is_directory'] = True
        else:
            file_info['is_file'] = True
    file_info['is_write_protected'] = bool(attributes & 0x40)
file_info['Name'] = data_set[12:].decode().strip('\x00').replace('\\', '/')
return file_info
def decode_directory_info(data_set):
"""decode result from directory entry
    :param bytes data_set: bytes returned by the system parameter query command R_DI
:returns: dictionary with file system entry parameters
:rtype: dict
"""
dir_info = dict()
dir_info['Free Size'] = struct.unpack('!L', data_set[:4])[0]
attribute_list = list()
for i in range(4, len(data_set[4:132]), 4):
attr = data_set[i:i + 4].decode().strip('\x00')
if len(attr) > 0:
attribute_list.append(attr)
dir_info['Dir_Attributs'] = attribute_list
dir_info['Attributs'] = struct.unpack('!32B', data_set[132:164])
dir_info['Path'] = data_set[164:].decode().strip('\x00').replace('\\', '/')
return dir_info
def decode_tool_information(data_set):
"""decode result from tool info
    :param bytes data_set: bytes returned by the system parameter query command R_RI for tool info
:returns: dictionary with tool info values
:rtype: dict
"""
tool_info = dict()
tool_info['Number'] = struct.unpack('!L', data_set[0:4])[0]
tool_info['Index'] = struct.unpack('!H', data_set[4:6])[0]
tool_info['Axis'] = {0: 'X', 1: 'Y', 2: 'Z'}.get(struct.unpack('!H', data_set[6:8])[0], 'unknown')
tool_info['Length'] = struct.unpack('<d', data_set[8:16])[0]
tool_info['Radius'] = struct.unpack('<d', data_set[16:24])[0]
return tool_info
def decode_override_information(data_set):
"""decode result from override info
    :param bytes data_set: bytes returned by the system parameter query command R_RI for override info
:returns: dictionary with override info values
:rtype: dict
"""
override_info = dict()
override_info['Feed_override']=struct.unpack('!L', data_set[0:4])[0]/100
override_info['Speed_override']=struct.unpack('!L', data_set[4:8])[0]/100
override_info['Rapid_override']=struct.unpack('!L', data_set[8:12])[0]/100
return override_info
|
the-stack_0_9070 | import pytest
from django.db import transaction
from django.db.utils import IntegrityError
from psqlextra.fields import HStoreField
from . import migrations
from .util import get_fake_model
def test_migration_create_drop_model():
"""Tests whether indexes are properly created
and dropped when creating and dropping a model."""
uniqueness = ["beer", "cookies"]
test = migrations.create_drop_model(
HStoreField(uniqueness=uniqueness), ["CREATE UNIQUE", "DROP INDEX"]
)
with test as calls:
assert len(calls["CREATE UNIQUE"]) == len(uniqueness)
assert len(calls["DROP INDEX"]) == len(uniqueness)
def test_migration_alter_db_table():
"""Tests whether indexes are renamed properly
when renaming the database table."""
test = migrations.alter_db_table(
HStoreField(uniqueness=["beer", "cookie"]),
["RENAME TO", "CREATE INDEX", "DROP INDEX"],
)
with test as calls:
# 1 rename for table, 2 for hstore keys
assert len(calls["RENAME TO"]) == 3
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 0
def test_add_field():
"""Tests whether adding a field properly
creates the indexes."""
test = migrations.add_field(
HStoreField(uniqueness=["beer"]), ["CREATE UNIQUE", "DROP INDEX"]
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 1
assert len(calls.get("DROP INDEX", [])) == 0
def test_remove_field():
"""Tests whether removing a field properly
removes the index."""
test = migrations.remove_field(
HStoreField(uniqueness=["beer"]), ["CREATE UNIQUE", "DROP INDEX"]
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 1
def test_alter_field_nothing():
"""Tests whether no indexes are dropped when not
changing anything in the uniqueness."""
test = migrations.alter_field(
HStoreField(uniqueness=["beer"]),
HStoreField(uniqueness=["beer"]),
["CREATE UNIQUE", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 0
def test_alter_field_add():
"""Tests whether only one index is created when
adding another key to the uniqueness."""
test = migrations.alter_field(
HStoreField(uniqueness=["beer"]),
HStoreField(uniqueness=["beer", "beer1"]),
["CREATE UNIQUE", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 1
assert len(calls.get("DROP INDEX", [])) == 0
def test_alter_field_remove():
"""Tests whether one index is dropped when removing
a key from uniqueness."""
test = migrations.alter_field(
HStoreField(uniqueness=["beer"]),
HStoreField(uniqueness=[]),
["CREATE UNIQUE", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 1
def test_alter_field_add_together():
"""Tests whether adding one index is created
when adding a "unique together"."""
test = migrations.alter_field(
HStoreField(uniqueness=["beer"]),
HStoreField(uniqueness=["beer", ("beer1", "beer2")]),
["CREATE UNIQUE", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 1
assert len(calls.get("DROP INDEX", [])) == 0
def test_alter_field_remove_together():
"""Tests whether adding one index is dropped
when adding a "unique together"."""
test = migrations.alter_field(
HStoreField(uniqueness=[("beer1", "beer2")]),
HStoreField(uniqueness=[]),
["CREATE UNIQUE", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 1
def test_rename_field():
"""Tests whether renaming a field doesn't
cause the index to be re-created."""
test = migrations.rename_field(
HStoreField(uniqueness=["beer", "cookies"]),
["RENAME TO", "CREATE INDEX", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("RENAME TO", [])) == 2
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 0
def test_enforcement():
"""Tests whether the constraints are actually
properly enforced."""
model = get_fake_model({"title": HStoreField(uniqueness=["en"])})
# should pass, table is empty and 'ar' does not have to be unique
model.objects.create(title={"en": "unique", "ar": "notunique"})
model.objects.create(title={"en": "elseunique", "ar": "notunique"})
# this should fail, key 'en' must be unique
with transaction.atomic():
with pytest.raises(IntegrityError):
model.objects.create(title={"en": "unique", "ar": "notunique"})
def test_enforcement_together():
"""Tests whether unique_together style constraints are
enforced properly."""
model = get_fake_model({"title": HStoreField(uniqueness=[("en", "ar")])})
model.objects.create(title={"en": "unique", "ar": "notunique"})
with transaction.atomic():
with pytest.raises(IntegrityError):
model.objects.create(title={"en": "unique", "ar": "notunique"})
model.objects.create(title={"en": "notunique", "ar": "unique"})
|
the-stack_0_9071 | from json import (
JSONDecodeError,
)
import logging
import os
from pathlib import (
Path,
)
import socket
import sys
import threading
from types import (
TracebackType,
)
from typing import (
Any,
Type,
)
from web3._utils.threads import (
Timeout,
)
from web3.types import (
RPCEndpoint,
RPCResponse,
)
from .base import (
JSONBaseProvider,
)
def get_ipc_socket(ipc_path: str, timeout: float=0.1) -> socket.socket:
if sys.platform == 'win32':
        # On Windows a named pipe is used. Simulate a socket with it.
from web3._utils.windows import NamedPipe
return NamedPipe(ipc_path)
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
return sock
class PersistantSocket:
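    """IPC socket kept open across requests; it is closed (and lazily
    re-opened on the next use) only when a request raises or reset() is called."""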
sock = None
def __init__(self, ipc_path: str) -> None:
self.ipc_path = ipc_path
def __enter__(self) -> socket.socket:
if not self.ipc_path:
raise FileNotFoundError("cannot connect to IPC socket at path: %r" % self.ipc_path)
if not self.sock:
self.sock = self._open()
return self.sock
def __exit__(
self, exc_type: Type[BaseException], exc_value: BaseException, traceback: TracebackType
) -> None:
# only close the socket if there was an error
if exc_value is not None:
try:
self.sock.close()
except Exception:
pass
self.sock = None
def _open(self) -> socket.socket:
return get_ipc_socket(self.ipc_path)
def reset(self) -> socket.socket:
self.sock.close()
self.sock = self._open()
return self.sock
# type ignored b/c missing return statement is by design here
def get_default_ipc_path() -> str: # type: ignore
if sys.platform == 'darwin':
ipc_path = os.path.expanduser(os.path.join(
"~",
"Library",
"Ethereum",
"geth.ipc"
))
if os.path.exists(ipc_path):
return ipc_path
ipc_path = os.path.expanduser(os.path.join(
"~",
"Library",
"Application Support",
"io.parity.ethereum",
"jsonrpc.ipc"
))
if os.path.exists(ipc_path):
return ipc_path
base_trinity_path = Path('~').expanduser() / '.local' / 'share' / 'trinity'
ipc_path = str(base_trinity_path / 'mainnet' / 'jsonrpc.ipc')
if Path(ipc_path).exists():
return str(ipc_path)
elif sys.platform.startswith('linux') or sys.platform.startswith('freebsd'):
ipc_path = os.path.expanduser(os.path.join(
"~",
".ethereum",
"geth.ipc"
))
if os.path.exists(ipc_path):
return ipc_path
ipc_path = os.path.expanduser(os.path.join(
"~",
".local",
"share",
"io.parity.ethereum",
"jsonrpc.ipc"
))
if os.path.exists(ipc_path):
return ipc_path
base_trinity_path = Path('~').expanduser() / '.local' / 'share' / 'trinity'
ipc_path = str(base_trinity_path / 'mainnet' / 'jsonrpc.ipc')
if Path(ipc_path).exists():
return str(ipc_path)
elif sys.platform == 'win32':
ipc_path = os.path.join(
"\\\\",
".",
"pipe",
"geth.ipc"
)
if os.path.exists(ipc_path):
return ipc_path
ipc_path = os.path.join(
"\\\\",
".",
"pipe",
"jsonrpc.ipc"
)
if os.path.exists(ipc_path):
return ipc_path
else:
raise ValueError(
"Unsupported platform '{0}'. Only darwin/linux/win32/freebsd are "
"supported. You must specify the ipc_path".format(sys.platform)
)
# type ignored b/c missing return statement is by design here
def get_dev_ipc_path() -> str: # type: ignore
if sys.platform == 'darwin':
tmpdir = os.environ.get('TMPDIR', '')
ipc_path = os.path.expanduser(os.path.join(
tmpdir,
"geth.ipc"
))
if os.path.exists(ipc_path):
return ipc_path
elif sys.platform.startswith('linux') or sys.platform.startswith('freebsd'):
ipc_path = os.path.expanduser(os.path.join(
"/tmp",
"geth.ipc"
))
if os.path.exists(ipc_path):
return ipc_path
elif sys.platform == 'win32':
ipc_path = os.path.join(
"\\\\",
".",
"pipe",
"geth.ipc"
)
if os.path.exists(ipc_path):
return ipc_path
ipc_path = os.path.join(
"\\\\",
".",
"pipe",
"jsonrpc.ipc"
)
if os.path.exists(ipc_path):
return ipc_path
else:
raise ValueError(
"Unsupported platform '{0}'. Only darwin/linux/win32/freebsd are "
"supported. You must specify the ipc_path".format(sys.platform)
)
class IPCProvider(JSONBaseProvider):
logger = logging.getLogger("web3.providers.IPCProvider")
_socket = None
def __init__(self, ipc_path: str=None, timeout: int=10, *args: Any, **kwargs: Any) -> None:
if ipc_path is None:
self.ipc_path = get_default_ipc_path()
elif isinstance(ipc_path, str) or isinstance(ipc_path, Path):
self.ipc_path = str(Path(ipc_path).expanduser().resolve())
else:
raise TypeError("ipc_path must be of type string or pathlib.Path")
self.timeout = timeout
self._lock = threading.Lock()
self._socket = PersistantSocket(self.ipc_path)
super().__init__()
def __str__(self) -> str:
return f"<{self.__class__.__name__} {self.ipc_path}>"
def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
self.logger.debug("Making request IPC. Path: %s, Method: %s",
self.ipc_path, method)
request = self.encode_rpc_request(method, params)
with self._lock, self._socket as sock:
try:
sock.sendall(request)
except BrokenPipeError:
# one extra attempt, then give up
sock = self._socket.reset()
sock.sendall(request)
raw_response = b""
with Timeout(self.timeout) as timeout:
while True:
try:
raw_response += sock.recv(4096)
except socket.timeout:
timeout.sleep(0)
continue
if raw_response == b"":
timeout.sleep(0)
elif has_valid_json_rpc_ending(raw_response):
try:
response = self.decode_rpc_response(raw_response)
except JSONDecodeError:
timeout.sleep(0)
continue
else:
return response
else:
timeout.sleep(0)
continue
# A valid JSON RPC response can only end in } or ] http://www.jsonrpc.org/specification
def has_valid_json_rpc_ending(raw_response: bytes) -> bool:
stripped_raw_response = raw_response.rstrip()
for valid_ending in [b"}", b"]"]:
if stripped_raw_response.endswith(valid_ending):
return True
else:
return False
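# Illustrative examples (sketch only):
#   has_valid_json_rpc_ending(b'{"jsonrpc": "2.0", "id": 1, "result": "0x0"}\n')  # -> True
#   has_valid_json_rpc_ending(b'data without a JSON-RPC ending')                  # -> False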
|
the-stack_0_9072 |
# This is my python cheat sheet
# Basic vector maths
def SumVector(a,b):
    total = [(a[0]+b[0]),(a[1]+b[1])]
    return total
def ProVector(a,s):
pro = [(s*a[0]),(s*a[1])]
return pro
a = [-1,2]
b = [4,5]
s = 10
print(f"Sum of a and b is:{SumVector(a,b)}")
print(f"Product of a and s is:{ProVector(a,s)}")
# Introduction to python dictionaries
prices = {'apple':4.99,'orange':3.99,'banana':2.99}
askf = input('apple , orange or banana ')
print(f'the price of {askf} is {prices[askf.lower()]}')
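# A safer lookup (example sketch): dict.get() returns a fallback value instead
# of raising KeyError when the requested fruit is not a key in prices.
print(f"the price of mango is {prices.get('mango', 'not listed')}")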
|
the-stack_0_9073 | # -*- coding: utf-8 -*-
# Copyright 2015 Fanficdownloader team, 2019 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import re
import codecs
# py2 vs py3 transition
from . import six
from .six.moves import configparser
from .six.moves.configparser import DEFAULTSECT, MissingSectionHeaderError, ParsingError
if six.PY2:
ConfigParser = configparser.SafeConfigParser
else: # PY3
ConfigParser = configparser.ConfigParser
from .six.moves import urllib
from .six.moves.urllib.parse import (urlencode, quote_plus)
from .six.moves.urllib.request import (build_opener, HTTPCookieProcessor, Request)
from .six.moves.urllib.error import HTTPError
from .six.moves import http_cookiejar as cl
from .six import text_type as unicode
from .six import string_types as basestring
from .six import ensure_binary, ensure_text
import time
import logging
import sys
import pickle
from . import exceptions
logger = logging.getLogger(__name__)
try:
import chardet
except ImportError:
chardet = None
from .gziphttp import GZipProcessor
from .htmlcleanup import reduce_zalgo
# All of the writers(epub,html,txt) and adapters(ffnet,twlt,etc)
# inherit from Configurable. The config file(s) uses ini format:
# [sections] with key:value settings.
#
# [defaults]
# titlepage_entries: category,genre, status
# [www.whofic.com]
# titlepage_entries: category,genre, status,dateUpdated,rating
# [epub]
# titlepage_entries: category,genre, status,datePublished,dateUpdated,dateCreated
# [www.whofic.com:epub]
# titlepage_entries: category,genre, status,datePublished
# [overrides]
# titlepage_entries: category
# Work around for fact that py3 apparently doesn't allow/ignore
# recursive imports like py2 does.
try:
from . import adapters
except ImportError:
import sys
if "fanficfare.adapters" in sys.modules:
adapters = sys.modules["fanficfare.adapters"]
elif "calibre_plugins.fanficfare_plugin.fanficfare.adapters" in sys.modules:
adapters = sys.modules["calibre_plugins.fanficfare_plugin.fanficfare.adapters"]
def re_compile(regex,line):
try:
return re.compile(regex,re.DOTALL)
except Exception as e:
raise exceptions.RegularExpresssionFailed(e,regex,line)
# fall back labels.
titleLabels = {
'category':'Category',
'genre':'Genre',
'language':'Language',
'status':'Status',
'series':'Series',
'characters':'Characters',
'ships':'Relationships',
'datePublished':'Published',
'dateUpdated':'Updated',
'dateCreated':'Packaged',
'rating':'Rating',
'warnings':'Warnings',
'numChapters':'Chapters',
'numWords':'Words',
'words_added':'Words Added', # logpage only
'site':'Site',
'publisher':'Publisher',
'storyId':'Story ID',
'authorId':'Author ID',
'extratags':'Extra Tags',
'title':'Title',
'storyUrl':'Story URL',
'sectionUrl':'Story URL Section',
'description':'Summary',
'author':'Author',
'authorUrl':'Author URL',
'formatname':'File Format',
'formatext':'File Extension',
'siteabbrev':'Site Abbrev',
'version':'Downloader Version'
}
formatsections = ['html','txt','epub','mobi']
othersections = ['defaults','overrides']
def get_valid_sections():
sites = adapters.getConfigSections()
sitesections = list(othersections)
for section in sites:
sitesections.append(section)
# also allows [www.base_efiction] and [www.base_xenforoforum]. Not
# likely to matter.
if section.startswith('www.'):
# add w/o www if has www
sitesections.append(section[4:])
else:
# add w/ www if doesn't www
sitesections.append('www.%s'%section)
allowedsections = []
allowedsections.extend(formatsections)
for section in sitesections:
allowedsections.append(section)
for f in formatsections:
allowedsections.append('%s:%s'%(section,f))
return allowedsections
def get_valid_list_entries():
return list(['category',
'genre',
'characters',
'ships',
'warnings',
'extratags',
'author',
'authorId',
'authorUrl',
'lastupdate',
])
boollist=['true','false']
base_xenforo2_list=['base_xenforo2forum',
'forums.sufficientvelocity.com',
]
base_xenforo_list=base_xenforo2_list+['base_xenforoforum',
'forums.spacebattles.com',
'forum.questionablequesting.com',
'www.alternatehistory.com',
]
def get_valid_set_options():
'''
    dict() mapping each option name to a tuple of (valid sites,
    valid formats, valid values); None in any slot means all are allowed.
This is to further restrict keywords to certain sections and/or
values. get_valid_keywords() below is the list of allowed
keywords. Any keyword listed here must also be listed there.
    This is what the plugin uses when you save personal.ini: it
    stops and points out possible errors in keyword
*values*. It doesn't flag 'bad' keywords. Note that it's
separate from color highlighting and most keywords need to be
added to both.
'''
valdict = {'collect_series':(None,None,boollist),
'include_titlepage':(None,None,boollist),
'include_tocpage':(None,None,boollist),
'is_adult':(None,None,boollist),
'keep_style_attr':(None,None,boollist),
'keep_title_attr':(None,None,boollist),
'make_firstimage_cover':(None,None,boollist),
'never_make_cover':(None,None,boollist),
'nook_img_fix':(None,None,boollist),
'replace_br_with_p':(None,None,boollist),
'replace_hr':(None,None,boollist),
'sort_ships':(None,None,boollist),
'strip_chapter_numbers':(None,None,boollist),
'mark_new_chapters':(None,None,boollist+['latestonly']),
'titlepage_use_table':(None,None,boollist),
'use_ssl_unverified_context':(None,None,boollist),
'continue_on_chapter_error':(None,None,boollist),
'conditionals_use_lists':(None,None,boollist),
'dedup_chapter_list':(None,None,boollist),
'add_chapter_numbers':(None,None,boollist+['toconly']),
'check_next_chapter':(['fanfiction.net'],None,boollist),
'tweak_fg_sleep':(['fanfiction.net'],None,boollist),
'skip_author_cover':(['fanfiction.net'],None,boollist),
'fix_fimf_blockquotes':(['fimfiction.net'],None,boollist),
'fail_on_password':(['fimfiction.net'],None,boollist),
'keep_prequel_in_description':(['fimfiction.net'],None,boollist),
'include_author_notes':(['fimfiction.net'],None,boollist),
'do_update_hook':(['fimfiction.net',
'archiveofourown.org'],None,boollist),
'always_login':(['archiveofourown.org']+base_xenforo_list,None,boollist),
'use_archived_author':(['archiveofourown.org'],None,boollist),
'use_view_full_work':(['archiveofourown.org'],None,boollist),
'remove_authorfootnotes_on_update':(['archiveofourown.org'],None,boollist),
'force_login':(['phoenixsong.net'],None,boollist),
'non_breaking_spaces':(['fictionmania.tv'],None,boollist),
'download_text_version':(['fictionmania.tv'],None,boollist),
'universe_as_series':(['storiesonline.net','finestories.com','scifistories.com'],None,boollist),
'strip_text_links':(['bloodshedverse.com','asexstories.com'],None,boollist),
'centeredcat_to_characters':(['tthfanfic.org'],None,boollist),
'pairingcat_to_characters_ships':(['tthfanfic.org'],None,boollist),
'romancecat_to_characters_ships':(['tthfanfic.org'],None,boollist),
'use_meta_keywords':(['literotica.com'],None,boollist),
'chapter_categories_use_all':(['literotica.com'],None,boollist),
'clean_chapter_titles':(['literotica.com'],None,boollist),
'description_in_chapter':(['literotica.com'],None,boollist),
'inject_chapter_title':(['asianfanfics.com','storiesonline.net','finestories.com','scifistories.com'],None,boollist),
'auto_sub':(['asianfanfics.com'],None,boollist),
# eFiction Base adapters allow bulk_load
# kept forgetting to add them, so now it's automatic.
'bulk_load':(adapters.get_bulk_load_sites(),
None,boollist),
'include_logpage':(None,['epub'],boollist+['smart']),
'logpage_at_end':(None,['epub'],boollist),
'calibre_series_meta':(None,['epub'],boollist),
'windows_eol':(None,['txt'],boollist),
'include_images':(None,['epub','html'],boollist),
'grayscale_images':(None,['epub','html'],boollist),
'no_image_processing':(None,['epub','html'],boollist),
'normalize_text_links':(None,['epub','html'],boollist),
'internalize_text_links':(None,['epub','html'],boollist),
'capitalize_forumtags':(base_xenforo_list,None,boollist),
'minimum_threadmarks':(base_xenforo_list,None,None),
'first_post_title':(base_xenforo_list,None,None),
'always_include_first_post':(base_xenforo_list,None,boollist),
'always_reload_first_chapter':(base_xenforo_list,None,boollist),
'always_use_forumtags':(base_xenforo_list,None,boollist),
'use_reader_mode':(base_xenforo_list,None,boollist),
'author_avatar_cover':(base_xenforo_list,None,boollist),
'remove_spoilers':(base_xenforo_list+['royalroad.com'],None,boollist),
'legend_spoilers':(base_xenforo_list+['royalroad.com', 'fiction.live'],None,boollist),
'apocrypha_to_omake':(base_xenforo_list,None,boollist),
'replace_failed_smilies_with_alt_text':(base_xenforo_list,None,boollist),
'use_threadmark_wordcounts':(base_xenforo_list,None,boollist),
'always_include_first_post_chapters':(base_xenforo_list,None,boollist),
'order_threadmarks_by_date':(base_xenforo_list,None,boollist),
'use_threadmarks_description':(base_xenforo2_list,None,boollist),
'use_threadmarks_status':(base_xenforo2_list,None,boollist),
'use_threadmarks_cover':(base_xenforo2_list,None,boollist),
'skip_sticky_first_posts':(base_xenforo2_list,None,boollist),
'fix_pseudo_html': (['webnovel.com'], None, boollist),
'fix_excess_space': (['novelonlinefull.com', 'novelall.com'], ['epub', 'html'], boollist),
'dedup_order_chapter_list': (['wuxiaworld.co'], None, boollist),
               'show_nsfw_cover_images': (['fiction.live'], None, boollist),
               'show_timestamps': (['fiction.live'], None, boollist),
}
return dict(valdict)
def get_valid_scalar_entries():
return list(['series',
'seriesUrl',
'language',
'status',
'datePublished',
'dateUpdated',
'dateCreated',
'rating',
'numChapters',
'numWords',
'words_added', # logpage only.
'site',
'publisher',
'storyId',
'title',
'titleHTML',
'storyUrl',
'sectionUrl',
'description',
'formatname',
'formatext',
'siteabbrev',
'version',
# internal stuff.
'authorHTML',
'seriesHTML',
'langcode',
'output_css',
'cover_image',
])
def get_valid_entries():
return get_valid_list_entries() + get_valid_scalar_entries()
# *known* keywords -- or rather regexps for them.
def get_valid_keywords():
'''
Among other things, this list is used by the color highlighting in
personal.ini editing in plugin. Note that it's separate from
value checking and most keywords need to be added to both.
'''
return list(['(in|ex)clude_metadata_(pre|post)',
'add_chapter_numbers',
'add_genre_when_multi_category',
'add_category_when_multi_category',
'adult_ratings',
'allow_unsafe_filename',
'always_overwrite',
'anthology_tags',
'anthology_title_pattern',
'background_color',
'bulk_load',
'chapter_end',
'chapter_start',
'chapter_title_strip_pattern',
'chapter_title_def_pattern',
'chapter_title_add_pattern',
'chapter_title_new_pattern',
'chapter_title_addnew_pattern',
'title_chapter_range_pattern',
'mark_new_chapters',
'check_next_chapter',
'skip_author_cover',
'collect_series',
'comma_entries',
'connect_timeout',
'convert_images_to',
'cover_content',
'cover_exclusion_regexp',
'custom_columns_settings',
'dateCreated_format',
'datePublished_format',
'dateUpdated_format',
'default_cover_image',
'description_limit',
'do_update_hook',
'use_archived_author',
'use_view_full_work',
'always_login',
'exclude_notes',
'remove_authorfootnotes_on_update',
'exclude_editor_signature',
'extra_logpage_entries',
'extra_subject_tags',
'extra_titlepage_entries',
'extra_valid_entries',
'extratags',
'extracategories',
'extragenres',
'extracharacters',
'extraships',
'extrawarnings',
'fail_on_password',
'file_end',
'file_start',
'fileformat',
'find_chapters',
'fix_fimf_blockquotes',
'keep_prequel_in_description',
'include_author_notes',
'force_login',
'generate_cover_settings',
'grayscale_images',
'image_max_size',
'include_images',
'include_logpage',
'logpage_at_end',
'calibre_series_meta',
'include_subject_tags',
'include_titlepage',
'include_tocpage',
'chardet_confidence_limit',
'is_adult',
'join_string_authorHTML',
'keep_style_attr',
'keep_title_attr',
'keep_html_attrs',
'replace_tags_with_spans',
'keep_empty_tags',
'keep_summary_html',
'logpage_end',
'logpage_entries',
'logpage_entry',
'logpage_start',
'logpage_update_end',
'logpage_update_start',
'make_directories',
'make_firstimage_cover',
'make_linkhtml_entries',
'max_fg_sleep',
'max_fg_sleep_at_downloads',
'min_fg_sleep',
'never_make_cover',
'cover_min_size',
'no_image_processing',
'non_breaking_spaces',
'download_text_version',
'nook_img_fix',
'output_css',
'output_filename',
'output_filename_safepattern',
'password',
'post_process_cmd',
'rating_titles',
'remove_transparency',
'replace_br_with_p',
'replace_hr',
'replace_xbr_with_hr',
'replace_metadata',
'slow_down_sleep_time',
'sort_ships',
'sort_ships_splits',
'strip_chapter_numbers',
'strip_chapter_numeral',
'strip_text_links',
'centeredcat_to_characters',
'pairingcat_to_characters_ships',
'romancecat_to_characters_ships',
'use_meta_keywords',
'chapter_categories_use_all',
'clean_chapter_titles',
'conditionals_use_lists',
'description_in_chapter',
'inject_chapter_title',
'auto_sub',
'titlepage_end',
'titlepage_entries',
'titlepage_entry',
'titlepage_no_title_entry',
'titlepage_start',
'titlepage_use_table',
'titlepage_wide_entry',
'tocpage_end',
'tocpage_entry',
'tocpage_start',
'tweak_fg_sleep',
'universe_as_series',
'use_ssl_unverified_context',
'user_agent',
'username',
'website_encodings',
'wide_titlepage_entries',
'windows_eol',
'wrap_width',
'zip_filename',
'zip_output',
'capitalize_forumtags',
'continue_on_chapter_error',
'chapter_title_error_mark',
'minimum_threadmarks',
'first_post_title',
'always_include_first_post',
'always_reload_first_chapter',
'always_use_forumtags',
'use_reader_mode',
'author_avatar_cover',
'reader_posts_per_page',
'remove_spoilers',
'legend_spoilers',
'apocrypha_to_omake',
'skip_threadmarks_categories',
'normalize_text_links',
'internalize_text_links',
'replace_failed_smilies_with_alt_text',
'use_threadmark_wordcounts',
'always_include_first_post_chapters',
'order_threadmarks_by_date',
'use_threadmarks_description',
'use_threadmarks_status',
'use_threadmarks_cover',
'skip_sticky_first_posts',
'datethreadmark_format',
'fix_pseudo_html',
'fix_excess_space',
'dedup_order_chapter_list',
'ignore_chapter_url_list',
'dedup_chapter_list',
'show_timestamps',
'show_nsfw_cover_images',
'show_spoiler_tags',
'max_zalgo',
'epub_version',
])
# *known* entry keywords -- or rather regexps for them.
def get_valid_entry_keywords():
return list(['%s_(label|format)',
'(default_value|include_in|join_string|keep_in_order)_%s',])
# Moved here for test_config.
def make_generate_cover_settings(param):
vlist = []
for line in param.splitlines():
if "=>" in line:
try:
(template,regexp,setting) = [ x.strip() for x in line.split("=>") ]
re_compile(regexp,line)
vlist.append((template,regexp,setting))
except Exception as e:
raise exceptions.PersonalIniFailed(e,line,param)
return vlist
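# Reference note on the expected value format (inferred from the parsing
# above): each non-blank line containing "=>" must split into exactly three
# parts, "template=>regexp=>setting"; an invalid regexp is reported as
# PersonalIniFailed together with the offending line.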
class Configuration(ConfigParser):
def __init__(self, sections, fileform, lightweight=False):
site = sections[-1] # first section is site DN.
ConfigParser.__init__(self)
self.lightweight = lightweight
self.use_pagecache = False # default to false for old adapters.
self.linenos=dict() # key by section or section,key -> lineno
## [injected] section has even less priority than [defaults]
self.sectionslist = ['defaults','injected']
## add other sections (not including site DN) after defaults,
## but before site-specific.
for section in sections[:-1]:
self.addConfigSection(section)
if site.startswith("www."):
sitewith = site
sitewithout = site.replace("www.","")
else:
sitewith = "www."+site
sitewithout = site
self.addConfigSection(sitewith)
self.addConfigSection(sitewithout)
if fileform:
self.addConfigSection(fileform)
## add other sections:fileform (not including site DN)
## after fileform, but before site-specific:fileform.
for section in sections[:-1]:
self.addConfigSection(section+":"+fileform)
self.addConfigSection(sitewith+":"+fileform)
self.addConfigSection(sitewithout+":"+fileform)
self.addConfigSection("overrides")
self.listTypeEntries = get_valid_list_entries()
self.validEntries = get_valid_entries()
self.url_config_set = False
self.override_sleep = None
self.cookiejar = self.get_empty_cookiejar()
self.opener = build_opener(HTTPCookieProcessor(self.cookiejar),GZipProcessor())
self.pagecache = self.get_empty_pagecache()
self.save_cache_file = None
self.save_cookiejar_file = None
def section_url_names(self,domain,section_url_f):
## domain is passed as a method to limit the damage if/when an
## adapter screws up _section_url
domain = domain.replace('www.','') ## let's not confuse the issue any more than it is.
try:
## OrderDict (the default for ConfigParser) has to be
## reconstructed completely because removing and re-adding
## a section would mess up the order.
## assumes _dict and _sections from ConfigParser parent.
self._sections = self._dict((section_url_f(k) if (domain in k and 'http' in k) else k, v) for k, v in six.viewitems(self._sections))
# logger.debug(self._sections.keys())
except Exception as e:
logger.warning("Failed to perform section_url_names: %s"%e)
def addUrlConfigSection(self,url):
if not self.lightweight: # don't need when just checking for normalized URL.
# replace if already set once.
if self.url_config_set:
self.sectionslist[self.sectionslist.index('overrides')+1]=url
else:
self.addConfigSection(url,'overrides')
self.url_config_set=True
def addConfigSection(self,section,before=None):
if section not in self.sectionslist: # don't add if already present.
if before is None:
self.sectionslist.insert(0,section)
else:
## because sectionslist is hi-pri first, lo-pri last,
## 'before' means after in the list.
self.sectionslist.insert(self.sectionslist.index(before)+1,section)
def isListType(self,key):
return key in self.listTypeEntries or self.hasConfig("include_in_"+key)
def isValidMetaEntry(self, key):
return key in self.getValidMetaList()
def getValidMetaList(self):
return self.validEntries + self.getConfigList("extra_valid_entries")
# used by adapters & writers, non-convention naming style
def hasConfig(self, key):
return self.has_config(self.sectionslist, key)
def has_config(self, sections, key):
for section in sections:
try:
self.get(section,key)
#print("found %s in section [%s]"%(key,section))
return True
except:
try:
self.get(section,key+"_filelist")
#print("found %s_filelist in section [%s]"%(key,section))
return True
except:
try:
self.get(section,"add_to_"+key)
#print("found add_to_%s in section [%s]"%(key,section))
return True
except:
pass
return False
# used by adapters & writers, non-convention naming style
def getConfig(self, key, default=""):
return self.get_config(self.sectionslist,key,default)
def get_config(self, sections, key, default=""):
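        ## Lookup order (summary of the logic below): a <key>_filelist whose
        ## files can be fetched overrides <key>; otherwise the sections are
        ## searched highest-priority-first and the first hit wins; finally any
        ## add_to_<key> values are appended, lowest-priority section first.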
val = default
val_files = []
if not key.endswith("_filelist"):
## <key>_filelist overrides <key>, but add_to_<key> is
## still used. By using self.get_config_list(),
## add_to_<key>_filelist also works. (But not
## <key>_filelist_filelist--that way lies madness--and
## infinite recursion.) self.get_config_list() also does
## the list split & clean up.
val_files = self.get_config_list(sections, key+"_filelist")
file_val = False
if val_files:
val = ''
for v in val_files:
try:
val = val + self._fetchUrl(v)
file_val = True
except:
pass
if not file_val:
logger.warning("All files for (%s) failed! Using (%s) instead. Filelist: (%s)"%
(key+"_filelist",key,val_files))
if not file_val:
for section in sections:
try:
val = self.get(section,key)
if val and val.lower() == "false":
val = False
#print("getConfig(%s)=[%s]%s" % (key,section,val))
break
except (configparser.NoOptionError, configparser.NoSectionError) as e:
pass
for section in sections[::-1]:
# 'martian smiley' [::-1] reverses list by slicing whole list with -1 step.
try:
val = val + self.get(section,"add_to_"+key)
#print("getConfig(add_to_%s)=[%s]%s" % (key,section,val))
except (configparser.NoOptionError, configparser.NoSectionError) as e:
pass
return val
# split and strip each.
def get_config_list(self, sections, key, default=[]):
vlist = re.split(r'(?<!\\),',self.get_config(sections,key)) # don't split on \,
vlist = [x for x in [ v.strip().replace(r'\,',',') for v in vlist ] if x !='']
#print("vlist("+key+"):"+unicode(vlist))
if not vlist:
return default
else:
return vlist
# used by adapters & writers, non-convention naming style
def getConfigList(self, key, default=[]):
return self.get_config_list(self.sectionslist, key, default)
# Moved here for test_config.
def get_generate_cover_settings(self):
return make_generate_cover_settings(self.getConfig('generate_cover_settings'))
def get_lineno(self,section,key=None):
if key:
return self.linenos.get(section+','+key,None)
else:
return self.linenos.get(section,None)
## Copied from Python 2.7 library so as to make read utf8.
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = codecs.open(filename,encoding='utf-8')
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
## Copied from Python 2.7 library so as to make it save linenos too.
#
# Regular expressions for parsing section headers and options.
#
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname] = "%s\n%s" % (cursect[optname], value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
self.linenos[sectname]=lineno
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
if not e:
e = ParsingError(fpname)
e.append(lineno, u'(Line outside section) '+line)
#raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self.OPTCRE.match(line) # OPTCRE instead of
# _optcre so it works
# with python 2.6
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
optname = self.optionxform(optname.rstrip())
cursect[optname] = optval
self.linenos[cursect['__name__']+','+optname]=lineno
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def test_config(self):
errors=[]
## too complicated right now to enforce
## get_valid_set_options() warnings on teststory and
## [storyUrl] sections.
allow_all_sections_re = re.compile(r'^(teststory:(defaults|[0-9]+)|https?://.*)$')
allowedsections = get_valid_sections()
clude_metadata_re = re.compile(r'(add_to_)?(in|ex)clude_metadata_(pre|post)$')
replace_metadata_re = re.compile(r'(add_to_)?replace_metadata$')
from .story import set_in_ex_clude, make_replacements
custom_columns_settings_re = re.compile(r'(add_to_)?custom_columns_settings$')
generate_cover_settings_re = re.compile(r'(add_to_)?generate_cover_settings$')
valdict = get_valid_set_options()
for section in self.sections():
allow_all_section = allow_all_sections_re.match(section)
if section not in allowedsections and not allow_all_section:
errors.append((self.get_lineno(section),"Bad Section Name: [%s]"%section))
else:
sitename = section.replace('www.','')
if ':' in sitename:
formatname = sitename[sitename.index(':')+1:]
sitename = sitename[:sitename.index(':')]
elif sitename in formatsections:
formatname = sitename
sitename = None
elif sitename in othersections:
formatname = None
sitename = None
## check each keyword in section. Due to precedence
## order of sections, it's possible for bad lines to
## never be used.
for keyword,value in self.items(section):
try:
## check regex bearing keywords first. Each
## will raise exceptions if flawed.
if clude_metadata_re.match(keyword):
set_in_ex_clude(value)
if replace_metadata_re.match(keyword):
make_replacements(value)
if generate_cover_settings_re.match(keyword):
make_generate_cover_settings(value)
# if custom_columns_settings_re.match(keyword):
#custom_columns_settings:
# cliches=>#acolumn
# themes=>#bcolumn,a
# timeline=>#ccolumn,n
# "FanFiction"=>#collection
if not allow_all_section:
def make_sections(x):
return '['+'], ['.join(x)+']'
if keyword in valdict:
(valsites,valformats,vals)=valdict[keyword]
if valsites != None and sitename != None and sitename not in valsites:
errors.append((self.get_lineno(section,keyword),"%s not valid in section [%s] -- only valid in %s sections."%(keyword,section,make_sections(valsites))))
if valformats != None and formatname != None and formatname not in valformats:
errors.append((self.get_lineno(section,keyword),"%s not valid in section [%s] -- only valid in %s sections."%(keyword,section,make_sections(valformats))))
if vals != None and value not in vals:
errors.append((self.get_lineno(section,keyword),"%s not a valid value for %s"%(value,keyword)))
## skipping output_filename_safepattern
## regex--not used with plugin and this isn't
## used with CLI/web yet.
except Exception as e:
errors.append((self.get_lineno(section,keyword),"Error:%s in (%s:%s)"%(e,keyword,value)))
return errors
#### methods for fetching. Moved here from base_adapter when
#### *_filelist feature was added.
@staticmethod
def get_empty_cookiejar():
return cl.LWPCookieJar()
@staticmethod
def get_empty_pagecache():
return {}
def get_cookiejar(self):
return self.cookiejar
def set_cookiejar(self,cj,save_cookiejar_file=None):
self.cookiejar = cj
self.save_cookiejar_file = save_cookiejar_file
saveheaders = self.opener.addheaders
self.opener = build_opener(HTTPCookieProcessor(self.cookiejar),GZipProcessor())
self.opener.addheaders = saveheaders
def load_cookiejar(self,filename):
'''
        Needs to be called after the adapter is created, but before any
        fetches are done. Takes a file *name*.
'''
self.get_cookiejar().load(filename, ignore_discard=True, ignore_expires=True)
def get_pagecache(self):
return self.pagecache
def set_pagecache(self,d,save_cache_file=None):
self.save_cache_file = save_cache_file
self.pagecache=d
def _get_cachekey(self, url, parameters=None, headers=None):
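        ## Cache key is the URL plus any sorted parameters/headers, so the same
        ## URL fetched with different form data or headers is cached separately.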
keylist=[url]
if parameters != None:
keylist.append('&'.join('{0}={1}'.format(key, val) for key, val in sorted(parameters.items())))
if headers != None:
keylist.append('&'.join('{0}={1}'.format(key, val) for key, val in sorted(headers.items())))
return unicode('?'.join(keylist))
def _has_cachekey(self,cachekey):
return self.use_pagecache and cachekey in self.get_pagecache()
def _get_from_pagecache(self,cachekey):
if self.use_pagecache:
return self.get_pagecache().get(cachekey)
else:
return None
def _set_to_pagecache(self,cachekey,data,redirectedurl):
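        ## Persist the page cache (and cookie jar) to their backing files when
        ## configured, so a later run can reuse fetched pages without refetching.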
if self.use_pagecache:
self.get_pagecache()[cachekey] = (data,ensure_text(redirectedurl))
if self.save_cache_file:
with open(self.save_cache_file,'wb') as jout:
pickle.dump(self.get_pagecache(),jout,protocol=2)
if self.save_cookiejar_file:
self.get_cookiejar().save(self.save_cookiejar_file)
## website encoding(s)--in theory, each website reports the character
## encoding they use for each page. In practice, some sites report it
## incorrectly. Each adapter has a default list, usually "utf8,
## Windows-1252" or "Windows-1252, utf8". The special value 'auto'
## will call chardet and use the encoding it reports if it has +90%
## confidence. 'auto' is not reliable. 1252 is a superset of
## iso-8859-1. Most sites that claim to be iso-8859-1 (and some that
## claim to be utf8) are really windows-1252.
def _decode(self,data):
if not hasattr(data,'decode'):
## py3 str() from pickle doesn't have .decode and is
## already decoded.
return data
decode = self.getConfigList('website_encodings',
default=["utf8",
"Windows-1252",
"iso-8859-1"])
for code in decode:
try:
logger.debug("Encoding:%s"%code)
errors=None
if ':' in code:
(code,errors)=code.split(':')
if code == "auto":
if not chardet:
logger.info("chardet not available, skipping 'auto' encoding")
continue
detected = chardet.detect(data)
#print(detected)
if detected['confidence'] > float(self.getConfig("chardet_confidence_limit",0.9)):
logger.debug("using chardet detected encoding:%s(%s)"%(detected['encoding'],detected['confidence']))
code=detected['encoding']
else:
logger.debug("chardet confidence too low:%s(%s)"%(detected['encoding'],detected['confidence']))
continue
if errors == 'ignore': # only allow ignore.
return data.decode(code,errors='ignore')
else:
return data.decode(code)
except Exception as e:
logger.debug("code failed:"+code)
logger.debug(e)
pass
logger.info("Could not decode story, tried:%s Stripping non-ASCII."%decode)
try:
# python2
return "".join([x for x in data if ord(x) < 128])
except TypeError:
# python3
return "".join([chr(x) for x in data if x < 128])
def _progressbar(self):
if self.getConfig('progressbar'):
sys.stdout.write('.')
sys.stdout.flush()
def _do_reduce_zalgo(self,data):
max_zalgo = int(self.getConfig('max_zalgo',-1))
if max_zalgo > -1:
logger.debug("Applying max_zalgo:%s"%max_zalgo)
try:
return reduce_zalgo(data,max_zalgo)
except Exception as e:
logger.warning("reduce_zalgo failed(%s), continuing."%e)
return data
# Assumes application/x-www-form-urlencoded. parameters, headers are dict()s
def _postUrl(self, url,
parameters={},
headers={},
extrasleep=None,
usecache=True):
'''
When should cache be cleared or not used? logins...
extrasleep is primarily for ffnet adapter which has extra
        sleeps. Passed into fetches so it can be bypassed on
        cache hits.
'''
url = quote_plus(ensure_binary(url),safe=';/?:@&=+$,%&#')
if self.getConfig('force_https'): ## For developer testing only.
url = url.replace("http:","https:")
cachekey=self._get_cachekey(url, parameters, headers)
if usecache and self._has_cachekey(cachekey) and not cachekey.startswith('file:'):
logger.debug("#####################################\npagecache(POST) HIT: %s"%safe_url(cachekey))
data,redirecturl = self._get_from_pagecache(cachekey)
return data
logger.debug("#####################################\npagecache(POST) MISS: %s"%safe_url(cachekey))
if not cachekey.startswith('file:'): # don't sleep for file: URLs.
self.do_sleep(extrasleep)
## Request assumes POST when data!=None. Also assumes data
## is application/x-www-form-urlencoded.
if 'Content-type' not in headers:
headers['Content-type']='application/x-www-form-urlencoded'
if 'Accept' not in headers:
headers['Accept']="text/html,*/*"
# logger.debug("POST http login for SB xf2test %s"%url)
# if "xf2test" in url:
# import base64
# base64string = base64.encodestring(b"sbreview2019:Fs2PwuVE9").replace(b'\n', b'')
# headers['Authorization']=b"Basic %s" % base64string
# logger.debug("http login for SB xf2test")
req = Request(url,
data=ensure_binary(urlencode(parameters)),
headers=headers)
## Specific UA because too many sites are blocking the default python UA.
self.opener.addheaders = [('User-Agent', self.getConfig('user_agent')),
('X-Clacks-Overhead','GNU Terry Pratchett')]
data = self._do_reduce_zalgo(self._decode(self.opener.open(req,None,float(self.getConfig('connect_timeout',30.0))).read()))
self._progressbar()
## postURL saves data to the pagecache *after* _decode() while
## fetchRaw saves it *before* _decode()--because raw.
self._set_to_pagecache(cachekey,data,url)
return data
def _fetchUrl(self, url,
parameters=None,
usecache=True,
extrasleep=None):
return self._fetchUrlOpened(url,
parameters,
usecache,
extrasleep)[0]
def _fetchUrlRawOpened(self, url,
parameters=None,
extrasleep=None,
usecache=True,
referer=None):
'''
When should cache be cleared or not used? logins...
extrasleep is primarily for ffnet adapter which has extra
        sleeps. Passed into fetches so it can be bypassed on
        cache hits.
'''
if not url.startswith('file:'): # file fetches fail on + for space
url = quote_plus(ensure_binary(url),safe=';/?:@&=+$,%&#')
if self.getConfig('force_https'): ## For developer testing only.
url = url.replace("http:","https:")
cachekey=self._get_cachekey(url, parameters)
if usecache and self._has_cachekey(cachekey) and not cachekey.startswith('file:'):
logger.debug("#####################################\npagecache(GET) HIT: %s"%safe_url(cachekey))
data,redirecturl = self._get_from_pagecache(cachekey)
class FakeOpened:
def __init__(self,data,url):
self.data=data
self.url=url
def geturl(self): return self.url
def read(self): return self.data
return (data,FakeOpened(data,redirecturl))
logger.debug("#####################################\npagecache(GET) MISS: %s"%safe_url(cachekey))
# print(self.get_pagecache().keys())
if not cachekey.startswith('file:'): # don't sleep for file: URLs.
self.do_sleep(extrasleep)
## Specific UA because too many sites are blocking the default python UA.
headers = [('User-Agent', self.getConfig('user_agent')),
## starslibrary.net throws a "HTTP Error 403: Bad
## Behavior" over the X-Clacks-Overhead. Which
## both against standard and rather a dick-move.
#('X-Clacks-Overhead','GNU Terry Pratchett'),
]
if referer:
## hpfanficarchive.com complains about Referer: None.
## Could have defaulted to "" instead, but this way it's
            ## not present at all.
headers.append(('Referer',referer))
# logger.debug("GET http login for SB xf2test %s"%url)
# if "xf2test" in url:
# import base64
# base64string = base64.encodestring(b"sbreview2019:Fs2PwuVE9").replace(b'\n', b'')
# headers.append(('Authorization',b"Basic %s" % base64string))
# logger.debug("http login for SB xf2test")
self.opener.addheaders = headers
if parameters != None:
opened = self.opener.open(url,
ensure_binary(urlencode(parameters)),
float(self.getConfig('connect_timeout',30.0)))
else:
opened = self.opener.open(url,
None,
float(self.getConfig('connect_timeout',30.0)))
self._progressbar()
data = opened.read()
## postURL saves data to the pagecache *after* _decode() while
## fetchRaw saves it *before* _decode()--because raw.
self._set_to_pagecache(cachekey,data,opened.url)
return (data,opened)
def set_sleep(self,val):
logger.debug("\n===========\n set sleep time %s\n==========="%val)
self.override_sleep = val
def do_sleep(self,extrasleep=None):
if extrasleep:
time.sleep(float(extrasleep))
if self.override_sleep:
time.sleep(float(self.override_sleep))
elif self.getConfig('slow_down_sleep_time'):
time.sleep(float(self.getConfig('slow_down_sleep_time')))
# parameters is a dict()
def _fetchUrlOpened(self, url,
parameters=None,
usecache=True,
extrasleep=None,
referer=None):
excpt=None
if url.startswith("file://"):
# only one try for file:s.
sleeptimes = [0]
else:
sleeptimes = [0, 0.5, 4, 9]
for sleeptime in sleeptimes:
time.sleep(sleeptime)
try:
(data,opened)=self._fetchUrlRawOpened(url,
parameters=parameters,
usecache=usecache,
extrasleep=extrasleep,
referer=referer)
return (self._do_reduce_zalgo(self._decode(data)),opened)
except HTTPError as he:
excpt=he
if he.code in (403,404,410):
logger.debug("Caught an exception reading URL: %s Exception %s."%(unicode(safe_url(url)),unicode(he)))
                    break # break out on 403/404/410
## trekfanfiction.net has started returning the page,
## but with a 500 code. We can use the HTTPError as
## the 'opened' in such case.
if he.code == 500 and 'trekfanfiction.net' in url:
data = he.read()
return (self._do_reduce_zalgo(self._decode(data)),he)
except Exception as e:
excpt=e
logger.debug("Caught an exception reading URL: %s sleeptime(%s) Exception %s."%(unicode(safe_url(url)),sleeptime,unicode(e)))
raise
logger.debug("Giving up on %s" %safe_url(url))
logger.debug(excpt, exc_info=True)
raise(excpt)
# extended by adapter, writer and story for ease of calling configuration.
class Configurable(object):
def __init__(self, configuration):
self.configuration = configuration
## use_pagecache() is on adapters--not all have been updated
## to deal with caching correctly
if hasattr(self, 'use_pagecache'):
self.configuration.use_pagecache = self.use_pagecache()
def section_url_names(self,domain,section_url_f):
return self.configuration.section_url_names(domain,section_url_f)
def get_configuration(self):
return self.configuration
def is_lightweight(self):
return self.configuration.lightweight
def addUrlConfigSection(self,url):
self.configuration.addUrlConfigSection(url)
def isListType(self,key):
return self.configuration.isListType(key)
def isValidMetaEntry(self, key):
return self.configuration.isValidMetaEntry(key)
def getValidMetaList(self):
return self.configuration.getValidMetaList()
def hasConfig(self, key):
return self.configuration.hasConfig(key)
def has_config(self, sections, key):
return self.configuration.has_config(sections, key)
def getConfig(self, key, default=""):
return self.configuration.getConfig(key,default)
def get_config(self, sections, key, default=""):
return self.configuration.get_config(sections,key,default)
def getConfigList(self, key, default=[]):
return self.configuration.getConfigList(key,default)
def get_config_list(self, sections, key):
return self.configuration.get_config_list(sections,key)
def get_label(self, entry):
if self.hasConfig(entry+"_label"):
label=self.getConfig(entry+"_label")
elif entry in titleLabels:
label=titleLabels[entry]
else:
label=entry.title()
return label
def do_sleep(self,extrasleep=None):
return self.configuration.do_sleep(extrasleep)
def set_decode(self,decode):
self.configuration.decode = decode
def _postUrl(self, url,
parameters={},
headers={},
extrasleep=None,
usecache=True):
return self.configuration._postUrl(url,
parameters,
headers,
extrasleep,
usecache)
def _fetchUrlRawOpened(self, url,
parameters=None,
extrasleep=None,
usecache=True,
referer=None):
return self.configuration._fetchUrlRawOpened(url,
parameters,
extrasleep,
usecache,
referer=referer)
def _fetchUrlOpened(self, url,
parameters=None,
usecache=True,
extrasleep=None,
referer=None):
return self.configuration._fetchUrlOpened(url,
parameters,
usecache,
extrasleep,
referer=referer)
def _fetchUrl(self, url,
parameters=None,
usecache=True,
extrasleep=None,
referer=None):
return self._fetchUrlOpened(url,
parameters,
usecache,
extrasleep,
referer=referer)[0]
def _fetchUrlRaw(self, url,
parameters=None,
extrasleep=None,
usecache=True,
referer=None):
return self._fetchUrlRawOpened(url,
parameters,
extrasleep,
usecache,
referer=referer)[0]
# .? for AO3's ']' in param names.
safe_url_re = re.compile(r'(?P<attr>(pass(word)?|name|login).?=)[^&]*(?P<amp>&|$)',flags=re.MULTILINE)
def safe_url(url):
# return url with password attr (if present) obscured.
return re.sub(safe_url_re,r'\g<attr>XXXXXXXX\g<amp>',url)
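# Illustrative example (not from the original source): given
#   safe_url('https://example.com/login?name=alice&password=hunter2&ok=1')
# the name and password parameter values are obscured, yielding
#   'https://example.com/login?name=XXXXXXXX&password=XXXXXXXX&ok=1'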
|
the-stack_0_9076 | from collections import Counter, defaultdict
from copy import deepcopy
from random import Random
import pytest
from hypothesis import assume, event
from hypothesis.stateful import (
Bundle,
RuleBasedStateMachine,
consumes,
initialize,
invariant,
rule,
)
from hypothesis.strategies import builds, composite, integers, random_module, randoms
from raiden.constants import GENESIS_BLOCK_NUMBER
from raiden.settings import DEFAULT_WAIT_BEFORE_LOCK_REMOVAL
from raiden.tests.utils import factories
from raiden.transfer import channel, node
from raiden.transfer.events import EventPaymentSentFailed
from raiden.transfer.mediated_transfer.events import SendLockedTransfer, SendSecretReveal
from raiden.transfer.mediated_transfer.state_change import (
ActionInitInitiator,
ReceiveSecretRequest,
TransferDescriptionWithSecretState,
)
from raiden.transfer.state import ChainState, PaymentNetworkState, TokenNetworkState
from raiden.transfer.state_change import (
Block,
ContractReceiveChannelNew,
ContractReceiveChannelSettled,
)
from raiden.utils import random_secret, sha3
from raiden.utils.typing import BlockNumber
@composite
def secret(draw):
return draw(builds(random_secret))
def event_types_match(events, *expected_types):
return Counter([type(event) for event in events]) == Counter(expected_types)
def transferred_amount(state):
return 0 if not state.balance_proof else state.balance_proof.transferred_amount
partners = Bundle('partners')
# shared bundle of ChainStateStateMachine and all mixin classes
class ChainStateStateMachine(RuleBasedStateMachine):
def __init__(self, address=None):
self.address = address or factories.make_address()
self.replay_path = False
self.address_to_channel = dict()
self.address_to_privkey = dict()
self.our_previous_deposit = defaultdict(int)
self.partner_previous_deposit = defaultdict(int)
self.our_previous_transferred = defaultdict(int)
self.partner_previous_transferred = defaultdict(int)
self.our_previous_unclaimed = defaultdict(int)
self.partner_previous_unclaimed = defaultdict(int)
self.expected_expiry = dict()
super().__init__()
def new_channel(self):
"""Create a new partner address with private key and channel. The
        private key and channel are stored in the instance's dictionaries;
        the address is returned and should be added to the partners Bundle.
"""
partner_privkey, partner_address = factories.make_privkey_address()
self.address_to_privkey[partner_address] = partner_privkey
self.address_to_channel[partner_address] = factories.make_channel(
our_balance=1000,
partner_balance=1000,
token_network_identifier=self.token_network_id,
our_address=self.address,
partner_address=partner_address,
)
return partner_address
def new_channel_with_transaction(self):
partner_address = self.new_channel()
channel_new_state_change = ContractReceiveChannelNew(
transaction_hash=factories.make_transaction_hash(),
token_network_identifier=self.token_network_id,
channel_state=self.address_to_channel[partner_address],
block_number=self.block_number,
block_hash=factories.make_block_hash(),
)
node.state_transition(self.chain_state, channel_new_state_change)
return partner_address
@initialize(
target=partners,
block_number=integers(min_value=GENESIS_BLOCK_NUMBER + 1),
random=randoms(),
random_seed=random_module(),
)
def initialize(self, block_number, random, random_seed):
self.random_seed = random_seed
self.block_number = block_number
self.block_hash = factories.make_block_hash()
self.random = random
self.private_key, self.address = factories.make_privkey_address()
self.chain_state = ChainState(
pseudo_random_generator=self.random,
block_number=self.block_number,
block_hash=self.block_hash,
our_address=self.address,
chain_id=factories.UNIT_CHAIN_ID,
)
self.token_network_id = factories.make_address()
self.token_id = factories.make_address()
self.token_network_state = TokenNetworkState(self.token_network_id, self.token_id)
self.payment_network_id = factories.make_payment_network_identifier()
self.payment_network_state = PaymentNetworkState(
self.payment_network_id,
[self.token_network_state],
)
self.chain_state.identifiers_to_paymentnetworks[
self.payment_network_id
] = self.payment_network_state
return self.new_channel_with_transaction()
def event(self, description):
""" Wrapper for hypothesis' event function.
hypothesis.event raises an exception when invoked outside of hypothesis
context, so skip it when we are replaying a failed path.
"""
if not self.replay_path:
event(description)
@invariant()
def monotonicity(self):
""" Check monotonicity properties as given in Raiden specification """
for address, netting_channel in self.address_to_channel.items():
# constraint (1TN)
assert netting_channel.our_total_deposit >= self.our_previous_deposit[address]
assert netting_channel.partner_total_deposit >= self.partner_previous_deposit[address]
self.our_previous_deposit[address] = netting_channel.our_total_deposit
self.partner_previous_deposit[address] = netting_channel.partner_total_deposit
# TODO add constraint (2TN) when withdrawal is implemented
# constraint (3R) and (4R)
our_transferred = transferred_amount(netting_channel.our_state)
partner_transferred = transferred_amount(netting_channel.partner_state)
our_unclaimed = channel.get_amount_unclaimed_onchain(netting_channel.our_state)
partner_unclaimed = channel.get_amount_unclaimed_onchain(
netting_channel.partner_state,
)
assert our_transferred >= self.our_previous_transferred[address]
assert partner_transferred >= self.partner_previous_transferred[address]
assert (
our_unclaimed + our_transferred >=
self.our_previous_transferred[address] + self.our_previous_unclaimed[address]
)
assert (
partner_unclaimed + partner_transferred >=
                self.partner_previous_transferred[address] + self.partner_previous_unclaimed[address]
)
self.our_previous_transferred[address] = our_transferred
self.partner_previous_transferred[address] = partner_transferred
self.our_previous_unclaimed[address] = our_unclaimed
self.partner_previous_unclaimed[address] = partner_unclaimed
@invariant()
def channel_state_invariants(self):
""" Check the invariants for the channel state given in the Raiden specification """
for netting_channel in self.address_to_channel.values():
our_state = netting_channel.our_state
partner_state = netting_channel.partner_state
our_transferred_amount = 0
if our_state.balance_proof:
our_transferred_amount = our_state.balance_proof.transferred_amount
assert our_transferred_amount >= 0
partner_transferred_amount = 0
if partner_state.balance_proof:
partner_transferred_amount = partner_state.balance_proof.transferred_amount
assert partner_transferred_amount >= 0
assert channel.get_distributable(our_state, partner_state) >= 0
assert channel.get_distributable(partner_state, our_state) >= 0
our_deposit = netting_channel.our_total_deposit
partner_deposit = netting_channel.partner_total_deposit
total_deposit = our_deposit + partner_deposit
our_amount_locked = channel.get_amount_locked(our_state)
our_balance = channel.get_balance(our_state, partner_state)
partner_amount_locked = channel.get_amount_locked(partner_state)
partner_balance = channel.get_balance(partner_state, our_state)
# invariant (5.1R), add withdrawn amounts when implemented
assert 0 <= our_amount_locked <= our_balance
assert 0 <= partner_amount_locked <= partner_balance
assert our_amount_locked <= total_deposit
assert partner_amount_locked <= total_deposit
our_transferred = partner_transferred_amount - our_transferred_amount
netted_transferred = our_transferred + partner_amount_locked - our_amount_locked
# invariant (6R), add withdrawn amounts when implemented
assert 0 <= our_deposit + our_transferred - our_amount_locked <= total_deposit
assert 0 <= partner_deposit - our_transferred - partner_amount_locked <= total_deposit
# invariant (7R), add withdrawn amounts when implemented
assert - our_deposit <= netted_transferred <= partner_deposit
class InitiatorMixin:
def __init__(self):
super().__init__()
self.used_secrets = set()
self.processed_secret_requests = set()
self.initiated = set()
self.failing_path_2 = False
def _action_init_initiator(self, transfer: TransferDescriptionWithSecretState):
channel = self.address_to_channel[transfer.target]
if transfer.secrethash not in self.expected_expiry:
self.expected_expiry[transfer.secrethash] = self.block_number + 10
return ActionInitInitiator(
transfer,
[factories.route_from_channel(channel)],
)
def _receive_secret_request(self, transfer: TransferDescriptionWithSecretState):
secrethash = sha3(transfer.secret)
return ReceiveSecretRequest(
payment_identifier=transfer.payment_identifier,
amount=transfer.amount,
expiration=self.expected_expiry[transfer.secrethash],
secrethash=secrethash,
sender=transfer.target,
)
def _new_transfer_description(self, target, payment_id, amount, secret):
self.used_secrets.add(secret)
return TransferDescriptionWithSecretState(
payment_network_identifier=self.payment_network_id,
payment_identifier=payment_id,
amount=amount,
token_network_identifier=self.token_network_id,
initiator=self.address,
target=target,
secret=secret,
)
def _invalid_authentic_secret_request(self, previous, action):
result = node.state_transition(self.chain_state, action)
if action.secrethash in self.processed_secret_requests or self._is_removed(previous):
assert not result.events
else:
self.processed_secret_requests.add(action.secrethash)
def _unauthentic_secret_request(self, action):
result = node.state_transition(self.chain_state, action)
assert not result.events
def _available_amount(self, partner_address):
netting_channel = self.address_to_channel[partner_address]
return channel.get_distributable(netting_channel.our_state, netting_channel.partner_state)
def _assume_channel_opened(self, action):
if not self.failing_path_2:
needed_channel = self.address_to_channel[action.transfer.target]
assume(channel.get_status(needed_channel) == channel.CHANNEL_STATE_OPENED)
def _is_removed(self, action):
expiry = self.expected_expiry[action.transfer.secrethash]
return self.block_number >= expiry + DEFAULT_WAIT_BEFORE_LOCK_REMOVAL
init_initiators = Bundle('init_initiators')
@rule(
target=init_initiators,
partner=partners,
payment_id=integers(min_value=1),
amount=integers(min_value=1, max_value=100),
secret=secret(), # pylint: disable=no-value-for-parameter
)
def valid_init_initiator(self, partner, payment_id, amount, secret):
assume(amount <= self._available_amount(partner))
assume(secret not in self.used_secrets)
transfer = self._new_transfer_description(partner, payment_id, amount, secret)
action = self._action_init_initiator(transfer)
result = node.state_transition(self.chain_state, action)
assert event_types_match(result.events, SendLockedTransfer)
self.initiated.add(transfer.secret)
self.expected_expiry[transfer.secrethash] = self.block_number + 10
return action
@rule(
partner=partners,
payment_id=integers(min_value=1),
excess_amount=integers(min_value=1),
secret=secret(), # pylint: disable=no-value-for-parameter
)
def exceeded_capacity_init_initiator(self, partner, payment_id, excess_amount, secret):
amount = self._available_amount(partner) + excess_amount
transfer = self._new_transfer_description(partner, payment_id, amount, secret)
action = self._action_init_initiator(transfer)
result = node.state_transition(self.chain_state, action)
assert event_types_match(result.events, EventPaymentSentFailed)
self.event('ActionInitInitiator failed: Amount exceeded')
@rule(
previous_action=init_initiators,
partner=partners,
payment_id=integers(min_value=1),
amount=integers(min_value=1),
)
def used_secret_init_initiator(self, previous_action, partner, payment_id, amount):
assume(not self._is_removed(previous_action))
secret = previous_action.transfer.secret
transfer = self._new_transfer_description(partner, payment_id, amount, secret)
action = self._action_init_initiator(transfer)
result = node.state_transition(self.chain_state, action)
assert not result.events
self.event('ActionInitInitiator failed: Secret already in use.')
@rule(previous_action=init_initiators)
def replay_init_initator(self, previous_action):
assume(not self._is_removed(previous_action))
result = node.state_transition(self.chain_state, previous_action)
assert not result.events
@rule(previous_action=init_initiators)
def valid_secret_request(self, previous_action):
action = self._receive_secret_request(previous_action.transfer)
self._assume_channel_opened(previous_action)
result = node.state_transition(self.chain_state, action)
if action.secrethash in self.processed_secret_requests:
assert not result.events
self.event('Valid SecretRequest dropped due to previous invalid one.')
elif self._is_removed(previous_action):
assert not result.events
            self.event('Otherwise valid SecretRequest dropped due to expired lock.')
else:
assert event_types_match(result.events, SendSecretReveal)
self.event('Valid SecretRequest accepted.')
self.processed_secret_requests.add(action.secrethash)
@rule(previous_action=init_initiators, amount=integers())
def wrong_amount_secret_request(self, previous_action, amount):
assume(amount != previous_action.transfer.amount)
self._assume_channel_opened(previous_action)
transfer = deepcopy(previous_action.transfer)
transfer.amount = amount
action = self._receive_secret_request(transfer)
self._invalid_authentic_secret_request(previous_action, action)
@rule(
previous_action=init_initiators,
secret=secret(), # pylint: disable=no-value-for-parameter
)
def secret_request_with_wrong_secrethash(self, previous_action, secret):
assume(sha3(secret) != sha3(previous_action.transfer.secret))
self._assume_channel_opened(previous_action)
transfer = deepcopy(previous_action.transfer)
transfer.secret = secret
action = self._receive_secret_request(transfer)
return self._unauthentic_secret_request(action)
@rule(previous_action=init_initiators, payment_identifier=integers())
def secret_request_with_wrong_payment_id(self, previous_action, payment_identifier):
assume(payment_identifier != previous_action.transfer.payment_identifier)
self._assume_channel_opened(previous_action)
transfer = deepcopy(previous_action.transfer)
transfer.payment_identifier = payment_identifier
action = self._receive_secret_request(transfer)
self._unauthentic_secret_request(action)
class OnChainMixin:
block_number: BlockNumber
@rule(number=integers(min_value=1, max_value=50))
def new_blocks(self, number):
events = list()
for _ in range(number):
block_state_change = Block(
block_number=self.block_number + 1,
gas_limit=1,
block_hash=factories.make_keccak_hash(),
)
result = node.state_transition(self.chain_state, block_state_change)
events.extend(result.events)
self.block_number += 1
@rule(target=partners)
def open_channel(self):
return self.new_channel_with_transaction()
@rule(partner=consumes(partners))
def settle_channel(self, partner):
channel = self.address_to_channel[partner]
channel_settled_state_change = ContractReceiveChannelSettled(
transaction_hash=factories.make_transaction_hash(),
token_network_identifier=channel.token_network_identifier,
channel_identifier=channel.identifier,
block_number=self.block_number + 1,
block_hash=factories.make_block_hash(),
)
node.state_transition(self.chain_state, channel_settled_state_change)
class InitiatorStateMachine(InitiatorMixin, ChainStateStateMachine):
pass
class OnChainStateMachine(OnChainMixin, ChainStateStateMachine):
pass
class MultiChannelInitiatorStateMachine(InitiatorMixin, OnChainMixin, ChainStateStateMachine):
pass
TestInitiator = InitiatorStateMachine.TestCase
TestOnChain = OnChainStateMachine.TestCase
TestMultiChannelInitiator = MultiChannelInitiatorStateMachine.TestCase
def test_regression_malicious_secret_request_handled_properly():
state = InitiatorStateMachine()
state.replay_path = True
v1 = state.initialize(block_number=1, random=Random(), random_seed=None)
v2 = state.valid_init_initiator(
partner=v1,
amount=1,
payment_id=1,
secret=b'\x00' * 32,
)
state.wrong_amount_secret_request(amount=0, previous_action=v2)
state.replay_init_initator(previous_action=v2)
state.teardown()
@pytest.mark.skip
def test_try_secret_request_after_settle_channel():
state = MultiChannelInitiatorStateMachine()
state.replay_path = True
state.failing_path_2 = True
v1 = state.initialize(block_number=1, random=Random(), random_seed=None)
v2 = state.valid_init_initiator(amount=1, partner=v1, payment_id=1, secret=b'\x91' * 32)
state.settle_channel(partner=v1)
state.valid_secret_request(previous_action=v2)
state.teardown()
|
the-stack_0_9078 | from lost.db import state
# def add_user(data_man, user):
# '''add user to user meta
# Args:
# db_man (obj): Project database manager.
# user (obj): User object
# '''
# user = model.User(idx=user.id, user_name=user.username,
# first_name=user.first_name, last_name=user.last_name,
# email=user.email)
# data_man.save_obj(user)
# def add_superuser(data_man, user):
# '''add superuser to user meta
# Args:
# db_man (obj): Project database manager.
# user (obj): User object
# '''
# user = model.User(idx=user.id)
# data_man.save_obj(user)
# def update_user(data_man, user):
# '''update existing user in user meta
# Args:
# db_man (obj): Project database manager.
# user (obj): User object
# '''
# usermeta = data_man.get_user_meta(user_id=user.id)
# usermeta.first_name = user.first_name
# usermeta.last_name = user.last_name
# usermeta.user_name = user.username
# usermeta.email = user.email
# data_man.save_obj(usermeta)
def release_user_annos(dbm, user_id):
'''Release locked annos for a specific user.
Args:
dbm (object): DBMan object.
user_id (int): ID of the user to release locked annos.
'''
print('Was Here! User id is: {}'.format(user_id))
for anno_task in dbm.get_anno_task(state=state.AnnoTask.IN_PROGRESS):
locked_annos = dbm.get_locked_img_annos(anno_task.idx)
print('locked annos')
print(locked_annos)
for anno in locked_annos:
print('UserID: {}, AnnoID: {}'.format(anno.user_id, anno.idx))
locked_user_annos = [anno for anno in locked_annos if anno.user_id == user_id]
print(locked_user_annos)
for anno in locked_user_annos:
anno.state = state.Anno.UNLOCKED
anno.timestamp_lock = None
anno.user_id = None
dbm.add(anno)
locked_annos = dbm.get_locked_two_d_annos(anno_task.idx)
print('locked 2d annos')
print(locked_annos)
for anno in locked_annos:
print('UserID: {} AnnoID: {}'.format(anno.user_id, anno.idx))
locked_user_annos = [anno for anno in locked_annos if anno.user_id == user_id]
print(locked_user_annos)
for anno in locked_user_annos:
anno.state = state.Anno.UNLOCKED
anno.timestamp_lock = None
anno.user_id = None
dbm.add(anno)
dbm.commit() |
the-stack_0_9079 | import os
import numpy as np
import tifffile as tiff
from PIL import Image
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from torchvision.transforms import Compose, Resize, ToTensor
from pytorch_unet.processing.augments import augmentations
class DataTransformer(Dataset):
"""Dataset loader to pass to the pytorch DataLoader.
Note:
        This is a custom Dataset class. You don't have to write a class like this, but it helps
in applying transformations to the dataset and in supplying the dataset to the data loader, which is where
all these transformations are actually applied.
Arguments:
train_filename (string) : is the path to the training data.
labels_filename (string) : is the path to the labels for the training data.
        image_transform (callable) : is the torchvision transform applied to the images and labels.
        image_augmentation (callable) : is the augmentation pipeline applied to the image/mask pairs.
Returns:
the dataset.
"""
def __init__(self, train_filename, labels_filename, image_transform=None, image_augmentation=None):
self.train_filename = train_filename
self.labels_filename = labels_filename
self.image_transform = image_transform
self.image_augmentation = image_augmentation
self.len_train = tiff.imread(self.train_filename).shape[0]
def __len__(self):
return self.len_train
def _read_data(self, index):
return Image.fromarray((tiff.imread(self.train_filename))[index])
def _read_labels(self, index):
return Image.fromarray((tiff.imread(self.labels_filename))[index])
def __getitem__(self, index):
if self.labels_filename is not None:
images = self._read_data(index)
labels = self._read_labels(index)
if self.image_augmentation is not None:
x = np.array(images)
y = np.array(labels)
data = {'input': x, 'mask': y}
aug_data = self.image_augmentation(data)
trans_images = aug_data['input']
trans_labels = aug_data['mask']
if self.image_augmentation is None:
trans_images = self.image_transform(images)
trans_labels = self.image_transform(labels)
return [trans_images, trans_labels]
if self.labels_filename is None:
images = self._read_data(index)
trans_images = self.image_transform(images)
return trans_images
def load_data(args):
"""Load data from here and return.
Note:
        Compose composes several transforms together, and if augmentation is chosen you compose an additional
bunch of transforms to be applied to the train data and you send this to the DataTransformer class
which returns the data set that is used in the data loader. The data loader then takes in this dataset with a
        batch size and sampler. A sampler defines the strategy to draw samples from the dataset. Here for training
data random sampling is used and for validation sequential is used. You can also write a custom sampler class
if you want.
:param args:
main_dir (string) : path to the main directory from the args.
image_size (int) : size of the image to be resized.
transform_prob (float) : probability to apply transformations on the data.
batch_size (int) : batch size to be used in the data loader.
:return:
the train loader and validation loader to be used for training and validating.
"""
# get data set file path
data_path = os.path.join(args.main_dir, 'data', 'train-volume.tif')
labels_path = os.path.join(args.main_dir, 'data', 'train-labels.tif')
# compose the transforms for the train set
train_data = Compose([Resize(args.image_size), ToTensor()])
# choose between augmentations for train data
if args.augment:
train_augment = augmentations(args)
train_transform = DataTransformer(data_path, labels_path, image_transform=train_data,
image_augmentation=train_augment)
else:
# transforming the train data and returning a 4D tensor
train_transform = DataTransformer(data_path, labels_path, image_transform=train_data, image_augmentation=None)
# transform for validation data
val_data = Compose([Resize(args.image_size), ToTensor()])
val_transform = DataTransformer(data_path, labels_path, image_transform=val_data, image_augmentation=None)
# split the train and validation indices
train_indices, validation_indices = train_test_split(range(len(train_transform)), test_size=0.15)
# call the sampler for the train and validation data
train_samples = RandomSampler(train_indices)
validation_samples = SequentialSampler(validation_indices)
# load train and validation data
train_loader = DataLoader(train_transform, batch_size=args.batch_size, sampler=train_samples)
val_loader = DataLoader(val_transform, batch_size=args.batch_size, sampler=validation_samples)
return train_loader, val_loader
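# Hypothetical usage sketch (not part of the original module): it assumes the
# ISBI 'train-volume.tif' / 'train-labels.tif' files exist under
# <main_dir>/data and that the attribute names below match the CLI arguments
# referenced elsewhere in this file.
if __name__ == "__main__":
    from argparse import Namespace
    demo_args = Namespace(
        main_dir=os.getcwd(),   # directory containing the 'data' folder
        image_size=64,          # images and labels are resized to 64x64
        batch_size=4,
        transform_prob=0.5,     # only used when augment=True
        augment=False,          # skip the augmentation pipeline in this sketch
    )
    train_loader, val_loader = load_data(demo_args)
    images, labels = next(iter(train_loader))
    print(images.shape, labels.shape)  # e.g. torch.Size([4, 1, 64, 64]) twice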
|
the-stack_0_9080 | """
Module containing raster blocks for spatial operations.
"""
import math
from scipy import ndimage
import numpy as np
from osgeo import ogr
from dask_geomodeling.utils import (
EPSG3857,
EPSG4326,
POLYGON,
get_sr,
Extent,
get_dtype_min,
get_footprint,
get_index,
shapely_transform,
)
from dask_geomodeling.raster.reduction import reduce_rasters, check_statistic
from .base import BaseSingle, RasterBlock
from shapely.geometry import Point
__all__ = ["Dilate", "Smooth", "MovingMax", "HillShade", "Place"]
def expand_request_pixels(request, radius=1):
""" Expand request by `radius` pixels. Returns None for non-vals requests
or point requests. """
if request["mode"] != "vals": # do nothing with time and meta requests
return None
width, height = request["width"], request["height"]
x1, y1, x2, y2 = request["bbox"]
pwidth, pheight = x2 - x1, y2 - y1
if pwidth == 0 or pheight == 0: # cannot dilate a point request
return None
amount_x = pwidth / width * radius
amount_y = pheight / height * radius
new_request = request.copy()
new_request["bbox"] = (x1 - amount_x, y1 - amount_x, x2 + amount_y, y2 + amount_y)
new_request["width"] += 2 * radius
new_request["height"] += 2 * radius
return new_request
def expand_request_meters(request, radius_m=1):
"""
Expand request by `radius_m` meters, rounded so that an integer number of
pixels is added to all sides.
Returns a tuple of:
- new request with adapted bbox, width and height
- the radius transformed to pixels as a (y, x) tuple of floats
"""
sr = get_sr(request["projection"])
bbox = request["bbox"]
# throughout, variables in the projected unit ( = meters, mostly) are
# suffixed by _m, in pixels by _px
if sr.IsGeographic():
# expand geographic bbox in EPSG3857
extent_geom = Extent(bbox, sr)
bbox = extent_geom.transformed(EPSG3857).bbox
else:
# most Projected projections are in meters, but to be sure:
radius_m /= sr.GetLinearUnits()
# compute the initial zoom factors: how much to expand the bbox to obtain
# margins of exactly 'radius' (in meters)
x1, y1, x2, y2 = bbox
shape_m = y2 - y1, x2 - x1
if shape_m[0] > 0 and shape_m[1] > 0:
# Resolution in pixels per meter:
resolution = request["height"] / shape_m[0], request["width"] / shape_m[1]
# How many pixels to add:
radius_px = [radius_m * res for res in resolution]
# How many pixels to add, rounded to integers:
margins_px = [int(round(r)) for r in radius_px]
# How many meters to add (based on rounded pixels):
margins_m = [m / res for m, res in zip(margins_px, resolution)]
else:
# There is no resolution. Add MARGIN_THRESHOLD pixels to the request.
radius_px = margins_px = [Smooth.MARGIN_THRESHOLD] * 2
# Expand the request with radius_m exactly.
margins_m = [radius_m] * 2
# assemble the request
new_request = request.copy()
new_request["bbox"] = (
x1 - margins_m[1],
y1 - margins_m[0],
x2 + margins_m[1],
y2 + margins_m[0],
)
if sr.IsGeographic():
# transform back to original projection
extent_proj = Extent(new_request["bbox"], EPSG3857)
new_request["bbox"] = extent_proj.transformed(sr).bbox
new_request["height"] += 2 * margins_px[0]
new_request["width"] += 2 * margins_px[1]
return new_request, radius_px
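# Worked example (derived from the code above): a request with
# bbox=(0, 0, 200, 100), width=400 and height=200 in a metric projection has a
# resolution of 2 px/m, so radius_m=10 becomes radius_px=(20.0, 20.0), i.e.
# 20-pixel margins on every side; the returned request then has
# bbox=(-10, -10, 210, 110), width=440 and height=240.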
class Dilate(BaseSingle):
"""
Perform spatial dilation on specific cell values.
Cells with values in the supplied list are spatially dilated by one cell
in each direction, including diagonals.
Dilation is performed in the order of the values parameter.
Args:
store (RasterBlock): Raster to perform dilation on.
values (list): Only cells with these values are dilated.
Returns:
RasterBlock where cells in values list are dilated.
See also:
https://en.wikipedia.org/wiki/Dilation_%28morphology%29
"""
def __init__(self, store, values):
values = np.asarray(values, dtype=store.dtype)
super().__init__(store, values.tolist())
@property
def values(self):
return self.args[1]
def get_sources_and_requests(self, **request):
new_request = expand_request_pixels(request, radius=1)
if new_request is None: # not an expandable request: do nothing
return [(self.store, request)]
else:
return [(self.store, new_request), (self.values, None)]
@staticmethod
def process(data, values=None):
if data is None or values is None or "values" not in data:
return data
original = data["values"]
dilated = original.copy()
for value in np.asarray(values, dtype=original.dtype):
dilated[ndimage.binary_dilation(original == value)] = value
dilated = dilated[:, 1:-1, 1:-1]
return {"values": dilated, "no_data_value": data["no_data_value"]}
class MovingMax(BaseSingle):
"""
Apply a spatial maximum filter to the data using a circular footprint.
This can be used for visualization of sparse data.
Args:
store (RasterBlock): Raster to which the filter is applied
size (integer): Diameter of the circular footprint. This should always be
an odd number larger than 1.
Returns:
RasterBlock with maximum values inside the footprint of each input cell.
"""
def __init__(self, store, size):
# round size to nearest odd integer
size = int(2 * round((size - 1) / 2) + 1)
if size < 3:
raise ValueError("The size should be odd and larger than 1")
super(MovingMax, self).__init__(store, size)
@property
def size(self):
return self.args[1]
def get_sources_and_requests(self, **request):
size = self.size
new_request = expand_request_pixels(request, radius=int(size // 2))
if new_request is None: # not an expandable request: do nothing
return [(self.store, request)]
else:
return [(self.store, new_request), (size, None)]
@staticmethod
def process(data, size=None):
if data is None or size is None or "values" not in data:
return data
radius = int(size // 2)
footprint = get_footprint(size)[np.newaxis]
# put absolute minimum on no data pixels
array = data["values"].copy()
minimum = get_dtype_min(array.dtype)
no_data_mask = array == data["no_data_value"]
array[no_data_mask] = minimum
# apply maximum filter
filtered = ndimage.maximum_filter(array, footprint=footprint)
# replace absolute minimum with original fillvalue
filtered[(filtered == minimum) & no_data_mask] = data["no_data_value"]
# cut out the result
filtered = filtered[:, radius:-radius, radius:-radius]
return {"values": filtered, "no_data_value": data["no_data_value"]}
class Smooth(BaseSingle):
"""
Smooth the values from a raster spatially using Gaussian smoothing.
Args:
store (RasterBlock): Raster to be smoothed
size (number): The extent of the smoothing in meters. The 'sigma' value
          for the Gaussian kernel equals ``size / 3``.
fill (number): 'no data' are replaced by this value during smoothing,
defaults to 0.
Returns:
RasterBlock with spatially smoothed values.
See Also:
https://en.wikipedia.org/wiki/Gaussian_blur
"""
MARGIN_THRESHOLD = 6
def __init__(self, store, size, fill=0):
for x in (size, fill):
if not isinstance(x, (int, float)):
raise TypeError("'{}' object is not allowed".format(type(x)))
super(Smooth, self).__init__(store, size, fill)
@property
def size(self):
return self.args[1]
@property
def fill(self):
return self.args[2]
def get_sources_and_requests(self, **request):
if request["mode"] != "vals": # do nothing with time and meta requests
return [(self.store, request)]
new_request, size = expand_request_meters(request, self.size)
# check how many pixels will be added by the request
if any([s > self.MARGIN_THRESHOLD for s in size]):
smooth_mode = "zoom"
# rescale the size
zoom = [new_request[x] / request[x] for x in ("height", "width")]
size = [s / z for s, z in zip(size, zoom)]
# request the original (not expanded) shape
new_request["height"] = request["height"]
new_request["width"] = request["width"]
else:
smooth_mode = "exact"
process_kwargs = dict(smooth_mode=smooth_mode, fill=self.fill, size=size)
return [(self.store, new_request), (process_kwargs, None)]
@staticmethod
def process(data, process_kwargs=None):
if data is None or process_kwargs is None:
return data
smooth_mode = process_kwargs["smooth_mode"]
size_px = process_kwargs["size"]
fill = process_kwargs["fill"]
# fill in nodata values
values = data["values"].copy()
no_data_value = data["no_data_value"]
values[values == no_data_value] = fill
# compute the sigma
sigma = 0, size_px[0] / 3, size_px[1] / 3
ndimage.gaussian_filter(
values, sigma, output=values, mode="constant", cval=fill
)
# remove the margins
if smooth_mode == "exact":
my, mx = [int(round(s)) for s in size_px]
values = values[:, my : values.shape[1] - my, mx : values.shape[2] - mx]
else:
_, ny, nx = values.shape
zy, zx = [1 - 2 * size_px[0] / ny, 1 - 2 * size_px[1] / nx]
values = ndimage.affine_transform(
values,
order=0,
matrix=np.diag([1, zy, zx]),
offset=[0, size_px[0], size_px[1]],
)
return {"values": values, "no_data_value": no_data_value}
class HillShade(BaseSingle):
"""
Calculate a hillshade from the raster values.
Args:
store (RasterBlock): Raster to which the hillshade algorithm is applied.
altitude (number): Light source altitude in degrees, defaults to 45.
azimuth (number): Light source azimuth in degrees, defaults to 315.
fill (number): Fill value to be used for 'no data' values.
Returns:
Hillshaded raster
See also:
https://pro.arcgis.com/en/pro-app/tool-reference/3d-analyst/how-hillshade-works.htm
"""
def __init__(self, store, altitude=45, azimuth=315, fill=0):
for x in (altitude, azimuth, fill):
if not isinstance(x, (int, float)):
raise TypeError("'{}' object is not allowed".format(type(x)))
super(HillShade, self).__init__(store, float(altitude), float(azimuth), fill)
@property
def altitude(self):
return self.args[1]
@property
def azimuth(self):
return self.args[2]
@property
def fill(self):
return self.args[3]
@property
def dtype(self):
return np.dtype("u1")
@property
def fillvalue(self):
return 256 # on purpose, it does not exist in bytes
@staticmethod
def process(data, process_kwargs=None):
"""
Adapted from:
https://github.com/OSGeo/gdal/blob/2.0/gdal/apps/gdaldem.cpp#L481
Edges are not implemented, result clips one pixel from array.
"""
if process_kwargs is None:
return data
array = data["values"].copy()
array[array == data["no_data_value"]] = process_kwargs["fill"]
xres, yres = process_kwargs["resolution"]
alt = math.radians(process_kwargs["altitude"])
az = math.radians(process_kwargs["azimuth"])
zsf = 1 / 8 # vertical scale factor
square_zsf = zsf * zsf
# gradient
s0 = slice(None, None), slice(None, -2), slice(None, -2)
s1 = slice(None, None), slice(None, -2), slice(1, -1)
s2 = slice(None, None), slice(None, -2), slice(2, None)
s3 = slice(None, None), slice(1, -1), slice(None, -2)
s4 = slice(None, None), slice(1, -1), slice(1, -1)
s5 = slice(None, None), slice(1, -1), slice(2, None)
s6 = slice(None, None), slice(2, None), slice(None, -2)
s7 = slice(None, None), slice(2, None), slice(1, -1)
s8 = slice(None, None), slice(2, None), slice(2, None)
# angle calculation
y = np.empty(array.shape, dtype="f4")
y[s4] = (
array[s0]
+ 2 * array[s1]
+ array[s2]
- array[s6]
- 2 * array[s7]
- array[s8]
) / yres
x = np.empty(array.shape, dtype="f4")
x[s4] = (
array[s0]
+ 2 * array[s3]
+ array[s6]
- array[s2]
- 2 * array[s5]
- array[s8]
) / xres
with np.errstate(all="ignore"):
xx_plus_yy = x * x + y * y
aspect = np.arctan2(y, x)
# shading
cang = (
math.sin(alt)
- math.cos(alt) * zsf * np.sqrt(xx_plus_yy) * np.sin(aspect - az)
) / np.sqrt(1 + square_zsf * xx_plus_yy)
cang = cang[..., 1:-1, 1:-1]
result = np.where(cang <= 0, 0, 255 * cang).astype("u1")
return {"values": result, "no_data_value": 256}
def get_sources_and_requests(self, **request):
new_request = expand_request_pixels(request, radius=1)
if new_request is None: # not an expandable request: do nothing
return [(self.store, request)]
# determine resolution
bbox = request["bbox"]
resolution = (
(bbox[2] - bbox[0]) / request["width"],
(bbox[3] - bbox[1]) / request["height"],
)
process_kwargs = dict(
resolution=resolution,
altitude=self.altitude,
azimuth=self.azimuth,
fill=self.fill,
)
return [(self.store, new_request), (process_kwargs, None)]
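# Hypothetical smoke test for the shading math (not from the original source):
# a flat surface has zero gradient, so with the default 45 degree altitude
# every interior cell shades to int(255 * sin(45 deg)) == 180:
#   >>> flat = {"values": np.zeros((1, 5, 5), "f4"), "no_data_value": -9999}
#   >>> kwargs = {"resolution": (1.0, 1.0), "altitude": 45.0,
#   ...           "azimuth": 315.0, "fill": 0}
#   >>> int(HillShade.process(flat, kwargs)["values"][0, 1, 1])
#   180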
class Place(BaseSingle):
"""Place an input raster at given coordinates
Note that if the store's projection is different from the requested one,
the data will be reprojected before placing it at a different position.
Args:
store (RasterBlock): Raster that will be placed.
place_projection (str): The projection in which this operation is done.
This also specifies the projection of the ``anchor`` and
``coordinates`` args.
anchor (list of 2 numbers): The anchor into the source raster that will
be placed at given coordinates.
coordinates (list of lists of 2 numbers): The target coordinates. The
center of the bbox will be placed on each of these coordinates.
statistic (str): What method to use to merge overlapping rasters. One of:
{"last", "first", "count", "sum", "mean", "min",
"max", "argmin", "argmax", "product", "std", "var", "p<number>"}
Returns:
RasterBlock with the source raster placed
"""
def __init__(self, store, place_projection, anchor, coordinates, statistic="last"):
if not isinstance(store, RasterBlock):
raise TypeError("'{}' object is not allowed".format(type(store)))
try:
get_sr(place_projection)
except RuntimeError:
raise ValueError(
"'{}' is not a valid projection string".format(place_projection)
)
anchor = list(anchor)
if len(anchor) != 2:
raise ValueError("Expected 2 numbers in the 'anchor' parameter")
for x in anchor:
if not isinstance(x, (int, float)):
raise TypeError("'{}' object is not allowed".format(type(x)))
if coordinates is None or len(coordinates) == 0:
coordinates = []
else:
coordinates = np.asarray(coordinates, dtype=float)
if coordinates.ndim != 2 or coordinates.shape[1] != 2:
raise ValueError(
"Expected a list of lists of 2 numbers in the "
"'coordinates' parameter"
)
coordinates = coordinates.tolist()
check_statistic(statistic)
super().__init__(store, place_projection, anchor, coordinates, statistic)
@property
def place_projection(self):
return self.args[1]
@property
def anchor(self):
return self.args[2]
@property
def coordinates(self):
return self.args[3]
@property
def statistic(self):
return self.args[4]
@property
def projection(self):
"""The native projection of this block.
Only returns something if the place projection equals the store
projection"""
store_projection = self.store.projection
if store_projection is None:
return
if get_sr(self.place_projection).IsSame(get_sr(store_projection)):
return store_projection
@property
def geo_transform(self):
"""The native geo_transform of this block
Returns None if the store projection and place projections differ."""
if self.projection is not None:
return self.store.geo_transform
@property
def extent(self):
geometry = self.geometry
if geometry is None:
return
if not geometry.GetSpatialReference().IsSame(EPSG4326):
geometry = geometry.Clone()
geometry.TransformTo(EPSG4326)
x1, x2, y1, y2 = geometry.GetEnvelope()
return x1, y1, x2, y2
@property
def geometry(self):
"""Combined geometry in this block's native projection. """
store_geometry = self.store.geometry
if store_geometry is None:
return
sr = get_sr(self.place_projection)
if not store_geometry.GetSpatialReference().IsSame(sr):
store_geometry = store_geometry.Clone()
store_geometry.TransformTo(sr)
_x1, _x2, _y1, _y2 = store_geometry.GetEnvelope()
p, q = self.anchor
P, Q = zip(*self.coordinates)
x1, x2 = _x1 + min(P) - p, _x2 + max(P) - p
y1, y2 = _y1 + min(Q) - q, _y2 + max(Q) - q
return ogr.CreateGeometryFromWkt(POLYGON.format(x1, y1, x2, y2), sr)
def get_sources_and_requests(self, **request):
if request["mode"] != "vals":
return ({"mode": request["mode"]}, None), (self.store, request)
# transform the anchor and coordinates into the requested projection
anchor = shapely_transform(
Point(self.anchor), self.place_projection, request["projection"]
).coords[0]
coordinates = [
shapely_transform(
Point(coord), self.place_projection, request["projection"]
).coords[0]
for coord in self.coordinates
]
# transform the source's extent
extent_geometry = self.store.geometry
if extent_geometry is None:
# no geometry means: no data
return (({"mode": "null"}, None),)
sr = get_sr(request["projection"])
if not extent_geometry.GetSpatialReference().IsSame(sr):
extent_geometry = extent_geometry.Clone()
extent_geometry.TransformTo(sr)
xmin, xmax, ymin, ymax = extent_geometry.GetEnvelope()
# compute the requested cellsize
x1, y1, x2, y2 = request["bbox"]
size_x = (x2 - x1) / request["width"]
size_y = (y2 - y1) / request["height"]
# point requests: never request the full source extent
if size_x > 0 and size_y > 0:
# check what the full source extent would require
full_height = math.ceil((ymax - ymin) / size_y)
full_width = math.ceil((xmax - xmin) / size_x)
if full_height * full_width <= request["width"] * request["height"]:
_request = request.copy()
_request["width"] = full_width
_request["height"] = full_height
_request["bbox"] = (
xmin,
ymin,
xmin + full_width * size_x,
ymin + full_height * size_y,
)
process_kwargs = {
"mode": "warp",
"anchor": anchor,
"coordinates": coordinates,
"src_bbox": _request["bbox"],
"dst_bbox": request["bbox"],
"cellsize": (size_x, size_y),
"statistic": self.statistic,
}
return [(process_kwargs, None), (self.store, _request)]
# generate a new (backwards shifted) bbox for each coordinate
sources_and_requests = []
filtered_coordinates = []
for _x, _y in coordinates:
bbox = [
x1 + anchor[0] - _x,
y1 + anchor[1] - _y,
x2 + anchor[0] - _x,
y2 + anchor[1] - _y,
]
# check the overlap with the source's extent
# Note that raster cells are defined [xmin, xmax) and (ymin, ymax]
# so points precisely at xmax or ymin certainly do not have data.
if bbox[0] >= xmax or bbox[1] > ymax or bbox[2] < xmin or bbox[3] <= ymin:
continue
filtered_coordinates.append((_x, _y))
_request = request.copy()
_request["bbox"] = bbox
sources_and_requests.append((self.store, _request))
if len(sources_and_requests) == 0:
# No coordinates inside: we still need to return an array
# of the correct shape. Send a time request to get the depth.
_request = request.copy()
_request["mode"] = "time"
process_kwargs = {
"mode": "empty",
"dtype": self.dtype,
"fillvalue": self.fillvalue,
"width": request["width"],
"height": request["height"],
"statistic": self.statistic,
}
return [(process_kwargs, None), (self.store, _request)]
process_kwargs = {"mode": "group", "statistic": self.statistic}
return [(process_kwargs, None)] + sources_and_requests
@staticmethod
def process(process_kwargs, *multi):
if process_kwargs["mode"] in {"meta", "time"}:
return multi[0]
if process_kwargs["mode"] == "null":
return
if process_kwargs["mode"] == "empty":
data = multi[0]
if data is None:
return
out_shape = (
len(data["time"]),
process_kwargs["height"],
process_kwargs["width"],
)
out_no_data_value = process_kwargs["fillvalue"]
out_dtype = process_kwargs["dtype"]
stack = []
elif process_kwargs["mode"] == "group":
# We have a bunch of arrays that are already shifted. Stack them.
stack = [data for data in multi if data is not None]
if len(stack) == 0:
return # instead of returning nodata (because inputs are None)
elif process_kwargs["mode"] == "warp":
# There is a single 'source' raster that we are going to shift
# multiple times into the result. The cellsize is already correct.
data = multi[0]
if data is None:
return
out_no_data_value = data["no_data_value"]
source = data["values"]
out_dtype = source.dtype
# convert the anchor to pixels (indices inside 'source')
anchor = process_kwargs["anchor"]
src_bbox = process_kwargs["src_bbox"]
size_x, size_y = process_kwargs["cellsize"]
anchor_px = (
(anchor[0] - src_bbox[0]) / size_x,
(anchor[1] - src_bbox[1]) / size_y,
)
# compute the output shape
x1, y1, x2, y2 = process_kwargs["dst_bbox"]
coordinates = process_kwargs["coordinates"]
dst_h = round((y2 - y1) / size_y)
dst_w = round((x2 - x1) / size_x)
src_d, src_h, src_w = source.shape
out_shape = (src_d, dst_h, dst_w)
# determine what indices in 'source' have data
k, j, i = np.where(get_index(source, out_no_data_value))
# place the data on each coordinate
stack = []
for x, y in coordinates:
if i.size == 0: # shortcut: no data at all to place
break
# transform coordinate into pixels (indices in 'values')
coord_px = (x - x1) / size_x, (y - y1) / size_y
di = round(coord_px[0] - anchor_px[0])
dj = round(coord_px[1] - anchor_px[1])
# because of the y-axis inversion: dj is measured from the
# other side of the array. if you draw it, you'll arrive at:
dj = dst_h - src_h - dj
if di <= -src_w or di >= dst_w or dj <= -src_h or dj >= dst_h:
# skip as it would shift completely outside
continue
elif 0 <= di <= (dst_w - src_w) and 0 <= dj <= (dst_h - src_h):
# complete place
values = np.full(out_shape, out_no_data_value, out_dtype)
values[k, j + dj, i + di] = source[k, j, i]
stack.append({"values": values, "no_data_value": out_no_data_value})
else:
# partial place
i_s = i + di
j_s = j + dj
m = (i_s >= 0) & (j_s >= 0) & (i_s < dst_w) & (j_s < dst_h)
if not m.any():
continue
values = np.full(out_shape, out_no_data_value, out_dtype)
values[k[m], j_s[m], i_s[m]] = source[k[m], j[m], i[m]]
stack.append({"values": values, "no_data_value": out_no_data_value})
# merge the values_stack
if len(stack) == 0:
return {
"values": np.full(out_shape, out_no_data_value, out_dtype),
"no_data_value": out_no_data_value,
}
else:
return reduce_rasters(stack, process_kwargs["statistic"])
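# Illustrative use (hypothetical values, not from the original source): place
# one source raster at two target points given in the Dutch RD New projection,
# keeping the per-cell maximum where the placed copies overlap:
#   placed = Place(source, "EPSG:28992", anchor=[155000, 463000],
#                  coordinates=[[155100, 463000], [155200, 463050]],
#                  statistic="max")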
|
the-stack_0_9081 | import functools
import itertools
from collections import (OrderedDict,
abc,
deque)
from operator import is_not
from typing import (Any,
Hashable,
Iterable,
MutableMapping,
Sequence,
Sized,
Tuple,
Type)
from .functional import flatmap
from .hints import (Domain,
Map,
Operator,
Range)
@functools.singledispatch
def capacity(iterable: Iterable[Any]) -> int:
"""
Returns number of elements in iterable.
>>> capacity(range(0))
0
>>> capacity(range(10))
10
"""
counter = itertools.count()
# order matters: if `counter` goes first,
# then it will be incremented even for empty `iterable`
deque(zip(iterable, counter),
maxlen=0)
return next(counter)
@capacity.register(abc.Sized)
def _(iterable: Sized) -> int:
"""
Returns number of elements in sized iterable.
"""
return len(iterable)
def first(iterable: Iterable[Domain]) -> Domain:
"""
Returns first element of iterable.
>>> first(range(10))
0
"""
try:
return next(iter(iterable))
except StopIteration as error:
raise ValueError('Argument supposed to be non-empty.') from error
def last(iterable: Iterable[Domain]) -> Domain:
"""
Returns last element of iterable.
>>> last(range(10))
9
"""
try:
return deque(iterable,
maxlen=1)[0]
except IndexError as error:
raise ValueError('Argument supposed to be non-empty.') from error
def cut(iterable: Iterable[Domain],
*,
slice_: slice) -> Iterable[Domain]:
"""
Selects elements from iterable based on given slice.
    Slice fields are supposed to be unset or non-negative,
    since it is hard to evaluate negative indices/steps for an arbitrary
    iterable, which may be potentially infinite
    or change its previous elements if iterated backwards.
"""
yield from itertools.islice(iterable,
slice_.start, slice_.stop, slice_.step)
def cutter(slice_: slice) -> Operator[Iterable[Domain]]:
"""
Returns function that selects elements from iterable based on given slice.
>>> to_first_triplet = cutter(slice(3))
>>> list(to_first_triplet(range(10)))
[0, 1, 2]
>>> to_second_triplet = cutter(slice(3, 6))
>>> list(to_second_triplet(range(10)))
[3, 4, 5]
>>> cut_out_every_third = cutter(slice(0, None, 3))
>>> list(cut_out_every_third(range(10)))
[0, 3, 6, 9]
"""
result = functools.partial(cut,
slice_=slice_)
result.__doc__ = ('Selects elements from iterable {slice}.'
.format(slice=_slice_to_description(slice_)))
return result
def _slice_to_description(slice_: slice) -> str:
"""Generates human readable representation of `slice` object."""
slice_description_parts = []
start_is_specified = bool(slice_.start)
if start_is_specified:
slice_description_parts.append('starting from position {start}'
.format(start=slice_.start))
step_is_specified = slice_.step is not None
if step_is_specified:
slice_description_parts.append('with step {step}'
.format(step=slice_.step))
if slice_.stop is not None:
stop_description_part = ('stopping at position {stop}'
.format(stop=slice_.stop))
if start_is_specified or step_is_specified:
stop_description_part = 'and ' + stop_description_part
slice_description_parts.append(stop_description_part)
return ' '.join(slice_description_parts)
def chopper(size: int) -> Map[Iterable[Domain], Iterable[Sequence[Domain]]]:
"""
Returns function that splits iterable into chunks of given size.
>>> in_three = chopper(3)
>>> list(map(tuple, in_three(range(10))))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
"""
result = functools.partial(chop,
size=size)
result.__doc__ = ('Splits iterable into chunks of size {size}.\n'
.format(size=size))
return result
@functools.singledispatch
def chop(iterable: Iterable[Domain],
*,
size: int) -> Iterable[Sequence[Domain]]:
"""
Splits iterable into chunks of given size.
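For example (an illustrative sketch), with a plain iterator,
which is handled by this generic implementation:
>>> list(chop(iter(range(7)), size=3))
[(0, 1, 2), (3, 4, 5), (6,)]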
"""
iterator = iter(iterable)
yield from iter(lambda: tuple(itertools.islice(iterator, size)), ())
@chop.register(abc.Sequence)
def _(iterable: Sequence[Domain],
*,
size: int) -> Iterable[Sequence[Domain]]:
"""
Splits sequence into chunks of given size.
"""
if not size:
return
for start in range(0, len(iterable), size):
yield iterable[start:start + size]
# deque does not support slice notation
chop.register(deque, chop.registry[object])
in_two = chopper(2)
in_three = chopper(3)
in_four = chopper(4)
def slide(iterable: Iterable[Domain],
*,
size: int) -> Iterable[Tuple[Domain, ...]]:
"""
Slides over iterable with window of given size.
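For example (illustrative values only), with a window of three:
>>> list(slide(range(5), size=3))
[(0, 1, 2), (1, 2, 3), (2, 3, 4)]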
"""
iterator = iter(iterable)
initial = tuple(itertools.islice(iterator, size))
def shift(previous: Tuple[Domain, ...],
element: Domain) -> Tuple[Domain, ...]:
return previous[1:] + (element,)
yield from itertools.accumulate(itertools.chain([initial], iterator),
shift)
def slider(size: int) -> Map[Iterable[Domain], Iterable[Tuple[Domain, ...]]]:
"""
Returns function that slides over iterable with window of given size.
>>> pairwise = slider(2)
>>> list(pairwise(range(10)))
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)]
"""
result = functools.partial(slide,
size=size)
result.__doc__ = ('Slides over iterable with window of size {size}.'
.format(size=size))
return result
pairwise = slider(2)
triplewise = slider(3)
quadruplewise = slider(4)
def header(size: int) -> Operator[Iterable[Domain]]:
"""
Returns function that selects elements from the beginning of iterable.
The resulting iterable will have size not greater than the given one.
>>> to_first_pair = header(2)
>>> list(to_first_pair(range(10)))
[0, 1]
"""
result = cutter(slice(size))
result.__doc__ = ('Selects {size} elements from the beginning of iterable.'
.format(size=size))
return result
@functools.singledispatch
def trail(iterable: Iterable[Domain],
*,
size: int) -> Iterable[Domain]:
"""
Selects elements from the end of iterable.
The resulting iterable will have size not greater than the given one.
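For example (an illustrative sketch), with a plain iterator,
which is handled by this generic implementation:
>>> list(trail(iter(range(10)), size=3))
[7, 8, 9]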
"""
return deque(iterable,
maxlen=size)
@trail.register(abc.Sequence)
def _(iterable: Sequence[Domain],
*,
size: int) -> Sequence[Domain]:
"""
Selects elements from the end of sequence.
The resulting sequence will have size not greater than the given one.
"""
return iterable[-size:] if size else iterable[:size]
# deque does not support slice notation
trail.register(deque, trail.registry[object])
def trailer(size: int) -> Operator[Iterable[Domain]]:
"""
Returns function that selects elements from the end of iterable.
The resulting iterable will have size not greater than the given one.
>>> to_last_pair = trailer(2)
>>> list(to_last_pair(range(10)))
[8, 9]
"""
result = functools.partial(trail,
size=size)
result.__doc__ = ('Selects {size} elements from the end of iterable.'
.format(size=size))
return result
def mapper(map_: Map) -> Map[Iterable[Domain], Iterable[Range]]:
"""
Returns function that applies given map to each element of iterable.
>>> to_str = mapper(str)
>>> list(to_str(range(10)))
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
"""
return functools.partial(map, map_)
def flatmapper(map_: Map[Domain, Iterable[Range]]
) -> Map[Iterable[Domain], Iterable[Range]]:
"""
Returns function that applies map to each element of iterable
and flattens results.
>>> relay = flatmapper(range)
>>> list(relay(range(5)))
[0, 0, 1, 0, 1, 2, 0, 1, 2, 3]
"""
return functools.partial(flatmap, map_)
Group = Tuple[Hashable, Iterable[Domain]]
def group_by(iterable: Iterable[Domain],
*,
key: Map[Domain, Hashable],
mapping_cls: Type[MutableMapping]) -> Iterable[Group]:
"""
Groups iterable elements based on given key.
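For example (illustrative values only; ``OrderedDict`` is imported above):
>>> list(group_by(range(-3, 3), key=abs, mapping_cls=OrderedDict))
[(3, [-3]), (2, [-2, 2]), (1, [-1, 1]), (0, [0])]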
"""
groups = mapping_cls()
for element in iterable:
groups.setdefault(key(element), []).append(element)
yield from groups.items()
def grouper(key: Map[Domain, Hashable],
*,
mapping_cls: Type[MutableMapping] = OrderedDict
) -> Map[Iterable[Domain], Iterable[Group]]:
"""
Returns function that groups iterable elements based on given key.
>>> group_by_absolute_value = grouper(abs)
>>> list(group_by_absolute_value(range(-5, 5)))
[(5, [-5]), (4, [-4, 4]), (3, [-3, 3]), (2, [-2, 2]), (1, [-1, 1]), (0, [0])]
>>> def modulo_two(number: int) -> int:
... return number % 2
>>> group_by_evenness = grouper(modulo_two)
>>> list(group_by_evenness(range(10)))
[(0, [0, 2, 4, 6, 8]), (1, [1, 3, 5, 7, 9])]
"""
return functools.partial(group_by,
key=key,
mapping_cls=mapping_cls)
def expand(object_: Domain) -> Iterable[Domain]:
"""
Wraps object into iterable.
>>> list(expand(0))
[0]
"""
yield object_
def flatten(iterable: Iterable[Iterable[Domain]]) -> Iterable[Domain]:
"""
Returns plain iterable from iterable of iterables.
>>> list(flatten([range(5), range(10, 20)]))
[0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
"""
yield from itertools.chain.from_iterable(iterable)
def interleave(iterable: Iterable[Iterable[Domain]]) -> Iterable[Domain]:
"""
Interleaves elements from given iterable of iterables.
>>> list(interleave([range(5), range(10, 20)]))
[0, 10, 1, 11, 2, 12, 3, 13, 4, 14, 15, 16, 17, 18, 19]
"""
iterators = itertools.cycle(map(iter, iterable))
while True:
try:
for iterator in iterators:
yield next(iterator)
except StopIteration:
is_not_exhausted = functools.partial(is_not, iterator)
iterators = itertools.cycle(itertools.takewhile(is_not_exhausted,
iterators))
else:
return
|
the-stack_0_9084 | from .fhirbase import fhirbase
class EnrollmentRequest(fhirbase):
"""
This resource provides the insurance enrollment details to the insurer
regarding a specified coverage.
Args:
resourceType: This is a EnrollmentRequest resource
identifier: The Response business identifier.
status: The status of the resource instance.
created: The date when this resource was created.
insurer: The Insurer who is target of the request.
provider: The practitioner who is responsible for the services
rendered to the patient.
organization: The organization which is responsible for the services
rendered to the patient.
subject: Patient Resource.
coverage: Reference to the program or plan identification, underwriter
or payor.
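Example (an illustrative sketch; the field values below are hypothetical,
and population normally happens through ``dict_values`` via
fhirbase.set_attributes):
    request = EnrollmentRequest({'status': 'active',
                                 'created': '2017-06-01'})
    request.get_relationships()  # declared Reference/Identifier links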
"""
__name__ = 'EnrollmentRequest'
def __init__(self, dict_values=None):
self.resourceType = 'EnrollmentRequest'
# type: str
# possible values: EnrollmentRequest
self.status = None
# type: str
self.created = None
# type: str
self.insurer = None
# reference to Reference: identifier
self.provider = None
# reference to Reference: identifier
self.organization = None
# reference to Reference: identifier
self.subject = None
# reference to Reference: identifier
self.coverage = None
# reference to Reference: identifier
self.identifier = None
# type: list
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'EnrollmentRequest',
'child_variable': 'subject'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'EnrollmentRequest',
'child_variable': 'coverage'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'EnrollmentRequest',
'child_variable': 'insurer'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'EnrollmentRequest',
'child_variable': 'identifier'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'EnrollmentRequest',
'child_variable': 'organization'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'EnrollmentRequest',
'child_variable': 'provider'},
]
|
the-stack_0_9086 | import unittest
from unittest.mock import patch, call
import argparse
from deba.commands.test import add_subcommand
class TestCommandTestCase(unittest.TestCase):
@patch("builtins.print")
def test_run(self, mock_print):
parser = argparse.ArgumentParser("deba")
subparsers = parser.add_subparsers()
add_subcommand(subparsers)
args = parser.parse_args(
["test", r"`*_df`.to_csv(r'.+\.csv')", r"my_df.to_csv('my_data.csv')"]
)
args.exec(
None,
args,
)
mock_print.assert_has_calls([call('Extracted "my_data.csv"')])
args = parser.parse_args(
["test", r"`*_df`.to_csv(r'.+\.csv')", r"my_df.t_csv('my_data.csv')"]
)
with self.assertRaises(SystemExit) as cm:
args.exec(
None,
args,
)
mock_print.assert_has_calls([call("Does not match")])
self.assertEqual(
cm.exception.args,
(1,),
)
|
the-stack_0_9091 | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from botocore.exceptions import ClientError
from mock import Mock, patch
from nose.tools import assert_equal
from stream_alert.classifier.clients.firehose import FirehoseClient
class TestFirehoseClient(object):
"""Test class for FirehoseClient"""
# pylint: disable=protected-access,no-self-use,attribute-defined-outside-init
def setup(self):
"""Setup before each method"""
with patch('boto3.client'): # patch to speed up unit tests slightly
self._client = FirehoseClient()
def teardown(self):
"""Teardown after each method"""
FirehoseClient._ENABLED_LOGS.clear()
@property
def _sample_payloads(self):
return [
Mock(
log_schema_type='log_type_01_sub_type_01',
parsed_records=[
{
'unit_key_01': 1,
'unit_key_02': 'test'
},
{
'unit_key_01': 2,
'unit_key_02': 'test'
}
]
),
Mock(
log_schema_type='log_type_02_sub_type_01',
parsed_records=[
{
'date': 'January 01, 3005',
'unixtime': '32661446400',
'host': 'my-host.name.website.com',
'data': {
'super': 'secret'
}
}
]
)
]
@classmethod
def _sample_raw_records(cls, count=2):
return [
{'key_{}'.format(i): 'value_{}'.format(i)}
for i in range(count)
]
def test_records_to_json_list(self):
"""FirehoseClient - Records JSON Lines"""
records = self._sample_raw_records()
expected_result = [
'{"key_0":"value_0"}\n',
'{"key_1":"value_1"}\n'
]
result = FirehoseClient._records_to_json_list(records)
assert_equal(result, expected_result)
def test_record_batches(self):
"""FirehoseClient - Record Batches"""
records = self._sample_raw_records()
expected_result = [
[
'{"key_0":"value_0"}\n',
'{"key_1":"value_1"}\n'
]
]
result = list(FirehoseClient._record_batches(records))
assert_equal(result, expected_result)
@patch.object(FirehoseClient, '_log_failed')
def test_record_batches_rec_too_large(self, failure_mock):
"""FirehoseClient - Record Batches, Record Too Large"""
records = [
{'key': 'test' * 1000 * 1000}
]
result = list(FirehoseClient._record_batches(records))
assert_equal(result, [[]])
failure_mock.assert_called_with(1)
def test_record_batches_max_batch_count(self):
"""FirehoseClient - Record Batches, Max Batch Count"""
records = self._sample_raw_records(count=501)
result = list(FirehoseClient._record_batches(records))
assert_equal(len(result), 2)
assert_equal(len(result[0]), 500)
assert_equal(len(result[1]), 1)
def test_record_batches_max_batch_size(self):
"""FirehoseClient - Record Batches, Max Batch Size"""
records = [
{'key_{}'.format(i): 'test' * 100000}
for i in range(10)
]
result = list(FirehoseClient._record_batches(records))
assert_equal(len(result), 2)
assert_equal(len(result[0]), 9)
assert_equal(len(result[1]), 1)
batch_size_01 = sum(len(rec) for rec in result[0])
batch_size_02 = sum(len(rec) for rec in result[1])
assert_equal(batch_size_01 < FirehoseClient.MAX_BATCH_SIZE, True)
assert_equal(batch_size_02 < FirehoseClient.MAX_BATCH_SIZE, True)
assert_equal(batch_size_01 + batch_size_02 > FirehoseClient.MAX_BATCH_SIZE, True)
def test_sanitize_keys(self):
"""FirehoseClient - Sanitize Keys"""
test_event = {
'date': 'January 01, 3005',
'data': {
'super-duper': 'secret',
'do_not_sanitize_me': 1,
'example-key': 2,
'moar**data': 3,
'even.more': 4
}
}
expected_sanitized_event = {
'date': 'January 01, 3005',
'data': {
'super_duper': 'secret',
'do_not_sanitize_me': 1,
'example_key': 2,
'moar__data': 3,
'even_more': 4
}
}
sanitized_event = FirehoseClient.sanitize_keys(test_event)
assert_equal(sanitized_event, expected_sanitized_event)
def test_strip_successful_records(self):
"""FirehoseClient - Strip Successful Records"""
batch = [{'test': 'success'}, {'other': 'failure'}, {'other': 'info'}]
response = {
'FailedPutCount': 1,
'RequestResponses': [
{'RecordId': 'rec_id_01'},
{'ErrorCode': 10, 'ErrorMessage': 'foo'},
{'RecordId': 'rec_id_03'}
]
}
expected_batch = [{'other': 'failure'}]
FirehoseClient._strip_successful_records(batch, response)
assert_equal(batch, expected_batch)
def test_categorize_records(self):
"""FirehoseClient - Categorize Records"""
FirehoseClient._ENABLED_LOGS = {
'log_type_01_sub_type_01': 'log_type_01:sub_type_01',
'log_type_02_sub_type_01': 'log_type_02:sub_type_01'
}
payloads = self._sample_payloads
result = self._client._categorize_records(payloads)
expected_result = {
'log_type_01_sub_type_01': payloads[0].parsed_records,
'log_type_02_sub_type_01': payloads[1].parsed_records
}
assert_equal(dict(result), expected_result)
def test_categorize_records_none_enabled(self):
"""FirehoseClient - Categorize Records, None Enabled"""
payloads = self._sample_payloads
result = self._client._categorize_records(payloads)
assert_equal(dict(result), dict())
def test_categorize_records_subset_enabled(self):
"""FirehoseClient - Categorize Records, Subset Enabled"""
FirehoseClient._ENABLED_LOGS = {
'log_type_01_sub_type_01': 'log_type_01:sub_type_01'
}
payloads = self._sample_payloads
result = self._client._categorize_records(payloads)
expected_result = {
'log_type_01_sub_type_01': payloads[0].parsed_records
}
assert_equal(dict(result), expected_result)
@patch.object(FirehoseClient, '_log_failed')
def test_finalize_failures(self, failure_mock):
"""FirehoseClient - Finalize, With Failures"""
response = {
'FailedPutCount': 1,
'RequestResponses': [
{'RecordId': 'rec_id_01'},
{'ErrorCode': 10, 'ErrorMessage': 'foo'},
{'RecordId': 'rec_id_03'}
]
}
FirehoseClient._finalize(response, 'stream_name', 3)
failure_mock.assert_called_with(1)
@patch('logging.Logger.info')
def test_finalize_success(self, log_mock):
"""FirehoseClient - Finalize, Success"""
request_id = 'success_id'
stream_name = 'stream_name'
count = 3
response = {
'ResponseMetadata': {
'RequestId': request_id
}
}
FirehoseClient._finalize(response, stream_name, count)
log_mock.assert_called_with(
'Successfully sent %d message(s) to firehose %s with RequestId \'%s\'',
count,
stream_name,
request_id
)
def test_send_batch(self):
"""FirehoseClient - Send Batch"""
records = [
'{"unit_key_02":"test","unit_key_01":1}\n',
'{"unit_key_02":"test","unit_key_01":2}\n'
]
stream_name = 'test_stream_name'
expected_second_call = [
{'Data': records[1]}
]
with patch.object(self._client, '_client') as boto_mock:
boto_mock.put_record_batch.side_effect = [
{
'FailedPutCount': 1,
'RequestResponses': [
{'RecordId': 'rec_id_01'},
{'ErrorCode': 10, 'ErrorMessage': 'foo'}
]
},
{
'FailedPutCount': 0,
'RequestResponses': [
{'RecordId': 'rec_id_02'},
]
}
]
self._client._send_batch(stream_name, records)
boto_mock.put_record_batch.assert_called_with(
DeliveryStreamName=stream_name,
Records=expected_second_call
)
@patch('logging.Logger.exception')
@patch.object(FirehoseClient, 'MAX_BACKOFF_ATTEMPTS', 1)
def test_send_batch_error(self, log_mock):
"""FirehoseClient - Send Batch, Error"""
stream_name = 'test_stream_name'
with patch.object(self._client, '_client') as boto_mock:
error = ClientError({'Error': {'Code': 10}}, 'InvalidRequestException')
boto_mock.put_record_batch.side_effect = error
self._client._send_batch(stream_name, ['data'])
log_mock.assert_called_with('Firehose request failed')
def test_firehose_log_name(self):
"""FirehoseClient - Firehose Log Name"""
expected_result = 'test_log_type_name'
result = FirehoseClient.firehose_log_name('test*log.type-name')
assert_equal(result, expected_result)
def test_enabled_log_source(self):
"""FirehoseClient - Enabled Log Source"""
log = 'enabled_log'
FirehoseClient._ENABLED_LOGS = {
log: 'enabled:log'
}
assert_equal(FirehoseClient.enabled_log_source(log), True)
def test_enabled_log_source_false(self):
"""FirehoseClient - Enabled Log Source, False"""
log = 'enabled_log'
assert_equal(FirehoseClient.enabled_log_source(log), False)
def test_load_enabled_sources(self):
"""FirehoseClient - Load Enabled Log Sources"""
logs_config = {
'log_type_01:sub_type_01': {},
'log_type_01:sub_type_02': {}, # This log type is not enabled
'log_type_02:sub_type_01': {},
'log_type_02:sub_type_02': {},
}
firehose_config = {
'enabled_logs': [
'log_type_01:sub_type_01', # One log for log_type_01
'log_type_02' # All of log_type_02
]
}
expected_result = {
'log_type_01_sub_type_01': 'log_type_01:sub_type_01',
'log_type_02_sub_type_01': 'log_type_02:sub_type_01',
'log_type_02_sub_type_02': 'log_type_02:sub_type_02'
}
enabled_logs = FirehoseClient.load_enabled_log_sources(firehose_config, logs_config)
assert_equal(enabled_logs, expected_result)
@patch('logging.Logger.error')
def test_load_enabled_sources_invalid_log(self, log_mock):
"""FirehoseClient - Load Enabled Log Sources, Invalid Log Type"""
logs_config = {
'log_type_01:sub_type_01': {},
'log_type_01:sub_type_02': {}
}
log_type = 'log_type_03'
firehose_config = {
'enabled_logs': [
log_type
]
}
enabled_logs = FirehoseClient.load_enabled_log_sources(firehose_config, logs_config)
assert_equal(enabled_logs, dict())
log_mock.assert_called_with(
'Enabled Firehose log %s not declared in logs.json', log_type
)
@patch('logging.Logger.error')
def test_load_enabled_sources_invalid_log_subtype(self, log_mock):
"""FirehoseClient - Load Enabled Log Sources, Invalid Log Sub-type"""
logs_config = {
'log_type_01:sub_type_01': {}
}
log_type = 'log_type_01:sub_type_02'
firehose_config = {
'enabled_logs': [
log_type
]
}
enabled_logs = FirehoseClient.load_enabled_log_sources(firehose_config, logs_config)
assert_equal(enabled_logs, dict())
log_mock.assert_called_with(
'Enabled Firehose log %s not declared in logs.json', log_type
)
def test_load_from_config(self):
"""FirehoseClient - Load From Config"""
with patch('boto3.client'): # patch to speed up unit tests slightly
client = FirehoseClient.load_from_config({'enabled': True}, None)
assert_equal(isinstance(client, FirehoseClient), True)
def test_load_from_config_disabled(self):
"""FirehoseClient - Load From Config, Disabled"""
assert_equal(FirehoseClient.load_from_config({}, None), None)
@patch.object(FirehoseClient, '_send_batch')
def test_send(self, send_batch_mock):
"""FirehoseClient - Send"""
FirehoseClient._ENABLED_LOGS = {
'log_type_01_sub_type_01': 'log_type_01:sub_type_01'
}
expected_batch = [
'{"unit_key_02":"test","unit_key_01":1}\n',
'{"unit_key_02":"test","unit_key_01":2}\n'
]
self._client.send(self._sample_payloads)
send_batch_mock.assert_called_with(
'streamalert_data_log_type_01_sub_type_01', expected_batch
)
|
the-stack_0_9092 | from numpy import nan
from pandas import DataFrame, concat, read_sql_table
from pandas._testing import assert_frame_equal
from df_to_azure import df_to_azure
from df_to_azure.db import auth_azure
# #############################
# #### APPEND METHOD TESTS ####
# #############################
def test_append():
df = DataFrame({"A": [1, 2, 3], "B": list("abc"), "C": [4.0, 5.0, nan]})
# 1. we create a new dataframe
df_to_azure(
df=df,
tablename="append",
schema="test",
method="create",
wait_till_finished=True,
)
# 2. we append the same data
df_to_azure(
df=df,
tablename="append",
schema="test",
method="append",
wait_till_finished=True,
)
# 3. we test if the data is what we expect
with auth_azure() as con:
result = read_sql_table(table_name="append", con=con, schema="test")
expected = concat([df, df], ignore_index=True)
assert_frame_equal(result, expected)
|
the-stack_0_9094 | """btre URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('pages.urls', namespace='pages')),
path('listings/', include('listings.urls', namespace='listings')),
path('accounts/', include('accounts.urls', namespace='accounts')),
path('contacts/', include('contacts.urls', namespace='contacts')),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
the-stack_0_9095 | import itertools
from typing import Any
import torch
from torch.autograd import DeviceType
from torch.futures import Future
from collections import defaultdict, namedtuple
from operator import attrgetter
from typing import Dict, List, Tuple, Optional
import math
try:
# Available in Python >= 3.2
from contextlib import ContextDecorator
except ImportError:
import functools
class ContextDecorator(object): # type: ignore[no-redef]
def __enter__(self):
raise NotImplementedError
def __exit__(self, exc_type, exc_val, exc_tb):
raise NotImplementedError
def __call__(self, func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapped
class EventList(list):
"""A list of Events (for pretty printing)"""
def __init__(self, *args, **kwargs):
use_cuda = kwargs.pop('use_cuda', True)
profile_memory = kwargs.pop('profile_memory', False)
with_flops = kwargs.pop('with_flops', False)
super(EventList, self).__init__(*args, **kwargs)
self._use_cuda = use_cuda
self._profile_memory = profile_memory
self._tree_built = False
self._with_flops = with_flops
def _build_tree(self):
self._populate_cpu_children()
self._remove_dup_nodes()
self._set_backward_stacktraces()
self._tree_built = True
def __str__(self):
return self.table()
def _remove_dup_nodes(self):
while True:
to_delete = []
for idx in range(len(self)):
if (self[idx].cpu_parent is not None and
self[idx].cpu_parent.name == self[idx].name and
len(self[idx].cpu_parent.cpu_children) == 1):
self[idx].cpu_parent.cpu_children = self[idx].cpu_children
self[idx].cpu_parent.kernels = self[idx].kernels # lift kernels up
for ch in self[idx].cpu_children:
ch.cpu_parent = self[idx].cpu_parent
to_delete.append(idx)
if len(to_delete) == 0:
break
new_evts = [ev for ind, ev in enumerate(self) if ind not in to_delete]
self.clear()
self.extend(new_evts)
def _populate_cpu_children(self):
"""Populates child events into each underlying FunctionEvent object.
One event is a child of another if [s1, e1) is inside [s2, e2), where
s1 and e1 are the start and end of the child event's interval and
s2 and e2 are the start and end of the parent event's interval.
Example: the event list [[0, 10], [1, 3], [3, 4]] would make [0, 10]
the parent of the two other intervals.
If for any reason two intervals intersect only partially, this function
will not record a parent-child relationship between them.
"""
# Some events can be async (i.e. start and end on different threads),
# since it's generally undefined how to attribute children ranges to
# async ranges, we do not use them when calculating nested ranges and stats
sync_events = [evt for evt in self if not evt.is_async and evt.device_type == DeviceType.CPU]
events = sorted(
sync_events,
key=attrgetter("thread"),
)
# Group by both thread and node_id, so that events that happen to have
# the same thread_id but are from different nodes aren't incorrectly
# grouped together.
threads = itertools.groupby(
events, key=lambda event: (event.thread, event.node_id)
)
# For each thread we keep a stack of current nested parents.
# We maintain the invariant that each interval is a subset of all other
# intervals lower in the stack.
#
# First we sort the intervals by their start time. Then we iterate over them.
# Every time we see a new interval we remove several parents from
# the top until we restore the invariant. Then a parent-child relationship
# is recorded if the stack is not empty.
# Finally we add new interval to the list
#
# Algorithm has O(N * log(N)) complexity where N is number of
# intervals
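#
# Illustrative trace (comments only) for the docstring example above:
# with [[0, 10], [1, 3], [3, 4]] sorted by start time,
#   [0, 10]: stack is empty -> pushed
#   [1, 3]:  nested in [0, 10] -> recorded as its child, pushed
#   [3, 4]:  [1, 3] is popped (it ends by 3), then [3, 4] is recorded
#            as another child of [0, 10]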
for thread_id, thread_events in threads:
thread_events_ = sorted(
thread_events,
key=lambda event: [event.time_range.start, -event.time_range.end],
)
current_events: List[FunctionEvent] = []
cur_end = 0
for event in thread_events_:
while len(current_events) > 0:
parent = current_events[-1]
if event.time_range.start >= parent.time_range.end or \
event.time_range.end > parent.time_range.end:
# this can't be a parent
current_events.pop()
else:
parent.append_cpu_child(event)
assert (
event.cpu_parent is None
), "There is already a CPU parent event for {}".format(
event.key
)
event.set_cpu_parent(parent)
break
current_events.append(event)
def _set_backward_stacktraces(self):
def bw_parent(evt):
if evt is None:
return None
elif evt.scope == 1: # BACKWARD_FUNCTION
return evt
else:
return bw_parent(evt.cpu_parent)
fwd_stacks = {}
for evt in self:
if bw_parent(evt) is None and evt.stack is not None:
t = (evt.sequence_nr, evt.thread)
if t not in fwd_stacks:
fwd_stacks[t] = evt.stack
for evt in self:
p = bw_parent(evt)
if p is not None:
assert p.fwd_thread is not None
t = (p.sequence_nr, p.fwd_thread)
if t in fwd_stacks:
evt.stack = fwd_stacks[t]
else:
evt.stack = []
@property
def self_cpu_time_total(self):
return sum([event.self_cpu_time_total for event in self])
def table(self, sort_by=None, row_limit=100, max_src_column_width=75, header=None, top_level_events_only=False):
"""Prints an EventList as a nicely formatted table.
Args:
sort_by (str, optional): Attribute used to sort entries. By default
they are printed in the same order as they were registered.
Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``,
``cuda_time_total``, ``cpu_memory_usage``, ``cuda_memory_usage``,
``self_cpu_memory_usage``, ``self_cuda_memory_usage``, ``count``.
top_level_events_only(bool, optional): Boolean flag to determine the
selection of events to display. If true, the profiler will only
display events at top level, like a top-level invocation of python
`lstm`, python `add` or other functions; nested events like low-level
cpu/cuda ops are omitted for profiler result readability.
Returns:
A string containing the table.
"""
return build_table(
self,
sort_by=sort_by,
row_limit=row_limit,
max_src_column_width=max_src_column_width,
header=header,
profile_memory=self._profile_memory,
with_flops=self._with_flops,
top_level_events_only=top_level_events_only)
def export_chrome_trace(self, path):
"""Exports an EventList as a Chrome tracing tools file.
The checkpoint can be later loaded and inspected under ``chrome://tracing`` URL.
Args:
path (str): Path where the trace will be written.
"""
import os
with open(path, 'w') as f:
chrome_events = []
next_id = 0
# Use file IO over using json.dump since JSON dumping is very slow and
# this technique is proven to give a 4x speedup.
f.write("[")
for evt in self:
if evt.trace_name is None:
continue
f.write(
'{"name": "%s", '
'"ph": "X", '
'"ts": %s, '
'"dur": %s, '
'"tid": %s, '
'"pid": "CPU functions", '
'"args": {}}, '
% (
evt.trace_name,
evt.time_range.start,
evt.time_range.elapsed_us(),
evt.thread
if not evt.is_remote
else f'" node_id:{evt.node_id}, thread_id:{evt.thread} "',
)
)
for k in evt.kernels:
# 's' and 'f' draw Flow arrows from
# the CPU launch to the GPU kernel
f.write('{"name": "%s", '
'"ph": "s", '
'"ts": %s, '
'"tid": %s, '
'"pid": "CPU functions", '
'"id": %s, '
'"cat": "cpu_to_cuda", '
'"args": {}}, ' % (evt.trace_name, evt.time_range.start,
evt.thread, next_id))
# Note: use torch.profiler to get device kernel trace
next_id += 1
# remove trailing whitespace and comma
f.seek(f.tell() - 2, os.SEEK_SET)
f.truncate()
f.write("]")
def supported_export_stacks_metrics(self):
return ["self_cpu_time_total", "self_cuda_time_total"]
def export_stacks(self, path: str, metric: str):
if metric not in self.supported_export_stacks_metrics():
raise ValueError("metric should be one of: " + str(self.supported_export_stacks_metrics()))
translate_table = str.maketrans(" ;\t\n", "____")
with open(path, 'w') as f:
for evt in self:
if evt.stack and len(evt.stack) > 0:
metric_value = getattr(evt, metric)
if int(metric_value) > 0:
stack_str = ""
for entry in reversed(evt.stack):
stack_str += entry.translate(translate_table)
stack_str += ";"
stack_str = stack_str[:-1] + " " + str(int(metric_value))
f.write(stack_str + "\n")
def key_averages(self, group_by_input_shapes=False, group_by_stack_n=0):
"""Averages all function events over their keys.
Args:
group_by_input_shapes: group entries by
(event name, input shapes) rather than just event name.
This is useful to see which input shapes contribute to the runtime
the most and may help with size-specific optimizations or
choosing the best candidates for quantization (aka fitting a roof line)
group_by_stack_n: group by top n stack trace entries
Returns:
An EventList containing FunctionEventAvg objects.
"""
assert self._tree_built
stats: Dict[Tuple[str, ...], FunctionEventAvg] = defaultdict(FunctionEventAvg)
def get_key(event, group_by_input_shapes, group_by_stack_n) -> Tuple[str, ...]:
key = [str(event.key), str(event.node_id), str(event.device_type), str(event.is_legacy)]
if group_by_input_shapes:
key.append(str(event.input_shapes))
if group_by_stack_n > 0:
key += event.stack[:group_by_stack_n]
return tuple(key)
for evt in self:
stats[get_key(evt, group_by_input_shapes, group_by_stack_n)].add(evt)
avg_list = EventList(
stats.values(),
use_cuda=self._use_cuda,
profile_memory=self._profile_memory,
with_flops=self._with_flops)
for evt in avg_list:
evt.stack = evt.stack[:group_by_stack_n]
if not group_by_input_shapes:
evt.input_shapes = ""
return avg_list
def total_average(self):
"""Averages all events.
Returns:
A FunctionEventAvg object.
"""
total_stat = FunctionEventAvg()
for evt in self:
total_stat += evt
total_stat.key = None
total_stat.key = 'Total'
return total_stat
class profile(object):
"""Context manager that manages autograd profiler state and holds a summary of results.
Under the hood it just records events of functions being executed in C++ and
exposes those events to Python. You can wrap any code into it and it will
only report runtime of PyTorch functions.
Note: profiler is thread local and is automatically propagated into the async tasks
Args:
enabled (bool, optional): Setting this to False makes this context manager a no-op.
use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API.
Adds approximately 4us of overhead to each tensor operation.
record_shapes (bool, optional): If shapes recording is set, information
about input dimensions will be collected. This allows one to see which
dimensions have been used under the hood and further group by them
using prof.key_averages(group_by_input_shape=True). Please note that
shape recording might skew your profiling data. It is recommended to
use separate runs with and without shape recording to validate the timing.
Most likely the skew will be negligible for bottom most events (in a case
of nested function calls). But for higher level functions the total
self cpu time might be artificially increased because of the shape
collection.
with_flops (bool, optional): If with_flops is set, the profiler will estimate
the FLOPS (floating point operations per second) value using the operator's input shape
and total time. This allows one to estimate the hardware performance. Currently,
this option only works for the matrix multiplication and 2D convolution operators.
profile_memory (bool, optional): track tensor memory allocation/deallocation.
with_stack (bool, optional): record source information (file and line number) for the ops.
use_kineto (bool, optional): experimental, enable profiling with Kineto profiler.
use_cpu (bool, optional): profile CPU events; setting to ``False`` requires
``use_kineto=True`` and can be used to lower the overhead for GPU-only profiling.
.. warning:
Enabling memory profiling or source attribution incurs additional profiler
overhead
.. warning:
This context manager should not be called recursively, i.e. no nested
instances are allowed
.. warning:
Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_),
one cannot use the profiler with ``use_cuda = True`` to benchmark
DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading,
please use ``use_cuda = False`` or ``num_workers = 0``.
Example:
>>> x = torch.randn((1, 1), requires_grad=True)
>>> with torch.autograd.profiler.profile() as prof:
>>> for _ in range(100): # any normal python code, really!
>>> y = x ** 2
>>> y.backward()
>>> # NOTE: some columns were removed for brevity
>>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
----------------------------------- --------------- --------------- ---------------
Name Self CPU total CPU time avg Number of Calls
----------------------------------- --------------- --------------- ---------------
mul 32.048ms 32.048ms 200
pow 27.041ms 27.041ms 200
PowBackward0 9.727ms 55.483ms 100
torch::autograd::AccumulateGrad 9.148ms 9.148ms 100
torch::autograd::GraphRoot 691.816us 691.816us 100
----------------------------------- --------------- --------------- ---------------
"""
def __init__(
self,
enabled=True,
*,
use_cuda=False,
record_shapes=False,
with_flops=False,
profile_memory=False,
with_stack=False,
use_kineto=False,
use_cpu=True):
self.enabled: bool = enabled
if not self.enabled:
return
self.use_cuda = use_cuda
self.function_events = None
self.entered = False
self.record_shapes = record_shapes
self.with_flops = with_flops
self.record_shapes |= self.with_flops
self.profile_memory = profile_memory
self.with_stack = with_stack
self.use_cpu = use_cpu
self.kineto_results = None
if not self.use_cpu:
assert use_kineto, \
"Device-only events supported only with Kineto (use_kineto=True)"
self.profiler_kind = None
self.kineto_activities = set()
if use_kineto:
self.profiler_kind = torch.autograd.ProfilerState.KINETO
if self.use_cpu:
self.kineto_activities.add(torch.autograd.ProfilerActivity.CPU)
if self.use_cuda:
self.kineto_activities.add(
# uses CUPTI
torch.autograd.ProfilerActivity.CUDA)
assert len(self.kineto_activities) > 0, \
"No activities specified for Kineto profiler"
elif self.use_cuda:
# legacy CUDA mode
self.profiler_kind = torch.autograd.ProfilerState.CUDA
else:
self.profiler_kind = torch.autograd.ProfilerState.CPU
if self.profiler_kind == torch.autograd.ProfilerState.KINETO:
assert (
torch.autograd.kineto_available()
), """Requested Kineto profiling but Kineto is not available,
make sure PyTorch is built with USE_KINETO=1"""
def config(self):
assert self.profiler_kind is not None
return torch.autograd.ProfilerConfig(
self.profiler_kind,
self.record_shapes,
self.profile_memory,
self.with_stack,
self.with_flops)
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("profiler context manager is not reentrant")
self.entered = True
if self.kineto_activities:
torch.autograd._prepare_profiler(self.config(), self.kineto_activities)
torch.autograd._enable_profiler(self.config(), self.kineto_activities)
else:
torch.autograd._enable_profiler_legacy(self.config())
return self
def _prepare_kineto_trace(self):
assert self.kineto_activities
self.entered = True
torch.autograd._prepare_profiler(self.config(), self.kineto_activities)
def _start_kineto_trace(self):
assert self.kineto_activities
torch.autograd._enable_profiler(self.config(), self.kineto_activities)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
if self.kineto_activities:
self.kineto_results = torch.autograd._disable_profiler()
parsed_results = parse_kineto_results(self.kineto_results)
else:
records = torch.autograd._disable_profiler_legacy()
parsed_results = parse_legacy_records(records)
self.function_events = EventList(
parsed_results,
use_cuda=self.use_cuda,
profile_memory=self.profile_memory,
with_flops=self.with_flops)
self.function_events._build_tree()
return False
def __repr__(self):
if self.function_events is None:
return '<unfinished torch.autograd.profile>'
return repr(self.function_events)
def __str__(self):
if self.function_events is None:
return '<unfinished torch.autograd.profile>'
return str(self.function_events)
def _check_finish(self):
if self.function_events is None:
raise RuntimeError("can't export a trace that didn't finish running")
def table(self, sort_by=None, row_limit=100, max_src_column_width=75, header=None, top_level_events_only=False):
self._check_finish()
assert self.function_events is not None
return self.function_events.table(
sort_by=sort_by, row_limit=row_limit, max_src_column_width=max_src_column_width, header=header,
top_level_events_only=top_level_events_only
)
table.__doc__ = EventList.table.__doc__
def export_chrome_trace(self, path):
self._check_finish()
if self.kineto_results is not None:
self.kineto_results.save(path)
else:
assert self.function_events is not None
return self.function_events.export_chrome_trace(path)
export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__
def export_stacks(self, path: str, metric: str = "self_cpu_time_total"):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
assert self.with_stack, "export_stacks() requires with_stack=True"
return self.function_events.export_stacks(path, metric)
def key_averages(self, group_by_input_shape=False, group_by_stack_n=0):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
return self.function_events.key_averages(group_by_input_shape, group_by_stack_n)
key_averages.__doc__ = EventList.key_averages.__doc__
def total_average(self):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
return self.function_events.total_average()
total_average.__doc__ = EventList.total_average.__doc__
@property
def self_cpu_time_total(self):
""" Returns total time spent on CPU obtained as a sum of
all self times across all the events.
"""
self._check_finish()
assert self.function_events is not None
return self.function_events.self_cpu_time_total
class record_function(ContextDecorator):
"""Context manager/function decorator that adds a label to a block of
Python code (or function) when running autograd profiler. It is
useful when tracing the code profile.
Args:
name (str): Label assigned to the block of code.
node_id (int): ID of node, for distributed profiling. Unset in
non-distributed cases.
Example:
>>> x = torch.randn((1, 1), requires_grad=True)
>>> with torch.autograd.profiler.profile() as prof:
... y = x ** 2
... with torch.autograd.profiler.record_function("label-z"): # label the block
... z = y ** 3
... y.backward()
...
>>> # NOTE: some columns were removed for brevity
>>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
----------------------------------- --------------- --------------- ---------------
Name Self CPU total % CPU time avg Number of Calls
----------------------------------- --------------- --------------- ---------------
pow 60.77% 47.470us 3
mul 21.73% 25.465us 2
PowBackward0 12.03% 121.891us 1
torch::autograd::AccumulateGrad 2.70% 6.324us 1
label-z 2.13% 12.421us 1
torch::autograd::GraphRoot 0.64% 1.503us 1
----------------------------------- --------------- --------------- ---------------
Self CPU time total: 234.344us
CUDA time total: 0.000us
"""
def __init__(self, name: str):
self.name: str = name
# Whether or not we should run record function's end callbacks when exiting.
self.run_callbacks_on_exit: bool = True
# Stores underlying RecordFunction as a tensor. TODO: move to custom
# class (https://github.com/pytorch/pytorch/issues/35026).
self.handle: torch.Tensor = torch.zeros(1)
def __enter__(self):
self.handle = torch.ops.profiler._record_function_enter(self.name)
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
if self.run_callbacks_on_exit:
torch.ops.profiler._record_function_exit(self.handle)
def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]:
"""
_call_end_callbacks_on_future is meant to be used for profiling async
calls that return a future. Calling this function will extend recording
beyond this scope, until the future is satisfied. It is useful for profiling
the end to end time of asynchronous calls. This function should only be called
once to attach the callback onto the future, and will throw if called multiple
times.
Args:
fut: (torch._C.Future): future for which to schedule
callback for.
Returns:
A future that completes with the value of the passed in future when
the profiling callbacks have run.
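Example (a sketch only; assumes an initialized ``torch.distributed.rpc``
setup, where ``rpc.rpc_async`` stands in for any call returning a
``torch.futures.Future``)::
    with record_function("remote-add") as rf:
        fut = rpc.rpc_async("worker1", torch.add, args=(x, y))
        fut = rf._call_end_callbacks_on_future(fut)
    result = fut.wait()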
"""
# Throw if we have already attached a callback onto the future.
if not self.run_callbacks_on_exit:
raise RuntimeError("_call_end_callbacks_on_future can only be called once.")
# We are scheduling to run this RecordFunction's end callbacks when the
# passed in future completes, so don't run end callbacks on exit.
self.run_callbacks_on_exit = False
profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(self.handle, fut)
return profiled_future
class emit_nvtx(object):
"""Context manager that makes every autograd operation emit an NVTX range.
It is useful when running the program under nvprof::
nvprof --profile-from-start off -o trace_name.prof -- <regular command here>
Unfortunately, there's no way to force nvprof to flush the data it collected
to disk, so for CUDA profiling one has to use this context manager to annotate
nvprof traces and wait for the process to exit before inspecting them.
Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or
:func:`torch.autograd.profiler.load_nvprof` can load the results for inspection
e.g. in Python REPL.
.. warning:
This context manager should not be called recursively, i.e. at most one
instance should be enabled at any given time.
Args:
enabled (bool, optional, default=True): Setting ``enabled=False`` makes this context manager a no-op.
Default: ``True``.
record_shapes (bool, optional, default=False): If ``record_shapes=True``, the nvtx range wrapping
each autograd op will append information about the sizes of Tensor arguments received
by that op, in the following format:
``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``
Non-tensor arguments will be represented by ``[]``.
Arguments will be listed in the order they are received by the backend op.
Please note that this order may not match the order in which those arguments were passed
on the Python side. Also note that shape recording may increase the overhead of nvtx range creation.
Example:
>>> with torch.cuda.profiler.profile():
... model(x) # Warmup CUDA memory allocator and profiler
... with torch.autograd.profiler.emit_nvtx():
... model(x)
**Forward-backward correlation**
When viewing a profile created using :class:`emit_nvtx` in the Nvidia Visual Profiler,
correlating each backward-pass op with the corresponding forward-pass op can be difficult.
To ease this task, :class:`emit_nvtx` appends sequence number information to the ranges it
generates.
During the forward pass, each function range is decorated with ``seq=<N>``. ``seq`` is a running
counter, incremented each time a new backward Function object is created and stashed for backward.
Thus, the ``seq=<N>`` annotation associated with each forward function range tells you that
if a backward Function object is created by this forward function,
the backward object will receive sequence number N.
During the backward pass, the top-level range wrapping each C++ backward Function's
``apply()`` call is decorated with ``stashed seq=<M>``. ``M`` is the sequence number that
the backward object was created with. By comparing ``stashed seq`` numbers in backward with ``seq``
numbers in forward, you can track down which forward op created each backward Function.
Any functions executed during the backward pass are also decorated with ``seq=<N>``. During
default backward (with ``create_graph=False``) this information is irrelevant, and in fact,
``N`` may simply be 0 for all such functions. Only the top-level ranges associated with
backward Function objects' ``apply()`` methods are useful, as a way to correlate these Function
objects with the earlier forward pass.
**Double-backward**
If, on the other hand, a backward pass with ``create_graph=True`` is underway (in other words,
if you are setting up for a double-backward), each function's execution during backward
is given a nonzero, useful ``seq=<N>``. Those functions may themselves create Function objects
to be executed later during double-backward, just as the original functions in the forward pass did.
The relationship between backward and double-backward is conceptually the same as the relationship
between forward and backward: The functions still emit current-sequence-number-tagged ranges,
the Function objects they create still stash those sequence numbers, and during the eventual
double-backward, the Function objects' ``apply()`` ranges are still tagged with ``stashed seq``
numbers, which can be compared to `seq` numbers from the backward pass.
.. warning:
The sequence number is thread-local, and some forward functions don't create an associated
backward Function object (instead delegating that to sub-functions further down the call chain).
For these reasons, the correspondence of stashed sequence numbers in
backward Function ``apply()`` ranges with `seq` numbers in forward-pass ranges is
not guaranteed to be 1 to 1. The sequence numbers alone may not be enough to fully
disambiguate which forward function created which
backward Function object. You may need to make a judgment based on analytic knowledge of what
the expected correspondence should be.
"""
def __init__(self, enabled=True, record_shapes=False):
self.enabled = enabled
self.entered = False
self.record_shapes = record_shapes
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("NVTX annotation context manager is not reentrant")
self.entered = True
torch.cuda.synchronize()
torch.autograd._enable_profiler_legacy(
torch.autograd.ProfilerConfig(
torch.autograd.ProfilerState.NVTX,
self.record_shapes,
False,
False,
False)
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
torch.cuda.synchronize()
torch.autograd._disable_profiler_legacy()
return False
def load_nvprof(path):
"""Opens an nvprof trace file and parses autograd annotations.
Args:
path (str): path to nvprof trace
"""
return EventList(parse_nvprof_trace(path))
################################################################################
# FunctionEvent
def format_time(time_us):
"""Defines how to format time in FunctionEvent"""
US_IN_SECOND = 1000.0 * 1000.0
US_IN_MS = 1000.0
if time_us >= US_IN_SECOND:
return '{:.3f}s'.format(time_us / US_IN_SECOND)
if time_us >= US_IN_MS:
return '{:.3f}ms'.format(time_us / US_IN_MS)
return '{:.3f}us'.format(time_us)
def format_time_share(time_us, total_time_us):
"""Defines how to format time in FunctionEvent"""
if total_time_us == 0:
assert time_us == 0, "Expected time_us == 0 but got {}".format(time_us)
return "NaN"
return '{:.2f}%'.format(time_us * 100.0 / total_time_us)
def format_memory(nbytes):
"""Returns a formatted memory size string"""
KB = 1024
MB = 1024 * KB
GB = 1024 * MB
if (abs(nbytes) >= GB):
return '{:.2f} Gb'.format(nbytes * 1.0 / GB)
elif (abs(nbytes) >= MB):
return '{:.2f} Mb'.format(nbytes * 1.0 / MB)
elif (abs(nbytes) >= KB):
return '{:.2f} Kb'.format(nbytes * 1.0 / KB)
else:
return str(nbytes) + ' b'
def attr_formatter(name):
return property(lambda self: format_time(getattr(self, name)))
class FormattedTimesMixin(object):
"""Helpers for FunctionEvent and FunctionEventAvg.
The subclass should define `*_time_total` and `count` attributes.
"""
cpu_time_str = attr_formatter('cpu_time')
cuda_time_str = attr_formatter('cuda_time')
cpu_time_total_str = attr_formatter('cpu_time_total')
cuda_time_total_str = attr_formatter('cuda_time_total')
self_cpu_time_total_str = attr_formatter('self_cpu_time_total')
self_cuda_time_total_str = attr_formatter('self_cuda_time_total')
@property
def cpu_time(self):
return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count # type: ignore[attr-defined]
@property
def cuda_time(self):
return 0.0 if self.count == 0 else 1.0 * self.cuda_time_total / self.count # type: ignore[attr-defined]
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
def elapsed_us(self):
return self.end - self.start
Kernel = namedtuple('Kernel', ['name', 'device', 'duration'])
class FunctionEvent(FormattedTimesMixin):
"""Profiling information about a single function."""
def __init__(
self, id, name, thread, start_us, end_us, fwd_thread=None, input_shapes=None,
stack=None, scope=0, cpu_memory_usage=0, cuda_memory_usage=0, is_async=False,
is_remote=False, sequence_nr=-1, node_id=-1, device_type=DeviceType.CPU, device_index=0,
is_legacy=False, flops=None, trace_name=None):
self.id: int = id
self.node_id: int = node_id
self.name: str = name
self.trace_name: str = trace_name
self.time_range: Interval = Interval(start_us, end_us)
self.thread: int = thread
self.fwd_thread: Optional[int] = fwd_thread
self.kernels: List[Kernel] = []
self.count: int = 1
self.cpu_children: List[FunctionEvent] = []
self.cpu_parent: Optional[FunctionEvent] = None
self.input_shapes: Tuple[int, ...] = input_shapes
self.stack: List = stack
self.scope: int = scope
self.cpu_memory_usage: int = cpu_memory_usage
self.cuda_memory_usage: int = cuda_memory_usage
self.is_async: bool = is_async
self.is_remote: bool = is_remote
self.sequence_nr: int = sequence_nr
self.device_type: DeviceType = device_type
self.device_index: int = device_index
self.is_legacy: bool = is_legacy
self.flops: Optional[float] = flops
def append_kernel(self, name, device, duration):
assert self.device_type == DeviceType.CPU
self.kernels.append(Kernel(name, device, duration))
def append_cpu_child(self, child):
"""Append a CPU child of type FunctionEvent.
One is supposed to append only direct children to the event
so that self cpu time is reported correctly.
"""
assert(self.device_type == DeviceType.CPU)
assert(isinstance(child, FunctionEvent))
assert(child.device_type == DeviceType.CPU)
self.cpu_children.append(child)
def set_cpu_parent(self, parent):
"""Set the immediate CPU parent of type FunctionEvent
One profiling FunctionEvent should have only one CPU parent such that
the child's range interval is completely inside the parent's. We use
this connection to determine whether the event is from a top-level op or not.
"""
assert(self.device_type == DeviceType.CPU)
assert(isinstance(parent, FunctionEvent))
assert(parent.device_type == DeviceType.CPU)
self.cpu_parent = parent
# Note: async events don't have children, are not used when computing 'self'
# metrics of other events, have only total cpu time
@property
def self_cpu_memory_usage(self):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cpu_memory_usage - sum(
[child.cpu_memory_usage for child in self.cpu_children]
)
@property
def self_cuda_memory_usage(self):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cuda_memory_usage - sum(
[child.cuda_memory_usage for child in self.cpu_children]
)
@property
def self_cpu_time_total(self):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cpu_time_total - sum(
[child.cpu_time_total for child in self.cpu_children]
)
@property
def cuda_time_total(self):
if self.is_async:
return 0
if self.device_type == DeviceType.CPU:
if not self.is_legacy:
# account for the kernels in the children ops
return (sum(kinfo.duration for kinfo in self.kernels) +
sum(ch.cuda_time_total for ch in self.cpu_children))
else:
# each legacy cpu event has a single (fake) kernel
return sum(kinfo.duration for kinfo in self.kernels)
else:
assert self.device_type == DeviceType.CUDA
return self.time_range.elapsed_us()
@property
def self_cuda_time_total(self):
if self.is_async:
return 0
if self.device_type == DeviceType.CPU:
return self.cuda_time_total - \
sum([child.cuda_time_total for child in self.cpu_children])
else:
assert(self.device_type == DeviceType.CUDA)
return self.cuda_time_total
@property
def cpu_time_total(self):
if self.device_type == DeviceType.CPU:
return self.time_range.elapsed_us()
else:
return 0
@property
def key(self):
return self.name
def __repr__(self):
return (
'<FunctionEvent id={} name={} device_type={} node_id={} cpu_time={} start_us={} end_us={} '
'cpu_children={} cuda_time={} name={} thread={} input_shapes={} '
'cpu_memory_usage={} cuda_memory_usage={} is_async={} is_remote={} seq_nr={} is_legacy={}>'.format(
self.id,
self.name,
self.device_type,
self.node_id,
self.cpu_time_str,
self.time_range.start,
self.time_range.end,
str([child.id for child in self.cpu_children]),
self.cuda_time_str,
self.name,
self.thread,
str(self.input_shapes),
self.cpu_memory_usage,
self.cuda_memory_usage,
self.is_async,
self.is_remote,
self.sequence_nr,
self.is_legacy,
)
)
class FunctionEventAvg(FormattedTimesMixin):
"""Used to average stats over multiple FunctionEvent objects."""
def __init__(self):
self.key: Optional[str] = None
self.count: int = 0
self.node_id: int = 0
self.is_async: bool = False
self.is_remote: bool = False
self.cpu_time_total: int = 0
self.cuda_time_total: int = 0
self.self_cpu_time_total: int = 0
self.self_cuda_time_total: int = 0
self.input_shapes: Optional[List[List[int]]] = None
self.stack: Optional[List] = None
self.scope: Optional[int] = None
self.cpu_memory_usage: int = 0
self.cuda_memory_usage: int = 0
self.self_cpu_memory_usage: int = 0
self.self_cuda_memory_usage: int = 0
self.cpu_children: Optional[List[FunctionEvent]] = None
self.cpu_parent: Optional[FunctionEvent] = None
self.device_type: DeviceType = DeviceType.CPU
self.is_legacy: bool = False
self.flops: float = 0.0
def add(self, other):
if self.key is None:
# First function being recorded as part of FunctionEventAvg, propagate
# fields.
self.key = other.key
self.node_id = other.node_id
self.is_async = other.is_async
self.is_remote = other.is_remote
self.cpu_parent = other.cpu_parent
self.cpu_children = other.cpu_children
self.input_shapes = other.input_shapes
self.stack = other.stack
self.scope = other.scope
self.device_type = other.device_type
self.is_legacy = other.is_legacy
assert isinstance(other, (FunctionEvent, FunctionEventAvg))
assert other.key == self.key
self.cpu_time_total += other.cpu_time_total
self.cuda_time_total += other.cuda_time_total
self.self_cpu_time_total += other.self_cpu_time_total
self.self_cuda_time_total += other.self_cuda_time_total
self.cpu_memory_usage += other.cpu_memory_usage
self.cuda_memory_usage += other.cuda_memory_usage
self.self_cpu_memory_usage += other.self_cpu_memory_usage
self.self_cuda_memory_usage += other.self_cuda_memory_usage
self.count += other.count
if self.flops is None:
self.flops = other.flops
elif other.flops is not None:
self.flops += other.flops
return self
def __iadd__(self, other):
return self.add(other)
def __repr__(self):
return (
'<FunctionEventAvg key={} self_cpu_time={} cpu_time={} '
' self_cuda_time={} cuda_time={} input_shapes={} '
'cpu_memory_usage={} cuda_memory_usage={}>'.format(
self.key,
self.self_cpu_time_total_str,
self.cpu_time_str,
self.self_cuda_time_total_str,
self.cuda_time_str,
str(self.input_shapes),
self.cpu_memory_usage,
self.cuda_memory_usage,
)
)
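# Example (illustrative sketch): FunctionEventAvg accumulates statistics for all
# events sharing a key. Assuming ``events`` is a list of FunctionEvent objects,
# a manual aggregation would look roughly like this:
#
#   averages = {}
#   for evt in events:
#       averages.setdefault(evt.key, FunctionEventAvg()).add(evt)
#   for avg in averages.values():
#       print(avg.key, avg.cpu_time_str, avg.count)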
################################################################################
# Utilities
class StringTable(defaultdict):
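    """Caches demangled event names so each mangled name is demangled at most once."""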
def __missing__(self, key):
        # Manage cases like 't' (which demangles to 'unsigned short') separately;
        # for now, simply check the length to avoid unexpected results for
        # short sequences.
self[key] = torch._C._demangle(key) if len(key) > 1 else key
return self[key]
def filter_stack_entry(entry):
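    """Return True if a stack entry should be kept, i.e. it is not one of the
    profiler/autograd bookkeeping frames listed below."""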
filtered_entries = [
("autograd/__init__", "_make_grads"),
("autograd/__init__", "backward"),
("torch/tensor", "backward"),
("_internal/common_utils", "prof_callable"),
("_internal/common_utils", "prof_func_call"),
("_internal/common_utils", "prof_meth_call"),
]
return all([not (f[0] in entry and f[1] in entry) for f in filtered_entries])
def filter_name(name):
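    """Return True if the event is an internal profiler utility op that should
    be ignored in the results."""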
# ignoring the following utility ops
filtered_out_names = [
"profiler::_record_function_enter",
"profiler::_record_function_exit",
"aten::is_leaf",
"aten::output_nr",
"aten::_version",
]
return name in filtered_out_names
# Demangles and optionally rewrites the provided event name.
# with_wildcard - whether to replace certain numbered event names with a
#   wildcard name so they are aggregated together in the profiler table output.
def rewrite_name(name, with_wildcard=False):
string_table = StringTable()
name = string_table[name]
if with_wildcard:
if name.startswith("ProfilerStep#"):
name = "ProfilerStep*"
return name
# Parsing of kineto profiler events
def parse_kineto_results(result):
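    """Parse the raw kineto profiler output into a time-sorted list of FunctionEvents."""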
# result.events() has most of the events - PyTorch op-level and device-level events
# result.legacy_events() has events not yet ported to kineto
# (e.g. start/stop marks, tensor memory allocator events)
# First, find __start_profile mark to get the absolute time of the start of the trace;
# save memory allocation records
start_record = None
mem_records = []
for record in itertools.chain(*result.legacy_events()):
if record.kind() == 'mark' and record.name() == '__start_profile':
assert start_record is None
start_record = record
if record.kind() == 'memory_alloc':
mem_records.append([record, False])
assert start_record is not None, "Invalid profiler output, __start_profile is missing"
# Create and return FunctionEvent list
function_events = []
cuda_corr_map: Dict[int, List[FunctionEvent]] = {}
for kineto_event in result.events():
if filter_name(kineto_event.name()):
continue
rel_start_us = kineto_event.start_us() - start_record.start_us()
rel_end_us = rel_start_us + kineto_event.duration_us()
abs_end_us = kineto_event.start_us() + kineto_event.duration_us()
cpu_memory_usage = 0
cuda_memory_usage = 0
if kineto_event.device_type() == DeviceType.CPU:
# find the corresponding memory allocation events
for mem_record in mem_records:
if (mem_record[0].start_us() >= kineto_event.start_us() and
mem_record[0].start_us() <= abs_end_us):
cpu_memory_usage += mem_record[0].cpu_memory_usage()
cuda_memory_usage += mem_record[0].cuda_memory_usage()
mem_record[1] = True
is_async = kineto_event.start_thread_id() != kineto_event.end_thread_id()
fe = FunctionEvent(
id=kineto_event.correlation_id(),
name=rewrite_name(name=kineto_event.name(), with_wildcard=True),
trace_name=rewrite_name(name=kineto_event.name(), with_wildcard=False),
thread=kineto_event.start_thread_id(),
start_us=rel_start_us,
end_us=rel_end_us,
fwd_thread=kineto_event.fwd_thread_id(),
input_shapes=kineto_event.shapes(),
stack=[entry for entry in kineto_event.stack() if filter_stack_entry(entry)],
scope=kineto_event.scope(),
cpu_memory_usage=cpu_memory_usage,
cuda_memory_usage=cuda_memory_usage,
is_async=is_async,
sequence_nr=kineto_event.sequence_nr(),
device_type=kineto_event.device_type(),
device_index=kineto_event.device_index(),
flops=kineto_event.flops(),
)
function_events.append(fe)
corr_id = kineto_event.linked_correlation_id()
if corr_id > 0:
if corr_id not in cuda_corr_map:
cuda_corr_map[corr_id] = []
cuda_corr_map[corr_id].append(fe)
# associate CUDA kernels and CUDA runtime (CPU) with CPU events
for fe in function_events:
if (fe.device_type == DeviceType.CPU and not fe.is_async and
fe.id in cuda_corr_map):
for f_evt in cuda_corr_map[fe.id]:
if f_evt.device_type == DeviceType.CUDA:
fe.append_kernel(
f_evt.name,
f_evt.device_index,
f_evt.time_range.end - f_evt.time_range.start)
elif f_evt.device_type == DeviceType.CPU:
# make sure that 'thread' of a CPU Kineto (e.g. CUDA Runtime) event is associated
# with the 'thread' of the corresponding linked PyTorch event to properly track
# parents and children
f_evt.thread = fe.thread
# output top-level memory events
for mem_record in mem_records:
if not mem_record[1]:
rel_start_us = mem_record[0].start_us() - start_record.start_us()
fe = FunctionEvent(
id=mem_record[0].handle(),
name="[memory]",
trace_name=None, # not outputting in the trace
thread=mem_record[0].thread_id(),
start_us=rel_start_us,
end_us=rel_start_us, # no duration
fwd_thread=mem_record[0].fwd_thread_id(),
input_shapes=[],
stack=[],
scope=mem_record[0].scope(),
cpu_memory_usage=mem_record[0].cpu_memory_usage(),
cuda_memory_usage=mem_record[0].cuda_memory_usage(),
is_async=False,
sequence_nr=-1,
device_type=DeviceType.CPU,
device_index=0,
)
function_events.append(fe)
function_events.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end])
return function_events
# Parsing of legacy profiler events
def parse_legacy_records(thread_records):
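    """Parse legacy autograd profiler records into a time-sorted list of
    FunctionEvents by matching push/pop record pairs per thread."""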
def get_record_key(record):
"""
Returns a tuple to be used by parse_legacy_records for correlating start and
end records.
"""
return (record.handle(), record.node_id())
next_id = 0
start_record = None
functions = []
record_stack = []
# '__start_profile' is not guaranteed to be first, so we must find it here
for record in itertools.chain(*thread_records):
name = record.name()
if start_record is None and name == '__start_profile':
start_record = record
assert start_record is not None and not start_record.is_remote()
for thread_record_list in thread_records:
# accumulated memory allocations per handle
cpu_memory_allocs = {}
cuda_memory_allocs = {}
# ranges per handle
range_starts = {}
filtered_handles = set()
prev_record = None
for record in thread_record_list:
record_key = get_record_key(record)
if (filter_name(record.name()) or
record_key in filtered_handles):
filtered_handles.add(record_key)
continue
if record.kind() == 'push':
# workaround to reduce double logging from operator
# wrappers and redispatch
if prev_record is not None:
duplicate = (
prev_record.name() == record.name()
and prev_record.kind() == record.kind()
and prev_record.node_id() == record.node_id()
)
if duplicate:
filtered_handles.add(record_key)
continue
range_starts[record_key] = record
cpu_memory_allocs[record_key] = 0
cuda_memory_allocs[record_key] = 0
elif record.kind() == 'pop':
assert (
record_key in range_starts
), """Expected record with key {} to exist in range_starts.
This means that the pop event did not have a corresponding push.""".format(
record_key
)
start = range_starts[record_key]
cpu_memory_usage = cpu_memory_allocs[record_key]
cuda_memory_usage = cuda_memory_allocs[record_key]
is_async = start.thread_id() != record.thread_id()
is_remote_event = record.is_remote()
start_flops = start.flops()
fe = FunctionEvent(
id=record.handle(),
node_id=record.node_id(),
name=rewrite_name(name=start.name(), with_wildcard=True),
trace_name=rewrite_name(name=start.name(), with_wildcard=False),
thread=start.thread_id(),
start_us=start_record.cpu_elapsed_us(start),
end_us=start_record.cpu_elapsed_us(record),
fwd_thread=start.fwd_thread_id(),
input_shapes=start.shapes(),
stack=[entry for entry in start.stack() if filter_stack_entry(entry)],
scope=start.scope(),
cpu_memory_usage=cpu_memory_usage,
cuda_memory_usage=cuda_memory_usage,
is_async=is_async,
is_remote=is_remote_event,
sequence_nr=start.sequence_nr(),
device_type=DeviceType.CPU,
is_legacy=True,
flops=start_flops,
)
# note: async events have only cpu total time
if not is_async and start.has_cuda():
duration = start.cuda_elapsed_us(record)
if duration > 0:
fe.append_kernel(
start.name(),
start.device(),
duration)
functions.append(fe)
del range_starts[record_key]
del cpu_memory_allocs[record_key]
del cuda_memory_allocs[record_key]
elif record.kind() == 'memory_alloc':
num_open_handles_cpu = len(cpu_memory_allocs)
num_open_handles_cuda = len(cuda_memory_allocs)
assert num_open_handles_cpu == num_open_handles_cuda
for handle in cpu_memory_allocs.keys():
cpu_memory_allocs[handle] += record.cpu_memory_usage()
for handle in cuda_memory_allocs.keys():
cuda_memory_allocs[handle] += record.cuda_memory_usage()
if num_open_handles_cpu == 0:
# output event as a top-level memory event
fe = FunctionEvent(
id=0,
name="[memory]",
trace_name=None,
thread=0,
start_us=0,
end_us=0,
stack=[],
cpu_memory_usage=record.cpu_memory_usage(),
cuda_memory_usage=record.cuda_memory_usage(),
is_legacy=True,
)
functions.append(fe)
prev_record = record
# Sort functions by start time then by end time ascending.
# This ensures that--in the case of nested events which
# have the same start time (which may happen due to the
# granularity of the given clock tick)--we always show
# the outermost nested call first. This adds stability
# in how FunctionEvents appear
functions.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end])
return functions
################################################################################
# CUDA checkpoints
class EnforceUnique(object):
"""Raises an error if a key is seen more than once."""
def __init__(self):
self.seen = set()
def see(self, *key):
if key in self.seen:
raise RuntimeError('duplicate key: ' + str(key))
self.seen.add(key)
def parse_nvprof_trace(path):
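    """Parse an nvprof-generated sqlite trace into a list of FunctionEvents
    with their associated CUDA kernels."""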
import sqlite3
conn = sqlite3.connect(path)
conn.row_factory = sqlite3.Row
# Parse strings table
strings = {}
for r in conn.execute("SELECT _id_ as id, value FROM StringTable"):
strings[r["id"]] = torch._C._demangle(r["value"])
# First, find all functions and create FunctionEvents for them
marker_query = """
SELECT
start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time
FROM
CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
ON start.id = end.id
WHERE
start.name != 0 AND end.name = 0
"""
functions = []
functions_map = {}
unique = EnforceUnique()
for row in conn.execute(marker_query):
unique.see(row['marker_id'])
evt = FunctionEvent(id=row['marker_id'],
node_id=0, # missing a node_id when calling FunctionEvent. This is just to ensure
# that pytorch doesn't crash when creating a FunctionEvent() object
name=strings[row['name']],
start_us=row['start_time'],
end_us=row['end_time'],
thread=0) # TODO: find in sqlite database
functions.append(evt)
functions_map[evt.id] = evt
# Now, correlate all kernels with FunctionEvents
kernel_query = """
SELECT
start.id AS marker_id, start.name, start.timestamp, end.timestamp,
runtime._id_ AS runtime_id, runtime.cbid, runtime.start AS runtime_start, runtime.end AS runtime_end,
kernel.start AS kernel_start, kernel.end AS kernel_end, kernel.name AS kernel_name
FROM
CUPTI_ACTIVITY_KIND_MARKER AS start
INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
ON start.id = end.id
INNER JOIN CUPTI_ACTIVITY_KIND_RUNTIME as runtime
ON (start.timestamp < runtime.start AND runtime.end < end.timestamp)
INNER JOIN CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL AS kernel
ON kernel.correlationId = runtime.correlationId
"""
unique = EnforceUnique()
for row in conn.execute(kernel_query):
unique.see(row['marker_id'], row['runtime_id'])
# 211 is cudaKernelLaunch for cuda >= 9.2; 13 is for older cuda versions
assert (row['cbid'] == 211) or (row['cbid'] == 13)
evt = functions_map[row['marker_id']]
evt.append_kernel(row['kernel_name'],
0,
row['kernel_end'] - row['kernel_start'])
functions.sort(key=lambda evt: evt.time_range.start)
return functions
################################################################################
# Pretty printer
def build_table(
events,
sort_by=None,
header=None,
row_limit=100,
max_src_column_width=75,
with_flops=False,
profile_memory=False,
top_level_events_only=False):
"""Prints a summary of events (which can be a list of FunctionEvent or FunctionEventAvg)."""
if len(events) == 0:
return ""
has_cuda_time = any([event.self_cuda_time_total > 0 for event in events])
has_cuda_mem = any([event.self_cuda_memory_usage > 0 for event in events])
has_input_shapes = any(
[(event.input_shapes is not None and len(event.input_shapes) > 0) for event in events])
if sort_by is not None:
events = EventList(sorted(
events, key=lambda evt: getattr(evt, sort_by), reverse=True
), use_cuda=has_cuda_time, profile_memory=profile_memory, with_flops=with_flops)
MAX_NAME_COLUMN_WIDTH = 55
name_column_width = max([len(evt.key) for evt in events]) + 4
name_column_width = min(name_column_width, MAX_NAME_COLUMN_WIDTH)
DEFAULT_COLUMN_WIDTH = 12
shapes_column_width = max([len(str(evt.input_shapes)) for evt in events]) + 4
shapes_column_width = min(shapes_column_width, 45)
flops_column_width = DEFAULT_COLUMN_WIDTH
src_column_width = None
stacks = []
for evt in events:
if evt.stack is not None and len(evt.stack) > 0:
stacks.append(evt.stack)
has_stack = len(stacks) > 0
if has_stack:
src_column_width = max([max([len(entry) for entry in stack]) for stack in stacks]) + 4
src_column_width = min(src_column_width, max_src_column_width)
headers = [
'Name',
'Self CPU %',
'Self CPU',
'CPU total %',
'CPU total',
'CPU time avg',
]
if has_cuda_time:
headers.extend([
'Self CUDA',
'Self CUDA %',
'CUDA total',
'CUDA time avg',
])
if profile_memory:
headers.extend([
'CPU Mem',
'Self CPU Mem',
])
if has_cuda_mem:
headers.extend([
'CUDA Mem',
'Self CUDA Mem',
])
headers.append(
'# of Calls'
)
# Only append Node ID if any event has a valid (>= 0) Node ID
append_node_id = any([evt.node_id != -1 for evt in events])
if append_node_id:
headers.append('Node ID')
# Have to use a list because nonlocal is Py3 only...
SPACING_SIZE = 2
row_format_lst = [""]
header_sep_lst = [""]
line_length_lst = [-SPACING_SIZE]
MAX_STACK_ENTRY = 5
def add_column(padding, text_dir='>'):
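        """Append a column of the given width to the row format string, the
        header separator and the running line length."""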
row_format_lst[0] += '{: ' + text_dir + str(padding) + '}' + (' ' * SPACING_SIZE)
header_sep_lst[0] += '-' * padding + (' ' * SPACING_SIZE)
line_length_lst[0] += padding + SPACING_SIZE
def auto_scale_flops(flops):
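        """Pick a FLOPS unit based on the magnitude of ``flops`` and return
        (scale_factor, unit_header)."""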
flop_headers = [
'FLOPS',
'KFLOPS',
'MFLOPS',
'GFLOPS',
'TFLOPS',
'PFLOPS',
]
assert flops > 0
log_flops = max(0, min(math.log10(flops) / 3, float(len(flop_headers) - 1)))
assert log_flops >= 0 and log_flops < len(flop_headers)
return (pow(10, (math.floor(log_flops) * -3.0)), flop_headers[int(log_flops)])
add_column(name_column_width)
for _ in headers[1:]:
add_column(DEFAULT_COLUMN_WIDTH)
if has_input_shapes:
headers.append('Input Shapes')
add_column(shapes_column_width)
if has_stack:
headers.append('Source Location')
add_column(src_column_width, text_dir='<')
if with_flops:
# Auto-scaling of flops header
US_IN_SECOND = 1000.0 * 1000.0 # cpu_time_total is in us
raw_flops = []
for evt in events:
if evt.flops > 0:
if evt.cuda_time_total != 0:
evt.flops = float(evt.flops) / evt.cuda_time_total * US_IN_SECOND
else:
evt.flops = float(evt.flops) / evt.cpu_time_total * US_IN_SECOND
raw_flops.append(evt.flops)
if len(raw_flops) != 0:
(flops_scale, flops_header) = auto_scale_flops(min(raw_flops))
headers.append(flops_header)
add_column(flops_column_width)
else:
with_flops = False # can't find any valid flops
row_format = row_format_lst[0]
header_sep = header_sep_lst[0]
line_length = line_length_lst[0]
add_column = None # type: ignore[assignment]
# Have to use a list because nonlocal is Py3 only...
result = []
def append(s):
result.append(s)
result.append('\n') # Yes, newline after the end as well
sum_self_cpu_time_total = sum([event.self_cpu_time_total for event in events])
sum_self_cuda_time_total = 0
for evt in events:
if evt.device_type == DeviceType.CPU:
# in legacy profiler, kernel info is stored in cpu events
if evt.is_legacy:
sum_self_cuda_time_total += evt.self_cuda_time_total
elif evt.device_type == DeviceType.CUDA:
            # in the kineto profiler, there are events with the correct device type (e.g. CUDA)
sum_self_cuda_time_total += evt.self_cuda_time_total
# Actual printing
if header is not None:
append('=' * line_length)
append(header)
if top_level_events_only:
append('=' * line_length)
        append('This report only displays top-level ops statistics')
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
def trim_path(path, src_column_width):
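        """Trim ``path`` from the left so it fits in ``src_column_width``,
        replacing its first characters with '...'."""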
if len(path) > src_column_width:
offset = len(path) - src_column_width
path = path[offset:]
if len(path) > 3:
path = "..." + path[3:]
return path
event_limit = 0
for evt in events:
if event_limit == row_limit:
break
if top_level_events_only and evt.cpu_parent is not None:
continue
else:
event_limit += 1
name = evt.key
if len(name) >= MAX_NAME_COLUMN_WIDTH - 3:
name = name[:(MAX_NAME_COLUMN_WIDTH - 3)] + "..."
row_values = [
name,
# Self CPU total %, 0 for async events.
format_time_share(evt.self_cpu_time_total,
sum_self_cpu_time_total),
evt.self_cpu_time_total_str, # Self CPU total
# CPU total %, 0 for async events.
format_time_share(evt.cpu_time_total, sum_self_cpu_time_total) if not evt.is_async else 0,
evt.cpu_time_total_str, # CPU total
evt.cpu_time_str, # CPU time avg
]
if has_cuda_time:
row_values.extend([
evt.self_cuda_time_total_str,
# CUDA time total %
format_time_share(evt.self_cuda_time_total, sum_self_cuda_time_total),
evt.cuda_time_total_str,
                evt.cuda_time_str, # CUDA time avg
])
if profile_memory:
row_values.extend([
# CPU Mem Total
format_memory(evt.cpu_memory_usage),
# Self CPU Mem Total
format_memory(evt.self_cpu_memory_usage),
])
if has_cuda_mem:
row_values.extend([
# CUDA Mem Total
format_memory(evt.cuda_memory_usage),
# Self CUDA Mem Total
format_memory(evt.self_cuda_memory_usage),
])
row_values.append(
evt.count, # Number of calls
)
if append_node_id:
row_values.append(evt.node_id)
if has_input_shapes:
row_values.append(str(evt.input_shapes)[:shapes_column_width])
if with_flops:
if evt.flops <= 0.0:
row_values.append("--")
else:
row_values.append('{0:8.3f}'.format(evt.flops * flops_scale))
if has_stack:
src_field = ""
if len(evt.stack) > 0:
src_field = trim_path(evt.stack[0], src_column_width)
row_values.append(src_field)
append(row_format.format(*row_values))
if has_stack:
empty_headers = [""] * (len(headers) - 1)
for entry in evt.stack[1:MAX_STACK_ENTRY]:
append(row_format.format(*(empty_headers + [trim_path(entry, src_column_width)])))
empty_headers.append("")
append(row_format.format(*empty_headers))
append(header_sep)
append("Self CPU time total: {}".format(format_time(sum_self_cpu_time_total)))
if has_cuda_time:
append("Self CUDA time total: {}".format(format_time(sum_self_cuda_time_total)))
return ''.join(result)
the-stack_0_9096 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import six
import logging
import filecmp
import os
import re
import sys
import uuid
import json
import time
import tempfile
from nose.tools import assert_raises, assert_equals, assert_true, assert_in
import shutil
from mock import patch
import synapseclient
import synapseclient.utils as utils
import synapseclient.__main__ as cmdline
from synapseclient.evaluation import Evaluation
import integration
from integration import schedule_for_cleanup, QUERY_TIMEOUT_SEC
if six.PY2:
from StringIO import StringIO
else:
from io import StringIO
def setup_module(module):
module.syn = integration.syn
module.project = integration.project
module.parser = cmdline.build_parser()
# used for --description and --descriptionFile tests
module.upload_filename = _create_temp_file_with_cleanup()
module.description_text = "'some description text'"
module.desc_filename = _create_temp_file_with_cleanup(module.description_text)
module.update_description_text = \
"'SOMEBODY ONCE TOLD ME THE WORLD WAS GONNA ROLL ME I AINT THE SHARPEST TOOL IN THE SHED'"
def run(*command, **kwargs):
"""
Sends the given command list to the command line client.
:returns: The STDOUT output of the command.
"""
old_stdout = sys.stdout
capturedSTDOUT = StringIO()
syn_client = kwargs.get('syn', syn)
stream_handler = logging.StreamHandler(capturedSTDOUT)
try:
sys.stdout = capturedSTDOUT
syn_client.logger.addHandler(stream_handler)
sys.argv = [item for item in command]
args = parser.parse_args()
args.debug = True
cmdline.perform_main(args, syn_client)
except SystemExit:
pass # Prevent the test from quitting prematurely
finally:
sys.stdout = old_stdout
syn_client.logger.handlers.remove(stream_handler)
capturedSTDOUT = capturedSTDOUT.getvalue()
return capturedSTDOUT
def parse(regex, output):
"""Returns the first match."""
m = re.search(regex, output)
if m:
if len(m.groups()) > 0:
return m.group(1).strip()
else:
raise Exception('ERROR parsing output: "' + str(output) + '"')
def test_command_line_client():
# Create a Project
output = run('synapse',
'--skip-checks',
'create',
'-name',
str(uuid.uuid4()),
'-description',
'test of command line client',
'Project')
project_id = parse(r'Created entity:\s+(syn\d+)\s+', output)
schedule_for_cleanup(project_id)
# Create a File
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'add',
'-name',
'BogusFileEntity',
'-description',
'Bogus data to test file upload',
'-parentid',
project_id,
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we stored the file in Synapse
f1 = syn.get(file_entity_id)
fh = syn._getFileHandle(f1.dataFileHandleId)
assert_equals(fh['concreteType'], 'org.sagebionetworks.repo.model.file.S3FileHandle')
# Get File from the command line
output = run('synapse',
'--skip-checks',
'get',
file_entity_id)
downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
schedule_for_cleanup(downloaded_filename)
assert_true(os.path.exists(downloaded_filename))
assert_true(filecmp.cmp(filename, downloaded_filename))
# Update the File
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'store',
'--id',
file_entity_id,
filename)
# Get the File again
output = run('synapse',
'--skip-checks',
'get',
file_entity_id)
downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
schedule_for_cleanup(downloaded_filename)
assert_true(os.path.exists(downloaded_filename))
assert_true(filecmp.cmp(filename, downloaded_filename))
# Move the file to new folder
folder = syn.store(synapseclient.Folder(parentId=project_id))
output = run('synapse',
'mv',
'--id',
file_entity_id,
'--parentid',
folder.id)
movedFile = syn.get(file_entity_id, downloadFile=False)
assert_equals(movedFile.parentId, folder.id)
# Test Provenance
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
output = run('synapse',
'--skip-checks',
'set-provenance',
'-id',
file_entity_id,
'-name',
'TestActivity',
'-description',
'A very excellent provenance',
'-used',
file_entity_id,
'-executed',
repo_url)
output = run('synapse',
'--skip-checks',
'get-provenance',
'--id',
file_entity_id)
activity = json.loads(output)
assert_equals(activity['name'], 'TestActivity')
assert_equals(activity['description'], 'A very excellent provenance')
used = utils._find_used(activity, lambda used: 'reference' in used)
assert_equals(used['reference']['targetId'], file_entity_id)
used = utils._find_used(activity, lambda used: 'url' in used)
assert_equals(used['url'], repo_url)
assert_true(used['wasExecuted'])
# Note: Tests shouldn't have external dependencies
# but this is a pretty picture of Singapore
singapore_url = 'http://upload.wikimedia.org/wikipedia/commons/' \
'thumb/3/3e/1_singapore_city_skyline_dusk_panorama_2011.jpg' \
'/1280px-1_singapore_city_skyline_dusk_panorama_2011.jpg'
# Test external file handle
output = run('synapse',
'--skip-checks',
'add',
'-name',
'Singapore',
'-description',
'A nice picture of Singapore',
'-parentid',
project_id,
singapore_url)
    external_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we created an external file handle
    f2 = syn.get(external_entity_id)
fh = syn._getFileHandle(f2.dataFileHandleId)
assert_equals(fh['concreteType'], 'org.sagebionetworks.repo.model.file.ExternalFileHandle')
output = run('synapse',
'--skip-checks',
'get',
                 external_entity_id)
downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
schedule_for_cleanup(downloaded_filename)
assert_true(os.path.exists(downloaded_filename))
# Delete the Project
run('synapse', '--skip-checks', 'delete', project_id)
def test_command_line_client_annotations():
# Create a Project
output = run('synapse',
'--skip-checks',
'create',
'-name',
str(uuid.uuid4()),
'-description',
'test of command line client',
'Project')
project_id = parse(r'Created entity:\s+(syn\d+)\s+', output)
schedule_for_cleanup(project_id)
# Create a File
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'add',
'-name',
'BogusFileEntity',
'-description',
'Bogus data to test file upload',
'-parentid',
project_id,
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Test setting annotations
run('synapse',
'--skip-checks',
'set-annotations',
'--id',
file_entity_id,
'--annotations',
'{"foo": 1, "bar": "1", "baz": [1, 2, 3]}')
# Test getting annotations
# check that the three things set are correct
# This test should be adjusted to check for equality of the
# whole annotation dictionary once the issue of other
# attributes (creationDate, eTag, id, uri) being returned is resolved
# See: https://sagebionetworks.jira.com/browse/SYNPY-175
output = run('synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id)
annotations = json.loads(output)
assert_equals(annotations['foo'], [1])
assert_equals(annotations['bar'], [u"1"])
assert_equals(annotations['baz'], [1, 2, 3])
# Test setting annotations by replacing existing ones.
output = run('synapse',
'--skip-checks',
'set-annotations',
'--id',
file_entity_id,
'--annotations',
'{"foo": 2}',
'--replace')
# Test that the annotation was updated
output = run('synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id)
annotations = json.loads(output)
assert_equals(annotations['foo'], [2])
# Since this replaces the existing annotations, previous values
# Should not be available.
assert_raises(KeyError, lambda key: annotations[key], 'bar')
assert_raises(KeyError, lambda key: annotations[key], 'baz')
# Test running add command to set annotations on a new object
filename2 = utils.make_bogus_data_file()
schedule_for_cleanup(filename2)
output = run('synapse',
'--skip-checks',
'add',
'-name',
'BogusData2',
'-description',
'Bogus data to test file upload with add and add annotations',
'-parentid',
project_id,
'--annotations',
'{"foo": 123}',
filename2)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Test that the annotation was updated
output = run('synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id)
annotations = json.loads(output)
assert_equals(annotations['foo'], [123])
# Test running store command to set annotations on a new object
filename3 = utils.make_bogus_data_file()
schedule_for_cleanup(filename3)
output = run('synapse',
'--skip-checks',
'store',
'--name',
'BogusData3',
'--description',
'\"Bogus data to test file upload with store and add annotations\"',
'--parentid',
project_id,
'--annotations',
'{"foo": 456}',
filename3)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Test that the annotation was updated
output = run('synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id)
annotations = json.loads(output)
assert_equals(annotations['foo'], [456])
def test_command_line_store_and_submit():
# Create a Project
output = run('synapse',
'--skip-checks',
'store',
'--name',
str(uuid.uuid4()),
'--description',
'test of store command',
'--type',
'Project')
project_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
schedule_for_cleanup(project_id)
# Create and upload a file
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'store',
'--description',
'Bogus data to test file upload',
'--parentid',
project_id,
'--file',
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we stored the file in Synapse
f1 = syn.get(file_entity_id)
fh = syn._getFileHandle(f1.dataFileHandleId)
assert_equals(fh['concreteType'], 'org.sagebionetworks.repo.model.file.S3FileHandle')
# Test that entity is named after the file it contains
assert_equals(f1.name, os.path.basename(filename))
# Create an Evaluation to submit to
eval = Evaluation(name=str(uuid.uuid4()), contentSource=project_id)
eval = syn.store(eval)
schedule_for_cleanup(eval)
# Submit a bogus file
output = run('synapse',
'--skip-checks',
'submit',
'--evaluation',
eval.id,
'--name',
'Some random name',
'--entity',
file_entity_id)
submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)
    # testing different command line options for submitting to an evaluation
# submitting to an evaluation by evaluationID
output = run('synapse',
'--skip-checks',
'submit',
'--evalID',
eval.id,
'--name',
'Some random name',
'--alias',
'My Team',
'--entity',
file_entity_id)
submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)
# Update the file
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'store',
'--id',
file_entity_id,
'--file',
filename)
updated_entity_id = parse(r'Updated entity:\s+(syn\d+)', output)
schedule_for_cleanup(updated_entity_id)
# Submit an updated bogus file and this time by evaluation name
output = run('synapse',
'--skip-checks',
'submit',
'--evaluationName',
eval.name,
'--entity',
file_entity_id)
# Tests shouldn't have external dependencies, but here it's required
ducky_url = 'https://www.synapse.org/Portal/clear.cache.gif'
# Test external file handle
output = run('synapse',
'--skip-checks',
'store',
'--name',
'Rubber Ducky',
'--description',
'I like rubber duckies',
'--parentid',
project_id,
'--file',
ducky_url)
    external_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    schedule_for_cleanup(external_entity_id)
# Verify that we created an external file handle
    f2 = syn.get(external_entity_id)
fh = syn._getFileHandle(f2.dataFileHandleId)
assert_equals(fh['concreteType'], 'org.sagebionetworks.repo.model.file.ExternalFileHandle')
# submit an external file to an evaluation and use provenance
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
run('synapse', '--skip-checks', 'submit',
'--evalID', eval.id,
'--file', filename,
'--parent', project_id,
        '--used', external_entity_id,
'--executed', repo_url)
# Delete project
run('synapse', '--skip-checks', 'delete', project_id)
def test_command_get_recursive_and_query():
"""Tests the 'synapse get -r' and 'synapse get -q' functions"""
project_entity = project
# Create Folders in Project
folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()),
parent=project_entity))
folder_entity2 = syn.store(synapseclient.Folder(name=str(uuid.uuid4()),
parent=folder_entity))
# Create and upload two files in sub-Folder
uploaded_paths = []
file_entities = []
for i in range(2):
f = utils.make_bogus_data_file()
uploaded_paths.append(f)
schedule_for_cleanup(f)
file_entity = synapseclient.File(f, parent=folder_entity2)
file_entity = syn.store(file_entity)
file_entities.append(file_entity)
schedule_for_cleanup(f)
# Add a file in the Folder as well
f = utils.make_bogus_data_file()
uploaded_paths.append(f)
schedule_for_cleanup(f)
file_entity = synapseclient.File(f, parent=folder_entity)
file_entity = syn.store(file_entity)
file_entities.append(file_entity)
# get -r uses syncFromSynapse() which uses getChildren(), which is not immediately consistent,
# but faster than chunked queries.
time.sleep(2)
# Test recursive get
run('synapse', '--skip-checks', 'get', '-r', folder_entity.id)
# Verify that we downloaded files:
new_paths = [os.path.join('.', folder_entity2.name, os.path.basename(f)) for f in uploaded_paths[:-1]]
new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1])))
schedule_for_cleanup(folder_entity.name)
for downloaded, uploaded in zip(new_paths, uploaded_paths):
assert_true(os.path.exists(downloaded))
assert_true(filecmp.cmp(downloaded, uploaded))
schedule_for_cleanup(downloaded)
# Test query get using a Table with an entity column
# This should be replaced when Table File Views are implemented in the client
cols = [synapseclient.Column(name='id', columnType='ENTITYID')]
schema1 = syn.store(synapseclient.Schema(name='Foo Table', columns=cols, parent=project_entity))
schedule_for_cleanup(schema1.id)
data1 = [[x.id] for x in file_entities]
syn.store(synapseclient.RowSet(schema=schema1,
rows=[synapseclient.Row(r) for r in data1]))
    time.sleep(3) # get -q is eventually consistent
# Test Table/View query get
output = run('synapse', '--skip-checks', 'get', '-q',
"select id from %s" % schema1.id)
# Verify that we downloaded files:
new_paths = [os.path.join('.', os.path.basename(f)) for f in uploaded_paths[:-1]]
new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1])))
schedule_for_cleanup(folder_entity.name)
for downloaded, uploaded in zip(new_paths, uploaded_paths):
assert_true(os.path.exists(downloaded))
assert_true(filecmp.cmp(downloaded, uploaded))
schedule_for_cleanup(downloaded)
schedule_for_cleanup(new_paths[0])
def test_command_copy():
"""Tests the 'synapse cp' function"""
# Create a Project
project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4())))
schedule_for_cleanup(project_entity.id)
# Create a Folder in Project
folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()),
parent=project_entity))
schedule_for_cleanup(folder_entity.id)
# Create and upload a file in Folder
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
annots = {'test': ['hello_world']}
# Create, upload, and set annotations on a file in Folder
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
file_entity = syn.store(synapseclient.File(filename, parent=folder_entity))
externalURL_entity = syn.store(synapseclient.File(repo_url, name='rand', parent=folder_entity, synapseStore=False))
syn.setAnnotations(file_entity, annots)
syn.setAnnotations(externalURL_entity, annots)
schedule_for_cleanup(file_entity.id)
schedule_for_cleanup(externalURL_entity.id)
# Test cp function
output = run('synapse', '--skip-checks', 'cp', file_entity.id, '--destinationId', project_entity.id)
output_URL = run('synapse', '--skip-checks', 'cp', externalURL_entity.id, '--destinationId', project_entity.id)
copied_id = parse(r'Copied syn\d+ to (syn\d+)', output)
copied_URL_id = parse(r'Copied syn\d+ to (syn\d+)', output_URL)
# Verify that our copied files are identical
copied_ent = syn.get(copied_id)
copied_URL_ent = syn.get(copied_URL_id, downloadFile=False)
schedule_for_cleanup(copied_id)
schedule_for_cleanup(copied_URL_id)
copied_ent_annot = syn.getAnnotations(copied_id)
copied_url_annot = syn.getAnnotations(copied_URL_id)
copied_prov = syn.getProvenance(copied_id)['used'][0]['reference']['targetId']
copied_url_prov = syn.getProvenance(copied_URL_id)['used'][0]['reference']['targetId']
# Make sure copied files are the same
assert_equals(copied_prov, file_entity.id)
assert_equals(copied_ent_annot, annots)
assert_equals(copied_ent.properties.dataFileHandleId, file_entity.properties.dataFileHandleId)
# Make sure copied URLs are the same
assert_equals(copied_url_prov, externalURL_entity.id)
assert_equals(copied_url_annot, annots)
assert_equals(copied_URL_ent.externalURL, repo_url)
assert_equals(copied_URL_ent.name, 'rand')
assert_equals(copied_URL_ent.properties.dataFileHandleId, externalURL_entity.properties.dataFileHandleId)
# Verify that errors are being thrown when a
# file is copied to a folder/project that has a file with the same filename
assert_raises(ValueError, run, 'synapse', '--debug', '--skip-checks', 'cp', file_entity.id,
'--destinationId', project_entity.id)
def test_command_line_using_paths():
# Create a Project
project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4())))
schedule_for_cleanup(project_entity.id)
# Create a Folder in Project
folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=project_entity))
# Create and upload a file in Folder
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
file_entity = syn.store(synapseclient.File(filename, parent=folder_entity))
# Verify that we can use show with a filename
output = run('synapse', '--skip-checks', 'show', filename)
id = parse(r'File: %s\s+\((syn\d+)\)\s+' % os.path.split(filename)[1], output)
assert_equals(file_entity.id, id)
# Verify that limitSearch works by making sure we get the file entity
# that's inside the folder
file_entity2 = syn.store(synapseclient.File(filename, parent=project_entity))
output = run('synapse', '--skip-checks', 'get',
'--limitSearch', folder_entity.id,
filename)
id = parse(r'Associated file: .* with synapse ID (syn\d+)', output)
name = parse(r'Associated file: (.*) with synapse ID syn\d+', output)
assert_equals(file_entity.id, id)
assert_true(utils.equal_paths(name, filename))
# Verify that set-provenance works with filepath
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
output = run('synapse', '--skip-checks', 'set-provenance',
'-id', file_entity2.id,
'-name', 'TestActivity',
'-description', 'A very excellent provenance',
'-used', filename,
'-executed', repo_url,
'-limitSearch', folder_entity.id)
activity_id = parse(r'Set provenance record (\d+) on entity syn\d+', output)
output = run('synapse', '--skip-checks', 'get-provenance',
'-id', file_entity2.id)
activity = json.loads(output)
assert_equals(activity['name'], 'TestActivity')
assert_equals(activity['description'], 'A very excellent provenance')
# Verify that store works with provenance specified with filepath
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
filename2 = utils.make_bogus_data_file()
schedule_for_cleanup(filename2)
output = run('synapse', '--skip-checks', 'add', filename2,
'-parentid', project_entity.id,
'-used', filename,
'-executed', '%s %s' % (repo_url, filename))
entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
output = run('synapse', '--skip-checks', 'get-provenance',
'-id', entity_id)
activity = json.loads(output)
a = [a for a in activity['used'] if not a['wasExecuted']]
assert_in(a[0]['reference']['targetId'], [file_entity.id, file_entity2.id])
# Test associate command
# I have two files in Synapse filename and filename2
path = tempfile.mkdtemp()
schedule_for_cleanup(path)
shutil.copy(filename, path)
shutil.copy(filename2, path)
run('synapse', '--skip-checks', 'associate', path, '-r')
run('synapse', '--skip-checks', 'show', filename)
def test_table_query():
"""Test command line ability to do table query."""
cols = [synapseclient.Column(name='name', columnType='STRING', maximumSize=1000),
synapseclient.Column(name='foo', columnType='STRING', enumValues=['foo', 'bar', 'bat']),
synapseclient.Column(name='x', columnType='DOUBLE'),
synapseclient.Column(name='age', columnType='INTEGER'),
synapseclient.Column(name='cartoon', columnType='BOOLEAN')]
project_entity = project
schema1 = syn.store(synapseclient.Schema(name=str(uuid.uuid4()), columns=cols, parent=project_entity))
schedule_for_cleanup(schema1.id)
data1 = [['Chris', 'bar', 11.23, 45, False],
['Jen', 'bat', 14.56, 40, False],
['Jane', 'bat', 17.89, 6, False],
['Henry', 'bar', 10.12, 1, False]]
syn.store(synapseclient.RowSet(schema=schema1,
rows=[synapseclient.Row(r) for r in data1]))
# Test query
output = run('synapse', '--skip-checks', 'query',
'select * from %s' % schema1.id)
output_rows = output.rstrip("\n").split("\n")
# Check the length of the output
assert_equals(len(output_rows), 5, "got %s rows" % (len(output_rows),))
# Check that headers are correct.
# Should be column names in schema plus the ROW_ID and ROW_VERSION
my_headers_set = output_rows[0].split("\t")
expected_headers_set = ["ROW_ID", "ROW_VERSION"] + list(map(lambda x: x.name, cols))
assert_equals(my_headers_set, expected_headers_set, "%r != %r" % (my_headers_set, expected_headers_set))
def test_login():
alt_syn = synapseclient.Synapse()
username = "username"
password = "password"
with patch.object(alt_syn, "login") as mock_login, \
patch.object(alt_syn, "getUserProfile", return_value={"userName": "test_user", "ownerId": "ownerId"})\
as mock_get_user_profile:
run('synapse', '--skip-checks', 'login',
'-u', username,
'-p', password,
'--rememberMe',
syn=alt_syn)
mock_login.assert_called_once_with(username, password, forced=True, rememberMe=True, silent=False)
mock_get_user_profile.assert_called_once_with()
def test_configPath():
"""Test using a user-specified configPath for Synapse configuration file."""
tmp_config_file = tempfile.NamedTemporaryFile(suffix='.synapseConfig', delete=False)
shutil.copyfile(synapseclient.client.CONFIG_FILE, tmp_config_file.name)
# Create a File
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'--configPath',
tmp_config_file.name,
'add',
'-name',
'BogusFileEntityTwo',
'-description',
'Bogus data to test file upload',
'-parentid',
project.id,
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we stored the file in Synapse
f1 = syn.get(file_entity_id)
fh = syn._getFileHandle(f1.dataFileHandleId)
assert_equals(fh['concreteType'], 'org.sagebionetworks.repo.model.file.S3FileHandle')
def _description_wiki_check(run_output, expected_description):
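    """Assert that the entity reported in the command output has a wiki whose
    markdown matches the expected description."""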
entity_id = parse(r'Created.* entity:\s+(syn\d+)\s+', run_output)
wiki = syn.getWiki(entity_id)
assert_equals(expected_description, wiki.markdown)
def _create_temp_file_with_cleanup(specific_file_text=None):
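    """Create a temporary file (containing specific_file_text if given),
    schedule it for cleanup and return its path."""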
if specific_file_text:
with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as file:
file.write(specific_file_text)
filename = file.name
else:
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
return filename
def test_create__with_description():
output = run('synapse',
'create',
'Folder',
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--description',
description_text
)
_description_wiki_check(output, description_text)
def test_store__with_description():
output = run('synapse',
'store',
upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--description',
description_text
)
_description_wiki_check(output, description_text)
def test_add__with_description():
output = run('synapse',
'add',
upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--description',
description_text
)
_description_wiki_check(output, description_text)
def test_create__with_descriptionFile():
output = run('synapse',
'create',
'Folder',
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
def test_store__with_descriptionFile():
output = run('synapse',
'store',
upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
def test_add__with_descriptionFile():
output = run('synapse',
'add',
upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
def test_create__update_description():
name = str(uuid.uuid4())
output = run('synapse',
'create',
'Folder',
'-name',
name,
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
output = run('synapse',
'create',
'Folder',
'-name',
name,
'-parentid',
project.id,
'--description',
update_description_text
)
_description_wiki_check(output, update_description_text)
def test_store__update_description():
name = str(uuid.uuid4())
output = run('synapse',
'store',
upload_filename,
'-name',
name,
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
output = run('synapse',
'store',
upload_filename,
'-name',
name,
'-parentid',
project.id,
'--description',
update_description_text
)
_description_wiki_check(output, update_description_text)
def test_add__update_description():
name = str(uuid.uuid4())
output = run('synapse',
'add',
upload_filename,
'-name',
name,
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
output = run('synapse',
'add',
upload_filename,
'-name',
name,
'-parentid',
project.id,
'--description',
update_description_text
)
_description_wiki_check(output, update_description_text)
the-stack_0_9098 | from collections import OrderedDict
from django.utils.functional import cached_property
from six import iteritems
from slyd.orm.exceptions import ValidationError
__all__ = [
'cached_property',
'cached_property_ignore_set',
'class_property',
'unspecified',
'validate_type',
'AttributeDict',
]
class cached_property_ignore_set(cached_property):
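    """A cached_property variant that silently ignores attempts to set the attribute."""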
def __set__(self, instance, value):
pass
class class_property(object):
"""A read-only descriptor that works on the class too"""
def __init__(self, fget=None):
if fget is not None and not isinstance(fget, classmethod):
fget = classmethod(fget)
self.fget = fget
def __get__(self, instance, instance_type=None):
return self.fget.__get__(instance, instance_type)()
unspecified = object()
def validate_type(value, model):
if not isinstance(value, model):
raise ValidationError(
"'{!r}' is not an instance of type '{}'".format(
value, model.__name__))
def unwrap_envelopes(data, many, pk_field, remove_key):
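    """Convert a {primary_key: object} mapping into a list of objects (or a
    single object when ``many`` is False and exactly one object is present),
    writing each primary key back into its object under ``pk_field``."""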
unwrapped = []
for pk, obj in iteritems(data):
if not remove_key:
try:
if obj[pk_field] != pk:
raise ValidationError(
u"Envelope id does not match value of primary key "
u"field")
except KeyError:
pass
obj[pk_field] = pk
unwrapped.append(obj)
if not many and len(unwrapped) == 1:
return unwrapped[0]
return unwrapped
def wrap_envelopes(data, many, pk_field, remove_key):
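    """Inverse of unwrap_envelopes: wrap objects into an OrderedDict keyed by
    their primary key, optionally removing ``pk_field`` from each object."""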
if not many:
data = [data]
wrapped = OrderedDict()
for obj in data:
pk = obj[pk_field]
if remove_key:
del obj[pk_field]
wrapped[pk] = obj
return wrapped
class AttributeDict(dict):
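    """Dict subclass whose keys can also be read as attributes."""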
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(
u"'{}' object has no attribute '{}'".format(
self.__class__.__name__, name))
the-stack_0_9099 | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from binascii import hexlify
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_witness_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitVersion1SignatureHash,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
connect_nodes,
disconnect_nodes,
get_bip9_status,
hex_str_to_bytes,
sync_blocks,
sync_mempools,
assert_raises_rpc_error,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_message(msg_witness_tx(tx) if with_witness else msg_tx(tx))
p2p.sync_with_ping()
assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_message(msg_witness_block(block) if with_witness else msg_block(block))
p2p.sync_with_ping()
assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
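        """Announce a transaction via inv and, if success is True, wait for the
        peer to request it via getdata; otherwise check that no getdata arrives."""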
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
if success:
self.wait_for_getdata(timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
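        """Announce a block via headers or inv and wait for the node to request
        it via getdata."""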
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
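        """Request a block by hash via getdata and return the block received."""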
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999", "-mempoolreplacement=1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999", "-mempoolreplacement=1"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0", "-mempoolreplacement=1"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=VB_TOP_BITS):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Segwit status 'defined'
self.segwit_status = 'defined'
self.test_non_witness_transaction()
self.test_unnecessary_witness_before_segwit_activation()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.advance_to_segwit_started()
# Segwit status 'started'
self.test_getblocktemplate_before_lockin()
self.advance_to_segwit_lockin()
# Segwit status 'locked_in'
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay()
self.test_standardness_v0()
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
self.test_superfluous_witness()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit status = {})".format(func.__name__, self.segwit_status))
# Assert segwit status is as expected
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
sync_blocks(self.nodes)
# Assert segwit status is as expected at end of subtest
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
return func_wrapper
@subtest
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
self.nodes[0].generate(1)
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
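# MSG_WITNESS_FLAG is the high inv-type bit defined in BIP144 (1 << 30);
# ORing it into MSG_BLOCK (2) requests the witness-serialized block.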
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test that announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block via headers also results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block()
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if self.segwit_status != 'active':
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
@subtest
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, on upstream Bitcoin version 0 witness outputs are never
spendable without witness and so can't be spent before segwit activation.
On this chain SCRIPT_VERIFY_WITNESS is only enforced from activation (see
the note below), so the witness-stripped spends in this subtest are expected
to confirm, while blocks carrying witness data are still rejected."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
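# For P2SH-P2WSH the redeem script is the v0 witness program itself
# (OP_0 <sha256(witness script)>), and the scriptPubKey only commits to
# HASH160 of that redeem script (BIP141).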
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. On this chain SCRIPT_VERIFY_WITNESS is only
# enforced once segwit activates, so these witness-stripped spends are
# expected to be accepted below, unlike on upstream Bitcoin where the flag
# is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# When the block is serialized without witness, upstream Bitcoin would still
# reject it, because there transactions are always validated with
# SCRIPT_VERIFY_WITNESS, so a segwit v0 spend without a witness is invalid
# (reject reason 'block-validation-failed' with script check threads > 1,
# otherwise 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)').
# WinstarRxcoin: SCRIPT_VERIFY_WITNESS is only enforced once segwit is
# activated, so the witness-stripped block is accepted here.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest
def advance_to_segwit_started(self):
"""Mine enough blocks for segwit's vb state to be 'started'."""
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD - height - 1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.segwit_status = 'started'
@subtest
def test_getblocktemplate_before_lockin(self):
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment matches the expected commitment script.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
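# Per BIP141 the commitment script is OP_RETURN 0xaa21a9ed ||
# SHA256d(witness merkle root || witness reserved value); the coinbase's
# wtxid is defined as all zeros, which is why ser_uint256(0) stands in for
# it when computing the witness merkle root above.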
@subtest
def advance_to_segwit_lockin(self):
"""Mine enough blocks to lock in segwit, but don't activate."""
height = self.nodes[0].getblockcount()
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD - 1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.segwit_status = 'locked_in'
@subtest
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to the recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
# Delivering this transaction with witness should fail (no matter who
# it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, but not before."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
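# Any nSequence value below 0xfffffffe signals opt-in replaceability under
# BIP125, which is what would let us fee-bump this tx out of the mempool.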
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2WPKH output (OP_0 <20-byte hash>). This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2WPKH-looking output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if self.segwit_status != 'active':
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height % VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
self.segwit_status = 'active'
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
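# To spend P2SH-P2WSH the scriptSig must be exactly one push of the redeem
# script (the v0 witness program); all other unlocking data belongs in the
# witness stack.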
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
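# (BIP141: if more than one output matches the commitment pattern, the one
# with the highest output index is treated as the commitment, so this
# appended bogus commitment overrides the valid one added earlier.)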
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_RETURN outputs plus the original coinbase output
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
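# The witness reserved value lives only in the coinbase witness, which is
# not covered by the block hash, so an invalid value must not get this hash
# permanently rejected; resubmitting with the committed value (0) below is
# expected to succeed.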
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
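# Witness bytes count one weight unit each versus four for base bytes
# (BIP141), so padding the witness adjusts vsize in fine increments while
# the raw serialized size can grow far beyond the 1 MB base limit.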
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2 * 1024 * 1024)
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This should not work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
@subtest
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1)
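# Size check: 19 PUSHDATA2 pushes of 520 bytes cost 19 * (3 + 520) = 9937
# bytes, plus 63 OP_DROPs and one OP_TRUE = 64 opcode bytes, for 10001
# bytes total -- one over MAX_PROGRAM_LENGTH.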
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various invalid ways to spend tx; all of them should fail.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add a witness that is too large for IsStandard and check that it does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit version transactions are non-standard, but valid in blocks.
Can run this before and after segwit activation."""
NUM_SEGWIT_VERSIONS = 17 # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
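# Versions 1-16 are encoded as OP_1..OP_16 followed by the 32-byte hash.
# BIP141 reserves them for future upgrades: consensus treats them as
# anyone-can-spend, while policy (checked against std_node below) rejects
# them as non-standard.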
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
sync_blocks(self.nodes)
@subtest
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = ECKey()
key.generate(False)
pubkey = key.get_pubkey().get_bytes()
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
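# Per BIP143, a P2WPKH input is signed with the classic P2PKH script of the
# key hash as scriptCode, and the amount being spent is committed to in the
# sighash -- hence passing tx.vout[0].nValue here.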
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
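# BIP143 sighashes commit to the value of the output being spent, so
# signing with an amount that is off by one (too large here, too small
# below) must invalidate the input.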
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
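# (Under BIP143, SIGHASH_SINGLE with an out-of-range input index hashes
# hashOutputs as 32 zero bytes instead of the legacy 'sighash of 1' quirk,
# which is why this edge case is worth covering.)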
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
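# Size sketch for the two large scripts above: each (pad * 59) element
# serializes as a 1-byte push opcode plus 59 data bytes (60 bytes total), so
# 59 of them contribute 3540 bytes; adding 60 OP_DROPs gives 3600 bytes (at
# the standardness limit) and 61 OP_DROPs gives 3601 bytes (just over it).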
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
self.log.info("Testing rejection of block.nVersion < BIP9_TOP_BITS blocks")
block = self.build_next_block(version=4)
block.solve()
resp = self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(resp, 'bad-version(0x00000004)')
# Restart with the new binary
self.stop_node(2)
self.start_node(2, extra_args=["-vbparams=segwit:0:999999999999"])
connect_nodes(self.nodes[0], 2)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[2], 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
while height >= 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
height -= 1
@subtest
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
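# Worked example, assuming MAX_SIGOP_COST is 80,000 (the segwit-scaled block
# sigop limit): sigops_per_script = 20*5 + 193 = 293, so 80000 // 293 = 273
# full-weight outputs fit, with 80000 % 293 = 11 sigops left over; that
# margin is what witness_program_toomany/justright below are built around.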
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_superfluous_witness(self):
# Serialize the tx with the witness flag byte forced to 3 (an invalid value)
def serialize_with_bogus_witness(tx):
flags = 3
r = b""
r += struct.pack("<i", tx.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(tx.vin)
r += ser_vector(tx.vout)
if flags & 1:
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
return r
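# For reference, the standard BIP144 wire layout is:
#   nVersion | marker (0x00) | flag (0x01) | vin | vout | script witnesses | nLockTime
# The helper above keeps the empty-vector marker but writes flag = 3 instead
# of 1, producing the superfluous/unknown optional data the node should reject.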
class msg_bogus_tx(msg_tx):
def serialize(self):
return serialize_with_bogus_witness(self.tx)
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
self.nodes[0].generate(1)
unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('rltc'))
raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Superfluous witness record']):
self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
self.nodes[0].p2p.sync_with_ping()
raw = self.nodes[0].signrawtransactionwithwallet(raw)
assert raw['complete']
raw = raw['hex']
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
self.nodes[0].p2p.sync_with_ping()
if __name__ == '__main__':
SegWitTest().main()
|
the-stack_0_9102 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
##
import sys
import random
## equation class
coeffs = [ [4, 5, 6, 7 ],
[0, 9, 10, 11 ],
[0, 0, 12, 13 ],
[0, 0, 0, 14 ]]
def van_der_pol_oscillator_deriv(x, t, mu=1.0):  # mu (damping) defaults to 1.0; it was previously undefined
nx0 = x[1]
nx1 = -mu * (x[0] ** 2.0 - 1.0) * x[1] - x[0]
res = np.array([nx0, nx1])
return res
# ts = np.linspace(0.0, 50.0, 500)
# xs = odeint(van_der_pol_oscillator_deriv, [0.2, 0.2], ts)
# plt.plot(xs[:,0], xs[:,1])
# xs = odeint(van_der_pol_oscillator_deriv, [-3.0, -3.0], ts)
# plt.plot(xs[:,0], xs[:,1])
# xs = odeint(van_der_pol_oscillator_deriv, [4.0, 4.0], ts)
# plt.plot(xs[:,0], xs[:,1])
# plt.gca().set_aspect('equal')
# plt.savefig('vanderpol_oscillator.png')
# plt.show()
def quadtheta(t, y):
w_dot = 0.
x_dot = 0.
y_dot = 0.
z_dot = 0.
return w_dot, x_dot, y_dot, z_dot
## TODO
# define coefficient object, a[i][j] = ...
# define evaluation
class FourQuad:
def __init__(self, coeffs=None):
self.coeffs = coeffs  # polynomial coefficients, lowest order first
def evaluate(self, z):
"""
Evaluates a complex polynomial at z
"""
total = 0  # accumulator (the original _zero() helper was undefined)
coeffs = self.coeffs
for i in range(len(coeffs)):
total = total + coeffs[i] * z**i
return total
def differentiate(self, z):
return
def __iter__(self):
return
def __getitem__(self, key):
return key
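# Usage sketch (assuming scalar coefficients, lowest order first):
#   FourQuad([1, 2, 3]).evaluate(1 + 1j)
#   == 1 + 2*(1 + 1j) + 3*(1 + 1j)**2 == (3+8j)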
## Next steps:
## Need to define class, coefficient calling
## Just use scalars, can switch to intervals easily
## How general?
## Visualization of scaling by one dimension
## Autodifferentiation
## viz to discard big parts of the space, esp. spirals, etc.,
## help decide where not to start
def quad_distance(w, x, y, z):
return [w[i]**2 + x[i]**2 + y[i]**2 + z[i]**2 for i in range(len(w))]
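# Note: quad_distance returns the squared norm (no square root), so dividing
# each coordinate by it (as plot_quad's sphere view does) is an inversion in
# the unit sphere rather than a projection onto it; take a sqrt here if a
# true normalization is intended.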
def quad1(w, x, y, z, s=10, r=28, b=2.667):
w_dot = x*y - b*z
x_dot = s*(y - x)
y_dot = r*x - y - x*z
z_dot = x*y - b*z
return w_dot, x_dot, y_dot, z_dot
## setting up parameters
default_lambda_1 = .2523718
default_lambda_2 = .392931
default_lambda_3 = 1 - default_lambda_1 - default_lambda_2
def quad2(x_1, y_1, x_2, y_2,
lambda_1 = default_lambda_1,
lambda_2 = default_lambda_2,
lambda_3 = default_lambda_3):
"""
dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
"""
x_1_dot = lambda_2 * (x_1**2 - y_1**2) - (lambda_2 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * (x_1*y_2 + y_1*x_2)
x_2_dot = lambda_1 * (x_2**2 - y_2**2) - (lambda_1 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 +lambda_3) * (x_1*y_2 + y_1*x_2)
return x_1_dot, y_1_dot, x_2_dot, y_2_dot
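# Derivation sketch: writing z1 = x_1 + i*y_1 and z2 = x_2 + i*y_2,
#   z1^2  = (x_1^2 - y_1^2) + i*(2*x_1*y_1)
#   z1*z2 = (x_1*x_2 - y_1*y_2) + i*(x_1*y_2 + y_1*x_2)
# so splitting dz1/dt and dz2/dt from the docstring into real and imaginary
# parts yields exactly the four real equations returned above.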
def plot_quad(ws, xs, ys, zs, plot_type = 0, txt = ""):
if plot_type == 0:
print("Plotting Double Plot Quad Viz")
plt.figure(1)
plt.subplot(2, 1, 1)
plt.subplots_adjust(top=0.85)
plt.plot(xs, ws)
#plt.yscale('linear')
plt.title('xy')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.subplot(2, 1, 2)
plt.plot(ys, zs)
#plt.yscale('linear')
plt.title('wz')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.suptitle(txt, fontsize=14)
plt.show()
if plot_type == 1:
print("Plotting Overlain Double Plot Quad Viz")
plt.figure(1)
plt.plot(xs, ws)
plt.plot(ys, zs)
#plt.yscale('linear')
plt.title('x-w, y-z')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.suptitle(txt, fontsize=14)
plt.show()
elif plot_type == 2:
print("Plotting Sphere Plot Quad Viz")
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.subplots_adjust(top=0.85)
plt.suptitle(txt, fontsize=14)
qdist = quad_distance(ws, xs, ys, zs)
ws = np.divide(ws, qdist)
xs = np.divide(xs, qdist)
ys = np.divide(ys, qdist)
zs = np.divide(zs, qdist)
ax.plot(xs, ys, zs)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
ax.set_title("Nonrigorous Solution")
plt.show()
else:
print("Invalid Plot Type")
def main(argv):
sim = 'demo3'
if sim == 'demo1':
dt = 0.01
stepCnt = 10000
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
ws[0], xs[0], ys[0], zs[0] = (0., 0., 1., 1.05)
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
w_dot, x_dot, y_dot, z_dot = quad1(ws[i], xs[i], ys[i], zs[i])
ws[i + 1] = ws[i] + (w_dot * dt)
xs[i + 1] = xs[i] + (x_dot * dt)
ys[i + 1] = ys[i] + (y_dot * dt)
zs[i + 1] = zs[i] + (z_dot * dt)
plot_quad(ws, xs, ys, zs, float(argv[1]))
elif sim == 'demo2':
dt = 0.01
stepCnt = 100000
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
ws[0], xs[0], ys[0], zs[0] = (9., 3., 1., 1.05)
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
w_dot, x_dot, y_dot, z_dot = quad2(ws[i], xs[i], ys[i], zs[i])
ws[i + 1] = ws[i] + (w_dot * dt)
xs[i + 1] = xs[i] + (x_dot * dt)
ys[i + 1] = ys[i] + (y_dot * dt)
zs[i + 1] = zs[i] + (z_dot * dt)
plot_quad(ws, xs, ys, zs, float(argv[1]))
elif sim == 'demo3':
"""
Loop through simulations
"""
for i in range(1):
lambda_1 = random.random()
lambda_2 = (1 - lambda_1) * random.random()
lambda_3 = 1 - lambda_1 - lambda_2
dt = 0.01
stepCnt = 1000
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
ws[0], xs[0], ys[0], zs[0] = ( 0.4 * random.random(),
0.4 * random.random(),
-0.4 * random.random(),
-0.4 * random.random() )
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
w_dot, x_dot, y_dot, z_dot = quad2(ws[i], xs[i], ys[i], zs[i],
lambda_1, lambda_2, lambda_3)
ws[i + 1] = ws[i] + (w_dot * dt)
xs[i + 1] = xs[i] + (x_dot * dt)
ys[i + 1] = ys[i] + (y_dot * dt)
zs[i + 1] = zs[i] + (z_dot * dt)
# display initial value
print("w_0, x_0, y_0, z_0 = "
+ str(ws[0]) + ", "
+ str(xs[0]) + ", "
+ str(ys[0]) + ", "
+ str(zs[0]))
# display parameters
print("lambda_1, lambda_2, lambda_3 = "
+ str(lambda_1) + ", "
+ str(lambda_2) + ", "
+ str(lambda_3))
txt = ("Parameters: lambda_1, lambda_2, lambda_3 = "
+ str(round(lambda_1, 3)) + ", "
+ str(round(lambda_2, 3)) + ", "
+ str(round(lambda_3, 3)) + '\n'
+ "Initial Point: w_0, x_0, y_0, z_0 = "
+ str(round(ws[0], 3)) + ", "
+ str(round(xs[0], 3)) + ", "
+ str(round(ys[0], 3)) + ", "
+ str(round(zs[0], 3)) )
plot_quad(ws, xs, ys, zs, float(argv[1]), txt = txt)
if __name__=="__main__":
main(sys.argv)
|
the-stack_0_9104 | #!/usr/bin/env python3
# Pi-Ware main UI
from tkinter import *
from tkinter.ttk import *
from ttkthemes import ThemedStyle
import tkinter as tk
import os
import webbrowser
from functools import partial
import getpass
import json
from screeninfo import get_monitors
#Set global var username
global username
username = getpass.getuser()
#set global vsb to make scrollbar perfect
vsb = None
#Set global install/uninstall scripts
global install_script
global uninstall_script
telemetry = None
#Import custom pi-ware functions
#import function
import classes
from function import istherefile
window = Tk()
s = Style()
#Functions
def callback(url):
webbrowser.open_new_tab(url)
def error(mode,message,contact):
#Create window
ErrorWindow = Toplevel(window)
#Set window icon based on arg 1
icon = PhotoImage(file = f'/home/{username}/pi-ware/icons/error-{mode}.png')
ErrorWindow.iconphoto(False, icon)
if mode == "critical":
ErrorWindow.title(f"{mode} Error!")
else:
ErrorWindow.title("Error!")
errorimage = Label(ErrorWindow,image=icon)
errorhappened = Label(ErrorWindow, text = "An error occurred!")
#error message
errormessage = Label(ErrorWindow, text = message)
#If contact is set to true, and telemetry is enabled, send us an error message.
if contact:  # accept either boolean True or the string "True"
error_message = {"error": "fatal", "action": "immediate"}
with open('error.txt', 'w') as json_file:
json.dump(error_message, json_file)
if telemetry == "True":
print("Sending log to pi-wareHQ")
#Ok button
okbutton = Button(ErrorWindow, text = "ok",command=quit)
#Pack all items
errorimage.pack()
errorhappened.pack()
errormessage.pack()
okbutton.pack()
def show_desc(apt,*args):
# Gets the size of the mainwindow
wingeo = window.geometry()
item = tree.selection()[0]
app = tree.item(item,"text")
#print(app)
global install_script, uninstall_script, desc_win
desc_win = Toplevel(window)
p2 = PhotoImage(file = f'/home/{username}/pi-ware/apps/{app}/icon.png')
# Icon set for program window
desc_win.iconphoto(False, p2)
window.resizable(0, 0)
desc_win.title(f"{app}")
#print("320x500+" + mainwinx + "+" + mainwiny)
# Makes sure the new window is the same size as the old one
desc_win.geometry(wingeo)
#style = ThemedStyle(desc_win)
#style.set_theme("arc")
window.withdraw()
desc = open(f"/home/{username}/pi-ware/apps/{app}/description.txt", "r")
desc_contents = desc.read()
text_box = Text(desc_win, height=12, width=40)
text_box.pack()
text_box.insert('end', desc_contents)
text_box.config(state='disabled')
#Old description box:
#app_desc = tk.Label(desc_win, text=desc_contents, font="Arial 9")
#app_desc.pack()
#Check if website file exists
if istherefile(f"/home/{username}/pi-ware/apps/{app}/website"):
websiteurlfile = open(f'/home/{username}/pi-ware/apps/{app}/website', 'r')
websiteurl = websiteurlfile.readlines()
# Strips the newline character
for line in websiteurl:
#print("{}".format(line.strip()))
#Website = classes.HyperLink(desc_win, f"""{line}""");
#Website.pack()
Website = Label(desc_win, text=line,font=('Arial', 11), cursor="hand2")
s.configure(Website, foreground='blue')
Website.pack()
Website.bind("<Button-1>", lambda e:
callback(line))
#Buttons
install = Button(desc_win, text="INSTALL", width=200, command=install_app, style="install.TButton")
uninstall = Button(desc_win, text="UNINSTALL", width=200, command=uninstall_app, style="uninstall.TButton")
back_to_menu_button = Button(desc_win, text="BACK", width=200, command=back_to_menu, style="back.TButton")
s.configure("install.TButton", foreground='blue', background='blue', font=("Arial", 11))
s.configure("uninstall.TButton", foreground='red', background='red', font=("Arial", 11))
s.configure("back.TButton", foreground='green', background='green', font=("Arial", 11))
#Commands
ucommand = f"""bash /home/{username}/pi-ware/func/term/uninst '{app}' 'Uninstalling {app}'"""
command = f"""bash /home/{username}/pi-ware/func/term/inst '{app}' 'Installing {app}'"""
install_script = "'%s'" % command
uninstall_script = "'%s'" % ucommand
#Pack all buttons
install.pack()
uninstall.pack()
back_to_menu_button.pack(side = "bottom")
desc_win.protocol("WM_DELETE_WINDOW",back_to_menu)
def back_to_menu(window, parent, app=None):  # NOTE: superseded by the no-argument back_to_menu defined below
parent.destroy()
window.deiconify()
def install_app():
global install_script
if IsDev == "True":
print(f"bash /home/{username}/pi-ware/func/term/term-run {install_script}")
os.system(f"bash /home/{username}/pi-ware/func/term/term-run {install_script}")
def uninstall_app():
global uninstall_script
if IsDev == "True":
print(f"bash /home/{username}/pi-ware/func/term/term-run {uninstall_script}")
os.system(f"bash /home/{username}/pi-ware/func/term/term-run {uninstall_script}")
def back_to_menu():
window.deiconify()
desc_win.destroy()
window.title("Pi-Ware")
def quit():
window.destroy()
#Check for certain files
#Check if apps.json exists
if not istherefile(f"/home/{username}/pi-ware/apps/apps.json"):
error("critical", "Apps.json not found!", True)
else:
#Read apps.json
with open(f"/home/{username}/pi-ware/apps/apps.json") as f:
archdata = json.load(f)
#Check if dev files exist
if not istherefile(f"/home/{username}/pi-ware/.dev"):
IsDev = "False"
else:
IsDev = "True"
#Set window icon
p1 = PhotoImage(file = f'/home/{username}/pi-ware/icons/logo.png')
window.iconphoto(False, p1)
#Main
width = None
height = None
for m in get_monitors():
width = (m.width/2)-165
height = m.height/2-250
print(height)
print(width)
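# Placement sketch: the window is 330x500, so offsetting by
# (monitor_width/2 - 165, monitor_height/2 - 250) roughly centers it;
# on a 1920x1080 display, for example, that works out to +795+290.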
window.resizable(0, 0)
window.geometry("330x500+" + str(int(width)) + "+" + str(int(height)))
#window.eval('tk::PlaceWindow . center')
window.title("Pi-Ware")
style = ThemedStyle(window)
style.set_theme("arc")
tabtext = "Apps"
def addscroll(event):
selected = event.widget.select()
tabtext = event.widget.tab(selected, "text")
if (tabtext != "Apps"):
vsb.place_forget()
elif (tabtext == "Apps"):
vsb.place(x=310, y=60, height=380)
print(tabtext)
# Window tabs
tab_control = Notebook(window)
apps_tab = Frame(tab_control)
news_tab = Frame(tab_control)
credits_tab = Frame(tab_control)
DEV_tab = Frame(tab_control)
tab_control.bind("<<NotebookTabChanged>>", addscroll)
tab_control.add(apps_tab, text="Apps")
tab_control.add(news_tab, text="News")
tab_control.add(credits_tab, text="Credits")
#Show dev stuff if dev files are found
if IsDev == "True":
tab_control.add(DEV_tab, text="Dev")
print("App arcitectures:")
print(archdata)
tab_control.pack(expand=0, fill="both")
#Show DEV stuff
PiWareVersionFile = open(f"/home/{username}/.local/share/pi-ware/version", "r")
PiWareVersioncontent = PiWareVersionFile.read()
files = folders = 0
for _, dirnames, filenames in os.walk(f"/home/{username}/pi-ware/apps"):
files += len(filenames)
folders += len(dirnames)
InstallibleApps = "{:,} installible Apps".format(folders)
PiWareVersion = Label(DEV_tab, text=f"Pi-Ware Version:\n{PiWareVersioncontent}", font="Arial 11 bold")
PiWareInstallableApps = Label(DEV_tab, text=f"{InstallibleApps}", font="Arial 11 bold")
PiWareInstallableApps.configure(anchor="center")
PiWareVersion.pack()
PiWareInstallableApps.pack()
#Show latest news message
NewsMessagefile = open(f"/home/{username}/pi-ware/func/info/latestnewsmessage", "r")
NewsMessagecontent = NewsMessagefile.read()
NewsMessage = Label(news_tab, text=f"Latest news:\n{NewsMessagecontent}", font="Arial 11 bold")
NewsMessage.config(justify=tk.CENTER)
NewsMessage.pack()
#Show info message
InfoMessagefile = open(f"/home/{username}/pi-ware/func/info/infomessage", "r")
InfoMessagecontent = InfoMessagefile.read()
InfoMessage = Label(credits_tab, text=f"{InfoMessagecontent}", font="Arial 11 bold")
InfoMessage.config(justify=tk.CENTER)
InfoMessage.pack()
#Show commit links
commitmessage = Label(credits_tab, text=f"To see commits, please go to the link below.", font="Arial 11 bold")
commitmessage.config(justify=tk.CENTER)
commitmessage.pack()
commit = Label(credits_tab, text="https://github.com/piware14/pi-ware/graphs/contributors",font=('Arial', 9), cursor="hand2")
s.configure(commit, foreground='blue')
commit.pack()
commit.bind("<Button-1>", lambda e:
callback("https://github.com/piware14/pi-ware/graphs/contributors"))
#Add pi-ware website
piwarewebsite = Label(credits_tab, text=f"To visit the pi-ware website, click the link below.", font="Arial 11 bold")
piwarewebsite.config(justify=tk.CENTER)
piwarewebsite.pack()
Website = Label(credits_tab, text="https://pi-ware.ml",font=('Arial', 9), cursor="hand2")
s.configure(Website, foreground='blue')
Website.pack()
Website.bind("<Button-1>", lambda e:
callback("https://pi-ware.ml"))
tree = Treeview(apps_tab)
tree.pack(expand=YES, fill=BOTH)
tree.column("#0", minwidth=0, width=330, stretch=NO)
s.configure('Treeview', rowheight=35)
s.map('Treeview', foreground = [('active', '!disabled', 'green')],
background = [('active', 'black')])
ap = next(os.walk(f"/home/{username}/pi-ware/apps"))[1]
applist = sorted(ap)
print("Current apps:\n")
for app in applist:
print(app)
appb = ""
for a in app:
if(a == " "):
appb += "_"
else:
appb += a
#tree.bind("<Button-1>", print(app))
tree.bind("<ButtonRelease-1>", partial(show_desc,app))
exec(appb + """_button = PhotoImage(file=f'/home/{username}/pi-ware/apps/{app}/icon.png')""")
exec("""tree.insert('', 'end', text=f"{app}",image=""" + appb + """_button)""")
vsb =Scrollbar(window, orient="vertical", command=tree.yview)
vsb.place(x=310, y=60, height=380)
tree.configure(yscrollcommand=vsb.set)
if (tabtext != "Apps"):
vsb.place_forget()
ScrollForMore = Label(apps_tab, text="Scroll down for more apps.", font="Arial 11 bold")
ScrollForMore.pack()
quitbutton = Button(window, text="Quit", width=200, style="goback.TButton", command=quit)
s.configure("goback.TButton", foreground='grey', background='grey', font=("Arial", 11))
quitbutton.pack(side="bottom")
window.mainloop()
|
the-stack_0_9105 | #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line interface for Turbinia."""
# pylint: disable=bad-indentation
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import getpass
import logging
import os
import sys
import uuid
from turbinia import config
from turbinia import TurbiniaException
from turbinia.lib import recipe_helpers
from turbinia.config import logger
from turbinia import __version__
from turbinia.processors import archive
from turbinia.output_manager import OutputManager
from turbinia.output_manager import GCSOutputWriter
log = logging.getLogger('turbinia')
# We set up the logger first without the file handler, and we will set up the
# file handler later once we have read the log path from the config.
logger.setup(need_file_handler=False)
def csv_list(string):
"""Helper method for having CSV argparse types.
Args:
string(str): Comma separated string to parse.
Returns:
list[str]: The parsed strings.
"""
return string.split(',')
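# For example, csv_list('disk1,disk2') returns ['disk1', 'disk2'], and a
# single value such as csv_list('disk1') returns ['disk1'].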
def check_args(source_path, args):
"""Checks lengths of supplied args match or raise an error.
Lists can have only one element where they are automatically extended.
Args:
source_path(list(str)): List of source_paths supplied to turbiniactl.
args(list(list)): List of args (i.e. name, source, partitions, etc) and
their values supplied to turbiniactl.
Raises:
TurbiniaException: If the lengths of the args don't match.
Returns:
list(list): List of args, each expanded to match the length of source_path. """
ret = list()
if not args[0]:
args[0] = source_path
for arg in args:
if not arg:
arg = [None]
if len(arg) > 1 and len(arg) != len(source_path):
raise TurbiniaException(
'Number of passed in args ({0:d}) must equal to one or '
'number of source_paths/disks ({1:d}).'.format(
len(arg), len(source_path)))
if len(arg) == 1:
arg = [arg[0] for _ in source_path]
ret.append(arg)
return ret
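# Illustrative example: with source_path=['disk1.img', 'disk2.img'] and
# args=[['case1'], [None]], check_args returns
# [['case1', 'case1'], [None, None]]: single-element args are repeated to
# line up with the number of source paths, while longer mismatched lists
# raise TurbiniaException.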
def process_args(args):
"""Parses and processes args.
Args:
args(namespace): turbiniactl args.
Raises:
TurbiniaException: If there's an error processing args.
"""
parser = argparse.ArgumentParser(
description='Turbinia can bulk process multiple evidence of same type '
'(i.e. rawdisk, google cloud disk). For bulk processing, pass in csv '
'list of args to be processed. If all pieces of evidence share the same '
'property, such as project or source, there is no need for repeating '
'those values in the command.')
parser.add_argument(
'-q', '--quiet', action='store_true', help='Show minimal output')
parser.add_argument(
'-v', '--verbose', action='store_true', help='Show verbose output',
default=True)
parser.add_argument(
'-d', '--debug', action='store_true', help='Show debug output',
default=False)
parser.add_argument(
'-a', '--all_fields', action='store_true',
help='Show all task status fields in output', required=False)
parser.add_argument(
'-c', '--config_file', help='Load explicit config file. If specified it '
'will ignore config files in other default locations '
'(/etc/turbinia.conf, ~/.turbiniarc, or in paths referenced in '
'environment variable TURBINIA_CONFIG_PATH)', required=False)
parser.add_argument(
'-I', '--recipe', help='Name of Recipe to be employed on evidence',
required=False)
parser.add_argument(
'-P', '--recipe_path', help='Recipe file path to load and use.',
required=False)
parser.add_argument(
'-X', '--skip_recipe_validation', action='store_true', help='Do not '
'perform recipe validation on the client.', required=False, default=False)
parser.add_argument(
'-f', '--force_evidence', action='store_true',
help='Force evidence processing request in potentially unsafe conditions',
required=False)
parser.add_argument(
'-k', '--decryption_keys', help='Decryption keys to be passed in as '
' comma separated list. Each entry should be in the form type=key. (e.g. '
'"-k password=123456,recovery_password=XXXX-XXXX-XXXX-XXXX-XXXX-XXXX")',
default=[], type=csv_list)
parser.add_argument('-o', '--output_dir', help='Directory path for output')
parser.add_argument('-L', '--log_file', help='Log file')
parser.add_argument(
'-r', '--request_id', help='Create new requests with this Request ID',
required=False)
parser.add_argument(
'-V', '--version', action='version', version=__version__,
help='Show the version')
parser.add_argument(
'-D', '--dump_json', action='store_true',
help='Dump JSON output of Turbinia Request instead of sending it')
parser.add_argument(
'-F', '--filter_patterns_file',
help='A file containing newline separated string patterns to filter '
'text based evidence files with (in extended grep regex format). '
'This filtered output will be in addition to the complete output')
parser.add_argument(
'-Y', '--yara_rules_file', help='A file containing Yara rules.')
parser.add_argument(
'-j', '--jobs_allowlist', default=[], type=csv_list,
help='An allowlist for Jobs that will be allowed to run (in CSV format, '
'no spaces). This will not force them to run if they are not configured '
'to. This is applied both at server start time and when the client makes '
'a processing request. When applied at server start time the change is '
'persistent while the server is running. When applied by the client, it '
'will only affect that processing request.')
parser.add_argument(
'-J', '--jobs_denylist', default=[], type=csv_list,
help='A denylist for Jobs we will not allow to run. See '
'--jobs_allowlist help for details on format and when it is applied.')
parser.add_argument(
'-p', '--poll_interval', default=60, type=int,
help='Number of seconds to wait between polling for task state info')
parser.add_argument(
'-T', '--debug_tasks', action='store_true',
help='Show debug output for all supported tasks', default=False)
parser.add_argument(
'-w', '--wait', action='store_true',
help='Wait to exit until all tasks for the given request have completed')
subparsers = parser.add_subparsers(
dest='command', title='Commands', metavar='<command>')
# Action for printing config
parser_config = subparsers.add_parser('config', help='Print out config file')
parser_config.add_argument(
'-f', '--file_only', action='store_true', help='Print out file path only')
#Sends Test Notification
parser_testnotify = subparsers.add_parser(
'testnotify', help='Sends test notification')
# TODO(aarontp): Find better way to specify these that allows for multiple
# pieces of evidence to be submitted. Maybe automagically create different
# commands based on introspection of evidence objects?
# RawDisk
parser_rawdisk = subparsers.add_parser(
'rawdisk', help='Process RawDisk as Evidence (bulk processable)')
parser_rawdisk.add_argument(
'-l', '--source_path', help='Local path to the evidence', required=True,
type=csv_list)
parser_rawdisk.add_argument(
'-s', '--source', help='Description of the source of the evidence',
required=False, type=csv_list, default=[None])
parser_rawdisk.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# Parser options for Google Cloud Disk Evidence type
parser_googleclouddisk = subparsers.add_parser(
'googleclouddisk',
help='Process Google Cloud Persistent Disk as Evidence '
'(bulk processable)')
parser_googleclouddisk.add_argument(
'-C', '--copy_only', action='store_true', help='Only copy disk and do '
'not process with Turbinia. This only takes effect when a source '
'--project is defined and can be run without any Turbinia server or '
'workers configured.')
parser_googleclouddisk.add_argument(
'-d', '--disk_name', help='Google Cloud name for disk', required=True,
type=csv_list)
parser_googleclouddisk.add_argument(
'-p', '--project', help='Project that the disk to process is associated '
'with. If this is different from the project that Turbinia is running '
'in, it will be copied to the Turbinia project.', type=csv_list)
parser_googleclouddisk.add_argument(
'-z', '--zone', help='Geographic zone the disk exists in', type=csv_list)
parser_googleclouddisk.add_argument(
'-s', '--source', help='Description of the source of the evidence',
required=False, type=csv_list, default=[None])
parser_googleclouddisk.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# Parser options for Google Cloud Persistent Disk Embedded Raw Image
parser_googleclouddiskembedded = subparsers.add_parser(
'googleclouddiskembedded',
help='Process Google Cloud Persistent Disk with an embedded raw disk '
'image as Evidence (bulk processable)')
parser_googleclouddiskembedded.add_argument(
'-C', '--copy_only', action='store_true', help='Only copy disk and do '
'not process with Turbinia. This only takes effect when a source '
'--project is defined and can be run without any Turbinia server or '
'workers configured.')
parser_googleclouddiskembedded.add_argument(
'-e', '--embedded_path',
help='Path within the Persistent Disk that points to the raw image file',
required=True, type=csv_list)
parser_googleclouddiskembedded.add_argument(
'-d', '--disk_name', help='Google Cloud name for disk', required=True,
type=csv_list)
parser_googleclouddiskembedded.add_argument(
'-p', '--project', help='Project that the disk to process is associated '
'with. If this is different from the project that Turbinia is running '
'in, it will be copied to the Turbinia project.', type=csv_list)
parser_googleclouddiskembedded.add_argument(
'-P', '--mount_partition', type=csv_list, default=[1],
help='The partition number as an integer to use when mounting the '
'parent disk. Defaults to the first partition. Only affects mounting, and '
'not what gets processed.')
parser_googleclouddiskembedded.add_argument(
'-z', '--zone', help='Geographic zone the disk exists in', type=csv_list)
parser_googleclouddiskembedded.add_argument(
'-s', '--source', help='Description of the source of the evidence',
required=False, type=csv_list, default=[None])
parser_googleclouddiskembedded.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# RawMemory
parser_rawmemory = subparsers.add_parser(
'rawmemory', help='Process RawMemory as Evidence (bulk processable)')
parser_rawmemory.add_argument(
'-l', '--source_path', help='Local path to the evidence', required=True,
type=csv_list)
parser_rawmemory.add_argument(
'-P', '--profile', help='Profile to use with Volatility', required=True,
type=csv_list)
parser_rawmemory.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
parser_rawmemory.add_argument(
'-m', '--module_list', type=csv_list,
help='Volatility module(s) to execute', required=True)
# Parser options for Directory evidence type
parser_directory = subparsers.add_parser(
'directory', help='Process a directory as Evidence (bulk processable)')
parser_directory.add_argument(
'-l', '--source_path', help='Local path to the evidence', required=True,
type=csv_list)
parser_directory.add_argument(
'-s', '--source', help='Description of the source of the evidence',
required=False, type=csv_list, default=[None])
parser_directory.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# Parser options for CompressedDirectory evidence type
parser_directory = subparsers.add_parser(
'compresseddirectory', help='Process a compressed tar file as Evidence '
'(bulk processable)')
parser_directory.add_argument(
'-l', '--source_path', help='Local path to the evidence', required=True,
type=csv_list)
parser_directory.add_argument(
'-s', '--source', help='Description of the source of the evidence',
required=False, type=csv_list, default=[None])
parser_directory.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# Parser options for ChromiumProfile evidence type
parser_hindsight = subparsers.add_parser(
'hindsight', help='Process ChromiumProfile as Evidence '
'(bulk processable)')
parser_hindsight.add_argument(
'-l', '--source_path', help='Local path to the evidence', required=True,
type=csv_list)
parser_hindsight.add_argument(
'-f', '--format', help='Output format (supported types are '
'xlsx, sqlite, jsonl)', type=csv_list, default=['sqlite'])
parser_hindsight.add_argument(
'-b', '--browser_type', help='The type of browser the input files belong '
'to (supported types are Chrome, Brave)', type=csv_list,
default=['Chrome'])
parser_hindsight.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# List Jobs
subparsers.add_parser(
'listjobs',
help='List all available Jobs. These Job names can be used by '
'--jobs_allowlist and --jobs_denylist')
# PSQ Worker
parser_psqworker = subparsers.add_parser('psqworker', help='Run PSQ worker')
parser_psqworker.add_argument(
'-S', '--single_threaded', action='store_true',
help='Run PSQ Worker in a single thread', required=False)
# Celery Worker
subparsers.add_parser('celeryworker', help='Run Celery worker')
# Parser options for Turbinia status command
parser_status = subparsers.add_parser(
'status', help='Get Turbinia Task status')
parser_status.add_argument(
'-c', '--close_tasks', action='store_true',
help='Close tasks based on Request ID or Task ID', required=False)
parser_status.add_argument(
'-C', '--csv', action='store_true',
help='When used with --statistics, the output will be in CSV format',
required=False)
parser_status.add_argument(
'-d', '--days_history', default=0, type=int,
help='Number of days of history to show', required=False)
parser_status.add_argument(
'-D', '--dump_json', action='store_true',
help='Dump JSON status output instead text. Compatible with -d, -u, '
'-r and -t flags, but not others')
parser_status.add_argument(
'-f', '--force', help='Gatekeeper for --close_tasks', action='store_true',
required=False)
parser_status.add_argument(
'-r', '--request_id',
help='Show all tasks for this Request ID. A request to process Evidence will '
'generate a unique request ID and this option will show all Tasks associated '
'with this request.', required=False)
# 20 == Priority.High. We are setting this manually here because we don't want
# to load the worker module yet in order to access this Enum.
parser_status.add_argument(
'-p', '--priority_filter', default=20, type=int, required=False,
help='This sets what report sections are shown in full detail in '
'report output. Any tasks that have set a report_priority value '
'equal to or lower than this setting will be shown in full detail, and '
'tasks with a higher value will only have a summary shown. To see all '
'tasks report output in full detail, set --priority_filter=100')
parser_status.add_argument(
'-R', '--full_report',
help='Generate full markdown report instead of just a summary',
action='store_true', required=False)
parser_status.add_argument(
'-s', '--statistics', help='Generate statistics only',
action='store_true', required=False)
parser_status.add_argument(
'-t', '--task_id', help='Show task data for the given Task ID. A '
'processing request can generate multiple Tasks as part of the request '
'and this will filter to only the specified Task.', required=False)
parser_status.add_argument(
'-u', '--user', help='Show task for given user', required=False)
parser_status.add_argument(
'-i', '--requests', required=False, action='store_true',
help='Show all requests from a specified timeframe. The default '
'timeframe is 7 days. Please use the -d flag to extend this.')
parser_status.add_argument(
'-g', '--group_id', help='Show Requests for given group ID. This command'
' only shows the related requests and overview of their task status. Run '
'--full_report for the full list of requests and their tasks.',
required=False)
parser_status.add_argument(
'-w', '--workers', required=False, action='store_true',
help='Show Worker status information from a specified timeframe. The '
'default timeframe is 7 days. Please use the -d flag to extend this. '
'Additionally, you can use the -a or --all_fields flag to retrieve the '
'full output containing finished and unassigned worker tasks.')
parser_log_collector = subparsers.add_parser(
'gcplogs', help='Collects Turbinia logs from Stackdriver.')
parser_log_collector.add_argument(
'-o', '--output_dir', help='Directory path for output', required=False)
parser_log_collector.add_argument(
'-q', '--query',
help='Filter expression to use to query Stackdriver logs.')
parser_log_collector.add_argument(
'-d', '--days_history', default=1, type=int,
help='Number of days of history to show', required=False)
parser_log_collector.add_argument(
'-s', '--server_logs', action='store_true',
help='Collects all server related logs.')
parser_log_collector.add_argument(
'-w', '--worker_logs', action='store_true',
help='Collects all worker related logs.')
# Add GCS logs collector
parser_gcs_logs = subparsers.add_parser(
'dumpgcs', help='Get Turbinia results from Google Cloud Storage.')
parser_gcs_logs.add_argument(
'-o', '--output_dir', help='Directory path for output.', required=True)
parser_gcs_logs.add_argument(
'-t', '--task_id', help='Download all the results for given task_id.')
parser_gcs_logs.add_argument(
'-r', '--request_id',
help='Download the results for all Tasks for the given request_id.')
parser_gcs_logs.add_argument(
'-b', '--bucket',
help='Alternate GCS bucket to download from. Must be in the following '
'format gs://{BUCKET_NAME}/. Defaults to the BUCKET_NAME as specified '
'in the config')
parser_gcs_logs.add_argument(
'-d', '--days_history', default=0, type=int,
help='Number of days of history to to query results for', required=False)
parser_gcs_logs.add_argument(
'-i', '--instance_id',
help='Instance ID used to run tasks/requests. You must provide an '
'instance ID if the task/request was not processed on the same instance '
'as your config file.')
# Server
subparsers.add_parser('server', help='Run Turbinia Server')
args = parser.parse_args(args)
# Load the config before final logger setup so we can find the path to the
# log file.
try:
if args.config_file:
config.LoadConfig(config_file=args.config_file)
else:
config.LoadConfig()
except TurbiniaException as exception:
print(
'Could not load config file ({0!s}).\n{1:s}'.format(
exception, config.CONFIG_MSG))
sys.exit(1)
if args.log_file:
user_specified_log = args.log_file
if args.output_dir:
config.OUTPUT_DIR = args.output_dir
config.TURBINIA_COMMAND = args.command
server_flags_set = args.command == 'server'
worker_flags_set = args.command in ('psqworker', 'celeryworker')
# Run logger setup again if we're running as a server or worker (or have a log
# file explicitly set on the command line) to set a file-handler now that we
# have the logfile path from the config.
if server_flags_set or worker_flags_set or args.log_file:
if args.log_file:
logger.setup(log_file_path=user_specified_log)
else:
logger.setup()
if args.quiet:
log.setLevel(logging.ERROR)
elif args.debug:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
# Enable tasks debugging for supported tasks
if args.debug_tasks:
config.DEBUG_TASKS = True
if config.TASK_MANAGER == 'PSQ':
from turbinia.lib import google_cloud
from libcloudforensics.providers.gcp import forensics as gcp_forensics
# Enable GCP Stackdriver Logging
if config.STACKDRIVER_LOGGING and args.command in ('server', 'psqworker'):
google_cloud.setup_stackdriver_handler(
config.TURBINIA_PROJECT, args.command)
log.info('Turbinia version: {0:s}'.format(__version__))
# Do late import of other needed Turbinia modules. This is needed because the
# config is loaded by these modules at load time, and we want to wait to load
# the config until after we parse the args so that we can use those arguments
# to point to config paths.
from turbinia import notify
from turbinia import client as TurbiniaClientProvider
from turbinia.worker import TurbiniaCeleryWorker
from turbinia.worker import TurbiniaPsqWorker
from turbinia.server import TurbiniaServer
# Print out config if requested
if args.command == 'config':
if args.file_only:
log.info('Config file path is {0:s}\n'.format(config.configSource))
sys.exit(0)
try:
with open(config.configSource, "r") as f:
print(f.read())
sys.exit(0)
except IOError as exception:
msg = (
'Failed to read config file {0:s}: {1!s}'.format(
config.configSource, exception))
raise TurbiniaException(msg)
#sends test notification
if args.command == 'testnotify':
notify.sendmail(
config.EMAIL_ADDRESS, 'Turbinia test notification',
'This is a test notification')
sys.exit(0)
args.jobs_allowlist = [j.lower() for j in args.jobs_allowlist]
args.jobs_denylist = [j.lower() for j in args.jobs_denylist]
# Read and set filter_patterns
filter_patterns = []
if (args.filter_patterns_file and
not os.path.exists(args.filter_patterns_file)):
msg = 'Filter patterns file {0:s} does not exist.'.format(
args.filter_patterns_file)
raise TurbiniaException(msg)
elif args.filter_patterns_file:
try:
filter_patterns = open(args.filter_patterns_file).read().splitlines()
except IOError as e:
log.warning(
'Cannot open file {0:s} [{1!s}]'.format(args.filter_patterns_file, e))
# Read yara rules
yara_rules = ''
if (args.yara_rules_file and not os.path.exists(args.yara_rules_file)):
msg = 'Yara rules file {0:s} does not exist.'.format(
args.yara_rules_file)
raise TurbiniaException(msg)
elif args.yara_rules_file:
try:
yara_rules = open(args.yara_rules_file).read()
except IOError as e:
msg = ('Cannot open file {0:s} [{1!s}]'.format(args.yara_rules_file, e))
raise TurbiniaException(msg)
# Create Client object
client = None
if args.command not in ('psqworker', 'server'):
client = TurbiniaClientProvider.get_turbinia_client()
# Set group id
group_id = uuid.uuid4().hex
# Checks for bulk processing
if args.command in ('rawdisk', 'directory', 'compresseddirectory'):
args.name, args.source = check_args(
args.source_path, [args.name, args.source])
# Iterate through evidence and call process_evidence
for i, source_path in enumerate(args.source_path):
name = args.name[i]
source = args.source[i]
process_evidence(
args=args, source_path=source_path, name=name, source=source,
group_id=group_id, filter_patterns=filter_patterns, client=client,
yara_rules=yara_rules)
elif args.command in ('googleclouddisk', 'googleclouddiskembedded'):
# Fail if this is a local instance
if config.SHARED_FILESYSTEM and not args.force_evidence:
msg = (
'The evidence type {0:s} is Cloud only, and this instance of '
'Turbinia is not a cloud instance.'.format(args.command))
raise TurbiniaException(msg)
# Check cloud zones
if not args.zone and config.TURBINIA_ZONE:
args.zone = [config.TURBINIA_ZONE]
elif not args.zone and not config.TURBINIA_ZONE:
msg = 'Turbinia zone must be set by --zone or in config.'
raise TurbiniaException(msg)
# Checks for cloud project
if not args.project and config.TURBINIA_PROJECT:
args.project = [config.TURBINIA_PROJECT]
elif not args.project and not config.TURBINIA_PROJECT:
msg = 'Turbinia project must be set by --project or in config'
raise TurbiniaException(msg)
# Since mount_partition and embedded_path are not in cloud disk namespace,
# Setting them to None here
if args.command == 'googleclouddisk':
args.mount_partition = None
args.embedded_path = None
(
args.name, args.source, args.project, args.zone, args.mount_partition,
args.embedded_path) = check_args(
args.disk_name, [
args.name, args.source, args.project, args.zone,
args.mount_partition, args.embedded_path
])
mount_partition = None
embedded_path = None
for i, disk_name in enumerate(args.disk_name):
project = args.project[i]
zone = args.zone[i]
name = args.name[i]
source = args.source[i]
if args.command == 'googleclouddiskembedded':
embedded_path = args.embedded_path[i]
mount_partition = args.mount_partition[i]
if ((project and project != config.TURBINIA_PROJECT) or
(zone and zone != config.TURBINIA_ZONE)):
new_disk = gcp_forensics.CreateDiskCopy(
project, config.TURBINIA_PROJECT, None, config.TURBINIA_ZONE,
disk_name=disk_name)
disk_name = new_disk.name
if args.copy_only:
log.info(
'--copy_only specified, so not processing {0:s} with '
'Turbinia'.format(disk_name))
continue
process_evidence(
args=args, disk_name=disk_name, name=name, source=source,
project=project, zone=zone, embedded_path=embedded_path,
mount_partition=mount_partition, group_id=group_id,
filter_patterns=filter_patterns, client=client, yara_rules=yara_rules)
elif args.command == 'rawmemory':
# Checks if length of args match
args.name, args.profile = check_args(
args.source_path, [args.name, args.profile])
for i, source_path in enumerate(args.source_path):
profile = args.profile[i]
name = args.name[i]
process_evidence(
args=args, source_path=source_path, name=name, profile=profile,
group_id=group_id, filter_patterns=filter_patterns, client=client,
yara_rules=yara_rules)
elif args.command == 'hindsight':
args.name, args.browser_type, args.format = check_args(
args.source_path, [args.name, args.browser_type, args.format])
for i, source_path in enumerate(args.source_path):
name = args.name[i]
browser_type = args.browser_type[i]
format = args.format[i]
process_evidence(
args=args, source_path=source_path, name=name, format=format,
group_id=group_id, client=client, filter_patterns=filter_patterns,
yara_rules=yara_rules, browser_type=browser_type)
elif args.command == 'psqworker':
# Set up root logger level which is normally set by the psqworker command
# which we are bypassing.
logger.setup()
worker = TurbiniaPsqWorker(
jobs_denylist=args.jobs_denylist, jobs_allowlist=args.jobs_allowlist)
worker.start()
elif args.command == 'celeryworker':
logger.setup()
worker = TurbiniaCeleryWorker(
jobs_denylist=args.jobs_denylist, jobs_allowlist=args.jobs_allowlist)
worker.start()
elif args.command == 'server':
server = TurbiniaServer(
jobs_denylist=args.jobs_denylist, jobs_allowlist=args.jobs_allowlist)
server.start()
elif args.command == 'status':
region = config.TURBINIA_REGION
if args.request_id and args.group_id:
msg = (
'Cannot run status command with request ID and group ID. Please '
'only specify one.')
raise TurbiniaException(msg)
if args.close_tasks:
if args.group_id:
        msg = 'The --close_tasks flag is not compatible with --group_id.'
raise TurbiniaException(msg)
if args.user or args.request_id or args.task_id:
print(
client.close_tasks(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, request_id=args.request_id, task_id=args.task_id,
user=args.user, requester=getpass.getuser()))
sys.exit(0)
else:
log.info(
            '--close_tasks (-c) requires --user, --request_id, and/or --task_id'
)
sys.exit(1)
if args.dump_json and (args.statistics or args.requests or args.workers):
log.info(
'The --dump_json flag is not compatible with --statistics, '
          '--requests, or --workers flags')
sys.exit(1)
if args.statistics:
print(
client.format_task_statistics(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, days=args.days_history, task_id=args.task_id,
request_id=args.request_id, user=args.user, csv=args.csv))
sys.exit(0)
if args.wait and args.request_id:
client.wait_for_request(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, request_id=args.request_id, user=args.user,
poll_interval=args.poll_interval)
elif args.wait and not args.request_id:
log.info(
'--wait requires --request_id, which is not specified. '
'turbiniactl will exit without waiting.')
if args.requests:
print(
client.format_request_status(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, days=args.days_history,
all_fields=args.all_fields))
sys.exit(0)
if args.workers:
print(
client.format_worker_status(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, days=args.days_history,
all_fields=args.all_fields))
sys.exit(0)
if args.dump_json:
output_json = True
else:
output_json = False
print(
client.format_task_status(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, days=args.days_history, task_id=args.task_id,
request_id=args.request_id, group_id=args.group_id, user=args.user,
all_fields=args.all_fields, full_report=args.full_report,
priority_filter=args.priority_filter, output_json=output_json))
sys.exit(0)
elif args.command == 'listjobs':
log.info('Available Jobs:')
client.list_jobs()
elif args.command == 'gcplogs':
if not config.STACKDRIVER_LOGGING:
msg = 'Stackdriver logging must be enabled in order to use this.'
raise TurbiniaException(msg)
if args.output_dir and not os.path.isdir(args.output_dir):
msg = 'Please provide a valid directory path.'
raise TurbiniaException(msg)
query = None
if args.query:
query = args.query
if args.worker_logs:
if query:
query = 'jsonPayload.origin="psqworker" {0:s}'.format(query)
else:
query = 'jsonPayload.origin="psqworker"'
if args.server_logs:
if query:
query = 'jsonPayload.origin="server" {0:s}'.format(query)
else:
query = 'jsonPayload.origin="server"'
google_cloud.get_logs(
config.TURBINIA_PROJECT, args.output_dir, args.days_history, query)
elif args.command == 'dumpgcs':
if not config.GCS_OUTPUT_PATH and not args.bucket:
msg = 'GCS storage must be enabled in order to use this.'
raise TurbiniaException(msg)
if not args.task_id and not args.request_id:
msg = 'You must specify one of task_id or request_id.'
raise TurbiniaException(msg)
if not os.path.isdir(args.output_dir):
msg = 'Please provide a valid directory path.'
raise TurbiniaException(msg)
gcs_bucket = args.bucket if args.bucket else config.GCS_OUTPUT_PATH
instance_id = args.instance_id if args.instance_id else config.INSTANCE_ID
try:
task_data = client.get_task_data(
instance=instance_id, days=args.days_history,
project=config.TURBINIA_PROJECT, region=config.TURBINIA_REGION,
task_id=args.task_id, request_id=args.request_id,
function_name='gettasks')
output_writer = GCSOutputWriter(
gcs_bucket, local_output_dir=args.output_dir)
if not task_data:
msg = 'No Tasks found for task/request ID'
raise TurbiniaException(msg)
if args.task_id:
log.info(
'Downloading GCS files for task_id {0:s} to {1:s}.'.format(
args.task_id, args.output_dir))
for task in task_data:
if task['id'] == args.task_id:
if task['saved_paths']:
output_writer.copy_from_gcs(task['saved_paths'])
if args.request_id:
log.info(
'Downloading GCS files for request_id {0:s} to {1:s}.'.format(
args.request_id, args.output_dir))
paths = []
for task in task_data:
if task['saved_paths']:
paths.extend(task['saved_paths'])
output_writer.copy_from_gcs(paths)
except TurbiniaException as exception:
log.error('Failed to pull the data {0!s}'.format(exception))
else:
log.warning('Command {0!s} not implemented.'.format(args.command))
# TODO: shard this function and move some of its functionalities to other files
# (move some of this to evidence.py to run the checks etc)
def process_evidence(
client, group_id, args=None, browser_type=None, disk_name=None,
embedded_path=None, filter_patterns=None, format=None, mount_partition=None,
name=None, profile=None, project=None, source=None, source_path=None,
yara_rules=None, zone=None):
"""Creates evidence and turbinia request.
Args:
client(TurbiniaClient): TurbiniaClient used for creating requests.
group_id(str): Group ID used for bulk processing.
args(Namespace): commandline args.
browser_type(str): Browser type used for hindsight.
disk_name(str): Disk name used for processing cloud evidence.
embedded_path(str): Embedded path for clouddiskembedded.
    filter_patterns(str): Filter patterns used for processing evidence.
format(str): Output format for hindsight.
mount_partition(int): Mount partition for clouddiskembedded.
name(str): Evidence name.
profile(list(str)): List of volatility profiles used for rawmemory.
project(str): Project for cloud related evidence.
source(str): Source for evidence.
source_path(str): Source path used for host evidence.
yara_rules(str): Yara rule for processing evidence.
    zone(str): Cloud zone used for cloud evidence.
"""
from turbinia import evidence
# Set request id
request_id = args.request_id if args.request_id else uuid.uuid4().hex
# Start Evidence configuration
evidence_ = None
if args.command == 'rawdisk':
evidence_ = evidence.RawDisk(
name=name, source_path=os.path.abspath(source_path), source=source)
elif args.command == 'directory':
source_path = os.path.abspath(source_path)
if not config.SHARED_FILESYSTEM:
log.info(
'A Cloud Only Architecture has been detected. '
'Compressing the directory for GCS upload.')
source_path = archive.CompressDirectory(
source_path, output_path=config.TMP_DIR)
evidence_ = evidence.CompressedDirectory(
name=name, source_path=source_path, source=source)
else:
evidence_ = evidence.Directory(
name=name, source_path=source_path, source=source)
elif args.command == 'compresseddirectory':
archive.ValidateTarFile(source_path)
evidence_ = evidence.CompressedDirectory(
name=name, source_path=os.path.abspath(source_path), source=source)
elif args.command == 'googleclouddisk':
evidence_ = evidence.GoogleCloudDisk(
name=name, disk_name=disk_name, project=project, zone=zone,
source=source)
elif args.command == 'googleclouddiskembedded':
parent_evidence_ = evidence.GoogleCloudDisk(
name=name, disk_name=disk_name, project=project, source=source,
mount_partition=mount_partition, zone=zone)
evidence_ = evidence.GoogleCloudDiskRawEmbedded(
name=name, disk_name=disk_name, project=project, zone=zone,
embedded_path=embedded_path)
evidence_.set_parent(parent_evidence_)
elif args.command == 'hindsight':
if format not in ['xlsx', 'sqlite', 'jsonl']:
msg = 'Invalid output format.'
raise TurbiniaException(msg)
if browser_type not in ['Chrome', 'Brave']:
msg = 'Browser type not supported.'
raise TurbiniaException(msg)
source_path = os.path.abspath(source_path)
evidence_ = evidence.ChromiumProfile(
name=name, source_path=source_path, output_format=format,
browser_type=browser_type)
elif args.command == 'rawmemory':
source_path = os.path.abspath(source_path)
evidence_ = evidence.RawMemory(
name=name, source_path=source_path, profile=profile,
module_list=args.module_list)
if evidence_ and not args.force_evidence:
if not config.SHARED_FILESYSTEM and evidence_.copyable:
if os.path.exists(evidence_.local_path):
output_manager = OutputManager()
output_manager.setup(evidence_.type, request_id, remote_only=True)
output_manager.save_evidence(evidence_)
else:
msg = (
'The evidence local path does not exist: {0:s}. Please submit '
'a new Request with a valid path.'.format(evidence_.local_path))
raise TurbiniaException(msg)
elif not config.SHARED_FILESYSTEM and not evidence_.cloud_only:
msg = (
'The evidence type {0:s} cannot run on Cloud instances of '
'Turbinia. Consider wrapping it in a '
'GoogleCloudDiskRawEmbedded or other Cloud compatible '
'object'.format(evidence_.type))
raise TurbiniaException(msg)
request = None
if evidence_:
request = client.create_request(
request_id=request_id, group_id=group_id, requester=getpass.getuser())
request.evidence.append(evidence_)
if args.decryption_keys:
for credential in args.decryption_keys:
try:
credential_type, credential_data = credential.split('=')
except ValueError as exception:
msg = (
'Could not parse credential [{0:s}] from decryption keys '
'{1!s}: {2!s}'.format(
credential, args.decryption_keys, exception))
raise TurbiniaException(msg)
evidence_.credentials.append((credential_type, credential_data))
# Recipe pre-condition checks.
if args.recipe and args.recipe_path:
msg = ('Expected a recipe name (-I) or path (-P) but found both.')
raise TurbiniaException(msg)
if args.recipe or args.recipe_path:
# Load the specified recipe.
recipe_dict = client.create_recipe(
debug_tasks=args.debug_tasks, filter_patterns=filter_patterns,
group_id=group_id, jobs_allowlist=args.jobs_allowlist,
jobs_denylist=args.jobs_denylist,
recipe_name=args.recipe if args.recipe else args.recipe_path,
sketch_id=None, skip_recipe_validation=args.skip_recipe_validation,
yara_rules=yara_rules)
request.recipe = recipe_dict
if args.dump_json:
print(request.to_json().encode('utf-8'))
sys.exit(0)
else:
log.info(
'Creating request {0:s} with group id {1:s} and evidence '
'{2:s}'.format(request.request_id, request.group_id, evidence_.name))
# TODO add a new log line when group status is implemented
log.info(
'Run command "turbiniactl status -r {0:s}" to see the status of'
' this request and associated tasks'.format(request.request_id))
client.send_request(request)
if args.wait:
log.info(
'Waiting for request {0:s} to complete'.format(request.request_id))
region = config.TURBINIA_REGION
client.wait_for_request(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, request_id=request.request_id,
poll_interval=args.poll_interval)
print(
client.format_task_status(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, request_id=request.request_id,
all_fields=args.all_fields))
def main():
"""Main function for turbiniactl"""
try:
process_args(sys.argv[1:])
except TurbiniaException as e:
log.error('There was a problem processing arguments: {0:s}'.format(str(e)))
sys.exit(1)
log.info('Done.')
sys.exit(0)
if __name__ == '__main__':
main()
|
the-stack_0_9106 | import asyncio
import concurrent.futures
import json
import logging
import tempfile
import pytest
import bitcoinx
from aiohttp import web
from aiohttp.test_utils import make_mocked_request
from bitcoinx import BitcoinTestnet, hex_str_to_hash
from typing import List, Union, Dict, Any, Optional, Tuple
from concurrent.futures.thread import ThreadPoolExecutor
from electrumsv.constants import ScriptType, TransactionOutputFlag, TxFlags
from electrumsv.restapi import Fault, good_response
from electrumsv.wallet import AbstractAccount, Wallet
from electrumsv.transaction import Transaction, TransactionContext
from electrumsv.types import TransactionSize
from electrumsv.wallet_database.types import TransactionOutputSpendableRow
from ..errors import Errors
from ..handlers import ExtensionEndpoints
class SVTestnet(object):
ADDRTYPE_P2PKH = 111
ADDRTYPE_P2SH = 196
NAME = 'testnet'
WIF_PREFIX = 0xef
COIN = BitcoinTestnet
BIP44_COIN_TYPE = 1
class _CurrentNetMeta(type):
def __getattr__(cls, attr):
return getattr(cls._net, attr)
class Net(metaclass=_CurrentNetMeta):
_net = SVTestnet
SPENDABLE_UTXOS = [
TransactionOutputSpendableRow(
tx_hash=hex_str_to_hash(
'76d5bfabe40ca6cbd315b04aa24b68fdd8179869fd1c3501d5a88a980c61c1bf'),
        txo_index=0,
value=100000,
keyinstance_id=0,
script_type=ScriptType.P2PKH,
flags=TransactionOutputFlag.NONE,
account_id=None,
masterkey_id=None,
derivation_type=None,
derivation_data2=None),
TransactionOutputSpendableRow(
tx_hash=hex_str_to_hash(
'76d5bfabe40ca6cbd315b04aa24b68fdd8179869fd1c3501d5a88a980c61c1bf'),
txo_index=0,
value=100000,
keyinstance_id=0,
script_type=ScriptType.P2PKH,
flags=TransactionOutputFlag.NONE,
account_id=None,
masterkey_id=None,
derivation_type=None,
derivation_data2=None)
]
p2pkh_object = bitcoinx.P2PKH_Address.from_string("muV4JqcF3V3Vi7J2hGucQJzSLcsUAaJwLA", Net.COIN)
P2PKH_OUTPUT = {"value": 100,
"script_pubkey": p2pkh_object.to_script().to_hex()}
rawtx = "0100000001c2f9bbe87ab222fa84954a9f8140696eafdeb578e8a7555c1db60c7cb4b391b601" \
"0000006a47304402207f4e64f379412ed251e4e454c52fb10b716b40a4f44986a1b410940663" \
"d7fcce02200fbf6483de08e66e05ec91240fc69a7623d3295ec872e9676d95cac438e3207541" \
"2102e49bb187d96b6a1556f08b46732a54b71a73e947e4d31cf84a4d14e20b071f6effffffff" \
"02000000000000000009006a0648656c6c6f0afc050000000000001976a914c95b08d2e984a0" \
"92c1bcaad98b387aa5d8db3f7d88ac7c761400"
def _fake_history_dto_succeeded(account: AbstractAccount, tx_states: int=None) -> List[Dict[
Any, Any]]:
result = [
{
"txid": "d4e226dde5c652782679a44bfad7021fb85df6ba8d32b1b17b8dc043e85d7103",
"height": 1,
"tx_flags": 2097152,
"value": 5000000000
},
{
"txid": "6a25882b47b3f2e97c09ee9f3131831df4b2ec1b54cc45fe3899bb4a3b5e2b29",
"height": 0,
"tx_flags": 1048576,
"value": -104
},
{
"txid": "611baae09b4db5894bbb4f13f35ae3ef492f34b388905a31a0ef82898cd3e6f6",
"height": None,
"tx_flags": 8388608,
"value": -5999999718
}
]
return result
async def _fake_reset_wallet_transaction_state_succeeded(wallet_name, index) -> Optional[Fault]:
return None
def _fake_balance_dto_succeeded(wallet) -> Dict[Any, Any]:
return {"confirmed_balance": 10,
"unconfirmed_balance": 20,
"unmatured_balance": 0}
def _fake_remove_transaction(tx_hash: bytes, wallet: AbstractAccount):
return
def _fake_remove_transaction_raise_fault(tx_hash: bytes, wallet: AbstractAccount):
raise Fault(Errors.DISABLED_FEATURE_CODE, Errors.DISABLED_FEATURE_MESSAGE)
async def _fake_load_wallet_succeeds(wallet_name: str, password: str) -> Wallet:
return MockWallet()
def _fake_coin_state_dto(wallet) -> Dict[str, Any]:
results = {"cleared_coins": 50,
"settled_coins": 2000,
"unmatured": 100}
return results
def _fake_create_transaction_succeeded(file_id, message_bytes, child_wallet, password,
require_confirmed) -> Tuple[Any, set]:
# Todo - test _create_transaction separately
tx = Transaction.from_hex(rawtx)
frozen_utxos = set()
return tx, frozen_utxos
async def _fake_broadcast_tx(rawtx: str, tx_hash: bytes, account: AbstractAccount) -> str:
return "6797415e3b4a9fbb61b209302374853bdefeb4567ad0ed76ade055e94b9b66a2"
def _fake_spawn(fn, *args):
return '<throwaway _future>'
class FakeFuture:
def __init__(self, result) -> None:
self._result = result
def result(self) -> Any:
return self._result
class MockAccount(AbstractAccount):
def __init__(self, wallet=None):
self._id = 1
self._wallet = wallet
def maybe_set_transaction_state(self, tx_hash: bytes, flags: TxFlags,
ignore_mask: Optional[TxFlags]=None) -> bool:
return True
def dumps(self):
return None
def get_transaction_outputs_with_key_data(self, exclude_frozen: bool=True, mature: bool=True,
confirmed_only: Optional[bool]=None, keyinstance_ids: Optional[List[int]]=None) \
-> List[TransactionOutputSpendableRow]:
return SPENDABLE_UTXOS
def make_unsigned_transaction(self, utxos=None, outputs=None):
return Transaction.from_hex(rawtx), TransactionContext()
def sign_transaction(self, tx: Transaction, password: str,
context: Optional[TransactionContext]=None) \
-> Optional[FakeFuture]:
return FakeFuture(Transaction.from_hex(rawtx))
class MockWallet(Wallet):
def __init__(self):
self._accounts: Dict[int, AbstractAccount] = {1: MockAccount(self)}
def set_boolean_setting(self, setting_name: str, enabled: bool) -> None:
return
def _fake_get_account(self, account_id):
return self._accounts[account_id]
def get_id(self) -> int:
return 32323232
class MockApp:
def __init__(self):
self.txb_executor = ThreadPoolExecutor()
def _create_transaction(self):
pass
def _broadcast_transaction(self):
pass
def broadcast_file(*args):
pass
def _create_tx_helper(self):
pass
class MockConfig:
def __init__(self):
pass
def estimate_fee(self, size: TransactionSize) -> int:
return sum(size) * 1 # 1 sat/byte
def fee_per_kb(self):
        return 1000  # 1000 sat/kB (i.e. 1 sat/byte)
class MockAsync(object):
def spawn(self, fn, *args):
return '<throwaway _future>'
class MockSession:
def __init__(self):
pass
def set_throttled(self, flag: bool):
return True
async def send_request(self, method, args):
return '6797415e3b4a9fbb61b209302374853bdefeb4567ad0ed76ade055e94b9b66a2'
async def mock_main_session():
return MockSession()
class MockNetwork:
def __init__(self):
self._main_session = mock_main_session
class MockDaemon:
def __init__(self):
self.network = MockNetwork()
self.wallets = {"wallet_file1.sqlite": "path/to/wallet"}
class MockAppState:
def __init__(self):
self.app = MockApp()
self.config = MockConfig()
self.async_ = MockAsync()
self.daemon = MockDaemon()
class MockDefaultEndpoints(ExtensionEndpoints):
# fake init for LocalRESTExtensions
def __init__(self):
self.all_wallets = ["wallet_file1.sqlite", "wallet_file2.sqlite"]
self.wallets_path = tempfile.TemporaryDirectory()
self.app_state = MockAppState()
self.logger = logging.getLogger("mock-restapi")
self.prev_transaction = ''
self.txb_executor = ThreadPoolExecutor(max_workers=1)
def select_inputs_and_outputs(self, config=None, child_wallet=None, base_fee=None,
split_count=None, desired_utxo_count=None, max_utxo_margin=200, split_value=3000,
require_confirmed=None):
return SPENDABLE_UTXOS, None, True
# monkeypatching methods of LocalRESTExtensions
def _fake_get_all_wallets(self, wallets_path):
return self.all_wallets
def _fake_get_parent_wallet(self, wallet_name):
return MockWallet()
def _fake_account_dto(self, wallet):
return {wallet._id: {"wallet_type": "StandardWallet",
"is_wallet_ready": True}}
def _fake_create_tx_helper_raise_exception(self, request) -> Tuple[Any, set]:
raise Fault(Errors.INSUFFICIENT_COINS_CODE, Errors.INSUFFICIENT_COINS_MESSAGE)
async def _fake_send_request(self, method, args):
'''fake for 'blockchain.transaction.broadcast' '''
return Transaction.from_hex(rawtx).txid()
def _fake_get_account_succeeded(wallet_name, index) -> AbstractAccount:
    return MockAccount()  # which in turn patches get_transaction_outputs_with_key_data()
class TestDefaultEndpoints:
# PATHS
VERSION = "/v1"
NETWORK = "/{network}"
BASE = VERSION + NETWORK + "/dapp" # avoid conflicts with built-ins
WALLETS_TLD = BASE + "/wallets"
WALLETS_PARENT = WALLETS_TLD + "/{wallet_name}"
WALLETS_ACCOUNT = WALLETS_PARENT + "/{account_id}"
ACCOUNT_TXS = WALLETS_ACCOUNT + "/txs"
ACCOUNT_UTXOS = WALLETS_ACCOUNT + "/utxos"
@pytest.fixture
def cli(self, loop, aiohttp_client, monkeypatch):
"""mock client - see: https://docs.aiohttp.org/en/stable/client_quickstart.html"""
app = web.Application()
app.router.add_get(self.WALLETS_TLD, self.rest_server.get_all_wallets)
app.router.add_get(self.WALLETS_PARENT, self.rest_server.get_parent_wallet)
app.router.add_post(self.WALLETS_PARENT + "/load_wallet", self.rest_server.load_wallet)
app.router.add_get(self.WALLETS_ACCOUNT, self.rest_server.get_account)
app.router.add_get(self.ACCOUNT_UTXOS + "/coin_state", self.rest_server.get_coin_state)
app.router.add_get(self.ACCOUNT_UTXOS, self.rest_server.get_utxos)
app.router.add_get(self.ACCOUNT_UTXOS + "/balance", self.rest_server.get_balance)
app.router.add_delete(self.ACCOUNT_TXS, self.rest_server.remove_txs)
app.router.add_get(self.ACCOUNT_TXS + "/history", self.rest_server.get_transaction_history)
app.router.add_get(self.ACCOUNT_TXS + "/fetch", self.rest_server.fetch_transaction)
app.router.add_post(self.ACCOUNT_TXS + "/create", self.rest_server.create_tx)
app.router.add_post(self.ACCOUNT_TXS + "/create_and_broadcast",
self.rest_server.create_and_broadcast)
app.router.add_post(self.ACCOUNT_TXS + "/broadcast", self.rest_server.broadcast)
app.router.add_post(self.ACCOUNT_TXS + "/split_utxos", self.rest_server.split_utxos)
return loop.run_until_complete(aiohttp_client(app))
@pytest.fixture(autouse=True)
def init_restapi(self, monkeypatch):
"""This is injected into all test functions"""
self.rest_server = MockDefaultEndpoints()
monkeypatch.setattr(self.rest_server, '_get_parent_wallet',
self.rest_server._fake_get_parent_wallet)
monkeypatch.setattr(Wallet, 'get_account',
MockWallet._fake_get_account)
monkeypatch.setattr(self.rest_server, '_account_dto',
self.rest_server._fake_account_dto)
monkeypatch.setattr(self.rest_server, '_get_all_wallets',
self.rest_server._fake_get_all_wallets)
# Todo
# - test_ping_txb_good_response
# - test_ping_node_via_txb_good_response
async def test_get_all_wallets_good_request(self):
"""
GET http://127.0.0.1:9999/v1/{network}/wallets
Gets all wallet file paths in AppData / .electrumsv directory
"""
network = "test"
all_wallets = self.rest_server.all_wallets
mock_request = make_mocked_request("GET", f"/v1/{network}/dapp/wallets/")
expected_json = {"wallets": all_wallets}
resp = await self.rest_server.get_all_wallets(mock_request)
assert resp.text == good_response(expected_json).text
async def test_get_parent_wallet_good_response(self, cli):
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
resp = await cli.get(f"/v1/{network}/dapp/wallets/{wallet_name}")
expected_json = {'parent_wallet': "wallet_file1.sqlite",
'accounts': {'1': {'wallet_type': 'StandardWallet',
'is_wallet_ready': True}}}
assert resp.status == 200
response = await resp.read()
assert json.loads(response) == expected_json
async def test_get_account_good_response(self, cli):
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
account_id = "1"
resp = await cli.get(f"/v1/{network}/dapp/wallets/{wallet_name}/"
f"{account_id}")
# check
expected_json = {'1': {'wallet_type': 'StandardWallet',
'is_wallet_ready': True}}
assert resp.status == 200
response = await resp.read()
assert json.loads(response) == expected_json
async def test_load_wallet_good_request(self, monkeypatch, cli):
monkeypatch.setattr(self.rest_server, '_load_wallet',
_fake_load_wallet_succeeds)
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
password = "mypass"
resp = await cli.post(f"/v1/{network}/dapp/wallets/{wallet_name}/load_wallet",
json= { "password": password })
# check
expected_json = {
"parent_wallet": wallet_name,
"wallet_id": 32323232,
"accounts": {'1': {"wallet_type": "StandardWallet",
"is_wallet_ready": True}}}
assert resp.status == 200
response = await resp.read()
assert json.loads(response) == expected_json
async def test_get_balance_good_response(self, monkeypatch, cli):
monkeypatch.setattr(self.rest_server, '_balance_dto',
_fake_balance_dto_succeeded)
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
account_id = "1"
resp = await cli.get(f"/v1/{network}/dapp/wallets/{wallet_name}/{account_id}/utxos/balance")
# check
expected_json = {"confirmed_balance": 10,
"unconfirmed_balance": 20,
"unmatured_balance": 0}
assert resp.status == 200
response = await resp.read()
assert json.loads(response) == expected_json
async def test_remove_txs_specific_txid(self, monkeypatch, cli):
monkeypatch.setattr(self.rest_server, 'remove_transaction',
_fake_remove_transaction)
expected_response = {
"items": [
{
'id': '0000000000000000000000000000000000000000000000000000000000000000',
'result': 200
}
]
}
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
account_id = "1"
txids = ["00" * 32]
resp = await cli.delete(f"/v1/{network}/dapp/wallets/{wallet_name}/"
f"{account_id}/txs",
data=json.dumps({"txids": txids}))
assert resp.status == 207, await resp.read()
response = await resp.read()
assert json.loads(response) == expected_response
async def test_remove_txs_specific_txid_failed_to_delete(self, monkeypatch, cli):
monkeypatch.setattr(self.rest_server, 'remove_transaction',
_fake_remove_transaction_raise_fault)
expected_response = {
"items": [
{
'id': '0000000000000000000000000000000000000000000000000000000000000000',
'result': 400,
'description': 'DisabledFeatureError: You used this endpoint in a way that '
'is not supported for safety reasons. See documentation for '
'details (https://electrumsv.readthedocs.io/ )',
}
]
}
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
account_id = "1"
txids = ["00" * 32]
resp = await cli.delete(f"/v1/{network}/dapp/wallets/{wallet_name}/"
f"{account_id}/txs",
data=json.dumps({"txids": txids}))
assert resp.status == 207, await resp.read()
response = await resp.read()
assert json.loads(response) == expected_response
async def test_remove_txs_bad_request(self, monkeypatch, cli):
monkeypatch.setattr(self.rest_server, 'remove_transaction',
_fake_remove_transaction_raise_fault)
expected_response = \
{'code': 40000, 'message': "Required body variable: 'txids' was not provided."}
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
account_id = "1"
# txids = ["00" * 32]
resp = await cli.delete(f"/v1/{network}/dapp/wallets/{wallet_name}/"
f"{account_id}/txs")
assert resp.status == 400, await resp.read()
response = await resp.read()
assert json.loads(response) == expected_response
async def test_get_transaction_history_good_response(self, monkeypatch, cli):
monkeypatch.setattr(self.rest_server, '_history_dto',
_fake_history_dto_succeeded)
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
account_id = "1"
resp = await cli.get(f"/v1/{network}/dapp/wallets/{wallet_name}/{account_id}/txs/history")
# check
expected_json = {
"history": [
{
"txid": "d4e226dde5c652782679a44bfad7021fb85df6ba8d32b1b17b8dc043e85d7103",
"height": 1,
"tx_flags": 2097152,
"value": 5000000000
},
{
"txid": "6a25882b47b3f2e97c09ee9f3131831df4b2ec1b54cc45fe3899bb4a3b5e2b29",
"height": 0,
"tx_flags": 1048576,
"value": -104
},
{
"txid": "611baae09b4db5894bbb4f13f35ae3ef492f34b388905a31a0ef82898cd3e6f6",
"height": None,
"tx_flags": 8388608,
"value": -5999999718
}
]
}
assert resp.status == 200, await resp.read()
response = await resp.read()
assert json.loads(response) == expected_json
async def test_get_coin_state_good_response(self, monkeypatch, cli):
monkeypatch.setattr(self.rest_server, '_coin_state_dto',
_fake_coin_state_dto)
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
account_id = "1"
resp = await cli.get(f"/v1/{network}/dapp/wallets/{wallet_name}/{account_id}/"
f"utxos/coin_state")
# check
expected_json = {"cleared_coins": 50,
"settled_coins": 2000,
"unmatured": 100}
assert resp.status == 200, await resp.read()
response = await resp.read()
assert json.loads(response) == expected_json
async def test_get_utxos_good_response(self, monkeypatch, cli):
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
index = "1"
resp = await cli.get(f"/v1/{network}/dapp/wallets/{wallet_name}/{index}/utxos")
# check
expected_json = {"utxos": self.rest_server._utxo_dto(SPENDABLE_UTXOS)}
assert resp.status == 200
response = await resp.read()
assert json.loads(response) == expected_json
async def test_create_tx_good_response(self, monkeypatch, cli):
class MockEventLoop:
async def run_in_executor(self, *args):
tx = Transaction.from_hex(rawtx)
frozen_utxos = None
return tx, frozen_utxos
def get_debug(self):
return
def _fake_get_event_loop():
return MockEventLoop()
monkeypatch.setattr(self.rest_server, '_get_account',
_fake_get_account_succeeded)
monkeypatch.setattr(self.rest_server.app_state.app, '_create_transaction',
_fake_create_transaction_succeeded)
monkeypatch.setattr(asyncio, 'get_event_loop', _fake_get_event_loop)
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
index = "1"
password = "mypass"
resp = await cli.request(path=f"/v1/{network}/dapp/wallets/{wallet_name}/"
f"{index}/txs/create",
method='post',
json={"outputs": [P2PKH_OUTPUT],
"password": password})
# check
expected_json = {"txid": Transaction.from_hex(rawtx).txid(),
"rawtx": rawtx}
assert resp.status == 200, await resp.read()
response = await resp.read()
assert json.loads(response) == expected_json
async def test_create_tx_insufficient_coins(self, monkeypatch, cli):
"""ensure that exception handling works even if no tx was successfully created"""
class MockEventLoop:
def get_debug(self):
return
def _fake_get_event_loop():
return MockEventLoop()
monkeypatch.setattr(self.rest_server, '_get_account',
_fake_get_account_succeeded)
monkeypatch.setattr(self.rest_server, '_create_tx_helper',
self.rest_server._fake_create_tx_helper_raise_exception)
monkeypatch.setattr(asyncio, 'get_event_loop', _fake_get_event_loop)
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
index = "1"
password = "mypass"
resp = await cli.request(path=f"/v1/{network}/dapp/wallets/{wallet_name}/"
f"{index}/txs/create",
method='post',
json={"outputs": [P2PKH_OUTPUT],
"password": password})
# check
expected_json = {'code': 40006, 'message': 'You have insufficient coins for this transaction'}
assert resp.status == 400, await resp.read()
response = await resp.read()
assert json.loads(response) == expected_json
async def test_create_and_broadcast_good_response(self, monkeypatch, cli):
monkeypatch.setattr(self.rest_server, '_get_account',
_fake_get_account_succeeded)
monkeypatch.setattr(self.rest_server.app_state.app, '_create_transaction',
_fake_create_transaction_succeeded)
monkeypatch.setattr(self.rest_server, '_broadcast_transaction',
_fake_broadcast_tx)
monkeypatch.setattr(self.rest_server.app_state.async_, 'spawn',
_fake_spawn)
monkeypatch.setattr(self.rest_server.app_state.async_, 'spawn',
_fake_spawn)
monkeypatch.setattr(self.rest_server, 'send_request',
self.rest_server._fake_send_request)
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
index = "1"
password = "mypass"
resp = await cli.request(path=f"/v1/{network}/dapp/wallets/{wallet_name}/"
f"{index}/txs/create_and_broadcast",
method='post',
json={"outputs": [P2PKH_OUTPUT],
"password": password})
# check
expected_json = {'txid': Transaction.from_hex(rawtx).txid()}
assert resp.status == 200, await resp.read()
response = await resp.read()
assert json.loads(response) == expected_json
async def test_broadcast_good_response(self, monkeypatch, cli):
monkeypatch.setattr(self.rest_server, '_get_account',
_fake_get_account_succeeded)
monkeypatch.setattr(self.rest_server.app_state.app, '_create_transaction',
_fake_create_transaction_succeeded)
monkeypatch.setattr(self.rest_server, '_broadcast_transaction',
_fake_broadcast_tx)
monkeypatch.setattr(self.rest_server, 'send_request',
self.rest_server._fake_send_request)
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
index = "1"
resp = await cli.request(path=f"/v1/{network}/dapp/wallets/{wallet_name}/"
f"{index}/txs/broadcast",
method='post',
json={"rawtx": rawtx})
# check
tx = Transaction.from_hex(rawtx)
expected_json = {"txid": tx.txid()}
assert resp.status == 200, await resp.read()
response = await resp.read()
assert json.loads(response) == expected_json
@pytest.mark.parametrize("spendable_utxos", [SPENDABLE_UTXOS[0]])
async def test_split_utxos_good_response(self, monkeypatch, cli, spendable_utxos):
monkeypatch.setattr(self.rest_server, '_get_account',
_fake_get_account_succeeded)
# mock request
network = "test"
wallet_name = "wallet_file1.sqlite"
account_id = "1"
password = "mypass"
resp = await cli.request(path=f"/v1/{network}/dapp/wallets/{wallet_name}/"
f"{account_id}/txs/split_utxos",
method='post',
json={"split_count": 10,
"desired_utxo_count": 100,
"split_value": 3000,
"password": password})
# check
tx = Transaction.from_hex(rawtx)
expected_json = {"txid": tx.txid()}
assert resp.status == 200, await resp.read()
response = await resp.read()
assert json.loads(response) == expected_json
|
the-stack_0_9108 | #!/usr/bin/python3
import argparse
import multiprocessing as mp
import sys
import os
import webserver
import firmware as f
from rpi_get_serial import rpi_get_serial
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Kuzzle IoT - multi sensor demo', prog="kuzzle-iot-demo-multi-device")
parser.add_argument('--pihost',
default='localhost',
                        help='The host pi to which pigpio will connect when used remotely')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
cmd_args = parser.parse_args(sys.argv[1:])
with mp.Manager() as manager:
config_update_event = manager.Event()
config_path = os.path.abspath('config.yaml')
admin_server = mp.Process(target=webserver.start_admin_server, name="admin-server",
args=(config_update_event, {'uid': rpi_get_serial()}, config_path))
admin_server.start()
while 1:
firmware = mp.Process(target=f.startup, name='firmware',
args=({"cmd_line": cmd_args, 'update_evt': config_update_event},))
firmware.start()
firmware.join()
|
the-stack_0_9110 | # NAME : Anagram Counting
# URL : https://open.kattis.com/problems/anagramcounting
# =============================================================================
# Calculate the total as n! / (n_1! * n_2! * ... * n_k!),
# where the denominator is the product of the factorials of the number of
# occurrences of each distinct character in the input sequence.
# =============================================================================
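# Worked example (editorial note, not part of the original solution): for the
# input "aab", n = 3 and the character counts are a: 2, b: 1, so the number of
# distinct anagrams is 3! / (2! * 1!) = 6 / 2 = 3 ("aab", "aba", "baa").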
import math
import sys
def dbg(msg):
    print(msg, file=sys.stderr)
def main():
for line in sys.stdin:
line = line.strip()
n = math.factorial(len(line))
occurrences = {}
for c in line:
curr = occurrences.get(c, 0)
occurrences[c] = curr + 1
product = 1
for key in occurrences:
product *= math.factorial(occurrences[key])
result = n // product
# dbg(f"LINE:{line}; N:{n}; PROD:{product}; RES:{result}")
print(result)
if __name__ == "__main__":
main()
|
the-stack_0_9113 | def gen(font, text, out="text.png", vert=2, fw=0, spacing=0, color="black", bg="transparent", width=256, height=256, center=False, wwrap=False, no_crop=False):
from . import cli
args = [font, text, "--out", out, "--vert", vert, "--fw", fw, "--spacing", spacing, "--color", color, "--bg", bg, "--width", width, "--height", height]
if center:
args.append("--center")
if wwrap:
args.append("--wwrap")
if no_crop:
args.append("--no-crop")
return cli.gen(args, standalone_mode=False)
|
the-stack_0_9116 | import asyncio
import websockets
import json
import aio_pika
import ast
import os
import logging
logger = logging.getLogger('websockets')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
class wsServer(object):
def __init__(self, portNum=8765):
self.portNum = portNum
self.connected = {}
self.STATE = {"value": 0}
self.USERS_MAPPING = []
self.start_server = websockets.serve(self.srvHandler, '',
portNum, compression=None)
self.lock = asyncio.Lock()
self.RABBIT_HOST = os.getenv('RABBIT_HOST', "127.0.0.1")
self.queue_receive = "recv_cmds_queue"
self.queue_reply_cmds = "reply_cmds_queue"
asyncio.ensure_future(self.start_rabbit_consumer())
asyncio.get_event_loop().run_until_complete(self.start_server)
asyncio.get_event_loop().run_forever()
async def register(self, hostname, websocket):
self.USERS_MAPPING.append([hostname, websocket])
async def unregister(self, websocket):
# Lock list to prevent race condition
async with self.lock:
userMappingListCopy = self.USERS_MAPPING
for inx, val in enumerate(userMappingListCopy):
if val[1] == websocket:
self.USERS_MAPPING.pop(inx)
async def processClientHostname(self, recvData):
msgRecv = json.loads(recvData)
hostname = msgRecv["regstrMchine"].replace("\n", "")
return hostname.strip()
async def getWsObjectByHostname(self, hostname):
for item in self.USERS_MAPPING:
if item[0] == hostname:
return item[1]
async def sendCmdsToClient(self, hostname, cmdsToExecute):
websocket = await self.getWsObjectByHostname(hostname)
print(cmdsToExecute)
strOfCmds = f'{{"remExecCmds": ["{cmdsToExecute}"]}}'
await websocket.send(strOfCmds)
async def registerClient(self, websocket):
msgRecv = await websocket.recv() # Get hostname sent from client
clntHostname = await self.processClientHostname(msgRecv)
await self.register(clntHostname, websocket)
async def process_rabbit_message(self, message: aio_pika.IncomingMessage):
async with message.process():
messageStr = str(message.body.decode())
messageStr = messageStr.replace("'",'"')
messageJson = ast.literal_eval(messageStr)
for k,v in messageJson.items():
await self.sendCmdsToClient(k,v)
async def send_rabbit_message(self, sendingMsg, routing_key):
connection = await aio_pika.connect_robust(
f"amqp://guest:guest@{self.RABBIT_HOST}/")
async with connection:
routing_key = self.queue_receive
channel = await connection.channel()
# Declaring queue
queue = await channel.declare_queue(self.queue_reply_cmds,
auto_delete=True)
await channel.default_exchange.publish(
aio_pika.Message(body=sendingMsg.encode(), expiration=30),
routing_key=self.queue_reply_cmds)
async def start_rabbit_consumer(self):
connection = await aio_pika.connect_robust(
f"amqp://guest:guest@{self.RABBIT_HOST}/")
# Creating channel
channel = await connection.channel()
        # Maximum number of messages that will be
        # processed at the same time.
await channel.set_qos(prefetch_count=100)
# Declaring queue
queue = await channel.declare_queue(self.queue_receive,
auto_delete=True)
# Send message to func
await queue.consume(self.process_rabbit_message)
async def srvHandler(self, websocket, path):
await self.registerClient(websocket)
try:
async for message in websocket:
await self.send_rabbit_message(message,
self.queue_reply_cmds)
finally:
await self.unregister(websocket)
def main():
    # wsServer.__init__ starts the asyncio event loop and blocks until it stops,
    # so constructing the server is all main() needs to do.
    wsServer()
if __name__ == "__main__":
main() |
the-stack_0_9119 | """Support for Roku."""
import asyncio
from datetime import timedelta
import logging
from typing import Any, Dict
from rokuecp import Roku, RokuConnectionError, RokuError
from rokuecp.models import Device
import voluptuous as vol
from homeassistant.components.media_player import DOMAIN as MEDIA_PLAYER_DOMAIN
from homeassistant.components.remote import DOMAIN as REMOTE_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import ATTR_NAME, CONF_HOST
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util.dt import utcnow
from .const import (
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SOFTWARE_VERSION,
DOMAIN,
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list, [vol.Schema({vol.Required(CONF_HOST): cv.string})]
)
},
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = [MEDIA_PLAYER_DOMAIN, REMOTE_DOMAIN]
SCAN_INTERVAL = timedelta(seconds=15)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistantType, config: Dict) -> bool:
"""Set up the Roku integration."""
hass.data.setdefault(DOMAIN, {})
if DOMAIN in config:
for entry_config in config[DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=entry_config,
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up Roku from a config entry."""
coordinator = RokuDataUpdateCoordinator(hass, host=entry.data[CONF_HOST])
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data[DOMAIN][entry.entry_id] = coordinator
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
def roku_exception_handler(func):
"""Decorate Roku calls to handle Roku exceptions."""
async def handler(self, *args, **kwargs):
try:
await func(self, *args, **kwargs)
except RokuConnectionError as error:
if self.available:
_LOGGER.error("Error communicating with API: %s", error)
except RokuError as error:
if self.available:
_LOGGER.error("Invalid response from API: %s", error)
return handler
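# Usage sketch (editorial note): the media_player and remote platforms apply
# this decorator to their service methods, along the lines of
#
#     @roku_exception_handler
#     async def async_turn_on(self) -> None:
#         await self.coordinator.roku.remote("poweron")
#
# The method name and the "poweron" keypress are illustrative assumptions and
# are not taken from this module.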
class RokuDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Roku data."""
def __init__(
self, hass: HomeAssistantType, *, host: str,
):
"""Initialize global Roku data updater."""
self.roku = Roku(host=host, session=async_get_clientsession(hass))
self.full_update_interval = timedelta(minutes=15)
self.last_full_update = None
super().__init__(
hass, _LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL,
)
async def _async_update_data(self) -> Device:
"""Fetch data from Roku."""
full_update = self.last_full_update is None or utcnow() >= (
self.last_full_update + self.full_update_interval
)
try:
data = await self.roku.update(full_update=full_update)
if full_update:
self.last_full_update = utcnow()
return data
except RokuError as error:
raise UpdateFailed(f"Invalid response from API: {error}")
class RokuEntity(Entity):
"""Defines a base Roku entity."""
def __init__(
self, *, device_id: str, name: str, coordinator: RokuDataUpdateCoordinator
) -> None:
"""Initialize the Roku entity."""
self._device_id = device_id
self._name = name
self.coordinator = coordinator
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self.coordinator.last_update_success
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def should_poll(self) -> bool:
"""Return the polling requirement of the entity."""
return False
async def async_added_to_hass(self) -> None:
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self) -> None:
"""Update an Roku entity."""
await self.coordinator.async_request_refresh()
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this Roku device."""
if self._device_id is None:
return None
return {
ATTR_IDENTIFIERS: {(DOMAIN, self._device_id)},
ATTR_NAME: self.name,
ATTR_MANUFACTURER: self.coordinator.data.info.brand,
ATTR_MODEL: self.coordinator.data.info.model_name,
ATTR_SOFTWARE_VERSION: self.coordinator.data.info.version,
}
|
the-stack_0_9121 | # Import packages
import os
import cv2
import sys
import numpy as np
from timeit import default_timer
from threading import Thread
from datetime import datetime
import uuid
import random
import dlr
from dlr.counter.phone_home import PhoneHome
from stream_uploader import init_gg_stream_manager, send_to_gg_stream_manager
from message_preparer import create_ipc_client, send_message
DEVICE_NAME = os.environ["DEVICE_NAME"]
IOT_CRED_ENDPOINT = os.environ["IOT_CRED_ENDPOINT"]
IOT_CERT_PATH = os.environ["IOT_CERT_PATH"]
IOT_KEY_PATH = os.environ["IOT_KEY_PATH"]
IOT_CA_PATH = os.environ["IOT_CA_PATH"]
KVS_ROLE_ALIAS = os.environ["KVS_ROLE_ALIAS"]
MODEL_PATH = os.environ["POSE_MODEL_PATH"]
INPUT_RESOLUTION = os.environ["INPUT_RESOLUTION"] ## DEFAULT TO 1280x720
S3_BUCKET_NAME = os.environ["STREAM_MANAGER_S3_BUCKET_NAME"]
s3_prefix = "pose-estimator-demo/processed-video-frames/nano/"
if INPUT_RESOLUTION == "":
INPUT_RESOLUTION = "1280x720"
resW, resH = INPUT_RESOLUTION.split('x')
imW, imH = int(resW), int(resH)
# flag for debugging
debug = False
if not os.path.exists('/tmp/poseEstimator-output'):
os.makedirs('/tmp/poseEstimator-output')
PhoneHome.disable_feature()
print('Loading model...', flush=True)
try:
model = dlr.DLRModel(MODEL_PATH, 'gpu', use_default_dlr=True)
except Exception as e:
print(e, flush=True)
height = 257
width = 257
# set stride to 32 based on model size
output_stride = 32
floating_model = True
input_mean = 127.5
input_std = 127.5
output_frame_rate = 8
kp_list = [
"nose",
"leftEye",
"rightEye",
"leftEar",
"rightEar",
"leftShoulder",
"rightShoulder",
"leftElbow",
"rightElbow",
"leftWrist",
"rightWrist",
"leftHip",
"rightHip",
"leftKnee",
"rightKnee",
"leftAnkle",
"rightAnkle"
]
pose_list = [
"none",
"left_hand_raised",
"right_hand_raised",
"crouching",
]
locations = [
"1f-corridor-1",
"1f-corridor-2",
"1f-frontgate-1",
]
def src_gstreamer_pipeline(
capture_width=1280,
capture_height=720,
inference_width=640,
inference_height=480,
framerate=24,
flip_method=0,
):
return (
"nvarguscamerasrc ! "
"video/x-raw(memory:NVMM), "
"width=(int)%d, height=(int)%d, "
"format=(string)NV12, framerate=(fraction)%d/1 ! "
"nvvidconv flip-method=%d ! "
"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
"videoconvert ! "
"video/x-raw, format=(string)BGR ! appsink"
% (
capture_width,
capture_height,
framerate,
flip_method,
inference_width,
inference_height,
)
)
# def dst_gstreamer_pipeline(
# output_width=640,
# output_height=480,
# framerate=24,
# ):
# return (
# 'appsrc ! videoconvert ! '
# 'video/x-raw,format=I420,width=%d,height=%d,framerate=%d/1 ! '
# 'nvv4l2h264enc ! h264parse ! '
# 'video/x-h264,stream-format=avc,alignment=au,width=%d,height=%d,framerate=%d/1,profile=baseline ! '
# f'kvssink stream-name={DEVICE_NAME} storage-size=512 iot-certificate="iot-certificate,endpoint={IOT_CRED_ENDPOINT},cert-path={IOT_CERT_PATH},key-path={IOT_KEY_PATH},ca-path={IOT_CA_PATH},role-aliases={KVS_ROLE_ALIAS}" aws-region=ap-northeast-1'
# % (
# output_width,
# output_height,
# framerate,
# output_width,
# output_height,
# framerate,
# )
# )
def dst_gstreamer_pipeline(
output_width=1280,
output_height=720,
framerate=24,
):
return (
'appsrc ! videoconvert ! '
'video/x-raw,format=I420,width=%d,height=%d,framerate=%d/1 ! '
'x264enc bframes=0 key-int-max=45 bitrate=1000 ! h264parse ! '
'video/x-h264,stream-format=avc,alignment=au,width=%d,height=%d,framerate=%d/1,profile=baseline ! '
f'kvssink stream-name={DEVICE_NAME} storage-size=512 iot-certificate="iot-certificate,endpoint={IOT_CRED_ENDPOINT},cert-path={IOT_CERT_PATH},key-path={IOT_KEY_PATH},ca-path={IOT_CA_PATH},role-aliases={KVS_ROLE_ALIAS}" aws-region=ap-northeast-1'
% (
output_width,
output_height,
framerate,
output_width,
output_height,
framerate,
)
)
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
"""Camera object that controls video streaming from the Picamera"""
def __init__(self, resolution=(640, 480), framerate=30):
# Initialize the PiCamera and the camera image stream
# breakpoint()
# src_pipeline = src_gstreamer_pipeline(
# resolution[0], resolution[1], flip_method=2)
# print(src_pipeline, flush=True)
# self.stream = cv2.VideoCapture(src_pipeline, cv2.CAP_GSTREAMER)
self.stream = cv2.VideoCapture(0)
if self.stream.isOpened() == True:
print("Camera initiated.", flush=True)
else:
print("Error open camera", flush=True)
ret = self.stream.set(cv2.CAP_PROP_FOURCC,
cv2.VideoWriter_fourcc(*'MJPG'))
ret = self.stream.set(3, resolution[0])
ret = self.stream.set(4, resolution[1])
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
def mod(a, b):
"""find a % b"""
floored = np.floor_divide(a, b)
return np.subtract(a, np.multiply(floored, b))
def argmax2d(inputs):
"""return y,x coordinates from heatmap"""
# v1 is 9x9x17 heatmap
v1 = inputs[0]
height = v1.shape[0]
width = v1.shape[1]
depth = v1.shape[2]
reshaped = np.reshape(v1, [height * width, depth])
coords = np.argmax(reshaped, axis=0)
yCoords = np.round(np.expand_dims(np.divide(coords, width), 1))
xCoords = np.expand_dims(mod(coords, width), 1)
return np.concatenate([yCoords, xCoords], 1)
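# Quick sanity check (editorial sketch, left commented out so nothing extra
# runs on import): for a 1x2x2x1 "heatmap" whose maximum sits at row 1,
# column 0, argmax2d returns [[1., 0.]].
#
#     tiny = np.zeros((1, 2, 2, 1))
#     tiny[0, 1, 0, 0] = 1.0
#     assert np.array_equal(argmax2d(tiny), np.array([[1.0, 0.0]]))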
def draw_kps(show_img,kps, ratio=None):
for i in range(5,kps.shape[0]):
if kps[i,2]:
if isinstance(ratio, tuple):
cv2.circle(show_img,(int(round(kps[i,1]*ratio[1])),int(round(kps[i,0]*ratio[0]))),2,(0,255,255),round(int(1*ratio[1])))
continue
cv2.circle(show_img,(kps[i,1],kps[i,0]),2,(0,255,255),-1)
return show_img
def get_offset_point(y, x, offsets, keypoint, num_key_points):
"""get offset vector from coordinate"""
y_off = offsets[y, x, keypoint]
x_off = offsets[y, x, keypoint+num_key_points]
return np.array([y_off, x_off])
def get_offsets(offsets_input, coords, num_key_points=17):
"""get offset vectors from all coordinates"""
offsets = offsets_input[0]
offset_vectors = np.array([]).reshape(-1, 2)
for i in range(len(coords)):
heatmap_y = int(coords[i][0])
heatmap_x = int(coords[i][1])
# make sure indices aren't out of range
if heatmap_y > 8:
heatmap_y = heatmap_y - 1
if heatmap_x > 8:
heatmap_x = heatmap_x - 1
offset_vectors = np.vstack((offset_vectors, get_offset_point(
heatmap_y, heatmap_x, offsets, i, num_key_points)))
return offset_vectors
def draw_lines(keypoints, image):
"""connect important body part keypoints with lines"""
#color = (255, 0, 0)
color = (0, 255, 0)
thickness = 2
    # reference for keypoint indexing: https://www.tensorflow.org/lite/models/pose_estimation/overview
body_map = [[5, 6], [5, 7], [7, 9], [5, 11], [6, 8], [8, 10],
[6, 12], [11, 12], [11, 13], [13, 15], [12, 14], [14, 16]]
for map_pair in body_map:
#print(f'Map pair {map_pair}')
start_pos = (int(keypoints[map_pair[0]][1]),
int(keypoints[map_pair[0]][0]))
end_pos = (int(keypoints[map_pair[1]][1]),
int(keypoints[map_pair[1]][0]))
image = cv2.line(image, start_pos, end_pos, color, thickness)
return image
# print(input_details)
# print('--------------')
# print(output_details)
def main():
try:
videostream = VideoStream(resolution=(imW, imH), framerate=30).start()
vidOut = cv2.VideoWriter(
dst_gstreamer_pipeline(640, 480, output_frame_rate), cv2.CAP_GSTREAMER, 0, output_frame_rate, (640, 480), True)
vidOut_status = vidOut.isOpened()
print(vidOut_status)
if vidOut_status != True:
print("Cannot open Gstreamer pipeline for writing...", flush=True)
try:
print('Initializing stream manager client...', flush=True)
stream_mgr_client = init_gg_stream_manager()
print('Completed stream manager initiation', flush=True)
except:
print('Error initializing stream manager client...', sys.exc_info()[0], flush=True)
sys.exit(0)
mqtt_client = create_ipc_client()
current_timestamp = 0
while True:
# Start timer (for calculating frame rate)
start_time = default_timer()
# t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = videostream.read()
producer_timestamp = int(datetime.now().timestamp())
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# rotated_frame = cv2.rotate(frame_rgb, cv2.ROTATE_180)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
frame_resized = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
infer_start_time = default_timer()
heatmaps, offsets, fwd_displacement, bwd_displacement = model.run({'sub_2': input_data})
# print("Inference finished after " + str(default_timer() - infer_start_time), flush=True)
#OLD
# get y,x positions from heatmap
coords = argmax2d(heatmaps)
            # get offsets from positions
offset_vectors = get_offsets(offsets, coords)
            # use the output stride to get coordinates in image space
keypoint_positions = coords * output_stride + offset_vectors
# print(keypoint_positions)
pose_res = {}
            # Loop over all keypoints and draw them on the frame
for i in range(len(keypoint_positions)):
# Center coordinates
x = int(keypoint_positions[i][1])
y = int(keypoint_positions[i][0])
center_coordinates = (x, y)
radius = 2
color = (0, 255, 0)
thickness = 2
cv2.circle(frame_resized, center_coordinates,
radius, color, thickness)
pose_res[kp_list[i]] = str(x) + "," + str(y)
if debug:
cv2.putText(frame_resized, str(
i), (x-4, y-4), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1) # Draw label text
frame_resized = draw_lines(keypoint_positions, frame_resized)
output_img = cv2.resize(frame_resized, (640, 480))
vidOut.write(output_img)
if producer_timestamp > current_timestamp:
# save image with time stamp to directory
img_filename = 'nano-pose-output-' + str(producer_timestamp) + '.jpg'
img_folder = '/tmp/poseEstimator-output'
img_path = img_folder + '/' + img_filename
status = cv2.imwrite(img_path, output_img)
s3key = s3_prefix + img_filename
try:
event_msg = {
"msg_id": str(uuid.uuid4()),
"location": locations[random.randint(0,2)],
"timestamp": producer_timestamp,
"msg_type": "pose_event",
"s3uri": "s3://" + S3_BUCKET_NAME + "/" + s3key,
"person_count": int(random.randint(0,6)),
"action": pose_list[random.randint(0,3)],
"pose": pose_res
}
send_message(mqtt_client, event_msg)
except Exception as e:
print('Cannot send message to IoT Core', flush=True)
print(e, flush=True)
print("Frame written after " +
str(default_timer() - start_time), flush=True)
send_to_gg_stream_manager(stream_mgr_client, img_path, s3key)
current_timestamp = producer_timestamp
# Debug only
# cv2.imshow('Hello', frame_resized)
# cv2.waitKey(100)
except KeyboardInterrupt:
# Clean up
cv2.destroyAllWindows()
videostream.stop()
print('Stopped video stream.')
if __name__ == '__main__':
try:
main()
except:
cv2.destroyAllWindows()
|
the-stack_0_9124 | ## LSDMap_SwathPlotting.py
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## These functions are tools to deal with plotting swaths
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## SMM
## 20/02/2018
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
from __future__ import absolute_import, division, print_function
import numpy as np
import os
from . import cubehelix
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib import colors
import LSDPlottingTools.LSDMap_GDALIO as LSDMap_IO
import LSDPlottingTools.LSDMap_BasicPlotting as LSDMap_BP
import LSDPlottingTools.LSDMap_PointTools as LSDMap_PD
from LSDMapFigure.PlottingRaster import MapFigure
from LSDMapFigure.PlottingRaster import BaseRaster
from LSDMapFigure import PlottingHelpers as Helper
def PlotSwath(swath_csv_name, FigFileName = 'Image.png', size_format = "geomorphology", fig_format = "png", dpi = 500, aspect_ratio = 2):
"""
This plots a swath profile
Args:
swath_csv_name (str): the name of the csv file (with path!)
Author: SMM
Date 20/02/2018
"""
print("STARTING swath plot.")
# Set up fonts for plots
label_size = 12
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['arial']
rcParams['font.size'] = label_size
# make a figure,
if size_format == "geomorphology":
fig = plt.figure(1, facecolor='white',figsize=(6.25,3.5))
fig_size_inches = 6.25
l_pad = -40
elif size_format == "big":
fig = plt.figure(1, facecolor='white',figsize=(16,9))
fig_size_inches = 16
l_pad = -50
else:
fig = plt.figure(1, facecolor='white',figsize=(4.92126,3.5))
fig_size_inches = 4.92126
l_pad = -35
# Note all the below parameters are overwritten by the figure sizer routine
gs = plt.GridSpec(100,100,bottom=0.15,left=0.1,right=1.0,top=1.0)
ax = fig.add_subplot(gs[25:100,10:95])
print("Getting data from the file: "+swath_csv_name)
thisPointData = LSDMap_PD.LSDMap_PointData(swath_csv_name)
distance = thisPointData.QueryData('Distance').values
mean_val = thisPointData.QueryData('Mean').values
min_val = thisPointData.QueryData('Min').values
max_val = thisPointData.QueryData('Max').values
# Get the minimum and maximum distances
X_axis_min = 0
X_axis_max = distance[-1]
n_target_tics = 5
xlocs,new_x_labels = LSDMap_BP.TickConverter(X_axis_min,X_axis_max,n_target_tics)
ax.fill_between(distance, min_val, max_val, facecolor='orange', alpha = 0.5, interpolate=True)
ax.plot(distance, mean_val,"b", linewidth = 1)
ax.plot(distance, min_val,"k",distance,max_val,"k",linewidth = 1)
ax.spines['top'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(1)
ax.spines['bottom'].set_linewidth(1)
ax.set_ylabel("Elevation (m)")
ax.set_xlabel("Distance along swath (km)")
ax.set_xticks(xlocs)
ax.set_xticklabels(new_x_labels,rotation=60)
# This gets all the ticks, and pads them away from the axis so that the corners don't overlap
ax.tick_params(axis='both', width=1, pad = 2)
for tick in ax.xaxis.get_major_ticks():
tick.set_pad(2)
# Lets try to size the figure
cbar_L = "None"
[fig_size_inches,map_axes,cbar_axes] = Helper.MapFigureSizer(fig_size_inches,aspect_ratio, cbar_loc = cbar_L, title = "None")
fig.set_size_inches(fig_size_inches[0], fig_size_inches[1])
ax.set_position(map_axes)
FigFormat = fig_format
print("The figure format is: " + FigFormat)
if FigFormat == 'show':
plt.show()
elif FigFormat == 'return':
return fig
else:
plt.savefig(FigFileName,format=FigFormat,dpi=dpi)
fig.clf()
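# Example usage (an illustrative sketch, not part of the original module): the csv path and
# output name below are placeholders; the csv is expected to contain "Distance", "Mean",
# "Min" and "Max" columns, matching the QueryData calls above.
#
#   PlotSwath("/path/to/swath_profile.csv", FigFileName="swath_profile.png",
#             size_format="geomorphology", fig_format="png", dpi=300)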
|
the-stack_0_9125 | import logging
from typing import Dict, List
from common_utils import labels
from controller.invoker.invoker_task_base import TaskBaseInvoker
from controller.label_model import label_runner
from controller.utils import utils
from id_definition.error_codes import CTLResponseCode
from proto import backend_pb2
class TaskLabelingInvoker(TaskBaseInvoker):
def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp:
return utils.make_general_response(CTLResponseCode.CTR_OK, "")
@classmethod
def subtask_weights(cls) -> List[float]:
return [1.0]
@classmethod
def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str],
request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str,
previous_subtask_id: str, user_labels: labels.UserLabels) -> backend_pb2.GeneralResp:
labeling_request = request.req_create_task.labeling
logging.info(f"labeling_request: {labeling_request}")
keywords = user_labels.get_main_names(class_ids=list(labeling_request.in_class_ids))
labeler_accounts = list(labeling_request.labeler_accounts)
media_location = assets_config["assetskvlocation"]
label_runner.start_label_task(
repo_root=repo_root,
working_dir=subtask_workdir,
media_location=media_location,
task_id=subtask_id,
project_name=labeling_request.project_name,
dataset_id=labeling_request.dataset_id,
keywords=keywords,
collaborators=labeler_accounts,
expert_instruction=labeling_request.expert_instruction_url,
export_annotation=labeling_request.export_annotation,
)
return utils.make_general_response(CTLResponseCode.CTR_OK, "")
|
the-stack_0_9126 | import logging
import os
import cv2
import numpy as np
from config import GlobalConfig as GlobalConfig
from utils.image_modification import get_grayscaled_image
from utils.other import get_n_unique_rows
cfg = GlobalConfig.get_config()
logger = logging.getLogger(__name__)
def get_edge_candidate_clusters_from_mask(image, mask, n_mask, ksize, output_directory):
from refiner.clustering.dbscan import dbscan_with_masked_image
from refiner.image_processing.draw import draw_masks_on_image
if ksize % 2 == 0:
raise ValueError("Kernel size must be odd")
img_masked = draw_masks_on_image(image, [(mask * 255).astype(np.uint8)])
if cfg.visualization_dict['widened_contour']:
cv2.imwrite(os.path.join(output_directory, "mask_{}_around_edges.jpg".format(str(n_mask).zfill(2))), img_masked)
image[mask == 0] = 0
image = get_grayscaled_image(image)
clustered_edges = dbscan_with_masked_image(image, eps=cfg.clustering_eps, min_samples=cfg.clustering_min_sample)
if len(clustered_edges.values()) > 50:
logger.debug("First run not successful (found {} edges)".format(len(clustered_edges.values())))
clustered_edges = dbscan_with_masked_image(image, eps=cfg.clustering_eps * 2,
min_samples=cfg.clustering_min_sample)
clustered_edges = filter_edges(image, clustered_edges)
return clustered_edges
def compute_widened_contour_line_around_mask(mask, ksize=37):
outer_mask = cv2.GaussianBlur(mask, (ksize, ksize), 0) # enlarge mask
inner_mask = cv2.GaussianBlur(~mask, (ksize, ksize), 0) # enlarge inverse mask
new_mask = np.ones_like(mask)
new_mask[outer_mask == 0] = 0
new_mask[inner_mask == 0] = 0
return new_mask
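# Example (illustrative sketch): given a binary uint8 mask (0/255), the function above returns a
# band of ones around the mask boundary whose width grows with `ksize`; the array shapes below
# are placeholders.
#
#   mask = np.zeros((480, 640), dtype=np.uint8)
#   mask[100:300, 150:400] = 255
#   band = compute_widened_contour_line_around_mask(mask, ksize=37)  # 1 along the widened contour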
def filter_edges(masked_image, edges):
# Compute mask size
mask = get_grayscaled_image(masked_image) / 255
mask_size = int(np.sum(mask))
remove_keys = []
for key, val in edges.items():
# Compute edge size
edge_size = get_n_unique_rows(val)
if edge_size / mask_size < 0.01: # remove small ones
remove_keys.append(key)
else: # remove "centered ones" since we are looking for lines
median = np.median(val, axis=0)
dists = np.linalg.norm(val - median, axis=1)
var = np.var(dists)
threshold = (mask.shape[0] + mask.shape[1]) / 25
if var < threshold:
logger.debug(
"Removed edges due to small variance in distribution {} < {}".format(round(var, 2),
round(threshold, 2)))
remove_keys.append(key)
for key in remove_keys:
del edges[key]
return edges
|
the-stack_0_9128 | """COMMAND : .cname"""
import asyncio
import time
from telethon.tl import functions
from telethon.errors import FloodWaitError
from uniborg.util import admin_cmd
DEL_TIME_OUT = 60
@borg.on(admin_cmd("cname")) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
while True:
DMY = time.strftime("%d.%m.%y")
HM = time.strftime("%H:%M")
name = f"{HM}🚶♂️AnonymousD3061🚶♂️{DMY}"
logger.info(name)
try:
await borg(functions.account.UpdateProfileRequest( # pylint:disable=E0602
last_name = name
))
except FloodWaitError as ex:
            logger.warning(str(ex))
await asyncio.sleep(ex.seconds)
# else:
# logger.info(r.stringify())
# await borg.send_message( # pylint:disable=E0602
# Config.PRIVATE_GROUP_BOT_API_ID, # pylint:disable=E0602
# "Changed Profile Picture"
# )
await asyncio.sleep(DEL_TIME_OUT)
|
the-stack_0_9129 | # Rule for simple expansion of template files. This performs a simple
# search over the template file for the keys in substitutions,
# and replaces them with the corresponding values.
#
# Typical usage:
# load("/tools/build_rules/template_rule", "template_rule")
# template_rule(
# name = "ExpandMyTemplate",
# src = "my.template",
# out = "my.txt",
# substitutions = {
# "$VAR1": "foo",
# "$VAR2": "bar",
# }
# )
#
# Args:
# name: The name of the rule.
# template: The template file to expand
# out: The destination of the expanded file
# substitutions: A dictionary mapping strings to their substitutions
def template_rule_impl(ctx):
ctx.actions.expand_template(
template = ctx.file.src,
output = ctx.outputs.out,
substitutions = ctx.attr.substitutions,
)
template_rule = rule(
attrs = {
"src": attr.label(
mandatory = True,
allow_single_file = True,
),
"substitutions": attr.string_dict(mandatory = True),
"out": attr.output(mandatory = True),
},
# output_to_genfiles is required for header files.
output_to_genfiles = True,
implementation = template_rule_impl,
) |
the-stack_0_9131 | """
This code is adapted from the source code used in the paper
'Style Change Detection Using BERT (2020)'
Title: Style-Change-Detection-Using-BERT
Authors: Aarish Iyer and Soroush Vosoughi
Date: Jul 18, 2020
Availability: https://github.com/aarish407/Style-Change-Detection-Using-BERT
"""
import random
import re
import pickle
import time
from tqdm import tqdm
import numpy as np
import os
from transformers import BertTokenizer, BertModel
import torch
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
ALPHABET = "([A-Za-z])"
PREF = "(Mr|St|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|Mt)[.]"
SUFF = "(Inc|Ltd|Jr|Sr|Co)"
STARTERS = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
ACRONYMS = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
WEBSITES = "[.](com|net|org|io|gov|me|edu)"
DIGITS = "([0-9])"
def paragraph_to_sentences(text):
text = " " + text + " "
text = text.replace("\n", " ")
text = re.sub(PREF, "\\1<prd>", text)
text = re.sub(WEBSITES, "<prd>\\1", text)
if "Ph.D" in text:
text = text.replace("Ph.D.", "Ph<prd>D<prd>")
text = re.sub("\s" + ALPHABET + "[.] ", " \\1<prd> ", text)
text = re.sub(ACRONYMS + " " + STARTERS, "\\1<stop> \\2", text)
text = re.sub(ALPHABET + "[.]" + ALPHABET + "[.]" + ALPHABET + "[.]", "\\1<prd>\\2<prd>\\3<prd>", text)
text = re.sub(ALPHABET + "[.]" + ALPHABET + "[.]", "\\1<prd>\\2<prd>", text)
text = re.sub(" " + SUFF + "[.] " + STARTERS, " \\1<stop> \\2", text)
text = re.sub(" " + SUFF + "[.]", " \\1<prd>", text)
text = re.sub(" " + ALPHABET + "[.]", " \\1<prd>", text)
text = re.sub(DIGITS + "[.]" + DIGITS, "\\1<prd>\\2", text)
if "e.g." in text:
text = text.replace("e.g.", "e<prd>g<prd>")
if "..." in text:
text = text.replace("...", "<prd><prd><prd>")
if "i.e." in text:
text = text.replace("i.e.", "i<prd>e<prd>")
if "”" in text:
text = text.replace(".”", "”.")
if "\"" in text:
text = text.replace(".\"", "\".")
if "!" in text:
text = text.replace("!\"", "\"!")
if "?" in text:
text = text.replace("?\"", "\"?")
text = text.replace(".", ".<stop>")
text = text.replace("?", "?<stop>")
text = text.replace("!", "!<stop>")
text = text.replace("<prd>", ".")
sentences = text.split("<stop>")
sentences = sentences[:-1]
sentences = [s.strip() for s in sentences]
return sentences
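# Example (illustrative sketch; the input string is made up):
#
#   paragraph_to_sentences("Mr. Smith bought cheapsite.com for 1.5 million dollars. He is happy!")
#   # should yield roughly:
#   # ['Mr. Smith bought cheapsite.com for 1.5 million dollars.', 'He is happy!']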
def embed_sentence(sentence, tokenizer, model):
# Tokenize input
sentence = tokenizer.tokenize("[CLS] " + sentence + " [SEP]")
if len(sentence) > 512:
sentence = sentence[:512]
# Convert token to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(sentence)
# In our case we only have one sentence, i.e. one segment id
segment_ids = [0] * len(indexed_tokens)
# Convert inputs to PyTorch tensors
token_tensor = torch.tensor([indexed_tokens]).to(device)
segment_tensor = torch.tensor([segment_ids]).to(device)
with torch.no_grad():
# Output state of last 4 layers
output = model(token_tensor, segment_tensor, output_hidden_states=True)["hidden_states"][-4:]
token_embeddings = torch.stack(output, dim=0)
token_embeddings = torch.squeeze(token_embeddings, dim=1)
token_embeddings = torch.sum(token_embeddings, dim=0)
sentence_embedding_sum = torch.sum(token_embeddings, dim=0)
return sentence_embedding_sum
def generate_embeddings(documents):
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = BertModel.from_pretrained('bert-base-cased').to(device)
model.eval()
embeddings_per_document = []
embeddings_per_paragraph = []
with tqdm(documents, unit="document", desc=f"Generating embeddings") as pbar:
for doc in pbar:
doc_embedding = torch.zeros(768)
par_embeddings = []
sentence_count = 0
for par in doc:
par_embedding = torch.zeros(768)
sentences = paragraph_to_sentences(par)
for sent in sentences:
sentence_count += 1
sent_embedding = embed_sentence(sent, tokenizer, model)
par_embedding.add_(sent_embedding)
doc_embedding.add_(par_embedding)
par_embeddings.append(par_embedding)
embeddings_per_document.append(doc_embedding / sentence_count)
embeddings_per_paragraph.append(par_embeddings)
# Convert lists to numpy arrays
embeddings_per_document = np.stack(embeddings_per_document)
for i in range(len(embeddings_per_paragraph)):
embeddings_per_paragraph[i] = np.stack(embeddings_per_paragraph[i])
return embeddings_per_document, embeddings_per_paragraph
def main():
from utilities import load_documents
# Load documents
train_docs, train_doc_ids = load_documents('train')
val_docs, val_doc_ids = load_documents('val')
# NB! Generating embeddings takes a long time
train_doc_emb, train_par_emb = generate_embeddings(train_docs)
val_doc_emb, val_par_emb = generate_embeddings(val_docs)
# Save results
timestring = time.strftime("%Y%m%d-%H%M")
if not os.path.exists('./features'):
os.makedirs('./features')
with open('./features/' + timestring + '_doc_emb_train.pickle', 'wb') as handle:
pickle.dump(train_doc_emb, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('./features/' + timestring + '_par_emb_train.pickle', 'wb') as handle:
pickle.dump(train_par_emb, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('./features/' + timestring + '_doc_emb_val.pickle', 'wb') as handle:
pickle.dump(val_doc_emb, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('./features/' + timestring + '_par_emb_val.pickle', 'wb') as handle:
pickle.dump(val_par_emb, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
main()
|
the-stack_0_9132 | import os
import unittest
import torchtext
from seq2seq.evaluator import Predictor
from seq2seq.dataset import SourceField, TargetField
from seq2seq.models import Seq2seq, EncoderRNN, DecoderRNN
class TestPredictor(unittest.TestCase):
@classmethod
def setUpClass(self):
test_path = os.path.dirname(os.path.realpath(__file__))
src = SourceField()
trg = TargetField()
dataset = torchtext.data.TabularDataset(
path=os.path.join(test_path, 'data/eng-fra.txt'), format='tsv',
fields=[('src', src), ('trg', trg)],
)
src.build_vocab(dataset)
trg.build_vocab(dataset)
encoder = EncoderRNN(len(src.vocab), 10, 10, rnn_cell='lstm')
decoder = DecoderRNN(len(trg.vocab), 10, 10, trg.sos_id, trg.eos_id, rnn_cell='lstm')
seq2seq = Seq2seq(encoder, decoder)
self.predictor = Predictor(seq2seq, src.vocab, trg.vocab)
def test_predict(self):
src_seq = ["I", "am", "fat"]
tgt_seq = self.predictor.predict(src_seq)
for tok in tgt_seq:
self.assertTrue(tok in self.predictor.tgt_vocab.stoi)
|
the-stack_0_9133 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class UserAggregateQueryPredicate(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
UserAggregateQueryPredicate - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'type': 'str',
'dimension': 'str',
'operator': 'str',
'value': 'str',
'range': 'NumericRange'
}
self.attribute_map = {
'type': 'type',
'dimension': 'dimension',
'operator': 'operator',
'value': 'value',
'range': 'range'
}
self._type = None
self._dimension = None
self._operator = None
self._value = None
self._range = None
@property
def type(self):
"""
Gets the type of this UserAggregateQueryPredicate.
Optional type, can usually be inferred
:return: The type of this UserAggregateQueryPredicate.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this UserAggregateQueryPredicate.
Optional type, can usually be inferred
:param type: The type of this UserAggregateQueryPredicate.
:type: str
"""
allowed_values = ["dimension", "property", "metric"]
if type.lower() not in map(str.lower, allowed_values):
# print("Invalid value for type -> " + type)
self._type = "outdated_sdk_version"
else:
self._type = type
@property
def dimension(self):
"""
Gets the dimension of this UserAggregateQueryPredicate.
Left hand side for dimension predicates
:return: The dimension of this UserAggregateQueryPredicate.
:rtype: str
"""
return self._dimension
@dimension.setter
def dimension(self, dimension):
"""
Sets the dimension of this UserAggregateQueryPredicate.
Left hand side for dimension predicates
:param dimension: The dimension of this UserAggregateQueryPredicate.
:type: str
"""
allowed_values = ["userId"]
if dimension.lower() not in map(str.lower, allowed_values):
# print("Invalid value for dimension -> " + dimension)
self._dimension = "outdated_sdk_version"
else:
self._dimension = dimension
@property
def operator(self):
"""
Gets the operator of this UserAggregateQueryPredicate.
Optional operator, default is matches
:return: The operator of this UserAggregateQueryPredicate.
:rtype: str
"""
return self._operator
@operator.setter
def operator(self, operator):
"""
Sets the operator of this UserAggregateQueryPredicate.
Optional operator, default is matches
:param operator: The operator of this UserAggregateQueryPredicate.
:type: str
"""
allowed_values = ["matches", "exists", "notExists"]
if operator.lower() not in map(str.lower, allowed_values):
# print("Invalid value for operator -> " + operator)
self._operator = "outdated_sdk_version"
else:
self._operator = operator
@property
def value(self):
"""
Gets the value of this UserAggregateQueryPredicate.
Right hand side for dimension predicates
:return: The value of this UserAggregateQueryPredicate.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""
Sets the value of this UserAggregateQueryPredicate.
Right hand side for dimension predicates
:param value: The value of this UserAggregateQueryPredicate.
:type: str
"""
self._value = value
@property
def range(self):
"""
Gets the range of this UserAggregateQueryPredicate.
Right hand side for dimension predicates
:return: The range of this UserAggregateQueryPredicate.
:rtype: NumericRange
"""
return self._range
@range.setter
def range(self, range):
"""
Sets the range of this UserAggregateQueryPredicate.
Right hand side for dimension predicates
:param range: The range of this UserAggregateQueryPredicate.
:type: NumericRange
"""
self._range = range
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
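# Example usage (illustrative sketch; the value is a placeholder, the other fields use values
# accepted by the setters above):
#
#   predicate = UserAggregateQueryPredicate()
#   predicate.type = "dimension"
#   predicate.dimension = "userId"
#   predicate.operator = "matches"
#   predicate.value = "some-user-id"
#   print(predicate.to_json())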
|
the-stack_0_9135 | import warnings
from typing import Callable, Any, Optional, List
import torch
from torch import Tensor
from torch import nn
from .._internally_replaced_utils import load_state_dict_from_url
from ..ops.misc import ConvNormActivation
from ..utils import _log_api_usage_once
from ._utils import _make_divisible
__all__ = ["MobileNetV2", "mobilenet_v2"]
model_urls = {
"mobilenet_v2": "https://download.pytorch.org/models/mobilenet_v2-b0353104.pth",
}
# necessary for backwards compatibility
class _DeprecatedConvBNAct(ConvNormActivation):
def __init__(self, *args, **kwargs):
warnings.warn(
"The ConvBNReLU/ConvBNActivation classes are deprecated and will be removed in future versions. "
"Use torchvision.ops.misc.ConvNormActivation instead.",
FutureWarning,
)
if kwargs.get("norm_layer", None) is None:
kwargs["norm_layer"] = nn.BatchNorm2d
if kwargs.get("activation_layer", None) is None:
kwargs["activation_layer"] = nn.ReLU6
super().__init__(*args, **kwargs)
ConvBNReLU = _DeprecatedConvBNAct
ConvBNActivation = _DeprecatedConvBNAct
class InvertedResidual(nn.Module):
def __init__(
self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super().__init__()
self.stride = stride
assert stride in [1, 2]
if norm_layer is None:
norm_layer = nn.BatchNorm2d
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers: List[nn.Module] = []
if expand_ratio != 1:
# pw
layers.append(
ConvNormActivation(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6)
)
layers.extend(
[
# dw
ConvNormActivation(
hidden_dim,
hidden_dim,
stride=stride,
groups=hidden_dim,
norm_layer=norm_layer,
activation_layer=nn.ReLU6,
),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
norm_layer(oup),
]
)
self.conv = nn.Sequential(*layers)
self.out_channels = oup
self._is_cn = stride > 1
def forward(self, x: Tensor) -> Tensor:
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(
self,
num_classes: int = 1000,
width_mult: float = 1.0,
inverted_residual_setting: Optional[List[List[int]]] = None,
round_nearest: int = 8,
block: Optional[Callable[..., nn.Module]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
dropout: float = 0.2,
) -> None:
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
block: Module specifying inverted residual building block for mobilenet
norm_layer: Module specifying the normalization layer to use
            dropout (float): The dropout probability
"""
super().__init__()
_log_api_usage_once(self)
if block is None:
block = InvertedResidual
if norm_layer is None:
norm_layer = nn.BatchNorm2d
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError(
f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}"
)
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features: List[nn.Module] = [
ConvNormActivation(3, input_channel, stride=2, norm_layer=norm_layer, activation_layer=nn.ReLU6)
]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
input_channel = output_channel
# building last several layers
features.append(
ConvNormActivation(
input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6
)
)
# make it nn.Sequential
self.features = nn.Sequential(*features)
# building classifier
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(self.last_channel, num_classes),
)
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def _forward_impl(self, x: Tensor) -> Tensor:
# This exists since TorchScript doesn't support inheritance, so the superclass method
# (this one) needs to have a name other than `forward` that can be accessed in a subclass
x = self.features(x)
# Cannot use "squeeze" as batch-size can be 1
x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def mobilenet_v2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV2:
"""
Constructs a MobileNetV2 architecture from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = MobileNetV2(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls["mobilenet_v2"], progress=progress)
model.load_state_dict(state_dict)
return model
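# Example usage (illustrative sketch):
#
#   model = mobilenet_v2(pretrained=True)
#   model.eval()
#   with torch.no_grad():
#       logits = model(torch.rand(1, 3, 224, 224))  # shape [1, 1000]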
|
the-stack_0_9137 | import json
from pathlib import Path
import random
class CocoFilter():
""" Filters the COCO dataset
"""
def _process_info(self):
if 'info' in self.coco:
self.info = self.coco['info']
else:
self.info = []
def _process_licenses(self):
if 'licenses' in self.coco:
self.licenses = self.coco['licenses']
else:
self.licenses = []
def _process_categories(self):
self.categories = dict()
self.super_categories = dict()
self.category_set = set()
for category in self.coco['categories']:
cat_id = category['id']
super_category = category['supercategory']
# Add category to categories dict
if cat_id not in self.categories:
self.categories[cat_id] = category
self.category_set.add(category['name'])
else:
print(f'ERROR: Skipping duplicate category id: {category}')
# Add category id to the super_categories dict
if super_category not in self.super_categories:
self.super_categories[super_category] = {cat_id}
else:
self.super_categories[super_category] |= {cat_id} # e.g. {1, 2, 3} |= {4} => {1, 2, 3, 4}
def _process_images(self):
self.images = dict()
for image in self.coco['images']:
image_id = image['id']
if image_id not in self.images:
self.images[image_id] = image
else:
print(f'ERROR: Skipping duplicate image id: {image}')
def _process_segmentations(self):
self.segmentations = dict()
for segmentation in self.coco['annotations']:
image_id = segmentation['image_id']
if image_id not in self.segmentations:
self.segmentations[image_id] = []
self.segmentations[image_id].append(segmentation)
def _filter_categories(self):
""" Find category ids matching args
Create mapping from original category id to new category id
Create new collection of categories
"""
print(self.filter_categories)
missing_categories = set(self.filter_categories) - self.category_set
if len(missing_categories) > 0:
print(f'Did not find categories: {missing_categories}')
should_continue = input('Continue? (y/n) ').lower()
if should_continue != 'y' and should_continue != 'yes':
print('Quitting early.')
quit()
self.new_category_map = dict()
new_id = 1
for key, item in self.categories.items():
if item['name'] in self.filter_categories:
self.new_category_map[key] = new_id
new_id += 1
self.new_categories = []
for original_cat_id, new_id in self.new_category_map.items():
new_category = dict(self.categories[original_cat_id])
new_category['id'] = new_id
self.new_categories.append(new_category)
def _filter_annotations(self):
""" Create new collection of annotations matching category ids
Keep track of image ids matching annotations
"""
self.new_segmentations = []
self.new_image_ids = set()
for image_id, segmentation_list in self.segmentations.items():
for segmentation in segmentation_list:
original_seg_cat = segmentation['category_id']
if original_seg_cat in self.new_category_map.keys():
new_segmentation = dict(segmentation)
new_segmentation['category_id'] = self.new_category_map[original_seg_cat]
self.new_segmentations.append(new_segmentation)
self.new_image_ids.add(image_id)
def _filter_images(self):
""" Create new collection of images
"""
self.new_images = []
for image_id in self.new_image_ids:
self.new_images.append(self.images[image_id])
def main(self, args):
# Open json
self.input_json_path = Path(args.input_json)
self.output_json_path = Path(args.output_json)
self.val_json_path = Path(args.val_json)
self.filter_categories = args.categories
# Verify input path exists
if not self.input_json_path.exists():
print('Input json path not found.')
print('Quitting early.')
quit()
# Verify output path does not already exist
if self.output_json_path.exists():
should_continue = input('Output path already exists. Overwrite? (y/n) ').lower()
if should_continue != 'y' and should_continue != 'yes':
print('Quitting early.')
quit()
# Load the json
print('Loading json file...')
with open(self.input_json_path) as json_file:
self.coco = json.load(json_file)
# Process the json
print('Processing input json...')
self._process_info()
self._process_licenses()
self._process_categories()
self._process_images()
self._process_segmentations()
# Filter to specific categories
print('Filtering...')
self._filter_categories()
self._filter_annotations()
self._filter_images()
if args.val_json:
with open("val_names.json") as json_file:
self.val_names = json.load(json_file)
val_images = [im for im in self.new_images if (im["path"].split("/")[-1] in self.val_names)]
val_ids = [im["id"] for im in val_images]
val_annotations = [seg for seg in self.new_segmentations if seg["image_id"] in val_ids]
val_json = {
'info': self.info,
'licenses': self.licenses,
'images': val_images,
'annotations': val_annotations,
'categories': self.new_categories
}
print('Saving new val json file...')
with open(self.val_json_path, 'w+') as output_file:
json.dump(val_json, output_file)
train_images = [im for im in self.new_images if (im["path"].split("/")[-1] not in self.val_names)]
train_ids = [im["id"] for im in train_images]
train_annotations = [seg for seg in self.new_segmentations if seg["image_id"] in train_ids]
train_json = {
'info': self.info,
'licenses': self.licenses,
'images': train_images,
'annotations': train_annotations,
'categories': self.new_categories
}
else:
# Build new JSON
train_json = {
'info': self.info,
'licenses': self.licenses,
'images': self.new_images,
'annotations': self.new_segmentations,
'categories': self.new_categories
}
# imnames = [im["path"].split("/")[-1] for im in self.new_images]
# random.shuffle(imnames)
# val_names = imnames[:30]
# with open("val_names.json", 'w+') as output_file:
# json.dump(val_names, output_file)
# exit()
# Write the JSON to a file
print('Saving new json file...')
with open(self.output_json_path, 'w+') as output_file:
json.dump(train_json, output_file)
print('Filtered json saved.')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Filter COCO JSON: "
"Filters a COCO Instances JSON file to only include specified categories. "
"This includes images, and annotations. Does not modify 'info' or 'licenses'.")
parser.add_argument("-i", "--input_json", dest="input_json",
help="path to a json file in coco format")
parser.add_argument("-o", "--output_json", dest="output_json",
help="path to save the output json")
parser.add_argument("-v", "--val_json", dest="val_json", default="",
help="path to save the val output json")
parser.add_argument("-c", "--categories", nargs='+', dest="categories",
help="List of category names separated by spaces, e.g. -c person dog bicycle")
args = parser.parse_args()
cf = CocoFilter()
cf.main(args)
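    # Example invocation (illustrative sketch; the script and file names are placeholders):
    #
    #   python filter_coco.py -i instances_train.json -o filtered_train.json \
    #       -v filtered_val.json -c person dog bicycle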
|
the-stack_0_9139 | '''
Created by yong.huang on 2016.11.04
'''
from hifive.api.base import RestApi
class HFBaseFavoriteRequest(RestApi):
def __init__(self,domain=None,port=80):
        domain = domain or 'hifive-gateway-test.hifiveai.com'
RestApi.__init__(self,domain, port)
self.clientId = None
self.page = None
self.pageSize = None
def getapiname(self):
return 'BaseFavorite'
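# Example usage (illustrative sketch; executing the request depends on the RestApi base class,
# which is not shown here):
#
#   req = HFBaseFavoriteRequest()
#   req.clientId = "your-client-id"  # placeholder
#   req.page = 1
#   req.pageSize = 20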
|
the-stack_0_9140 | """
Copyright Snap Inc. 2021. This sample code is made available by Snap Inc. for informational purposes only.
No license, whether implied or otherwise, is granted in or to such code (including any rights to copy, modify,
publish, distribute and/or commercialize such code), unless you have entered into a separate agreement for such rights.
Such code is provided as-is, without warranty of any kind, express or implied, including any warranties of merchantability,
title, fitness for a particular purpose, non-infringement, or that such code is free of defects, errors or viruses.
In no event will Snap Inc. be liable for any damages or losses of any kind arising from the sample code or your use thereof.
"""
import torch.utils.data
from .video_dataset import VideoDataset, HDF5Dataset, SomethingSomething
class VideoDatasetDataLoader():
def __init__(self, opt):
self.opt = opt
# dataset = VideoDataset(opt)
# dataset = HDF5Dataset(opt)
dataset = SomethingSomething(opt)
self.dataset = dataset
if opt.distributed:
self.train_sampler = torch.utils.data.distributed.DistributedSampler(
self.dataset)
else:
self.train_sampler = None
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=(self.train_sampler is None),
num_workers=opt.workers,
pin_memory=True,
sampler=self.train_sampler,
drop_last=True)
def load_data(self):
return self.dataloader
def __len__(self):
return len(self.dataset)
def CreateDataLoader(opt):
data_loader = VideoDatasetDataLoader(opt)
return data_loader
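# Example usage (illustrative sketch): `opt` must provide at least the attributes referenced
# above (distributed, batchSize, workers) plus whatever SomethingSomething expects; the values
# below are placeholders.
#
#   from types import SimpleNamespace
#   opt = SimpleNamespace(distributed=False, batchSize=4, workers=2)
#   loader = CreateDataLoader(opt).load_data()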
|
the-stack_0_9141 | from app import db
from models import Company, Colleagues, Admins, Boxes, Ideas
from flask import flash, redirect, url_for
from helper import instatiate_admin, get_extension, remove_avatar_file, remove_logo_file
import os
import shutil # to copy files
import random
import lorem
from date import today, add_day, str_to_date
from sqlalchemy.exc import SQLAlchemyError
import s3
def del_company(id):
company = Company.query.get(id)
# delete company logo:
remove_logo_file(company, company.logo)
# delete all avatars:
colleagues = Colleagues.query.filter_by(company_id = id).all()
for colleague in colleagues:
remove_avatar_file(colleague)
# delete company:
try:
        db.session.delete(company)
db.session.commit()
flash(f"{company.name} company successfully deleted from the database.", "inform")
except:
db.session.rollback()
        flash("An error occurred. Please try again.")
def create_sample_company():
    # instantiate Company:
company = Company (
name = "Eric BLABLA KGB"
)
company.set_founder_password("aaa")
company.set_joining_password("bbb")
# update database and query the ID of the new company:
try:
db.session.add(company)
db.session.commit()
except:
db.session.rollback()
        flash("An error occurred while creating the sample company registration. Please try again.", "error")
return redirect(url_for("register_company"))
registered_company = Company.query.filter_by(name = "Eric BLABLA KGB").first()
    # instantiate Jhon Do:
colleague = Colleagues(
user_name = "jhon_do",
email = "[email protected]",
first_name = "Jhon",
last_name = "Do",
position = "Founder",
confirmed = 1
)
colleague.set_password("aaa")
data = {
"company_id": registered_company.id,
"colleague": colleague,
"sample_avatar": "john_do.jpg"
}
create_sample_colleague(data)
# set the founder as Admin with full privilegs:
registered_colleague = Colleagues.query.filter_by(email = "[email protected]").first()
# instatiate Admins:
admin = instatiate_admin(True)
admin.colleague_id = registered_colleague.id
try:
db.session.add(admin)
db.session.commit()
except:
db.session.rollback()
        flash("An error occurred while creating the sample admin registration. Please try again.", "error")
return redirect(url_for("register_company"))
# copy logo:
location = "static/sample_logo/blabla.png"
destination = f"static/logo/{registered_colleague.company_id}.png"
shutil.copy2(location, destination)
print(s3.upload(location, os.environ["S3_BUCKET"], f"logo/{registered_company.id}.png"))
# update database:
company.logo = "png"
try:
db.session.commit()
print("Company logo copied.")
except:
db.session.rollback()
        print("An error occurred while copying the logo.")
    # instantiate Jane Do:
colleague = Colleagues(
user_name = "jane_do",
email = "[email protected]",
first_name = "Jane",
last_name = "Do",
position = "Co-Founder",
confirmed = 1
)
colleague.set_password("aaa")
data = {
"company_id": registered_company.id,
"colleague": colleague,
"sample_avatar": "jane_do.png"
}
create_sample_colleague(data)
    # instantiate Do Do:
colleague = Colleagues(
user_name = "dodo",
email = "[email protected]",
first_name = "Do",
last_name = "Do",
position = "dodo",
confirmed = 1
)
colleague.set_password("aaa")
data = {
"company_id": registered_company.id,
"colleague": colleague,
"sample_avatar": "dodo.svg"
}
create_sample_colleague(data)
    # instantiate x more colleagues:
x_more = 20
usernames = open("fake_dataset/username.txt").readlines()
emails = open("fake_dataset/fake_email.txt").readlines()
first_names = open("fake_dataset/first_name.txt").readlines()
last_names = open("fake_dataset/last_name.txt").readlines()
positions = open("fake_dataset/position.txt").readlines()
for x in range(x_more):
colleague = Colleagues(
user_name = get_random_item(usernames).strip(),
email = get_random_item(emails),
first_name = get_random_item(first_names),
last_name = get_random_item(last_names).lower().title(),
position = get_random_item(positions),
confirmed = 1
)
colleague.set_password("aaa")
data = {
"company_id": registered_company.id,
"colleague": colleague,
"sample_avatar": None
}
create_sample_colleague(data)
# create sample Idea Box:
admin = Admins.query.filter(Admins.colleague_id == registered_colleague.id).first()
for x in range(2):
new_box = Boxes(
name = lorem.sentence().replace(".", ""),
description = lorem.paragraph(),
close_at = str_to_date(add_day(str_to_date(today()), x ).strftime('%Y-%m-%d')),
admin_id = admin.id
)
try:
print("Trying to add new Idea Box to the database...")
db.session.add(new_box)
db.session.commit()
except SQLAlchemyError as e:
error = str(e.__dict__['orig'])
print("**************************************")
print(error)
print("New Idea Box not created!")
print("new_box.name: ", new_box.name)
print("new_box.description: ", new_box.description)
print("new_box.close_at: ", new_box.close_at)
print("new_box.admin_id: ", new_box.admin_id)
db.session.rollback()
# create sample Idea:
colleagues = Colleagues.query.filter(Colleagues.company_id == registered_company.id).all()
boxes = db.session.query(Boxes, Admins, Colleagues).filter(Boxes.admin_id == admin.id).all()
for x in range(7):
colleague = get_random_item(colleagues)
sign = [
"incognito",
colleague.user_name,
colleague.first_name,
colleague.fullname()
]
idea = Ideas(
idea = lorem.paragraph(),
sign = get_random_item(sign),
box_id = get_random_item(boxes).Boxes.id,
colleague_id = colleague.id
)
db.session.add(idea)
try:
db.session.commit()
except:
db.session.rollback()
print("The sample company registered successfully!")
def create_sample_colleague(data):
colleague = data["colleague"]
company_id = data["company_id"]
colleague.company_id = company_id
sample_avatar = data["sample_avatar"]
# insert new colleague to the colleague table:
try:
db.session.add(colleague)
db.session.commit()
except:
db.session.rollback()
# query the id of the new created sample_colleague:
colleague = Colleagues.query.filter_by(email = colleague.email).first()
# copy the sample avatar:
if sample_avatar:
extension = get_extension(sample_avatar)
location = f"static/sample_avatars/{sample_avatar}"
destination = f"static/avatars/{colleague.id}.{extension}"
shutil.copy2(location, destination)
# upload to AWS:
print(s3.upload(location, os.environ["S3_BUCKET"], f"avatars/{colleague.id}.{extension}"))
# update database with the copied avatar:
colleague.avatar = extension
try:
db.session.commit()
print("Avatar copied")
except:
db.session.rollback()
        print("An error occurred while copying the avatar.")
def get_random_item(items):
    return random.choice(items) |
the-stack_0_9143 | from django.db import models
from modelcluster.fields import ParentalKey
from wagtail.admin.edit_handlers import (
FieldPanel, MultiFieldPanel, PageChooserPanel)
from wagtailmenus.models import (
SectionMenu, ChildrenMenu, AbstractMainMenu,
AbstractMainMenuItem, AbstractFlatMenu, AbstractFlatMenuItem)
from .utils import TranslatedField
class CustomChildrenMenu(ChildrenMenu):
template_name = "menus/custom-overrides/children.html"
class CustomSectionMenu(SectionMenu):
sub_menu_template_name = "menus/custom-overrides/section-sub.html"
class MultilingualMenuItem(models.Model):
link_text_de = models.CharField(
verbose_name='link text (de)',
max_length=255,
blank=True,
)
link_text_fr = models.CharField(
verbose_name='link text (fr)',
max_length=255,
blank=True,
)
translated_link_text = TranslatedField(
'link_text', 'link_text_de', 'link_text_fr'
)
class Meta:
abstract = True
ordering = ('sort_order',)
@property
def menu_text(self):
return self.translated_link_text or getattr(
self.link_page, 'translated_title', None
) or self.link_page.title
panels = (
PageChooserPanel('link_page'),
FieldPanel('link_url'),
FieldPanel('url_append'),
FieldPanel('link_text'),
FieldPanel('link_text_de'),
FieldPanel('link_text_fr'),
FieldPanel('handle'),
FieldPanel('allow_subnav'),
)
class MainMenuCustomMenuItem(MultilingualMenuItem, AbstractMainMenuItem):
"""Custom MenuItem model for the default MainMenu model. The default
model is swapped out for this one using the setting:
`WAGTAILMENUS_MAIN_MENU_ITEMS_RELATED_NAME = 'custom_menu_items'
"""
menu = ParentalKey(
'wagtailmenus.MainMenu',
on_delete=models.CASCADE,
related_name="custom_menu_items"
)
class FlatMenuCustomMenuItem(MultilingualMenuItem, AbstractFlatMenuItem):
"""Custom MenuItem model for the default FlatMenu model. The default
model is swapped out for this one using the setting:
`WAGTAILMENUS_FLAT_MENU_ITEMS_RELATED_NAME = 'custom_menu_items'
"""
menu = ParentalKey(
'wagtailmenus.FlatMenu',
on_delete=models.CASCADE,
related_name="custom_menu_items"
)
class CustomMainMenu(AbstractMainMenu):
panels = AbstractMainMenu.content_panels + AbstractMainMenu.settings_panels
def get_sub_menu_template_names(self):
# The fix for #329 works should work even when the super class method
# does not recieve the 'level' argument
return super().get_sub_menu_template_names()
class CustomFlatMenu(AbstractFlatMenu):
heading_de = models.CharField(
verbose_name='heading (de)',
max_length=255,
blank=True,
)
heading_fr = models.CharField(
verbose_name='heading (fr)',
max_length=255,
blank=True,
)
translated_heading = TranslatedField(
'heading', 'heading_de', 'heading_fr'
)
content_panels = (
MultiFieldPanel(
heading="Settings",
children=(
FieldPanel('title'),
FieldPanel('site'),
FieldPanel('handle'),
)
),
MultiFieldPanel(
heading="Heading",
children=(
FieldPanel('heading'),
FieldPanel('heading_de'),
FieldPanel('heading_fr'),
),
classname='collapsible'
),
AbstractFlatMenu.content_panels[1],
)
class CustomMainMenuItem(MultilingualMenuItem, AbstractMainMenuItem):
"""Custom MenuItem model for `CustomMainMenu`. Notice the `related_name`
attribue on the field below is the same as it is on
wagtailmenus.MainMenuItem. Because of this, the
`WAGTAILMENUS_MAIN_MENU_ITEMS_RELATED_NAME` setting doesn't need to be
overridden ('menu_items' is the default value)."""
menu = ParentalKey(
'CustomMainMenu',
on_delete=models.CASCADE,
related_name="menu_items"
)
class CustomFlatMenuItem(MultilingualMenuItem, AbstractFlatMenuItem):
"""Custom MenuItem model for `CustomFlatMenu`. Notice the `related_name`
attribue on the field below is the same as it is on
wagtailmenus.FlatMenuItem. Because of this, the
`WAGTAILMENUS_FLAT_MENU_ITEMS_RELATED_NAME` setting doesn't need to be
overridden ('menu_items' is the default value)."""
menu = ParentalKey(
'CustomFlatMenu',
on_delete=models.CASCADE,
related_name="menu_items"
)
|
the-stack_0_9144 | # -*- coding: utf-8 -*-
"""
@author: WZM
@time: 2021/1/17 17:05
@function: replace the original author's initial network structure with an Inception v2 network for feature-map extraction
"""
import torch
import torch.nn as nn
import torchvision.models as models
from models.spp_net import SpatialPyramidPooling2d
def ConvBNReLU(in_channels, out_channels, kernel_size, stride=1, padding=0):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
padding=padding),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True),
)
def ConvBNReLUFactorization(in_channels, out_channels, kernel_sizes, paddings):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_sizes, stride=1,
padding=paddings),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True)
)
class InceptionV2ModuleA(nn.Module):
def __init__(self, in_channels, out_channels1, out_channels2reduce, out_channels2, out_channels3reduce,
out_channels3, out_channels4):
super(InceptionV2ModuleA, self).__init__()
self.branch1 = ConvBNReLU(in_channels=in_channels, out_channels=out_channels1, kernel_size=1)
self.branch2 = nn.Sequential(
ConvBNReLU(in_channels=in_channels, out_channels=out_channels2reduce, kernel_size=1),
ConvBNReLU(in_channels=out_channels2reduce, out_channels=out_channels2, kernel_size=3, padding=1),
)
self.branch3 = nn.Sequential(
ConvBNReLU(in_channels=in_channels, out_channels=out_channels3reduce, kernel_size=1),
ConvBNReLU(in_channels=out_channels3reduce, out_channels=out_channels3, kernel_size=3, padding=1),
ConvBNReLU(in_channels=out_channels3, out_channels=out_channels3, kernel_size=3, padding=1),
)
self.branch4 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
ConvBNReLU(in_channels=in_channels, out_channels=out_channels4, kernel_size=1),
)
def forward(self, x):
out1 = self.branch1(x)
out2 = self.branch2(x)
out3 = self.branch3(x)
out4 = self.branch4(x)
out = torch.cat([out1, out2, out3, out4], dim=1)
return out
class InceptionV2ModuleB(nn.Module):
def __init__(self, in_channels, out_channels1, out_channels2reduce, out_channels2, out_channels3reduce,
out_channels3, out_channels4):
super(InceptionV2ModuleB, self).__init__()
self.branch1 = ConvBNReLU(in_channels=in_channels, out_channels=out_channels1, kernel_size=1)
self.branch2 = nn.Sequential(
ConvBNReLU(in_channels=in_channels, out_channels=out_channels2reduce, kernel_size=1),
ConvBNReLUFactorization(in_channels=out_channels2reduce, out_channels=out_channels2reduce,
kernel_sizes=[1, 3], paddings=[0, 1]),
ConvBNReLUFactorization(in_channels=out_channels2reduce, out_channels=out_channels2, kernel_sizes=[3, 1],
paddings=[1, 0]),
)
self.branch3 = nn.Sequential(
ConvBNReLU(in_channels=in_channels, out_channels=out_channels3reduce, kernel_size=1),
ConvBNReLUFactorization(in_channels=out_channels3reduce, out_channels=out_channels3reduce,
kernel_sizes=[1, 3], paddings=[0, 1]),
ConvBNReLUFactorization(in_channels=out_channels3reduce, out_channels=out_channels3reduce,
kernel_sizes=[3, 1], paddings=[1, 0]),
ConvBNReLUFactorization(in_channels=out_channels3reduce, out_channels=out_channels3reduce,
kernel_sizes=[1, 3], paddings=[0, 1]),
ConvBNReLUFactorization(in_channels=out_channels3reduce, out_channels=out_channels3, kernel_sizes=[3, 1],
paddings=[1, 0]),
)
self.branch4 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
ConvBNReLU(in_channels=in_channels, out_channels=out_channels4, kernel_size=1),
)
def forward(self, x):
out1 = self.branch1(x)
out2 = self.branch2(x)
out3 = self.branch3(x)
out4 = self.branch4(x)
out = torch.cat([out1, out2, out3, out4], dim=1)
return out
class InceptionV2ModuleC(nn.Module):
def __init__(self, in_channels, out_channels1, out_channels2reduce, out_channels2, out_channels3reduce,
out_channels3, out_channels4):
super(InceptionV2ModuleC, self).__init__()
self.branch1 = ConvBNReLU(in_channels=in_channels, out_channels=out_channels1, kernel_size=1)
self.branch2_conv1 = ConvBNReLU(in_channels=in_channels, out_channels=out_channels2reduce, kernel_size=1)
self.branch2_conv2a = ConvBNReLUFactorization(in_channels=out_channels2reduce, out_channels=out_channels2,
kernel_sizes=[1, 3], paddings=[0, 1])
self.branch2_conv2b = ConvBNReLUFactorization(in_channels=out_channels2reduce, out_channels=out_channels2,
kernel_sizes=[3, 1], paddings=[1, 0])
self.branch3_conv1 = ConvBNReLU(in_channels=in_channels, out_channels=out_channels3reduce, kernel_size=1)
self.branch3_conv2 = ConvBNReLU(in_channels=out_channels3reduce, out_channels=out_channels3, kernel_size=3,
stride=1, padding=1)
self.branch3_conv3a = ConvBNReLUFactorization(in_channels=out_channels3, out_channels=out_channels3,
kernel_sizes=[3, 1], paddings=[1, 0])
self.branch3_conv3b = ConvBNReLUFactorization(in_channels=out_channels3, out_channels=out_channels3,
kernel_sizes=[1, 3], paddings=[0, 1])
self.branch4 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
ConvBNReLU(in_channels=in_channels, out_channels=out_channels4, kernel_size=1),
)
def forward(self, x):
out1 = self.branch1(x)
x2 = self.branch2_conv1(x)
out2 = torch.cat([self.branch2_conv2a(x2), self.branch2_conv2b(x2)], dim=1)
x3 = self.branch3_conv2(self.branch3_conv1(x))
out3 = torch.cat([self.branch3_conv3a(x3), self.branch3_conv3b(x3)], dim=1)
out4 = self.branch4(x)
out = torch.cat([out1, out2, out3, out4], dim=1)
return out
class InceptionV3ModuleD(nn.Module):
def __init__(self, in_channels, out_channels1reduce, out_channels1, out_channels2reduce, out_channels2):
super(InceptionV3ModuleD, self).__init__()
self.branch1 = nn.Sequential(
ConvBNReLU(in_channels=in_channels, out_channels=out_channels1reduce, kernel_size=1),
ConvBNReLU(in_channels=out_channels1reduce, out_channels=out_channels1, kernel_size=3, stride=2, padding=1)
)
self.branch2 = nn.Sequential(
ConvBNReLU(in_channels=in_channels, out_channels=out_channels2reduce, kernel_size=1),
ConvBNReLU(in_channels=out_channels2reduce, out_channels=out_channels2, kernel_size=3, stride=1, padding=1),
ConvBNReLU(in_channels=out_channels2, out_channels=out_channels2, kernel_size=3, stride=2, padding=1),
)
self.branch3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
out1 = self.branch1(x)
out2 = self.branch2(x)
out3 = self.branch3(x)
out = torch.cat([out1, out2, out3], dim=1)
return out
class InceptionAux(nn.Module):
def __init__(self, in_channels, out_channels):
super(InceptionAux, self).__init__()
self.auxiliary_avgpool = nn.AvgPool2d(kernel_size=5, stride=3)
self.auxiliary_conv1 = ConvBNReLU(in_channels=in_channels, out_channels=128, kernel_size=1)
self.auxiliary_conv2 = nn.Conv2d(in_channels=128, out_channels=768, kernel_size=5, stride=1)
self.auxiliary_dropout = nn.Dropout(p=0.7)
self.auxiliary_linear1 = nn.Linear(in_features=768, out_features=out_channels)
def forward(self, x):
x = self.auxiliary_conv1(self.auxiliary_avgpool(x))
x = self.auxiliary_conv2(x)
x = x.view(x.size(0), -1)
out = self.auxiliary_linear1(self.auxiliary_dropout(x))
return out
class InceptionV2(nn.Module):
def __init__(self, num_classes=3, stage='train', num_level=3, pool_type='max_pool', use_spp=False):
super(InceptionV2, self).__init__()
self.stage = stage
self.block1 = nn.Sequential(
ConvBNReLU(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
self.block1_1 = nn.Sequential(
ConvBNReLU(in_channels=1, out_channels=64, kernel_size=7, stride=2, padding=3),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
self.block2 = nn.Sequential(
ConvBNReLU(in_channels=64, out_channels=192, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
self.block3 = nn.Sequential(
InceptionV2ModuleA(in_channels=192, out_channels1=64, out_channels2reduce=64, out_channels2=64,
out_channels3reduce=64, out_channels3=96, out_channels4=32),
InceptionV2ModuleA(in_channels=256, out_channels1=64, out_channels2reduce=64, out_channels2=96,
out_channels3reduce=64, out_channels3=96, out_channels4=64),
InceptionV3ModuleD(in_channels=320, out_channels1reduce=128, out_channels1=160, out_channels2reduce=64,
out_channels2=96),
)
self.block4 = nn.Sequential(
InceptionV2ModuleB(in_channels=576, out_channels1=224, out_channels2reduce=64, out_channels2=96,
out_channels3reduce=96, out_channels3=128, out_channels4=128),
InceptionV2ModuleB(in_channels=576, out_channels1=192, out_channels2reduce=96, out_channels2=128,
out_channels3reduce=96, out_channels3=128, out_channels4=128),
InceptionV2ModuleB(in_channels=576, out_channels1=160, out_channels2reduce=128, out_channels2=160,
out_channels3reduce=128, out_channels3=128, out_channels4=128),
InceptionV2ModuleB(in_channels=576, out_channels1=96, out_channels2reduce=128, out_channels2=192,
out_channels3reduce=160, out_channels3=160, out_channels4=128),
InceptionV3ModuleD(in_channels=576, out_channels1reduce=128, out_channels1=192, out_channels2reduce=192,
out_channels2=256),
)
self.block5 = nn.Sequential(
InceptionV2ModuleC(in_channels=1024, out_channels1=352, out_channels2reduce=192, out_channels2=160,
out_channels3reduce=160, out_channels3=112, out_channels4=128),
InceptionV2ModuleC(in_channels=1024, out_channels1=352, out_channels2reduce=192, out_channels2=160,
out_channels3reduce=192, out_channels3=112, out_channels4=128)
)
# self.max_pool = nn.MaxPool2d(kernel_size=4, stride=1)
# self.dropout = nn.Dropout(p=0.5)
# self.linear = nn.Linear(1024, num_classes)
self.conv1_fusion = nn.Conv2d(3072, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(256)
self.relu1_fusion = nn.ReLU(inplace=True)
self.conv2_fusion = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(128)
self.relu2_fusion = nn.ReLU(inplace=True)
self.downsample = nn.Sequential(
nn.Conv2d(3072, 128, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(128),
)
self.conv_fusion = nn.Sequential(
nn.Conv2d(128, 64, kernel_size=1, stride=1, padding=0, bias=False),
nn.ReLU(inplace=True),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(64 * 4 * 4, 512),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(512, 256),
nn.ReLU(inplace=True),
nn.Linear(256, num_classes),
)
self.use_spp = use_spp
if use_spp:
self.num_level = num_level
self.pool_type = pool_type
self.num_grid = self._cal_num_grids(num_level)
self.spp_layer = SpatialPyramidPooling2d(num_level)
self.classifier_spp = nn.Sequential(
nn.Dropout(),
nn.Linear(64 * self.num_grid, 512),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(512, 256),
nn.ReLU(inplace=True),
nn.Linear(256, num_classes),
)
def _cal_num_grids(self, level):
count = 0
for i in range(level):
count += (i + 1) * (i + 1)
return count
def forward_tmp(self, x, channels=3):
if channels == 3:
x = self.block1(x)
else:
x = self.block1_1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
out = self.block5(x)
# x = self.max_pool(x)
# x = self.dropout(x)
# x = x.view(x.size(0), -1)
# out = self.linear(x)
return out
def forward(self, x1, x2, x3, IsUseRGB):
"""
:param x1: [16, 3, 128, 128]
:param x2: [16, 1, 128, 128]
:param x3: [16, 1, 128, 128]
:param IsUseRGB:
:return:
"""
# x1---image ; x2-----dem ; x3 ----slope
# if IsUseRGB == 1:
# x1 = self.features1(x1)
# else:
# x1 = self.features11(x1)
x1 = self.forward_tmp(x1) # [16, 1024, 4, 4]
x2 = self.forward_tmp(x2, 1)
x3 = self.forward_tmp(x3, 1)
x = torch.cat((x1, x2, x3), 1)
h = self.conv1_fusion(x)
h = self.bn1(h)
h = self.relu1_fusion(h)
h = self.conv2_fusion(h)
h = self.bn2(h)
h += self.downsample(x)
h = self.relu1_fusion(h)
h = self.conv_fusion(h)
# print(h.shape)
        if not self.use_spp:  # if use_spp is False
            h = h.view(h.size(0), -1)  # [16, 1024]
h = self.classifier(h) # [16, 3]
else:
h = self.spp_layer(h)
h = self.classifier_spp(h)
return h
# class Inception(nn.Module):
# def __init__(self):
# super(Inception, self).__init__()
# self.inception = models.inception_v3()
# print(self.inception)
#
#
# if __name__ == '__main__':
# inc = Inception()
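# Minimal smoke test (illustrative sketch, kept commented out like the block above): the input
# sizes follow the shapes documented in InceptionV2.forward ([N, 3, 128, 128] image plus two
# single-channel [N, 1, 128, 128] rasters).
#
#   net = InceptionV2(num_classes=3, use_spp=False)
#   img, dem, slope = torch.rand(2, 3, 128, 128), torch.rand(2, 1, 128, 128), torch.rand(2, 1, 128, 128)
#   out = net(img, dem, slope, IsUseRGB=1)
#   print(out.shape)  # expected: torch.Size([2, 3])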
|
the-stack_0_9146 | """Contains the CLI."""
import sys
import json
import logging
import time
from logging import LogRecord
from typing import (
Callable,
Tuple,
NoReturn,
Optional,
List,
)
import yaml
import click
# For the profiler
import pstats
from io import StringIO
# To enable colour cross platform
import colorama
from tqdm import tqdm
from sqlfluff.cli.formatters import (
format_rules,
format_violation,
format_linting_result_header,
format_linting_stats,
colorize,
format_dialect_warning,
format_dialects,
CallbackFormatter,
)
from sqlfluff.cli.helpers import cli_table, get_package_version
# Import from sqlfluff core.
from sqlfluff.core import (
Linter,
FluffConfig,
SQLLintError,
SQLTemplaterError,
SQLFluffUserError,
dialect_selector,
dialect_readout,
TimingSummary,
)
from sqlfluff.core.config import progress_bar_configuration
from sqlfluff.core.enums import FormatType, Color
from sqlfluff.core.linter import ParsedString
class RedWarningsFilter(logging.Filter):
"""This filter makes all warnings or above red."""
def filter(self, record: logging.LogRecord) -> bool:
"""Filter any warnings (or above) to turn them red."""
if record.levelno >= logging.WARNING:
record.msg = f"{colorize(record.msg, Color.red)} "
return True
class StreamHandlerTqdm(logging.StreamHandler):
"""Modified StreamHandler which takes care of writing within `tqdm` context.
    It uses `tqdm.write`, which takes care of conflicting prints with the progressbar.
    Without it, artifacts were left behind in DEBUG mode (and would probably appear
    in other verbose modes as well).
"""
def emit(self, record: LogRecord) -> None:
"""Behaves like original one except uses `tqdm` to write."""
try:
msg = self.format(record)
tqdm.write(msg, file=self.stream)
self.flush()
except Exception: # pragma: no cover
self.handleError(record)
def set_logging_level(
verbosity: int, logger: Optional[logging.Logger] = None, stderr_output: bool = False
) -> None:
"""Set up logging for the CLI.
We either set up global logging based on the verbosity
or, if `logger` is specified, we only limit to a single
sqlfluff logger. Verbosity is applied in the same way.
Implementation: If `logger` is not specified, the handler
is attached to the `sqlfluff` logger. If it is specified
    then it attaches to the logger in question. In addition
if `logger` is specified, then that logger will also
not propagate.
"""
fluff_logger = logging.getLogger("sqlfluff")
# Don't propagate logging
fluff_logger.propagate = False
# Enable colorama
colorama.init()
# Set up the log handler which is able to print messages without overlapping
# with progressbars.
handler = StreamHandlerTqdm(stream=sys.stderr if stderr_output else sys.stdout)
# NB: the unicode character at the beginning is to squash any badly
# tamed ANSI colour statements, and return us to normality.
handler.setFormatter(logging.Formatter("\u001b[0m%(levelname)-10s %(message)s"))
# Set up a handler to colour warnings red.
handler.addFilter(RedWarningsFilter())
if logger:
focus_logger = logging.getLogger(f"sqlfluff.{logger}")
focus_logger.addHandler(handler)
else:
fluff_logger.addHandler(handler)
# NB: We treat the parser logger slightly differently because it's noisier.
    # It's important that we set levels for all of them each time so
# that we don't break tests by changing the granularity
# between tests.
parser_logger = logging.getLogger("sqlfluff.parser")
if verbosity < 3:
fluff_logger.setLevel(logging.WARNING)
parser_logger.setLevel(logging.NOTSET)
elif verbosity == 3:
fluff_logger.setLevel(logging.INFO)
parser_logger.setLevel(logging.WARNING)
elif verbosity == 4:
fluff_logger.setLevel(logging.DEBUG)
parser_logger.setLevel(logging.INFO)
elif verbosity > 4:
fluff_logger.setLevel(logging.DEBUG)
parser_logger.setLevel(logging.DEBUG)
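# Illustration only (not part of the CLI): the call below mirrors what passing
# `--logger parser -vvvv` to one of the commands further down in this module does.
# >>> set_logging_level(verbosity=4, logger="parser", stderr_output=True)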
def common_options(f: Callable) -> Callable:
"""Add common options to commands via a decorator.
These are applied to all of the cli commands.
"""
f = click.version_option()(f)
f = click.option(
"-v",
"--verbose",
count=True,
help=(
"Verbosity, how detailed should the output be. This is *stackable*, so `-vv`"
" is more verbose than `-v`. For the most verbose option try `-vvvv` or `-vvvvv`."
),
)(f)
f = click.option(
"-n",
"--nocolor",
is_flag=True,
help="No color - if this is set then the output will be without ANSI color codes.",
)(f)
return f
def core_options(f: Callable) -> Callable:
"""Add core operation options to commands via a decorator.
These are applied to the main (but not all) cli commands like
`parse`, `lint` and `fix`.
"""
f = click.option(
"--dialect", default=None, help="The dialect of SQL to lint (default=ansi)"
)(f)
f = click.option(
"--templater", default=None, help="The templater to use (default=jinja)"
)(f)
f = click.option(
"--rules",
default=None,
help=(
"Narrow the search to only specific rules. For example "
"specifying `--rules L001` will only search for rule `L001` (Unnecessary "
"trailing whitespace). Multiple rules can be specified with commas e.g. "
"`--rules L001,L002` will specify only looking for violations of rule "
"`L001` and rule `L002`."
),
)(f)
f = click.option(
"--exclude-rules",
default=None,
help=(
"Exclude specific rules. For example "
"specifying `--exclude-rules L001` will remove rule `L001` (Unnecessary "
"trailing whitespace) from the set of considered rules. This could either "
"be the allowlist, or the general set if there is no specific allowlist. "
"Multiple rules can be specified with commas e.g. "
"`--exclude-rules L001,L002` will exclude violations of rule "
"`L001` and rule `L002`."
),
)(f)
f = click.option(
"--config",
"extra_config_path",
default=None,
help=(
"Include additional config file. By default the config is generated "
"from the standard configuration files described in the documentation. This "
"argument allows you to specify an additional configuration file that overrides "
"the standard configuration files. N.B. cfg format is required."
),
)(f)
f = click.option(
"--ignore-local-config",
is_flag=True,
help=(
"Ignore config files in default search path locations. "
"This option allows the user to lint with the default config "
"or can be used in conjunction with --config to only "
"reference the custom config file."
),
)(f)
f = click.option(
"--encoding",
default="autodetect",
help=(
"Specifiy encoding to use when reading and writing files. Defaults to autodetect."
),
)(f)
f = click.option(
"--ignore",
default=None,
help=(
"Ignore particular families of errors so that they don't cause a failed "
"run. For example `--ignore parsing` would mean that any parsing errors "
"are ignored and don't influence the success or fail of a run. Multiple "
"options are possible if comma separated e.g. `--ignore parsing,templating`."
),
)(f)
f = click.option(
"--bench",
is_flag=True,
help="Set this flag to engage the benchmarking tool output.",
)(f)
f = click.option(
"--logger",
type=click.Choice(
["templater", "lexer", "parser", "linter", "rules"], case_sensitive=False
),
help="Choose to limit the logging to one of the loggers.",
)(f)
f = click.option(
"--disable-noqa",
is_flag=True,
default=None,
help="Set this flag to ignore inline noqa comments.",
)(f)
return f
def get_config(
extra_config_path: Optional[str] = None,
ignore_local_config: bool = False,
**kwargs,
) -> FluffConfig:
"""Get a config object from kwargs."""
if "dialect" in kwargs:
try:
# We're just making sure it exists at this stage - it will be fetched properly in the linter
dialect_selector(kwargs["dialect"])
except SQLFluffUserError as err:
click.echo(
colorize(
f"Error loading dialect '{kwargs['dialect']}': {str(err)}",
color=Color.red,
)
)
sys.exit(66)
except KeyError:
click.echo(
colorize(
f"Error: Unknown dialect '{kwargs['dialect']}'", color=Color.red
)
)
sys.exit(66)
# Instantiate a config object (filtering out the nulls)
overrides = {k: kwargs[k] for k in kwargs if kwargs[k] is not None}
try:
return FluffConfig.from_root(
extra_config_path=extra_config_path,
ignore_local_config=ignore_local_config,
overrides=overrides,
)
except SQLFluffUserError as err: # pragma: no cover
click.echo(
colorize(
f"Error loading config: {str(err)}",
color=Color.red,
)
)
sys.exit(66)
def _callback_handler(cfg: FluffConfig) -> Callable:
"""Returns function which will be bound as a callback for printing passed message.
Called in `get_linter_and_formatter`.
"""
def _echo_with_tqdm_lock(message: str) -> None:
"""Makes sure that message printing (echoing) will be not in conflict with tqdm.
It may happen that progressbar conflicts with extra printing. Nothing very
serious happens then, except that there is printed (not removed) progressbar
        line. The `external_write_mode` context suspends tqdm output for the duration of the write.
"""
with tqdm.external_write_mode():
click.echo(message=message, color=cfg.get("color"))
return _echo_with_tqdm_lock
def get_linter_and_formatter(
cfg: FluffConfig, silent: bool = False
) -> Tuple[Linter, CallbackFormatter]:
"""Get a linter object given a config."""
try:
# We're just making sure it exists at this stage - it will be fetched properly in the linter
dialect_selector(cfg.get("dialect"))
except KeyError: # pragma: no cover
click.echo(f"Error: Unknown dialect '{cfg.get('dialect')}'")
sys.exit(66)
if not silent:
# Instantiate the linter and return it (with an output function)
formatter = CallbackFormatter(
callback=_callback_handler(cfg=cfg),
verbosity=cfg.get("verbose"),
output_line_length=cfg.get("output_line_length"),
)
return Linter(config=cfg, formatter=formatter), formatter
else:
        # Instantiate the linter and return it. NB: No formatter is attached to
        # the Linter here; a silent (no-op) formatter is returned alongside it.
formatter = CallbackFormatter(callback=lambda m: None, verbosity=0)
return Linter(config=cfg), formatter
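# Example wiring (illustration only): this is the same two-step setup that the
# `lint`, `fix` and `parse` commands below perform with their own options.
# >>> cfg = get_config(dialect="ansi", rules="L001,L002")
# >>> lnt, formatter = get_linter_and_formatter(cfg)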
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option()
def cli():
"""Sqlfluff is a modular sql linter for humans."""
@cli.command()
@common_options
def version(**kwargs) -> None:
"""Show the version of sqlfluff."""
c = get_config(**kwargs)
if c.get("verbose") > 0:
# Instantiate the linter
lnt, formatter = get_linter_and_formatter(c)
# Dispatch the detailed config from the linter.
formatter.dispatch_config(lnt)
else:
# Otherwise just output the package version.
click.echo(get_package_version(), color=c.get("color"))
@cli.command()
@common_options
def rules(**kwargs) -> None:
"""Show the current rules in use."""
c = get_config(**kwargs)
lnt, _ = get_linter_and_formatter(c)
click.echo(format_rules(lnt), color=c.get("color"))
@cli.command()
@common_options
def dialects(**kwargs) -> None:
"""Show the current dialects available."""
c = get_config(**kwargs)
click.echo(format_dialects(dialect_readout), color=c.get("color"))
@cli.command()
@common_options
@core_options
@click.option(
"-f",
"--format",
"format",
default="human",
type=click.Choice([ft.value for ft in FormatType], case_sensitive=False),
help="What format to return the lint result in (default=human).",
)
@click.option(
"--annotation-level",
default="notice",
type=click.Choice(["notice", "warning", "failure"], case_sensitive=False),
help="When format is set to github-annotation, default annotation level (default=notice).",
)
@click.option(
"--nofail",
is_flag=True,
help=(
"If set, the exit code will always be zero, regardless of violations "
"found. This is potentially useful during rollout."
),
)
@click.option(
"--disregard-sqlfluffignores",
is_flag=True,
help="Perform the operation regardless of .sqlfluffignore configurations",
)
@click.option(
"-p",
"--processes",
type=int,
default=1,
help="The number of parallel processes to run.",
)
@click.option(
"--disable_progress_bar",
is_flag=True,
help="Disables progress bars.",
)
@click.argument("paths", nargs=-1)
def lint(
paths: Tuple[str],
processes: int,
format: str,
annotation_level: str,
nofail: bool,
disregard_sqlfluffignores: bool,
logger: Optional[logging.Logger] = None,
bench: bool = False,
disable_progress_bar: Optional[bool] = False,
extra_config_path: Optional[str] = None,
ignore_local_config: bool = False,
**kwargs,
) -> NoReturn:
"""Lint SQL files via passing a list of files or using stdin.
PATH is the path to a sql file or directory to lint. This can be either a
file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
be interpreted like passing the current working directory as a path argument.
Linting SQL files:
sqlfluff lint path/to/file.sql
sqlfluff lint directory/of/sql/files
Linting a file via stdin (note the lone '-' character):
cat path/to/file.sql | sqlfluff lint -
echo 'select col from tbl' | sqlfluff lint -
"""
config = get_config(extra_config_path, ignore_local_config, **kwargs)
non_human_output = format != FormatType.human.value
lnt, formatter = get_linter_and_formatter(config, silent=non_human_output)
verbose = config.get("verbose")
progress_bar_configuration.disable_progress_bar = disable_progress_bar
formatter.dispatch_config(lnt)
# Set up logging.
set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output)
# add stdin if specified via lone '-'
if ("-",) == paths:
result = lnt.lint_string_wrapped(sys.stdin.read(), fname="stdin")
else:
# Output the results as we go
if verbose >= 1:
click.echo(format_linting_result_header())
try:
result = lnt.lint_paths(
paths,
ignore_non_existent_files=False,
ignore_files=not disregard_sqlfluffignores,
processes=processes,
)
except OSError:
click.echo(
colorize(
f"The path(s) '{paths}' could not be accessed. Check it/they exist(s).",
Color.red,
)
)
sys.exit(1)
# Output the final stats
if verbose >= 1:
click.echo(format_linting_stats(result, verbose=verbose))
if format == FormatType.json.value:
click.echo(json.dumps(result.as_records()))
elif format == FormatType.yaml.value:
click.echo(yaml.dump(result.as_records(), sort_keys=False))
elif format == FormatType.github_annotation.value:
github_result = []
for record in result.as_records():
filepath = record["filepath"]
for violation in record["violations"]:
# NOTE: The output format is designed for this GitHub action:
# https://github.com/yuzutech/annotations-action
# It is similar, but not identical, to the native GitHub format:
# https://docs.github.com/en/rest/reference/checks#annotations-items
github_result.append(
{
"file": filepath,
"line": violation["line_no"],
"start_column": violation["line_pos"],
"end_column": violation["line_pos"],
"title": "SQLFluff",
"message": f"{violation['code']}: {violation['description']}",
"annotation_level": annotation_level,
}
)
click.echo(json.dumps(github_result))
if bench:
click.echo("==== overall timings ====")
click.echo(cli_table([("Clock time", result.total_time)]))
timing_summary = result.timing_summary()
for step in timing_summary:
click.echo(f"=== {step} ===")
click.echo(cli_table(timing_summary[step].items()))
if not nofail:
if not non_human_output:
_completion_message(config)
sys.exit(result.stats()["exit code"])
else:
sys.exit(0)
def do_fixes(lnt, result, formatter=None, **kwargs):
"""Actually do the fixes."""
click.echo("Persisting Changes...")
res = result.persist_changes(formatter=formatter, **kwargs)
if all(res.values()):
click.echo("Done. Please check your files to confirm.")
return True
# If some failed then return false
click.echo(
"Done. Some operations failed. Please check your files to confirm."
) # pragma: no cover
click.echo(
"Some errors cannot be fixed or there is another error blocking it."
) # pragma: no cover
return False # pragma: no cover
@cli.command()
@common_options
@core_options
@click.option(
"-f",
"--force",
is_flag=True,
help=(
"skip the confirmation prompt and go straight to applying "
"fixes. **Use this with caution.**"
),
)
@click.option(
"--fixed-suffix", default=None, help="An optional suffix to add to fixed files."
)
@click.option(
"-p",
"--processes",
type=int,
default=1,
help="The number of parallel processes to run.",
)
@click.option(
"--disable_progress_bar",
is_flag=True,
help="Disables progress bars.",
)
@click.argument("paths", nargs=-1)
def fix(
force: bool,
paths: Tuple[str],
processes: int,
bench: bool = False,
fixed_suffix: str = "",
logger: Optional[logging.Logger] = None,
disable_progress_bar: Optional[bool] = False,
extra_config_path: Optional[str] = None,
ignore_local_config: bool = False,
**kwargs,
) -> NoReturn:
"""Fix SQL files.
PATH is the path to a sql file or directory to lint. This can be either a
file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
be interpreted like passing the current working directory as a path argument.
"""
# some quick checks
fixing_stdin = ("-",) == paths
config = get_config(extra_config_path, ignore_local_config, **kwargs)
lnt, formatter = get_linter_and_formatter(config, silent=fixing_stdin)
verbose = config.get("verbose")
progress_bar_configuration.disable_progress_bar = disable_progress_bar
exit_code = 0
formatter.dispatch_config(lnt)
# Set up logging.
set_logging_level(verbosity=verbose, logger=logger, stderr_output=fixing_stdin)
# handle stdin case. should output formatted sql to stdout and nothing else.
if fixing_stdin:
stdin = sys.stdin.read()
result = lnt.lint_string_wrapped(stdin, fname="stdin", fix=True)
templater_error = result.num_violations(types=SQLTemplaterError) > 0
unfixable_error = result.num_violations(types=SQLLintError, fixable=False) > 0
if result.num_violations(types=SQLLintError, fixable=True) > 0:
stdout = result.paths[0].files[0].fix_string()[0]
else:
stdout = stdin
if templater_error:
click.echo(
colorize(
"Fix aborted due to unparseable template variables.",
Color.red,
),
err=True,
)
click.echo(
colorize(
"Use '--ignore templating' to attempt to fix anyway.",
Color.red,
),
err=True,
)
if unfixable_error:
click.echo(colorize("Unfixable violations detected.", Color.red), err=True)
click.echo(stdout, nl=False)
sys.exit(1 if templater_error or unfixable_error else 0)
# Lint the paths (not with the fix argument at this stage), outputting as we go.
click.echo("==== finding fixable violations ====")
try:
result = lnt.lint_paths(
paths,
fix=True,
ignore_non_existent_files=False,
processes=processes,
)
except OSError:
click.echo(
colorize(
f"The path(s) '{paths}' could not be accessed. Check it/they exist(s).",
Color.red,
),
err=True,
)
sys.exit(1)
# NB: We filter to linting violations here, because they're
# the only ones which can be potentially fixed.
if result.num_violations(types=SQLLintError, fixable=True) > 0:
click.echo("==== fixing violations ====")
click.echo(
f"{result.num_violations(types=SQLLintError, fixable=True)} fixable linting violations found"
)
if force:
click.echo(f"{colorize('FORCE MODE', Color.red)}: Attempting fixes...")
success = do_fixes(
lnt,
result,
formatter,
types=SQLLintError,
fixed_file_suffix=fixed_suffix,
)
if not success:
sys.exit(1) # pragma: no cover
else:
click.echo(
"Are you sure you wish to attempt to fix these? [Y/n] ", nl=False
)
c = click.getchar().lower()
click.echo("...")
if c in ("y", "\r", "\n"):
click.echo("Attempting fixes...")
success = do_fixes(
lnt,
result,
formatter,
types=SQLLintError,
fixed_file_suffix=fixed_suffix,
)
if not success:
sys.exit(1) # pragma: no cover
else:
_completion_message(config)
elif c == "n":
click.echo("Aborting...")
exit_code = 1
else: # pragma: no cover
click.echo("Invalid input, please enter 'Y' or 'N'")
click.echo("Aborting...")
exit_code = 1
else:
click.echo("==== no fixable linting violations found ====")
_completion_message(config)
if result.num_violations(types=SQLLintError, fixable=False) > 0:
click.echo(
f" [{result.num_violations(types=SQLLintError, fixable=False)} unfixable linting violations found]"
)
exit_code = 1
if result.num_violations(types=SQLTemplaterError) > 0:
click.echo(
f" [{result.num_violations(types=SQLTemplaterError)} templating errors found]"
)
exit_code = 1
if bench:
click.echo("==== overall timings ====")
click.echo(cli_table([("Clock time", result.total_time)]))
timing_summary = result.timing_summary()
for step in timing_summary:
click.echo(f"=== {step} ===")
click.echo(cli_table(timing_summary[step].items()))
sys.exit(exit_code)
def _completion_message(config: FluffConfig) -> None:
click.echo(
f"All Finished{'' if (config.get('nocolor') or not sys.stdout.isatty()) else ' 📜 🎉'}!"
)
def quoted_presenter(dumper, data):
"""Re-presenter which always double quotes string values needing escapes."""
if "\n" in data or "\t" in data or "'" in data:
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style='"')
else:
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="")
@cli.command()
@common_options
@core_options
@click.argument("path", nargs=1)
@click.option(
"--recurse", default=0, help="The depth to recursively parse to (0 for unlimited)"
)
@click.option(
"-c",
"--code-only",
is_flag=True,
help="Output only the code elements of the parse tree.",
)
@click.option(
"-m",
"--include-meta",
is_flag=True,
help=(
"Include meta segments (indents, dedents and placeholders) in the output. "
"This only applies when outputting json or yaml."
),
)
@click.option(
"-f",
"--format",
default=FormatType.human.value,
type=click.Choice(
[
FormatType.human.value,
FormatType.json.value,
FormatType.yaml.value,
],
case_sensitive=False,
),
help="What format to return the parse result in.",
)
@click.option(
"--profiler", is_flag=True, help="Set this flag to engage the python profiler."
)
@click.option(
"--nofail",
is_flag=True,
help=(
"If set, the exit code will always be zero, regardless of violations "
"found. This is potentially useful during rollout."
),
)
def parse(
path: str,
code_only: bool,
include_meta: bool,
format: str,
profiler: bool,
bench: bool,
nofail: bool,
logger: Optional[logging.Logger] = None,
extra_config_path: Optional[str] = None,
ignore_local_config: bool = False,
**kwargs,
) -> NoReturn:
"""Parse SQL files and just spit out the result.
PATH is the path to a sql file or directory to lint. This can be either a
file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
be interpreted like passing the current working directory as a path argument.
"""
c = get_config(extra_config_path, ignore_local_config, **kwargs)
# We don't want anything else to be logged if we want json or yaml output
non_human_output = format in (FormatType.json.value, FormatType.yaml.value)
lnt, formatter = get_linter_and_formatter(c, silent=non_human_output)
verbose = c.get("verbose")
recurse = c.get("recurse")
progress_bar_configuration.disable_progress_bar = True
formatter.dispatch_config(lnt)
# Set up logging.
set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output)
# TODO: do this better
if profiler:
# Set up the profiler if required
try:
import cProfile
except ImportError: # pragma: no cover
click.echo("The cProfiler is not available on your platform.")
sys.exit(1)
pr = cProfile.Profile()
pr.enable()
try:
t0 = time.monotonic()
# handle stdin if specified via lone '-'
if "-" == path:
parsed_strings = [
lnt.parse_string(
sys.stdin.read(),
"stdin",
recurse=recurse,
config=lnt.config,
),
]
else:
# A single path must be specified for this command
parsed_strings = list(lnt.parse_path(path, recurse=recurse))
total_time = time.monotonic() - t0
violations_count = 0
# iterative print for human readout
if format == FormatType.human.value:
violations_count = _print_out_violations_and_timing(
bench, code_only, total_time, verbose, parsed_strings
)
else:
parsed_strings_dict = [
dict(
filepath=linted_result.fname,
segments=linted_result.tree.as_record(
code_only=code_only, show_raw=True, include_meta=include_meta
)
if linted_result.tree
else None,
)
for linted_result in parsed_strings
]
if format == FormatType.yaml.value:
# For yaml dumping always dump double quoted strings if they contain tabs or newlines.
yaml.add_representer(str, quoted_presenter)
click.echo(yaml.dump(parsed_strings_dict, sort_keys=False))
elif format == FormatType.json.value:
click.echo(json.dumps(parsed_strings_dict))
except OSError: # pragma: no cover
click.echo(
colorize(
f"The path '{path}' could not be accessed. Check it exists.",
Color.red,
),
err=True,
)
sys.exit(1)
if profiler:
pr.disable()
profiler_buffer = StringIO()
ps = pstats.Stats(pr, stream=profiler_buffer).sort_stats("cumulative")
ps.print_stats()
click.echo("==== profiler stats ====")
# Only print the first 50 lines of it
click.echo("\n".join(profiler_buffer.getvalue().split("\n")[:50]))
if violations_count > 0 and not nofail:
sys.exit(66) # pragma: no cover
else:
sys.exit(0)
def _print_out_violations_and_timing(
bench: bool,
code_only: bool,
total_time: float,
verbose: int,
parsed_strings: List[ParsedString],
) -> int:
"""Used by human formatting during the parse."""
violations_count = 0
timing = TimingSummary()
for parsed_string in parsed_strings:
timing.add(parsed_string.time_dict)
if parsed_string.tree:
click.echo(parsed_string.tree.stringify(code_only=code_only))
else:
# TODO: Make this prettier
click.echo("...Failed to Parse...") # pragma: no cover
violations_count += len(parsed_string.violations)
if parsed_string.violations:
click.echo("==== parsing violations ====") # pragma: no cover
for v in parsed_string.violations:
click.echo(format_violation(v)) # pragma: no cover
if parsed_string.violations and parsed_string.config.get("dialect") == "ansi":
click.echo(format_dialect_warning()) # pragma: no cover
if verbose >= 2:
click.echo("==== timings ====")
click.echo(cli_table(parsed_string.time_dict.items()))
if verbose >= 2 or bench:
click.echo("==== overall timings ====")
click.echo(cli_table([("Clock time", total_time)]))
timing_summary = timing.summary()
for step in timing_summary:
click.echo(f"=== {step} ===")
click.echo(cli_table(timing_summary[step].items()))
return violations_count
# This "__main__" handler allows invoking SQLFluff using "python -m", which
# simplifies the use of cProfile, e.g.:
# python -m cProfile -s cumtime -m sqlfluff.cli.commands lint slow_file.sql
if __name__ == "__main__":
cli.main(sys.argv[1:]) # pragma: no cover
|
the-stack_0_9147 | import re
import bibtexparser
import arrow
import pprint
from bibtexparser.bibdatabase import BibDatabase
_HOST = 'https://scholar.google.com{0}'
_SCHOLARPUBRE = r'cites=([\w-]*)'
_CITATIONPUB = '/citations?hl=en&view_op=view_citation&citation_for_view={0}'
_SCHOLARPUB = '/scholar?hl=en&oi=bibs&cites={0}'
_CITATIONPUBRE = r'citation_for_view=([\w-]*:[\w-]*)'
_BIBCITE = '/scholar?q=info:{0}:scholar.google.com/\
&output=cite&scirp={1}&hl=en'
# WARNING: CUSTOM BUG-FIX FOR THE API!! (10 June 2020, Laker) #
parser = bibtexparser.bparser.BibTexParser(common_strings=True)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class _SearchScholarIterator(object):
"""Iterator that returns Publication objects from the search page
I have removed all logging from here for simplicity. -V
"""
def __init__(self, nav, url: str):
self._url = url
self._nav = nav
self._load_url(url)
def _load_url(self, url: str):
        # this is temporary until the setup is moved to a json file
self._soup = self._nav._get_soup(url)
self._pos = 0
self._rows = self._soup.find_all('div', class_='gs_r gs_or gs_scl')
# Iterator protocol
def __iter__(self):
return self
def __next__(self):
if self._pos < len(self._rows):
row = self._rows[self._pos]
self._pos += 1
return Publication(self._nav, row, 'scholar')
elif self._soup.find(class_='gs_ico gs_ico_nav_next'):
url = self._soup.find(
class_='gs_ico gs_ico_nav_next').parent['href']
self._load_url(url)
return self.__next__()
else:
raise StopIteration
# Pickle protocol
def __getstate__(self):
return {'url': self._url, 'pos': self._pos}
def __setstate__(self, state):
# this needs validation -V
self._load_url(state['url'])
self._pos = state['pos']
class Publication(object):
"""Returns an object for a single publication"""
def __init__(self, nav, __data, pubtype=None):
self.nav = nav
self.bib = dict()
self.source = pubtype
if self.source == 'citations':
self._citation_pub(__data)
elif self.source == 'scholar':
self._scholar_pub(__data)
self._filled = False
def _citation_pub(self, __data):
self.bib['title'] = __data.find('a', class_='gsc_a_at').text
self.id_citations = re.findall(_CITATIONPUBRE, __data.find(
'a', class_='gsc_a_at')['data-href'])[0]
citedby = __data.find(class_='gsc_a_ac')
self.bib["cites"] = "0"
if citedby and not (citedby.text.isspace() or citedby.text == ''):
self.bib["cites"] = citedby.text.strip()
year = __data.find(class_='gsc_a_h')
if (year and year.text
and not year.text.isspace()
and len(year.text) > 0):
self.bib['year'] = year.text.strip()
# # # # # WARNING: CUSTOM FIX FOR API (6/18/2020) # # # # #
authors = __data.find_all('div', class_='gs_gray')[0]
if (authors and authors.text
and not authors.text.isspace()
and len(authors.text) > 0):
self.bib['author'] = ' and '.join(
[i.strip() for i in authors.text.split(',')])
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _get_authorlist(self, authorinfo):
authorlist = list()
text = authorinfo.split(' - ')[0]
for i in text.split(','):
i = i.strip()
if bool(re.search(r'\d', i)):
continue
if ("Proceedings" in i or "Conference" in i or "Journal" in i or
"(" in i or ")" in i or "[" in i or "]" in i or
"Transactions" in i):
continue
i = i.replace("…", "")
authorlist.append(i)
return authorlist
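    # Illustration of the parsing above (assumed Scholar result line format):
    # "A Author, B Author - Journal of Things, 2020 - publisher" would yield
    # ['A Author', 'B Author']; tokens with digits or venue words are filtered out.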
def _scholar_pub(self, __data):
databox = __data.find('div', class_='gs_ri')
title = databox.find('h3', class_='gs_rt')
cid = __data.get('data-cid')
pos = __data.get('data-rp')
self.bib['gsrank'] = str(int(pos) + 1)
if title.find('span', class_='gs_ctu'): # A citation
title.span.extract()
elif title.find('span', class_='gs_ctc'): # A book or PDF
title.span.extract()
self.bib['title'] = title.text.strip()
if title.find('a'):
self.bib['url'] = title.find('a')['href']
authorinfo = databox.find('div', class_='gs_a').text
authorinfo = authorinfo.replace(u'\xa0', u' ') # NBSP
authorinfo = authorinfo.replace(u'&', u'&') # Ampersand
self.bib["author"] = self._get_authorlist(authorinfo)
try:
venueyear = authorinfo.split(' - ')[1].split(',')
self.bib['venue'] = ''.join(venueyear[0:-1])
self.bib['year'] = venueyear[-1]
self.bib['year'] = self.bib['year'].strip()
except Exception:
self.bib['venue'], self.bib['year'] = 'NA', 'NA'
if databox.find('div', class_='gs_rs'):
self.bib['abstract'] = databox.find('div', class_='gs_rs').text
self.bib['abstract'] = self.bib['abstract'].replace(u'\u2026', u'')
self.bib['abstract'] = self.bib['abstract'].replace(u'\n', u' ')
self.bib['abstract'] = self.bib['abstract'].strip()
if self.bib['abstract'][0:8].lower() == 'abstract':
self.bib['abstract'] = self.bib['abstract'][9:].strip()
'''
lowerlinks = databox.find('div', class_='gs_fl').find_all('a')
self.bib["cites"] = "0"
for link in lowerlinks:
if (link is not None and
link.get('title') is not None and
'Cite' == link.get('title')):
self.url_scholarbib = self._get_bibtex(cid, pos)
sclib = self.nav.publib.format(id=cid)
self.url_add_sclib = sclib
if 'Cited by' in link.text:
self.bib['cites'] = re.findall(r'\d+', link.text)[0].strip()
self.citations_link = link['href']
if __data.find('div', class_='gs_ggs gs_fl'):
self.bib['eprint'] = __data.find(
'div', class_='gs_ggs gs_fl').a['href']
'''
@property
def filled(self) -> bool:
"""Indicates whether a publication has been filled
:getter: `True` if publication is filled, `False` otherwise.
:type: bool
# TODO: Example
"""
return self._filled
def fill(self):
"""Populate the Publication with information from its profile"""
if self.source == 'citations':
url = _CITATIONPUB.format(self.id_citations)
soup = self.nav._get_soup(url)
self.bib['title'] = soup.find('div', id='gsc_vcd_title').text
if soup.find('a', class_='gsc_vcd_title_link'):
self.bib['url'] = soup.find(
'a', class_='gsc_vcd_title_link')['href']
for item in soup.find_all('div', class_='gs_scl'):
key = item.find(class_='gsc_vcd_field').text.strip().lower()
val = item.find(class_='gsc_vcd_value')
if key == 'authors':
self.bib['author'] = ' and '.join(
[i.strip() for i in val.text.split(',')])
#elif key == 'journal':
# self.bib['journal'] = val.text
#elif key == 'volume':
# self.bib['volume'] = val.text
#elif key == 'issue':
# self.bib['number'] = val.text
#elif key == 'pages':
# self.bib['pages'] = val.text
#elif key == 'publisher':
# self.bib['publisher'] = val.text
elif key == 'Publication date':
patterns = ['YYYY/M',
'YYYY/MM/DD',
'YYYY',
'YYYY/M/DD',
'YYYY/M/D',
'YYYY/MM/D']
self.bib['year'] = arrow.get(val.text, patterns).year
#elif key == 'description':
# if val.text[0:8].lower() == 'abstract':
# val = val.text[9:].strip()
# abstract = val.find(class_='gsh_csp')
# if abstract is None:
# abstract = val.find(class_='gsh_small')
# self.bib['abstract'] = abstract.text
#elif key == 'total citations':
# self.bib['cites'] = re.findall(
# _SCHOLARPUBRE, val.a['href'])[0]
# number of citation per year
years = [int(y.text) for y in soup.find_all(class_='gsc_vcd_g_t')]
cites = [int(c.text) for c in soup.find_all(class_='gsc_vcd_g_al')]
self.cites_per_year = dict(zip(years, cites))
if soup.find('div', class_='gsc_vcd_title_ggi'):
self.bib['eprint'] = soup.find(
'div', class_='gsc_vcd_title_ggi').a['href']
self._filled = True
elif self.source == 'scholar':
bibtex = self.nav._get_page(self.url_scholarbib)
# WARNING: CUSTOM BUG FIX FOR THE API (10 JUNE 2020, LAKER) #
# OLD CODE DIDN'T INCLUDE ", parser". # # # # # # # # # # # #
self.bib.update(bibtexparser.loads(bibtex, parser).entries[0])
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
self._filled = True
return self
@property
def citedby(self) -> _SearchScholarIterator or list:
"""Searches GScholar for other articles that cite this Publication and
returns a Publication generator.
:getter: Returns a Generator of Publications that cited the current.
:type: Iterator[:class:`Publication`]
"""
if not self.filled:
self.fill()
return _SearchScholarIterator(self.nav, self.citations_link)
@property
def bibtex(self) -> str:
"""Returns the publication as a bibtex entry
:getter: Returns a bibtex entry in text format
:type: str
"""
if not self._filled:
self.fill()
a = BibDatabase()
a.entries = [self.bib]
return bibtexparser.dumps(a)
def _get_bibtex(self, cid: str, pos: str) -> str:
bib_url = _BIBCITE.format(cid, pos)
soup = self.nav._get_soup(bib_url)
styles = soup.find_all('a', class_='gs_citi')
for link in styles:
if link.string.lower() == "bibtex":
return link.get('href')
return ''
def __str__(self):
pdict = dict(self.__dict__)
try:
pdict["filled"] = self.filled
del pdict['nav']
del pdict['_filled']
except Exception:
raise
return pprint.pformat(pdict)
def __repr__(self):
return self.__str__()
|
the-stack_0_9148 | # coding: utf-8
import json
try:
from unittest.mock import Mock
except Exception:
from mock import Mock
from critics.core import CriticApp
from critics.parsers import Review
def test_poll_store(tmpdir):
fakemodel = tmpdir.join("fakemodel.json")
app = CriticApp(ios=['app1', 'app2'], language=['ru'], persist=True, slack_webhook='http://www')
app.model_file = fakemodel.strpath
fake_fetcher = Mock(return_value=[Review(
id=u'xfkew4ytwqqddid:2e22wdad',
platform='ios',
title=u'Great app! ♡',
rating=2,
summary=u'Here be\nDragons!',
url=u'http://www',
author=u'Here comes more BS',
date='bull',
language='en',
version=None
)])
app.fetchers['ios'] = fake_fetcher
fake_notifier = Mock()
app.notifiers['slack'] = fake_notifier
app.poll_store(platform='ios')
assert fake_fetcher.call_count == 2
assert fake_notifier.call_count == 1
assert len(app.reviews['ios']) == 1
fake_fetcher.reset_mock()
fake_notifier.reset_mock()
app.poll_store(platform='ios')
assert fake_fetcher.call_count == 2
assert fake_notifier.call_count == 0
assert len(app.reviews['ios']) == 1
def test_load_model():
app = CriticApp(persist=False)
assert not app.load_model()
assert app.reviews['ios'] == set()
assert app.reviews['android'] == set()
app = CriticApp(persist=True, model='tests/fixtures/model.json')
assert app.load_model()
assert app.reviews['ios'] == {'123'}
assert app.reviews['android'] == {'xxx', 'yyyy'}
def test_save_model(tmpdir):
fakemodel = tmpdir.join("fakemodel.json")
app = CriticApp(persist=False)
assert not app.save_model()
assert not fakemodel.check()
app = CriticApp(persist=True, model=fakemodel.strpath)
assert app.save_model()
assert fakemodel.check()
assert json.load(fakemodel) == {'android': {'python_object': 'set', 'value': []},
'ios': {'python_object': 'set', 'value': []}}
app.reviews['ios'].add('678')
app.reviews['android'].add('qqq')
assert app.save_model()
assert fakemodel.check()
assert json.load(fakemodel) == {'android': {'python_object': 'set', 'value': ['qqq']},
'ios': {'python_object': 'set', 'value': ['678']}}
|
the-stack_0_9149 | '''
Kattis - textencryption
Yet another time wasty problem :(
This one is somewhat annoying since the question is rather vague on what to do when the pointer
goes back to the same location as a char that has already been used...
The key idea of the encryption is: for each new letter of the plain text P[counter], we place it at
the first unused location reached by pt (the pointer). The pointer starts from i and advances by
n every iteration until it runs past the end of the plain text.
Time: O(l**2), Space: O(l)
'''
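# Worked trace (illustration): n=2, P="HELLO" (l=5).
# i=0 fills positions 0, 2, 4 with H, E, L; i=1 fills positions 1, 3 with L, O.
# => C = "HLEOL"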
while 1:
n = int(input())
if n == 0:
break
P = ("".join(input().split())).upper()
C = list("_"*len(P))
l = len(P)
used = [0] * len(C)
counter = 0
for i in range(l):
pt = i
while pt < l:
if (not used[pt]):
C[pt] = P[counter]
used[pt] = 1
counter += 1
pt += n
print("".join(C)) |
the-stack_0_9150 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Volumes API extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import extended_volumes
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import log as logging
from nova import volume
ALIAS = "os-extended-volumes"
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
authorize_attach = extensions.extension_authorizer('compute',
'v3:%s:attach' % ALIAS)
authorize_detach = extensions.extension_authorizer('compute',
'v3:%s:detach' % ALIAS)
authorize_swap = extensions.extension_authorizer('compute',
'v3:%s:swap' % ALIAS)
class ExtendedVolumesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedVolumesController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.volume_api = volume.API()
def _extend_server(self, context, server, instance):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
volume_ids = [bdm['volume_id'] for bdm in bdms if bdm['volume_id']]
key = "%s:volumes_attached" % ExtendedVolumes.alias
server[key] = [{'id': volume_id} for volume_id in volume_ids]
@extensions.expected_errors((400, 404, 409))
@wsgi.action('swap_volume_attachment')
@validation.schema(extended_volumes.swap_volume_attachment)
def swap(self, req, id, body):
context = req.environ['nova.context']
authorize_swap(context)
old_volume_id = body['swap_volume_attachment']['old_volume_id']
new_volume_id = body['swap_volume_attachment']['new_volume_id']
try:
old_volume = self.volume_api.get(context, old_volume_id)
new_volume = self.volume_api.get(context, new_volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
found = False
try:
for bdm in bdms:
if bdm.volume_id != old_volume_id:
continue
try:
self.compute_api.swap_volume(context, instance, old_volume,
new_volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'swap_volume')
if not found:
msg = _("The volume was either invalid or not attached to the "
"instance.")
raise exc.HTTPNotFound(explanation=msg)
else:
return webob.Response(status_int=202)
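    # Illustrative request body for the action above (field names taken from the
    # lookups in `swap` itself):
    # {"swap_volume_attachment": {"old_volume_id": "<uuid>", "new_volume_id": "<uuid>"}}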
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(context, server, db_instance)
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('attach')
@validation.schema(extended_volumes.attach)
def attach(self, req, id, body):
server_id = id
context = req.environ['nova.context']
authorize_attach(context)
volume_id = body['attach']['volume_id']
device = body['attach'].get('device')
disk_bus = body['attach'].get('disk_bus')
device_type = body['attach'].get('device_type')
LOG.audit(_("Attach volume %(volume_id)s to instance %(server_id)s "
"at %(device)s"),
{'volume_id': volume_id,
'device': device,
'server_id': server_id},
context=context)
instance = common.get_instance(self.compute_api, context, server_id,
want_objects=True)
try:
self.compute_api.attach_volume(context, instance,
volume_id, device,
disk_bus=disk_bus,
device_type=device_type)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(
state_error, 'attach_volume')
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InvalidDevicePath as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
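    # Illustrative request body for `attach` (only volume_id is a hard requirement;
    # the optional keys mirror the .get() calls above, example values are assumed):
    # {"attach": {"volume_id": "<uuid>", "device": "/dev/vdb",
    #             "disk_bus": "virtio", "device_type": "disk"}}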
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.response(202)
@wsgi.action('detach')
@validation.schema(extended_volumes.detach)
def detach(self, req, id, body):
server_id = id
context = req.environ['nova.context']
authorize_detach(context)
volume_id = body['detach']['volume_id']
LOG.audit(_("Detach volume %(volume_id)s from "
"instance %(server_id)s"),
{"volume_id": volume_id,
"server_id": id,
"context": context})
instance = common.get_instance(self.compute_api, context, server_id,
want_objects=True)
try:
volume = self.volume_api.get(context, volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
if not bdms:
msg = _("Volume %(volume_id)s is not attached to the "
"instance %(server_id)s") % {'server_id': server_id,
'volume_id': volume_id}
LOG.debug(msg)
raise exc.HTTPNotFound(explanation=msg)
for bdm in bdms:
if bdm.volume_id != volume_id:
continue
if bdm.is_root:
msg = _("Can't detach root device volume")
raise exc.HTTPForbidden(explanation=msg)
try:
self.compute_api.detach_volume(context, instance, volume)
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(
state_error, 'detach_volume')
else:
msg = _("Volume %(volume_id)s is not attached to the "
"instance %(server_id)s") % {'server_id': server_id,
'volume_id': volume_id}
raise exc.HTTPNotFound(explanation=msg)
class ExtendedVolumes(extensions.V3APIExtensionBase):
"""Extended Volumes support."""
name = "ExtendedVolumes"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = ExtendedVolumesController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
|
the-stack_0_9151 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from bigdl.chronos.autots.utils import recalculate_n_sampling
DEFAULT_BEST_MODEL_DIR = "best_model.ckpt"
DEFAULT_BEST_CONFIG_DIR = "best_config.json"
class BasePytorchAutomodel:
def __init__(self, **kwargs):
self.best_model = None
def fit(self,
data,
epochs=1,
batch_size=32,
validation_data=None,
metric_threshold=None,
n_sampling=1,
search_alg=None,
search_alg_params=None,
scheduler=None,
scheduler_params=None,
):
"""
Automatically fit the model and search for the best hyper parameters.
:param data: train data.
data can be a tuple of ndarrays or a PyTorch DataLoader
or a function that takes a config dictionary as parameter and returns a
PyTorch DataLoader.
:param epochs: Max number of epochs to train in each trial. Defaults to 1.
If you have also set metric_threshold, a trial will stop if either it has been
optimized to the metric_threshold or it has been trained for {epochs} epochs.
:param batch_size: Int or hp sampling function from an integer space. Training batch size.
It defaults to 32.
:param validation_data: Validation data. Validation data type should be the same as data.
:param metric_threshold: a trial will be terminated when metric threshold is met.
:param n_sampling: Number of trials to evaluate in total. Defaults to 1.
If hp.grid_search is in search_space, the grid will be run n_sampling of trials
and round up n_sampling according to hp.grid_search.
If this is -1, (virtually) infinite samples are generated
until a stopping condition is met.
:param search_alg: str, all supported searcher provided by ray tune
(i.e."variant_generator", "random", "ax", "dragonfly", "skopt",
"hyperopt", "bayesopt", "bohb", "nevergrad", "optuna", "zoopt" and
"sigopt").
:param search_alg_params: extra parameters for searcher algorithm besides search_space,
metric and searcher mode.
:param scheduler: str, all supported scheduler provided by ray tune.
:param scheduler_params: parameters for scheduler.
"""
self.search_space["batch_size"] = batch_size
n_sampling = recalculate_n_sampling(self.search_space,
n_sampling) if n_sampling != -1 else -1
self.auto_est.fit(
data=data,
epochs=epochs,
validation_data=validation_data,
metric=self.metric,
metric_mode=self.metric_mode,
metric_threshold=metric_threshold,
n_sampling=n_sampling,
search_space=self.search_space,
search_alg=search_alg,
search_alg_params=search_alg_params,
scheduler=scheduler,
scheduler_params=scheduler_params,
)
self.best_model = self.auto_est._get_best_automl_model()
self.best_config = self.auto_est.get_best_config()
def predict(self, data, batch_size=32):
'''
Predict using a the trained model after HPO(Hyper Parameter Optimization).
:param data: a numpy ndarray x, where x's shape is (num_samples, lookback, feature_dim)
where lookback and feature_dim should be the same as past_seq_len and
input_feature_num.
:param batch_size: predict batch size. The value will not affect predict
result but will affect resources cost(e.g. memory and time). The value
defaults to 32.
:return: A numpy array with shape (num_samples, horizon, target_dim).
'''
if self.best_model is None:
raise RuntimeError("You must call fit or load first before calling predict!")
return self.best_model.predict(data, batch_size=batch_size)
def predict_with_onnx(self, data, batch_size=32, dirname=None):
'''
Predict using a the trained model after HPO(Hyper Parameter Optimization).
Be sure to install onnx and onnxruntime to enable this function. The method
will give exactly the same result as .predict() but with higher throughput
and lower latency.
:param data: a numpy ndarray x, where x's shape is (num_samples, lookback, feature_dim)
where lookback and feature_dim should be the same as past_seq_len and
input_feature_num.
:param batch_size: predict batch size. The value will not affect predict
result but will affect resources cost(e.g. memory and time). The value
defaults to 32.
:param dirname: The directory to save onnx model file. This value defaults
to None for no saving file.
:return: A numpy array with shape (num_samples, horizon, target_dim).
'''
if self.best_model is None:
raise RuntimeError("You must call fit or load first before calling predict!")
return self.best_model.predict_with_onnx(data, batch_size=batch_size, dirname=dirname)
def evaluate(self, data,
batch_size=32,
metrics=["mse"],
multioutput="raw_values"):
'''
Evaluate using a the trained model after HPO(Hyper Parameter Optimization).
        Please note that the evaluate result is calculated on scaled y and yhat. If you scaled
        your data (e.g. used .scale() on the TSDataset), please follow the code snippet
        below to evaluate on unscaled data.
>>> from bigdl.orca.automl.metrics import Evaluator
>>> y_hat = automodel.predict(x)
>>> y_hat_unscaled = tsdata.unscale_numpy(y_hat) # or other customized unscale methods
>>> y_unscaled = tsdata.unscale_numpy(y) # or other customized unscale methods
>>> Evaluator.evaluate(metric=..., y_unscaled, y_hat_unscaled, multioutput=...)
:param data: a numpy ndarray tuple (x, y) x's shape is (num_samples, lookback,
feature_dim) where lookback and feature_dim should be the same as
past_seq_len and input_feature_num. y's shape is (num_samples, horizon,
target_dim), where horizon and target_dim should be the same as
future_seq_len and output_target_num.
:param batch_size: evaluate batch size. The value will not affect evaluate
result but will affect resources cost(e.g. memory and time).
:param metrics: list of string or callable. e.g. ['mse'] or [customized_metrics]
If callable function, it signature should be func(y_true, y_pred), where y_true and
y_pred are numpy ndarray. The function should return a float value as evaluation
result.
:param multioutput: Defines aggregating of multiple output values.
String in ['raw_values', 'uniform_average']. The value defaults to
'raw_values'.
:return: A list of evaluation results. Each item represents a metric.
'''
if self.best_model is None:
raise RuntimeError("You must call fit or load first before calling predict!")
return self.best_model.evaluate(data[0], data[1], metrics=metrics,
multioutput=multioutput, batch_size=batch_size)
def evaluate_with_onnx(self, data,
batch_size=32,
metrics=["mse"],
dirname=None,
multioutput="raw_values"):
'''
Evaluate using a the trained model after HPO(Hyper Parameter Optimization).
Be sure to install onnx and onnxruntime to enable this function. The method
will give exactly the same result as .evaluate() but with higher throughput
and lower latency.
        Please note that the evaluate result is calculated on scaled y and yhat. If you scaled
        your data (e.g. used .scale() on the TSDataset), please follow the code snippet
        below to evaluate on unscaled data.
>>> from bigdl.orca.automl.metrics import Evaluator
>>> y_hat = automodel.predict_with_onnx(x)
>>> y_hat_unscaled = tsdata.unscale_numpy(y_hat) # or other customized unscale methods
>>> y_unscaled = tsdata.unscale_numpy(y) # or other customized unscale methods
>>> Evaluator.evaluate(metric=..., y_unscaled, y_hat_unscaled, multioutput=...)
:param data: a numpy ndarray tuple (x, y) x's shape is (num_samples, lookback,
feature_dim) where lookback and feature_dim should be the same as
past_seq_len and input_feature_num. y's shape is (num_samples, horizon,
target_dim), where horizon and target_dim should be the same as
future_seq_len and output_target_num.
:param batch_size: evaluate batch size. The value will not affect evaluate
result but will affect resources cost(e.g. memory and time).
:param metrics: list of string or callable. e.g. ['mse'] or [customized_metrics]
If callable function, it signature should be func(y_true, y_pred), where y_true and
y_pred are numpy ndarray. The function should return a float value as evaluation
result.
:param dirname: The directory to save onnx model file. This value defaults
to None for no saving file.
:param multioutput: Defines aggregating of multiple output values.
String in ['raw_values', 'uniform_average']. The value defaults to
'raw_values'.
:return: A list of evaluation results. Each item represents a metric.
'''
if self.best_model is None:
raise RuntimeError("You must call fit or load first before calling predict!")
return self.best_model.evaluate_with_onnx(data[0], data[1],
metrics=metrics,
dirname=dirname,
multioutput=multioutput,
batch_size=batch_size)
def save(self, checkpoint_path):
"""
Save the best model.
Please note that if you only want the pytorch model or onnx model
file, you can call .get_model() or .export_onnx_file(). The checkpoint
file generated by .save() method can only be used by .load() in automodel.
:param checkpoint_path: The location you want to save the best model.
"""
if self.best_model is None:
raise RuntimeError("You must call fit or load first before calling predict!")
if not os.path.isdir(checkpoint_path):
os.mkdir(checkpoint_path)
model_path = os.path.join(checkpoint_path, DEFAULT_BEST_MODEL_DIR)
best_config_path = os.path.join(checkpoint_path, DEFAULT_BEST_CONFIG_DIR)
self.best_model.save(model_path)
with open(best_config_path, "w") as f:
json.dump(self.best_config, f)
def load(self, checkpoint_path):
"""
restore the best model.
:param checkpoint_path: The checkpoint location you want to load the best model.
"""
model_path = os.path.join(checkpoint_path, DEFAULT_BEST_MODEL_DIR)
best_config_path = os.path.join(checkpoint_path, DEFAULT_BEST_CONFIG_DIR)
self.best_model.restore(model_path)
with open(best_config_path, "r") as f:
self.best_config = json.load(f)
def build_onnx(self, thread_num=None, sess_options=None):
'''
Build onnx model to speed up inference and reduce latency.
The method is Not required to call before predict_with_onnx,
evaluate_with_onnx or export_onnx_file.
It is recommended to use when you want to:
| 1. Strictly control the thread to be used during inferencing.
| 2. Alleviate the cold start problem when you call predict_with_onnx
for the first time.
:param thread_num: int, the num of thread limit. The value is set to None by
default where no limit is set.
:param sess_options: an onnxruntime.SessionOptions instance, if you set this
other than None, a new onnxruntime session will be built on this setting
and ignore other settings you assigned(e.g. thread_num...).
Example:
>>> # to pre build onnx sess
>>> automodel.build_onnx(thread_num=1) # build onnx runtime sess for single thread
>>> pred = automodel.predict_with_onnx(data)
>>> # ------------------------------------------------------
>>> # directly call onnx related method is also supported
>>> pred = automodel.predict_with_onnx(data)
'''
import onnxruntime
if sess_options is not None and not isinstance(sess_options, onnxruntime.SessionOptions):
raise RuntimeError("sess_options should be an onnxruntime.SessionOptions instance"
f", but found {type(sess_options)}")
if self.distributed:
raise NotImplementedError("build_onnx has not been supported for distributed "
"forecaster. You can call .to_local() to transform the "
"forecaster to a non-distributed version.")
import torch
dummy_input = torch.rand(1, self.best_config["past_seq_len"],
self.best_config["input_feature_num"])
self.best_model._build_onnx(dummy_input,
dirname=None,
thread_num=thread_num,
sess_options=None)
def export_onnx_file(self, dirname):
"""
Save the onnx model file to the disk.
:param dirname: The dir location you want to save the onnx file.
"""
if self.distributed:
raise NotImplementedError("export_onnx_file has not been supported for distributed "
"forecaster. You can call .to_local() to transform the "
"forecaster to a non-distributed version.")
import torch
dummy_input = torch.rand(1, self.best_config["past_seq_len"],
self.best_config["input_feature_num"])
self.best_model._build_onnx(dummy_input, dirname)
def get_best_model(self):
"""
Get the best pytorch model.
"""
return self.auto_est.get_best_model()
def get_best_config(self):
"""
        Get the best configuration.
:return: A dictionary of best hyper parameters
"""
return self.best_config
def _get_best_automl_model(self):
return self.best_model
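# Illustrative usage sketch (hypothetical variable names; `automodel` is assumed
# to be an already-fitted instance of the class above and `data` whatever
# x/y pair its predict/evaluate methods expect):
#
#   automodel.save("best_ckpt")              # persist best model + best config
#   automodel.load("best_ckpt")              # restore them in a new session
#   automodel.build_onnx(thread_num=1)       # optionally pre-build the onnxruntime session
#   pred = automodel.predict_with_onnx(data)
#   res = automodel.evaluate_with_onnx(data, metrics=["mse"])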
|
the-stack_0_9152 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import uuid
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from neutronclient.common import exceptions as exc
from openstack_dashboard.api import keystone
from openstack_dashboard.api import neutron
from oslo_log import log as logging
from muranodashboard.environments import api as env_api
LOG = logging.getLogger(__name__)
NEUTRON_NET_HELP = _("The VMs of the applications in this environment will "
"join this net by default, unless configured "
"individually. Choosing 'Create New' will generate a new "
"Network with a Subnet having an IP range allocated "
"among the ones available for the default Murano Router "
"of this project")
NN_HELP = _("OpenStack Networking (Neutron) is not available in current "
"environment. Custom Network Settings cannot be applied")
def get_project_assigned_network(request):
tenant_id = request.user.tenant_id
tenant = keystone.tenant_get(request, tenant_id)
network_name = getattr(settings, 'FIXED_MURANO_NETWORK', 'murano_network')
tenant_network_id = getattr(tenant, network_name, None)
if not tenant_network_id:
LOG.warning(("murano_network property is not "
"defined for project '%s'") % tenant_id)
return []
try:
tenant_network = neutron.network_get(request, tenant_network_id)
return [((tenant_network.id, None), tenant_network.name_or_id)]
except exc.NeutronClientException:
return []
def get_available_networks(request, filter=None, murano_networks=None):
if murano_networks:
env_names = [e.name for e in env_api.environments_list(request)]
def get_net_env(name):
for env_name in env_names:
if name.startswith(env_name + '-network'):
return env_name
network_choices = []
tenant_id = request.user.tenant_id
try:
networks = neutron.network_list_for_tenant(request,
tenant_id=tenant_id)
except exceptions.ServiceCatalogException:
LOG.warning("Neutron not found. Assuming Nova Network usage")
return []
# Remove external networks
networks = [network for network in networks
if network.router__external is False]
if filter:
networks = [network for network in networks
if re.match(filter, network.name) is not None]
for net in networks:
env = None
netname = None
if murano_networks and len(net.subnets) == 1:
env = get_net_env(net.name)
if env:
if murano_networks == 'exclude':
continue
else:
netname = _("Network of '%s'") % env
for subnet in net.subnets:
if not netname:
full_name = (
"%(net)s: %(cidr)s %(subnet)s" %
dict(net=net.name_or_id,
cidr=subnet.cidr,
subnet=subnet.name_or_id))
network_choices.append(
((net.id, subnet.id), netname or full_name))
netname = _("%s: random subnet") % (
netname or net.name_or_id)
network_choices.append(((net.id, None), netname))
return network_choices
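# Illustrative shape of the returned choices (hypothetical IDs, names and CIDRs):
#   [(('net-id', 'subnet-id'), "private: 10.0.0.0/24 private-subnet"),
#    (('net-id', None), "private: random subnet")]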
def generate_join_existing_net(net_config):
res = {
"defaultNetworks": {
'environment': {
'?': {
'id': uuid.uuid4().hex,
'type': 'io.murano.resources.ExistingNeutronNetwork'
},
'internalNetworkName': net_config[0],
'internalSubnetworkName': net_config[1]
},
'flat': None
}
}
return res
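# Illustrative result of generate_join_existing_net(('net-id', 'subnet-id'))
# with hypothetical IDs; the '?'/'id' value is a fresh uuid4 hex on every call:
#   {'defaultNetworks': {
#       'environment': {
#           '?': {'id': '<uuid4 hex>',
#                 'type': 'io.murano.resources.ExistingNeutronNetwork'},
#           'internalNetworkName': 'net-id',
#           'internalSubnetworkName': 'subnet-id'},
#       'flat': None}}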
|
the-stack_0_9154 | #!/usr/bin/env python3
import os
import json
import sys
import logging
from github import Github
from pr_info import PRInfo
from get_robot_token import get_best_robot_token
from commit_status_helper import get_commit
NAME = 'Run Check (actions)'
TRUSTED_ORG_IDS = {
7409213, # yandex
28471076, # altinity
54801242, # clickhouse
}
OK_TEST_LABEL = set(["can be tested", "release", "pr-documentation", "pr-doc-fix"])
DO_NOT_TEST_LABEL = "do not test"
# Individual trusted contributors who are not in any trusted organization.
# Can be changed at runtime: we will append users that we learn to be in
# a trusted org, to save GitHub API calls.
TRUSTED_CONTRIBUTORS = {
"achimbab",
"adevyatova ", # DOCSUP
"Algunenano", # Raúl Marín, Tinybird
"AnaUvarova", # DOCSUP
"anauvarova", # technical writer, Yandex
"annvsh", # technical writer, Yandex
"atereh", # DOCSUP
"azat",
"bharatnc", # Newbie, but already with many contributions.
"bobrik", # Seasoned contributor, CloundFlare
"BohuTANG",
"damozhaeva", # DOCSUP
"den-crane",
"gyuton", # DOCSUP
"gyuton", # technical writer, Yandex
"hagen1778", # Roman Khavronenko, seasoned contributor
"hczhcz",
"hexiaoting", # Seasoned contributor
"ildus", # adjust, ex-pgpro
"javisantana", # a Spanish ClickHouse enthusiast, ex-Carto
"ka1bi4", # DOCSUP
"kirillikoff", # DOCSUP
"kitaisreal", # Seasoned contributor
"kreuzerkrieg",
"lehasm", # DOCSUP
"michon470", # DOCSUP
"MyroTk", # Tester in Altinity
"myrrc", # Michael Kot, Altinity
"nikvas0",
"nvartolomei",
"olgarev", # DOCSUP
"otrazhenia", # Yandex docs contractor
"pdv-ru", # DOCSUP
"podshumok", # cmake expert from QRator Labs
"s-mx", # Maxim Sabyanin, former employee, present contributor
"sevirov", # technical writer, Yandex
"spongedu", # Seasoned contributor
"ucasFL", # Amos Bird's friend
"vdimir", # Employee
"vzakaznikov",
"YiuRULE",
"zlobober" # Developer of YT
}
def pr_is_by_trusted_user(pr_user_login, pr_user_orgs):
if pr_user_login in TRUSTED_CONTRIBUTORS:
logging.info("User '%s' is trusted", pr_user_login)
return True
logging.info("User '%s' is not trusted", pr_user_login)
for org_id in pr_user_orgs:
if org_id in TRUSTED_ORG_IDS:
logging.info("Org '%s' is trusted; will mark user %s as trusted", org_id, pr_user_login)
return True
logging.info("Org '%s' is not trusted", org_id)
return False
# Returns whether we should look into individual checks for this PR. If not, it
# can be skipped entirely.
def should_run_checks_for_pr(pr_info):
# Consider the labels and whether the user is trusted.
force_labels = set(['force tests']).intersection(pr_info.labels)
if force_labels:
return True, "Labeled '{}'".format(', '.join(force_labels))
if 'do not test' in pr_info.labels:
return False, "Labeled 'do not test'"
if 'can be tested' not in pr_info.labels and not pr_is_by_trusted_user(pr_info.user_login, pr_info.user_orgs):
return False, "Needs 'can be tested' label"
if 'release' in pr_info.labels or 'pr-backport' in pr_info.labels or 'pr-cherrypick' in pr_info.labels:
return False, "Don't try new checks for release/backports/cherry-picks"
return True, "No special conditions apply"
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
with open(os.getenv('GITHUB_EVENT_PATH'), 'r') as event_file:
event = json.load(event_file)
pr_info = PRInfo(event, need_orgs=True)
can_run, description = should_run_checks_for_pr(pr_info)
gh = Github(get_best_robot_token())
commit = get_commit(gh, pr_info.sha)
url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}"
if not can_run:
print("::notice ::Cannot run")
commit.create_status(context=NAME, description=description, state="failure", target_url=url)
sys.exit(1)
else:
if 'pr-documentation' in pr_info.labels or 'pr-doc-fix' in pr_info.labels:
commit.create_status(context=NAME, description="Skipping checks for documentation", state="success", target_url=url)
print("::notice ::Can run, but it's documentation PR, skipping")
else:
print("::notice ::Can run")
commit.create_status(context=NAME, description=description, state="pending", target_url=url)
|
the-stack_0_9157 | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The **gridpath.project.operations.costs** module is a project-level
module that adds to the formulation components that describe the
operations-related costs of projects (e.g. variable O&M costs, fuel costs,
startup and shutdown costs).
For this purpose, this module calls the respective methods from the
operational type modules.
"""
import csv
import os.path
from pyomo.environ import Set, Var, Expression, Constraint, \
NonNegativeReals, value
from db.common_functions import spin_on_database_lock
from gridpath.auxiliary.auxiliary import get_required_subtype_modules_from_projects_file
from gridpath.project.operations.common_functions import \
load_operational_type_modules
from gridpath.auxiliary.db_interface import setup_results_import
import gridpath.project.operations.operational_types as op_type
def add_model_components(m, d, scenario_directory, subproblem, stage):
"""
The following Pyomo model components are defined in this module:
+-------------------------------------------------------------------------+
| Sets |
+=========================================================================+
| | :code:`VAR_OM_COST_SIMPLE_PRJ_OPR_TMPS` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| The two-dimensional set of projects for which a simple variable O&M |
| cost is specified and their operational timepoints. |
+-------------------------------------------------------------------------+
| | :code:`VAR_OM_COST_CURVE_PRJS_OPR_TMPS_SGMS` |
| |
| The three-dimensional set of projects for which a VOM cost curve is |
| specified along with the VOM curve segments and the project |
| operational timepoints. |
+-------------------------------------------------------------------------+
| | :code:`VAR_OM_COST_CURVE_PRJS_OPR_TMPS` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| The two-dimensional set of projects for which a VOM cost curve is |
| specified along with their operational timepoints. |
+-------------------------------------------------------------------------+
| | :code:`VAR_OM_COST_ALL_PRJS_OPR_TMPS` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| The two-dimensional set of projects for which either or both a simple |
| VOM or a VOM curve is specified along with their operational |
| timepoints. |
+-------------------------------------------------------------------------+
| | :code:`STARTUP_COST_PRJ_OPR_TMPS` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| The two-dimensional set of projects for which a startup cost is |
| specified along with their operational timepoints. |
+-------------------------------------------------------------------------+
| | :code:`SHUTDOWN_COST_PRJ_OPR_TMPS` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| The two-dimensional set of projects for which a shutdown cost curve is |
| specified along with their operational timepoints. |
+-------------------------------------------------------------------------+
| | :code:`VIOL_ALL_PRJ_OPR_TMPS` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| The two-dimensional set of projects for which an operational constraint |
| can be violated along with their operational timepoints. |
+-------------------------------------------------------------------------+
| | :code:`CURTAILMENT_COST_PRJ_OPR_TMPS` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| The two-dimensional set of projects for which an curtailment costs are |
| incurred along with their operational timepoints. |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Variables |
+=========================================================================+
| | :code:`Variable_OM_Curve_Cost` |
| | *Defined over*: :code:`VAR_OM_COST_CURVE_PRJS_OPR_TMPS` |
| | *Within*: :code:`NonNegativeReals` |
| |
| Variable cost in each operational timepoint of projects with a VOM cost |
| curve. |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Constraints |
+=========================================================================+
| | :code:`Variable_OM_Curve_Constraint` |
| | *Defined over*: :code:`VAR_OM_COST_CURVE_PRJS_OPR_TMPS_SGMS` |
| |
| Determines variable cost from the project in each timepoint based on |
| its VOM curve. |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Expressions |
+=========================================================================+
| | :code:`Variable_OM_Cost` |
| | *Defined over*: :code:`VAR_OM_COST_ALL_PRJS_OPR_TMPS` |
| |
| This is the variable cost incurred in each operational timepoints for |
| projects for which either a simple VOM or a VOM curve is specified. |
| If both are specified, the two are additive. We obtain the simple VOM |
| by calling the *variable_om_cost_rule* method of a project's |
| *operational_type* module. We obtain the VOM curve cost by calling the |
| *variable_om_cost_by_ll_rule* method of a project's operational type, |
| using that to create the *Variable_OM_Curve_Constraint* on the |
    | Variable_OM_Curve_Cost variable, and then using the variable in this   |
| expression. |
+-------------------------------------------------------------------------+
| | :code:`Fuel_Cost` |
| | *Defined over*: :code:`FUEL_PRJ_OPR_TMPS` |
| |
| This expression defines the fuel cost of a project in all of its |
| operational timepoints. We obtain the expression by calling the |
| *fuel_cost_rule* method of a project's *operational_type* module. |
+-------------------------------------------------------------------------+
| | :code:`Startup_Cost` |
| | *Defined over*: :code:`STARTUP_COST_PRJ_OPR_TMPS` |
| |
| This expression defines the startup cost of a project in all of its |
| operational timepoints. We obtain the expression by calling the |
| *startup_cost_rule* method of a project's *operational_type* module. |
+-------------------------------------------------------------------------+
| | :code:`Shutdown_Cost` |
| | *Defined over*: :code:`SHUTDOWN_COST_PRJ_OPR_TMPS` |
| |
| This expression defines the shutdown cost of a project in all of its |
| operational timepoints. We obtain the expression by calling the |
| *shutdown_cost_rule* method of a project's *operational_type* module. |
+-------------------------------------------------------------------------+
| | :code:`Operational_Violation_Cost` |
| | *Defined over*: :code:`VIOL_ALL_PRJ_OPR_TMPS` |
| |
| This expression defines the operational constraint violation cost of a |
| project in all of its operational timepoints. We obtain the expression |
| by calling the *operational_violation_cost_rule* method of a project's |
| *operational_type* module. |
+-------------------------------------------------------------------------+
| | :code:`Curtailment_Cost` |
| | *Defined over*: :code:`CURTAILMENT_COST_PRJ_OPR_TMPS` |
| |
| This expression defines the curtailment cost of a project in all of its |
| operational timepoints. We obtain the expression by calling the |
| *curtailment_cost_rule* method of a project's *operational_type* module.|
+-------------------------------------------------------------------------+
"""
# Dynamic Inputs
###########################################################################
required_operational_modules = get_required_subtype_modules_from_projects_file(
scenario_directory=scenario_directory, subproblem=subproblem,
stage=stage, which_type="operational_type"
)
imported_operational_modules = load_operational_type_modules(
required_operational_modules
)
# Sets
###########################################################################
m.VAR_OM_COST_SIMPLE_PRJ_OPR_TMPS = Set(
dimen=2,
within=m.PRJ_OPR_TMPS,
initialize=lambda mod: [(p, tmp) for (p, tmp) in mod.PRJ_OPR_TMPS
if p in mod.VAR_OM_COST_SIMPLE_PRJS]
)
m.VAR_OM_COST_CURVE_PRJS_OPR_TMPS_SGMS = Set(
dimen=3,
initialize=lambda mod: list(
set((g, tmp, s) for (g, tmp) in mod.PRJ_OPR_TMPS
for _g, p, s in mod.VAR_OM_COST_CURVE_PRJS_PRDS_SGMS
if g == _g and mod.period[tmp] == p)
)
)
m.VAR_OM_COST_CURVE_PRJS_OPR_TMPS = Set(
dimen=2,
within=m.PRJ_OPR_TMPS,
initialize=lambda mod: list(
set((g, tmp) for (g, tmp, s)
in mod.VAR_OM_COST_CURVE_PRJS_OPR_TMPS_SGMS)
)
)
# All VOM projects
m.VAR_OM_COST_ALL_PRJS_OPR_TMPS = Set(
within=m.PRJ_OPR_TMPS,
initialize=lambda mod: list(
set(mod.VAR_OM_COST_SIMPLE_PRJ_OPR_TMPS
| mod.VAR_OM_COST_CURVE_PRJS_OPR_TMPS)
)
)
m.STARTUP_COST_PRJ_OPR_TMPS = Set(
dimen=2,
within=m.PRJ_OPR_TMPS,
initialize=lambda mod: [(p, tmp) for (p, tmp) in mod.PRJ_OPR_TMPS
if p in mod.STARTUP_COST_PRJS]
)
m.SHUTDOWN_COST_PRJ_OPR_TMPS = Set(
dimen=2,
within=m.PRJ_OPR_TMPS,
initialize=lambda mod: [(p, tmp) for (p, tmp) in mod.PRJ_OPR_TMPS
if p in mod.SHUTDOWN_COST_PRJS]
)
m.VIOL_ALL_PRJ_OPR_TMPS = Set(
dimen=2,
within=m.PRJ_OPR_TMPS,
initialize=lambda mod: [(p, tmp) for (p, tmp) in mod.PRJ_OPR_TMPS
if p in mod.VIOL_ALL_PRJS]
)
m.CURTAILMENT_COST_PRJ_OPR_TMPS = Set(
dimen=2,
within=m.PRJ_OPR_TMPS,
initialize=lambda mod: [(p, tmp) for (p, tmp) in mod.PRJ_OPR_TMPS
if p in mod.CURTAILMENT_COST_PRJS]
)
# Variables
###########################################################################
m.Variable_OM_Curve_Cost = Var(
m.VAR_OM_COST_CURVE_PRJS_OPR_TMPS,
within=NonNegativeReals
)
# Constraints
###########################################################################
def variable_om_cost_curve_constraint_rule(mod, prj, tmp, s):
"""
        **Constraint Name**: Variable_OM_Curve_Constraint
        **Enforced Over**: VAR_OM_COST_CURVE_PRJS_OPR_TMPS_SGMS
        Variable O&M cost by loading level is set by a piecewise linear
        representation of the input/output curve (variable O&M cost vs.
        loading level).
Note: we assume that when projects are derated for availability, the
input/output curve is derated by the same amount. The implicit
assumption is that when a generator is de-rated, some of its units
are out rather than it being forced to run below minimum stable level
at very costly operating points.
"""
gen_op_type = mod.operational_type[prj]
if hasattr(imported_operational_modules[gen_op_type],
"variable_om_cost_by_ll_rule"):
var_cost_by_ll = imported_operational_modules[gen_op_type]. \
variable_om_cost_by_ll_rule(mod, prj, tmp, s)
else:
var_cost_by_ll = \
op_type.variable_om_cost_by_ll_rule(mod, prj, tmp, s)
return mod.Variable_OM_Curve_Cost[prj, tmp] \
>= var_cost_by_ll
m.Variable_OM_Curve_Constraint = Constraint(
m.VAR_OM_COST_CURVE_PRJS_OPR_TMPS_SGMS,
rule=variable_om_cost_curve_constraint_rule
)
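    # Worked sketch (hypothetical numbers): if the VOM curve segments evaluate
    # to 40 and 55 at the current loading level, the constraint above forces
    # Variable_OM_Curve_Cost >= 40 and >= 55; because this variable ultimately
    # enters the cost-minimization objective, it settles at 55, i.e. at the
    # piecewise-linear curve value (the usual epigraph trick for convex curves).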
# Expressions
###########################################################################
def variable_om_cost_rule(mod, prj, tmp):
"""
**Expression Name**: Variable_OM_Cost
**Defined Over**: VAR_OM_COST_ALL_PRJS_OPR_TMPS
This is the variable cost incurred in each operational timepoints for
projects for which either a simple VOM or a VOM curve is specified.
If both are specified, the two are additive.
"""
# Simple VOM cost
gen_op_type = mod.operational_type[prj]
if prj in mod.VAR_OM_COST_SIMPLE_PRJS:
if hasattr(imported_operational_modules[gen_op_type],
"variable_om_cost_rule"):
var_cost_simple = imported_operational_modules[gen_op_type]. \
variable_om_cost_rule(mod, prj, tmp)
else:
var_cost_simple = op_type.variable_om_cost_rule(mod, prj, tmp)
else:
var_cost_simple = 0
# VOM curve cost
if prj in mod.VAR_OM_COST_CURVE_PRJS:
var_cost_curve = mod.Variable_OM_Curve_Cost[prj, tmp]
else:
var_cost_curve = 0
# The two are additive
return var_cost_simple + var_cost_curve
m.Variable_OM_Cost = Expression(
m.VAR_OM_COST_ALL_PRJS_OPR_TMPS,
rule=variable_om_cost_rule
)
def fuel_cost_rule(mod, prj, tmp):
"""
**Expression Name**: Fuel_Cost
        **Defined Over**: FUEL_PRJ_OPR_TMPS
"""
return mod.Total_Fuel_Burn_MMBtu[prj, tmp] * \
mod.fuel_price_per_mmbtu[
mod.fuel[prj],
mod.period[tmp],
mod.month[tmp]
]
m.Fuel_Cost = Expression(
m.FUEL_PRJ_OPR_TMPS,
rule=fuel_cost_rule
)
def startup_cost_rule(mod, prj, tmp):
"""
Startup costs are defined for some operational types while they are
zero for others. Get the appropriate expression for each generator
based on its operational type.
"""
gen_op_type = mod.operational_type[prj]
if prj in mod.STARTUP_COST_SIMPLE_PRJS:
if hasattr(imported_operational_modules[gen_op_type],
"startup_cost_simple_rule"):
startup_cost_simple = \
imported_operational_modules[gen_op_type]. \
startup_cost_simple_rule(mod, prj, tmp)
else:
startup_cost_simple = \
op_type.startup_cost_simple_rule(mod, prj, tmp)
else:
startup_cost_simple = 0
if prj in mod.STARTUP_BY_ST_PRJS:
if hasattr(imported_operational_modules[gen_op_type],
"startup_cost_by_st_rule"):
startup_cost_by_st = \
imported_operational_modules[gen_op_type]. \
startup_cost_by_st_rule(mod, prj, tmp)
else:
startup_cost_by_st = \
op_type.startup_cost_by_st_rule(mod, prj, tmp)
else:
startup_cost_by_st = 0
return startup_cost_simple + startup_cost_by_st
m.Startup_Cost = Expression(
m.STARTUP_COST_PRJ_OPR_TMPS,
rule=startup_cost_rule
)
def shutdown_cost_rule(mod, prj, tmp):
"""
Shutdown costs are defined for some operational types while they are
zero for others. Get the appropriate expression for each generator
based on its operational type.
"""
gen_op_type = mod.operational_type[prj]
if hasattr(imported_operational_modules[gen_op_type],
"shutdown_cost_rule"):
return imported_operational_modules[gen_op_type]. \
shutdown_cost_rule(mod, prj, tmp)
else:
return op_type.shutdown_cost_rule(mod, prj, tmp)
m.Shutdown_Cost = Expression(
m.SHUTDOWN_COST_PRJ_OPR_TMPS,
rule=shutdown_cost_rule
)
def operational_violation_cost_rule(mod, prj, tmp):
"""
Get any operational constraint violation costs.
"""
gen_op_type = mod.operational_type[prj]
if hasattr(imported_operational_modules[gen_op_type],
"operational_violation_cost_rule"):
return imported_operational_modules[gen_op_type]. \
operational_violation_cost_rule(mod, prj, tmp)
else:
return op_type.operational_violation_cost_rule(mod, prj, tmp)
m.Operational_Violation_Cost = Expression(
m.VIOL_ALL_PRJ_OPR_TMPS,
rule=operational_violation_cost_rule
)
def curtailment_cost_rule(mod, prj, tmp):
"""
Curtailment costs are defined for some operational types while they are
zero for others. Get the appropriate expression for each generator
based on its operational type.
"""
gen_op_type = mod.operational_type[prj]
if hasattr(imported_operational_modules[gen_op_type],
"curtailment_cost_rule"):
return imported_operational_modules[gen_op_type]. \
curtailment_cost_rule(mod, prj, tmp)
else:
return op_type.curtailment_cost_rule(mod, prj, tmp)
m.Curtailment_Cost = Expression(
m.CURTAILMENT_COST_PRJ_OPR_TMPS,
rule=curtailment_cost_rule
)
# Input-Output
###############################################################################
def export_results(scenario_directory, subproblem, stage, m, d):
"""
    Export operations results. Note: fuel cost includes startup fuel as well
    if applicable, in which case the startup fuel cost is additional to
    the startup costs reported here.
:param scenario_directory:
:param subproblem:
:param stage:
:param m:
The Pyomo abstract model
:param d:
Dynamic components
:return:
Nothing
"""
with open(os.path.join(scenario_directory, str(subproblem), str(stage),
"results",
"costs_operations.csv"), "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(
["project", "period", "horizon", "timepoint", "timepoint_weight",
"number_of_hours_in_timepoint", "load_zone", "technology",
"variable_om_cost", "fuel_cost", "startup_cost", "shutdown_cost",
"operational_violation_cost", "curtailment_cost"]
)
for (p, tmp) in m.PRJ_OPR_TMPS:
writer.writerow([
p,
m.period[tmp],
m.horizon[tmp, m.balancing_type_project[p]],
tmp,
m.tmp_weight[tmp],
m.hrs_in_tmp[tmp],
m.load_zone[p],
m.technology[p],
value(m.Variable_OM_Cost[p, tmp])
if p in m.VAR_OM_COST_ALL_PRJS else None,
value(m.Fuel_Cost[p, tmp]) if p in m.FUEL_PRJS else None,
value(m.Startup_Cost[p, tmp])
if p in m.STARTUP_COST_PRJS else None,
value(m.Shutdown_Cost[p, tmp])
if p in m.SHUTDOWN_COST_PRJS else None,
value(m.Operational_Violation_Cost[p, tmp])
                if p in m.VIOL_ALL_PRJS else None,
value(m.Curtailment_Cost[p, tmp])
if p in m.CURTAILMENT_COST_PRJS else None,
])
# Database
###############################################################################
def import_results_into_database(
scenario_id, subproblem, stage, c, db, results_directory, quiet
):
"""
:param scenario_id:
:param c:
:param db:
:param results_directory:
:param quiet:
:return:
"""
if not quiet:
print("project costs operations")
# costs_operations.csv
# Delete prior results and create temporary import table for ordering
setup_results_import(conn=db, cursor=c,
table="results_project_costs_operations",
scenario_id=scenario_id, subproblem=subproblem,
stage=stage)
# Load results into the temporary table
results = []
with open(os.path.join(results_directory, "costs_operations.csv"),
"r") as dispatch_file:
reader = csv.reader(dispatch_file)
next(reader) # skip header
for row in reader:
project = row[0]
period = row[1]
horizon = row[2]
timepoint = row[3]
timepoint_weight = row[4]
number_of_hours_in_timepoint = row[5]
load_zone = row[6]
technology = row[7]
variable_om_cost = row[8]
fuel_cost = row[9]
startup_cost = row[10]
shutdown_cost = row[11]
results.append(
(scenario_id, project, period, subproblem, stage,
horizon, timepoint, timepoint_weight,
number_of_hours_in_timepoint, load_zone, technology,
variable_om_cost, fuel_cost, startup_cost, shutdown_cost)
)
insert_temp_sql = """
INSERT INTO
temp_results_project_costs_operations{}
(scenario_id, project, period, subproblem_id, stage_id,
horizon, timepoint, timepoint_weight,
number_of_hours_in_timepoint, load_zone, technology,
variable_om_cost, fuel_cost, startup_cost, shutdown_cost)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);""".format(
scenario_id)
spin_on_database_lock(conn=db, cursor=c, sql=insert_temp_sql, data=results)
# Insert sorted results into permanent results table
insert_sql = """
INSERT INTO
results_project_costs_operations
(scenario_id, project, period, subproblem_id, stage_id,
horizon, timepoint, timepoint_weight,
number_of_hours_in_timepoint, load_zone, technology,
variable_om_cost, fuel_cost, startup_cost, shutdown_cost)
SELECT
scenario_id, project, period, subproblem_id, stage_id,
horizon, timepoint, timepoint_weight,
number_of_hours_in_timepoint, load_zone, technology,
variable_om_cost, fuel_cost, startup_cost, shutdown_cost
FROM temp_results_project_costs_operations{}
ORDER BY scenario_id, project, subproblem_id, stage_id, timepoint;
""".format(scenario_id)
spin_on_database_lock(conn=db, cursor=c, sql=insert_sql, data=(),
many=False)
def process_results(db, c, scenario_id, subscenarios, quiet):
"""
Aggregate costs by zone and period
TODO: by technology too?
:param db:
:param c:
:param subscenarios:
:param quiet:
:return:
"""
if not quiet:
print("aggregate costs")
# Delete old results
del_sql = """
DELETE FROM results_project_costs_operations_agg
WHERE scenario_id = ?
"""
spin_on_database_lock(conn=db, cursor=c, sql=del_sql,
data=(scenario_id,),
many=False)
# Aggregate operational costs by period and load zone
agg_sql = """
INSERT INTO results_project_costs_operations_agg
(scenario_id, subproblem_id, stage_id, period,
load_zone, spinup_or_lookahead,
variable_om_cost, fuel_cost, startup_cost, shutdown_cost)
SELECT scenario_id, subproblem_id, stage_id, period, load_zone,
spinup_or_lookahead,
    SUM(variable_om_cost * timepoint_weight * number_of_hours_in_timepoint)
        AS variable_om_cost,
    SUM(fuel_cost * timepoint_weight * number_of_hours_in_timepoint)
        AS fuel_cost,
SUM(startup_cost * timepoint_weight) AS startup_cost,
SUM(shutdown_cost * timepoint_weight) AS shutdown_cost
FROM results_project_costs_operations
WHERE scenario_id = ?
GROUP BY subproblem_id, stage_id, period, load_zone, spinup_or_lookahead
ORDER BY subproblem_id, stage_id, period, load_zone, spinup_or_lookahead
;"""
spin_on_database_lock(conn=db, cursor=c, sql=agg_sql,
data=(scenario_id,),
many=False)
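# Weighting sketch for the aggregation above (illustrative numbers): a timepoint
# with timepoint_weight=365 and number_of_hours_in_timepoint=24 contributes
# variable_om_cost * 365 * 24 and fuel_cost * 365 * 24 to the period totals,
# while startup and shutdown costs are scaled by timepoint_weight only (* 365),
# since they are incurred per event rather than per hour.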
|