ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a377778a9ac33590aa7a5d0104cd8cf2fab8e6d | # -----------------------------------------------------------------------------
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Provides QtNetworkAuth classes and functions."""
# Local imports
from . import PYQT5, PYQT6, PYSIDE2, PYSIDE6, PythonQtError
if PYQT6:
from PyQt6.QtNetworkAuth import *
elif PYQT5:
from PyQt5.QtNetworkAuth import *
elif PYSIDE6:
from PySide6.QtNetworkAuth import *
else:
raise PythonQtError('No Qt bindings could be found')
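# Illustrative usage sketch (not part of the original shim; assumes a Qt binding that
# ships the Network Authorization module is installed). After the wildcard import above,
# classes such as QOAuth2AuthorizationCodeFlow resolve to whichever binding was selected:
#   from qtpy.QtNetworkAuth import QOAuth2AuthorizationCodeFlow
#   flow = QOAuth2AuthorizationCodeFlow()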
|
py | 1a377783dd303582b994d2507fb1ad356907b998 | # SPDX-FileCopyrightText: 2021 Dylan Herrada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import math
import board
import busio
from digitalio import DigitalInOut
import displayio
from adafruit_display_shapes.rect import Rect
import adafruit_imageload
import adafruit_touchscreen
# ESP32 SPI
from adafruit_esp32spi import adafruit_esp32spi, adafruit_esp32spi_wifimanager
# Import NeoPixel Library
import neopixel
# Import Adafruit IO HTTP Client
from adafruit_io.adafruit_io import IO_HTTP, AdafruitIO_RequestError
ts = adafruit_touchscreen.Touchscreen(
board.TOUCH_XL,
board.TOUCH_XR,
board.TOUCH_YD,
board.TOUCH_YU,
calibration=((5200, 59000), (5800, 57000)),
size=(480, 320),
)
RED = 0xFF0000
YELLOW = 0xFF9600
ORANGE = 0xFF2800
GREEN = 0x00FF00
TEAL = 0x00FF78
CYAN = 0x00FFFF
BLUE = 0x0000FF
PURPLE = 0xB400FF
MAGENTA = 0xFF0014
WHITE = 0xFFFFFF
BLACK = 0x000000
GOLD = 0xFFDE1E
PINK = 0xF15AFF
AQUA = 0x32FFFF
JADE = 0x00FF28
AMBER = 0xFF6400
"""
colors = [None, None, None, None,
None, None, None, None,
GREEN, ORANGE, YELLOW, RED,
PURPLE, BLUE, CYAN, TEAL,
GOLD, BLACK, WHITE, MAGENTA,
AMBER, JADE, AQUA, PINK]
"""
colors = [
None,
None,
GREEN,
PURPLE,
GOLD,
AMBER,
None,
None,
ORANGE,
BLUE,
BLACK,
JADE,
None,
None,
YELLOW,
CYAN,
WHITE,
AQUA,
None,
None,
RED,
TEAL,
MAGENTA,
PINK,
]
print(colors)
group = displayio.Group()
background, palette = adafruit_imageload.load(
"pyportal_setter.bmp", bitmap=displayio.Bitmap, palette=displayio.Palette
)
tile_grid = displayio.TileGrid(background, pixel_shader=palette)
group.append(tile_grid)
rect = Rect(0, 0, 160, 320, fill=0x000000)
group.append(rect)
print(len(group))
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
# PyPortal ESP32 Setup
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Set your Adafruit IO Username and Key in secrets.py
# (visit io.adafruit.com if you need to create an account,
# or if you need your Adafruit IO key.)
ADAFRUIT_IO_USER = secrets["aio_username"]
ADAFRUIT_IO_KEY = secrets["aio_key"]
# Create an instance of the Adafruit IO HTTP client
io = IO_HTTP(ADAFRUIT_IO_USER, ADAFRUIT_IO_KEY, wifi)
try:
# Get the 'neopixel' feed from Adafruit IO
neopixel_feed = io.get_feed("neopixel")
except AdafruitIO_RequestError:
neopixel_feed = io.create_new_feed("neopixel")
board.DISPLAY.show(group)
print("ready")
last_color = 257
last_index = 0
while True:
p = ts.touch_point
if p:
x = math.floor(p[0] / 80)
y = math.floor(p[1] / 80)
index = 6 * y + x
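# The 480x320 screen is divided into 80-pixel cells, giving a 6x4 grid whose
# cell index (0-23) lines up with the 24-entry colors list above; e.g. a touch
# at (250, 170) gives x=3, y=2 and index 15, which selects CYAN.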
# Used to prevent the touchscreen sending incorrect results
if last_index == index:
color = colors[index]
if colors[index]:
group[1].fill = color
if last_color != color:
color_str = "#{:06x}".format(color)
print(color_str)
io.send_data(neopixel_feed["key"], color_str)
last_color = color
last_index = index
time.sleep(0.1)
|
py | 1a3777a3acd222b0f06664c34ace25f86e817822 | from django.urls import reverse
import pytest
from pytest_django.asserts import assertTemplateUsed
from Post.views import get_post_by_query_text
class TestViews:
@pytest.mark.django_db
def test_view_posts_GET(self, client):
response = client.get(reverse('view posts'))
assert response.status_code == 200
assertTemplateUsed(response, 'Post/postList.html')
@pytest.mark.parametrize(
"query_text, expected_output",
[
("Sea", ["Dead Sea", "Sea of Galilee", "Eilat"]),
("beautiful", ["Dead Sea", "Eilat"]),
("nice", ["`En Yorqe`am"]),
("place", ["`En Yorqe`am", "Eilat", "Dead Sea"]),
("Tal aviv", []),
("", ["Dead Sea", "Sea of Galilee", "Eilat", "`En Yorqe`am"]),
],
)
@pytest.mark.django_db
def test_post_exists_after_query(self, query_text, expected_output):
posts = get_post_by_query_text(query_text)
assert all(post.nameOfLocation in expected_output for post in posts)
# assert all(course.location in expected_output for course in courses)
@pytest.mark.django_db
def test_verify_response_GET(self, client):
response = client.get(reverse('post_list_Search'), {'query_text': 'Galilee'})
posts_not_found = [b'Eilat', b'Dead Sea', b'`En Yorqe`am']
assert response.status_code == 200
assert b'Galilee' in response.content
assert all(post not in response.content for post in posts_not_found)
|
py | 1a3778486414af97113c607aae1b449e7bff1602 | # flake8: noqa F403
from forks.baselines.baselines.common.console_util import *
from forks.baselines.baselines.common.dataset import Dataset
from forks.baselines.baselines.common.math_util import *
from forks.baselines.baselines.common.misc_util import *
|
py | 1a3779b2c828a9e2f8801810d68d080d33708b35 | # -*- coding: utf-8 -*-
"""The base32 decoder object implementation."""
import base64
from dfvfs.encoding import decoder
from dfvfs.encoding import manager
from dfvfs.lib import definitions
from dfvfs.lib import errors
class Base32Decoder(decoder.Decoder):
"""Class that implements a base32 decoder using base64."""
ENCODING_METHOD = definitions.ENCODING_METHOD_BASE32
def Decode(self, encoded_data):
"""Decode the encoded data.
Args:
encoded_data: a byte string containing the encoded data.
Returns:
A tuple containing a byte string of the decoded data and
the remaining encoded data.
Raises:
BackEndError: if the base32 stream cannot be decoded.
"""
try:
decoded_data = base64.b32decode(encoded_data, casefold=False)
except TypeError as exception:
raise errors.BackEndError(
u'Unable to decode base32 stream with error: {0:s}.'.format(
exception))
return decoded_data, b''
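# Illustrative note (not part of the original module): base64.b32decode() does the
# actual work here, e.g. base64.b32decode(b'ORSXG5A=', casefold=False) returns
# b'test', so Decode(b'ORSXG5A=') would yield (b'test', b'').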
# Register the decoder with the encoding manager.
manager.EncodingManager.RegisterDecoder(Base32Decoder)
|
py | 1a377a60b155b0a5356afefd703a6de3b604b22c | import os
import pickle
import time
import torch
from torch.utils.data.dataset import Dataset
from filelock import FileLock
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class TextDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
def __init__(
self,
tokenizer: PreTrainedTokenizer,
file_path: str,
block_size: int,
overwrite_cache=False,
):
assert os.path.isfile(file_path), f"Input file path {file_path} not found"
block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(
directory,
"cached_lm_{}_{}_{}".format(
tokenizer.__class__.__name__,
str(block_size),
filename,
),
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
start = time.time()
with open(cached_features_file, "rb") as handle:
self.examples = pickle.load(handle)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
else:
logger.info(f"Creating features from dataset file at {directory}")
self.examples = []
with open(file_path, encoding="utf-8") as f:
text = f.read()
tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size
self.examples.append(
tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
)
# Note that we are losing the last truncated example here for the sake of simplicity (no padding)
# If your dataset is small, first you should look for a bigger one :-) and second you
# can change this behavior by adding (model specific) padding.
start = time.time()
with open(cached_features_file, "wb") as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
logger.info(
"Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> torch.Tensor:
return torch.tensor(self.examples[i], dtype=torch.long)
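# Illustrative usage sketch (not part of the original file; assumes a local
# "train.txt" and the GPT-2 tokenizer from this library):
#   from transformers import GPT2Tokenizer
#   tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
#   dataset = TextDataset(tokenizer=tokenizer, file_path="train.txt", block_size=128)
#   print(len(dataset), dataset[0].shape)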
class LineByLineTextDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
assert os.path.isfile(file_path), f"Input file path {file_path} not found"
# Here, we do not cache the features, operating under the assumption
# that we will soon use fast multithreaded tokenizers from the
# `tokenizers` repo everywhere =)
logger.info("Creating features from dataset file at %s", file_path)
with open(file_path, encoding="utf-8") as f:
lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
self.examples = batch_encoding["input_ids"]
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> torch.Tensor:
return torch.tensor(self.examples[i], dtype=torch.long)
class TextDatasetForNextSentencePrediction(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
def __init__(
self,
tokenizer: PreTrainedTokenizer,
file_path: str,
block_size: int,
overwrite_cache=False,
):
assert os.path.isfile(file_path), f"Input file path {file_path} not found"
block_size = block_size - tokenizer.num_special_tokens_to_add(pair=True)
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(
directory,
"cached_nsp_{}_{}_{}".format(
tokenizer.__class__.__name__,
str(block_size),
filename,
),
)
self.tokenizer = tokenizer
self.examples = []
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
#
# Example:
# I am very happy.
# Here is the second sentence.
#
# A new document.
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
start = time.time()
with open(cached_features_file, "rb") as handle:
self.examples = pickle.load(handle)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
else:
logger.info(f"Creating features from dataset file at {directory}")
self.examples = [[]]
with open(file_path, encoding="utf-8") as f:
while True:
line = f.readline()
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line and len(self.examples[-1]) != 0:
self.examples.append([])
tokens = tokenizer.tokenize(line)
tokens = tokenizer.convert_tokens_to_ids(tokens)
if tokens:
self.examples[-1].append(tokens)
start = time.time()
with open(cached_features_file, "wb") as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
logger.info(
"Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return self.examples[i]
|
py | 1a377a74d83629c57a239e5242ce6f9c652b8f3a | # Generated by Django 3.2.9 on 2022-01-25 12:37
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('Teacher', '0014_auto_20220125_1607'),
('Student', '0010_homeworkstudentcanadd'),
]
operations = [
migrations.CreateModel(
name='HomeworkStudentAdd',
fields=[
('title', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='HomeworkImages/')),
('created', models.DateTimeField(auto_now_add=True)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
('classroom', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Teacher.classroom')),
],
),
migrations.DeleteModel(
name='HomeworkStudentCanAdd',
),
]
|
py | 1a377a8e2756abe8cf7c5ab461c0a5056048253b | #!/usr/bin/env python3
""" for testing the module awsbuild """
import sys
import logging
from bao_config import AwsConfig
from bao_connector import AwsConnector
from bao_vpc import AwsVPC
from bao_target_group import AwsTargetGroup
def main():
""" main """
my_logfile = 'logs/awsbuild.log'
my_region = 'us-east-1'
#my_vpc = 'vpc-xxx'
my_tag = 'momo-us-east-1'
# setup logging
log_formatter = logging.Formatter("%(asctime)s %(filename)s %(name)s %(levelname)s %(message)s")
root_logger = logging.getLogger()
file_handler = logging.FileHandler(my_logfile)
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)
config = AwsConfig(cfgdir='configs',\
cfgfile='target_group.yaml',\
cfgkey='target_groups')
conn = AwsConnector(credentials=config.settings['aws_cfg']['credentials'], region=my_region)
aws_conn = conn.get_all_conn()
if not aws_conn:
print('error AwsConnector\n')
sys.exit(-1)
vpc_conn = AwsVPC(aws_conn=aws_conn, tag=my_tag)
if not vpc_conn:
print('error AwsVPC\n')
sys.exit(-1)
vpc_id = vpc_conn.get_vpc_id()
target_grp_conn = AwsTargetGroup(aws_conn=aws_conn, target_group=config.settings['target_groups'], \
vpc_id=vpc_id, tag='tau-dev' \
)
if not target_grp_conn:
print('error AwsTargetGroup\n')
sys.exit(-1)
target_grp_conn.create()
if __name__ == '__main__':
main()
|
py | 1a377a99df25b517bfe972a5cb8dcdb6f975cd13 | #!/usr/bin/env python
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='PLTable',
version='1.0.2',
license='BSD (3 clause)',
description='Python library for easily displaying tabular data in a visually appealing text table format',
long_description=long_description,
long_description_content_type='text/markdown',
author='Luke Maurits',
author_email='[email protected]',
maintainer='Plato Mavropoulos',
url='https://github.com/platomav/PLTable',
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Text Processing'
],
) |
py | 1a377beb2020a5fedef02defe5a20236019089bb | from flask import Response, request, stream_with_context, abort
from fHDHR.exceptions import TunerError
class Tuner():
endpoints = ['/tuner<tuner_number>/<channel>']
endpoint_name = "tuner"
def __init__(self, fhdhr):
self.fhdhr = fhdhr
def __call__(self, tuner_number, channel, *args):
return self.get(tuner_number, channel, *args)
def get(self, tuner_number, channel, *args):
full_url = request.url
if channel.startswith("v"):
channel_number = channel.replace('v', '')
elif channel.startswith("ch"):
channel_freq = channel.replace('ch', '').split("-")[0]
subchannel = 0
if "-" in channel:
subchannel = channel.replace('ch', '').split("-")[1]
abort(501, "Not Implemented %s-%s" % (str(channel_freq), str(subchannel)))
if channel_number not in list(self.fhdhr.device.channels.list.keys()):
response = Response("Not Found", status=404)
response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
abort(response)
method = request.args.get('method', default=self.fhdhr.config.dict["fhdhr"]["stream_type"], type=str)
duration = request.args.get('duration', default=0, type=int)
transcode = request.args.get('transcode', default=None, type=str)
valid_transcode_types = [None, "heavy", "mobile", "internet720", "internet480", "internet360", "internet240"]
if transcode not in valid_transcode_types:
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = "802 - Unknown Transcode Profile"
abort(response)
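# Bundle the request parameters that the tuner/stream layer consumes below.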
stream_args = {
"channel": channel_number,
"method": method,
"duration": duration,
"transcode": transcode,
"accessed": full_url,
}
try:
tunernum = self.fhdhr.device.tuners.tuner_grab(tuner_number)
except TunerError as e:
self.fhdhr.logger.info("A %s stream request for channel %s was rejected due to %s"
% (stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
abort(response)
tuner = self.fhdhr.device.tuners.tuners[int(tunernum)]
try:
stream_args = self.fhdhr.device.tuners.get_stream_info(stream_args)
except TunerError as e:
self.fhdhr.logger.info("A %s stream request for channel %s was rejected due to %s"
% (stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
tuner.close()
abort(response)
self.fhdhr.logger.info("Tuner #" + str(tunernum) + " to be used for stream.")
tuner.set_status(stream_args)
if stream_args["method"] == "direct":
return Response(tuner.get_stream(stream_args, tuner), content_type=stream_args["content_type"], direct_passthrough=True)
elif stream_args["method"] in ["ffmpeg", "vlc"]:
return Response(stream_with_context(tuner.get_stream(stream_args, tuner)), mimetype=stream_args["content_type"])
"""
try:
if stream_args["method"] == "direct":
return Response(tuner.get_stream(stream_args, tuner), content_type=stream_args["content_type"], direct_passthrough=True)
elif stream_args["method"] in ["ffmpeg", "vlc"]:
return Response(stream_with_context(tuner.get_stream(stream_args, tuner)), mimetype=stream_args["content_type"])
except TunerError as e:
tuner.close()
self.fhdhr.logger.info("A %s stream request for channel %s failed due to %s"
% (stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
abort(response)
"""
|
py | 1a377d0cbd48c5db662537f2bdc62042554f1a9f | """
Mascaret module
===============
This module uses two databases constituted of snapshots simulated using the Mascaret
flow solver on the Garonne river.
"""
import logging
import numpy as np
from .db_generic import DbGeneric
from .data import mascaret
class db_Mascaret(DbGeneric):
"""Mascaret class."""
logger = logging.getLogger(__name__)
def __init__(self, fname=None, multizone=False):
"""Mascaret database function.
From a given set of input parameters, it gets the closest point from
the database. Two cases are available: one with 3 Ks, 1 Q and 463 H (5000 samples),
and one with 1 Ks, 1 Q and 14 H (100000 samples).
:param bool multizone: Use only one global Ks or 3 Ks.
:param list fname: list of the input and the output file name.
"""
dataset = mascaret(multizone, fname)
super(db_Mascaret, self).__init__(space=dataset.space, data=dataset.data)
self.x = [float(label) for label in dataset.flabels]
if self.d_in == 2:
self.s_second_full = np.array([[[0., -0.03020037], [-0.03020037, 0.]],
[[0., -0.03881756], [-0.03881756, 0.]],
[[0., -0.04251338], [-0.04251338, 0.]],
[[0., -0.0426679], [-0.0426679, 0.]],
[[0., -0.04966869], [-0.04966869, 0.]],
[[0., -0.03019764], [-0.03019764, 0.]],
[[0., -0.02242943], [-0.02242943, 0.]],
[[0., -0.02222612], [-0.02222612, 0.]],
[[0., -0.02279468], [-0.02279468, 0.]],
[[0., -0.02418406], [-0.02418406, 0.]],
[[0., -0.0261341], [-0.0261341, 0.]],
[[0., -0.03064743], [-0.03064743, 0.]],
[[0., -0.03868296], [-0.03868296, 0.]],
[[0., 0.00282709], [0.00282709, 0.]]])
self.s_first_full = np.array([[0.10107270978302792, 0.8959930919247889],
[0.18120319110283745, 0.8273998127843324],
[0.23451964408156595, 0.7800462867106654],
[0.23685958750154648, 0.7779717432101445],
[0.4098437677793702, 0.6191189440935079],
[0.7751331218908732, 0.2495823405408702],
[0.8742876967854374, 0.1451778693930793],
[0.8742603876671973, 0.14530386765866726],
[0.8722028358385836, 0.14773687242711417],
[0.8714371617522463, 0.14967046813425272],
[0.8579152536671296, 0.1656617547600983],
[0.8146262099773994, 0.21113331675809266],
[0.7333161075183993, 0.2961806754581718],
[-0.0009836372837096455, 0.9692830624285731]])
self.s_total_full = np.array([[0.1116597072943773, 0.875221090352921],
[0.1965660368992014, 0.7969560350458335],
[0.2532846779268521, 0.7456644739879672],
[0.25573942637517416, 0.7433651143730953],
[0.4359824833681346, 0.5741773108039986],
[0.8071753364503617, 0.21183223499031062],
[0.9023827296735317, 0.11319757917424246],
[0.902784341465201, 0.11326732390157283],
[0.9005785778706416, 0.11547577068029803],
[0.8993969774433382, 0.11685853224140431],
[0.8849832956790847, 0.1308629902137176],
[0.8436987264754154, 0.1736232878097748],
[0.7644789560062502, 0.25458734925061216],
[-0.0017544241163383715, 0.9730845491814776]])
self.s_second = np.array([[0., 0.01882222], [0.01882222, 0.]])
self.s_first = np.array([0.75228948, 0.21880863])
self.s_total = np.array([0.76464851, 0.24115337])
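# Illustrative usage sketch (not part of the original module; assumes the bundled
# sample data can be loaded without passing explicit file names):
#   db = db_Mascaret(multizone=False)
#   print(db.x[:5])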
|
py | 1a377da0e22f705d779d94eafcb9155a3cc7b4d5 | # import static logger and create shortcut function
from logger import Logger
log = Logger.log
class FakePlants:
""" FakePlants JSON for use in local testing/debugging. """
@staticmethod
def get_fake_plants():
log("Loading FAKE PLANTS.", "success", title="FakePlants::get_fake_plants")
return [
{
"id": 157884,
"created_at": "2019-07-29T10:41:01.765Z",
"updated_at": "2019-08-12T00:00:10.826Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-12 00:00:09.818638",
},
"x": 2000,
"y": 500,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:16:03.233Z",
"radius": 25.0,
},
{
"id": 157886,
"created_at": "2019-07-29T10:41:06.469Z",
"updated_at": "2019-08-12T00:00:16.349Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-12 00:00:14.962869",
},
"x": 220,
"y": 330,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:16:09.145Z",
"radius": 25.0,
},
{
"id": 157903,
"created_at": "2019-07-29T10:43:39.831Z",
"updated_at": "2019-08-12T00:00:21.302Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-12 00:00:20.766326",
},
"x": 220,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:16:14.988Z",
"radius": 25.0,
},
{
"id": 158387,
"created_at": "2019-07-31T00:35:17.660Z",
"updated_at": "2019-08-12T00:00:26.609Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-12 00:00:26.046715",
},
"x": 270,
"y": 570,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:16:21.090Z",
"radius": 25.0,
},
{
"id": 157862,
"created_at": "2019-07-29T10:15:58.553Z",
"updated_at": "2019-08-12T00:00:32.290Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-12 00:00:31.718903",
},
"x": 270,
"y": 730,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:16:28.732Z",
"radius": 25.0,
},
{
"id": 157910,
"created_at": "2019-07-29T10:48:54.507Z",
"updated_at": "2019-08-12T00:00:38.029Z",
"device_id": 5520,
"name": "Arugula",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-12 00:00:37.487245",
},
"x": 270,
"y": 900,
"z": 0,
"openfarm_slug": "arugula",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:16:35.334Z",
"radius": 25.0,
},
{
"id": 157062,
"created_at": "2019-07-26T03:32:48.654Z",
"updated_at": "2019-08-12T00:00:51.742Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-12 00:00:51.155648",
},
"x": 280,
"y": 110,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:16:49.989Z",
"radius": 25.0,
},
{
"id": 157891,
"created_at": "2019-07-29T10:41:23.208Z",
"updated_at": "2019-08-11T23:41:12.403Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:41:07.564078",
},
"x": 280,
"y": 380,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:16:57.927Z",
"radius": 25.0,
},
{
"id": 157902,
"created_at": "2019-07-29T10:43:38.353Z",
"updated_at": "2019-08-11T23:19:32.630Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:19:31.708659",
},
"x": 340,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:17:03.735Z",
"radius": 25.0,
},
{
"id": 157927,
"created_at": "2019-07-29T10:54:03.224Z",
"updated_at": "2019-08-11T23:19:43.506Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:19:42.157493",
},
"x": 370,
"y": 690,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:17:11.691Z",
"radius": 25.0,
},
{
"id": 157916,
"created_at": "2019-07-29T10:50:03.648Z",
"updated_at": "2019-08-11T23:19:50.840Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:19:49.457818",
},
"x": 370,
"y": 790,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:17:17.991Z",
"radius": 25.0,
},
{
"id": 157877,
"created_at": "2019-07-29T10:37:01.503Z",
"updated_at": "2019-08-11T23:20:06.911Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:20:06.374074",
},
"x": 380,
"y": 280,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:17:28.934Z",
"radius": 25.0,
},
{
"id": 157900,
"created_at": "2019-07-29T10:43:34.235Z",
"updated_at": "2019-08-11T23:20:14.975Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:20:14.308913",
},
"x": 420,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:17:36.580Z",
"radius": 25.0,
},
{
"id": 158386,
"created_at": "2019-07-31T00:35:16.022Z",
"updated_at": "2019-08-11T23:20:24.246Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:20:21.705610",
},
"x": 470,
"y": 570,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:17:44.263Z",
"radius": 25.0,
},
{
"id": 157922,
"created_at": "2019-07-29T10:51:14.621Z",
"updated_at": "2019-08-11T23:20:32.442Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:20:31.605609",
},
"x": 470,
"y": 730,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:17:51.476Z",
"radius": 25.0,
},
{
"id": 157911,
"created_at": "2019-07-29T10:48:57.702Z",
"updated_at": "2019-08-11T23:20:41.519Z",
"device_id": 5520,
"name": "Arugula",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:20:40.099783",
},
"x": 470,
"y": 900,
"z": 0,
"openfarm_slug": "arugula",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:17:59.027Z",
"radius": 25.0,
},
{
"id": 157063,
"created_at": "2019-07-26T03:32:48.663Z",
"updated_at": "2019-08-11T23:21:04.183Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:21:03.566111",
},
"x": 480,
"y": 110,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:18:13.685Z",
"radius": 25.0,
},
{
"id": 157887,
"created_at": "2019-07-29T10:41:15.967Z",
"updated_at": "2019-08-11T23:21:15.075Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:21:14.158671",
},
"x": 480,
"y": 380,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:18:21.584Z",
"radius": 25.0,
},
{
"id": 157899,
"created_at": "2019-07-29T10:43:32.981Z",
"updated_at": "2019-08-11T23:21:22.615Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:21:21.797685",
},
"x": 540,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:18:27.708Z",
"radius": 25.0,
},
{
"id": 157928,
"created_at": "2019-07-29T10:54:05.025Z",
"updated_at": "2019-08-11T23:21:32.931Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:21:32.117416",
},
"x": 570,
"y": 690,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:18:35.289Z",
"radius": 25.0,
},
{
"id": 157917,
"created_at": "2019-07-29T10:50:11.635Z",
"updated_at": "2019-08-11T23:21:39.668Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:21:38.851899",
},
"x": 570,
"y": 790,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:18:41.080Z",
"radius": 25.0,
},
{
"id": 157878,
"created_at": "2019-07-29T10:37:03.200Z",
"updated_at": "2019-08-11T23:21:57.209Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:21:55.183215",
},
"x": 580,
"y": 280,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:18:52.084Z",
"radius": 25.0,
},
{
"id": 157898,
"created_at": "2019-07-29T10:43:31.611Z",
"updated_at": "2019-08-11T23:22:07.271Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:22:04.669530",
},
"x": 620,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:19:00.207Z",
"radius": 25.0,
},
{
"id": 158385,
"created_at": "2019-07-31T00:35:12.699Z",
"updated_at": "2019-08-11T23:22:14.514Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:22:13.966250",
},
"x": 670,
"y": 570,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:19:07.407Z",
"radius": 25.0,
},
{
"id": 157923,
"created_at": "2019-07-29T10:51:16.578Z",
"updated_at": "2019-08-11T23:22:22.654Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:22:21.929243",
},
"x": 670,
"y": 730,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:19:13.979Z",
"radius": 25.0,
},
{
"id": 157912,
"created_at": "2019-07-29T10:48:59.887Z",
"updated_at": "2019-08-11T23:22:31.351Z",
"device_id": 5520,
"name": "Arugula",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:22:30.301043",
},
"x": 670,
"y": 900,
"z": 0,
"openfarm_slug": "arugula",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:19:20.740Z",
"radius": 25.0,
},
{
"id": 157873,
"created_at": "2019-07-29T10:36:51.917Z",
"updated_at": "2019-08-11T23:22:55.576Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:22:53.395691",
},
"x": 680,
"y": 110,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:19:35.103Z",
"radius": 25.0,
},
{
"id": 157888,
"created_at": "2019-07-29T10:41:17.482Z",
"updated_at": "2019-08-11T23:23:11.244Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:23:05.560596",
},
"x": 680,
"y": 380,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:19:43.006Z",
"radius": 25.0,
},
{
"id": 157897,
"created_at": "2019-07-29T10:43:29.483Z",
"updated_at": "2019-08-11T23:23:16.661Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:23:15.937858",
},
"x": 740,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:19:48.907Z",
"radius": 25.0,
},
{
"id": 157929,
"created_at": "2019-07-29T10:54:07.236Z",
"updated_at": "2019-08-11T23:23:26.836Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:23:26.177171",
},
"x": 770,
"y": 690,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:19:56.615Z",
"radius": 25.0,
},
{
"id": 157918,
"created_at": "2019-07-29T10:50:13.478Z",
"updated_at": "2019-08-11T23:23:34.054Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:23:32.794679",
},
"x": 770,
"y": 790,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:20:02.352Z",
"radius": 25.0,
},
{
"id": 157879,
"created_at": "2019-07-29T10:37:04.760Z",
"updated_at": "2019-08-11T23:23:50.893Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:23:49.589157",
},
"x": 780,
"y": 280,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:20:14.170Z",
"radius": 25.0,
},
{
"id": 158924,
"created_at": "2019-08-04T09:59:14.300Z",
"updated_at": "2019-08-11T23:24:03.064Z",
"device_id": 5520,
"name": "Red Carrot",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:24:00.840587"},
"x": 800,
"y": 550,
"z": 0,
"openfarm_slug": "red-carrot",
"plant_stage": "planned",
"planted_at": None,
"radius": 25.0,
},
{
"id": 157896,
"created_at": "2019-07-29T10:43:25.792Z",
"updated_at": "2019-08-11T23:24:20.544Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:24:09.293269",
},
"x": 820,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:20:20.727Z",
"radius": 25.0,
},
{
"id": 158388,
"created_at": "2019-07-31T00:46:28.963Z",
"updated_at": "2019-08-11T23:41:40.968Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:41:29.377036",
},
"x": 850,
"y": 620,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:20:27.891Z",
"radius": 25.0,
},
{
"id": 157924,
"created_at": "2019-07-29T10:51:18.053Z",
"updated_at": "2019-08-11T23:41:52.531Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:41:47.163603",
},
"x": 870,
"y": 730,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:20:33.787Z",
"radius": 25.0,
},
{
"id": 157913,
"created_at": "2019-07-29T10:49:01.406Z",
"updated_at": "2019-08-11T23:42:01.570Z",
"device_id": 5520,
"name": "Arugula",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:42:00.120073",
},
"x": 870,
"y": 900,
"z": 0,
"openfarm_slug": "arugula",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:20:41.413Z",
"radius": 25.0,
},
{
"id": 157874,
"created_at": "2019-07-29T10:36:54.133Z",
"updated_at": "2019-08-11T23:42:27.607Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:42:23.606761",
},
"x": 880,
"y": 110,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:20:56.090Z",
"radius": 25.0,
},
{
"id": 157889,
"created_at": "2019-07-29T10:41:19.169Z",
"updated_at": "2019-08-11T23:42:55.385Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:42:37.453925",
},
"x": 880,
"y": 380,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:21:04.128Z",
"radius": 25.0,
},
{
"id": 158389,
"created_at": "2019-07-31T00:46:31.398Z",
"updated_at": "2019-08-11T23:43:02.864Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:43:02.274462",
},
"x": 880,
"y": 520,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:21:10.538Z",
"radius": 25.0,
},
{
"id": 157895,
"created_at": "2019-07-29T10:43:19.777Z",
"updated_at": "2019-08-11T23:43:10.522Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:43:08.175133",
},
"x": 940,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:21:18.161Z",
"radius": 25.0,
},
{
"id": 158302,
"created_at": "2019-07-29T22:56:07.166Z",
"updated_at": "2019-08-11T23:43:20.217Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:43:17.901248",
},
"x": 960,
"y": 600,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:21:24.703Z",
"radius": 25.0,
},
{
"id": 157919,
"created_at": "2019-07-29T10:50:15.091Z",
"updated_at": "2019-08-11T23:43:30.935Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:43:28.270291",
},
"x": 970,
"y": 790,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:21:31.688Z",
"radius": 25.0,
},
{
"id": 157880,
"created_at": "2019-07-29T10:37:06.111Z",
"updated_at": "2019-08-11T23:43:49.518Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:43:46.455966",
},
"x": 980,
"y": 280,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:21:42.543Z",
"radius": 25.0,
},
{
"id": 158293,
"created_at": "2019-07-29T22:55:05.668Z",
"updated_at": "2019-08-11T23:43:59.326Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:43:58.533894",
},
"x": 980,
"y": 510,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:21:50.319Z",
"radius": 25.0,
},
{
"id": 157930,
"created_at": "2019-07-29T10:54:08.914Z",
"updated_at": "2019-08-11T23:44:07.940Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:44:07.189389",
},
"x": 980,
"y": 690,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:21:57.034Z",
"radius": 25.0,
},
{
"id": 157894,
"created_at": "2019-07-29T10:43:14.369Z",
"updated_at": "2019-08-11T23:44:18.161Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:44:17.472200",
},
"x": 1020,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:22:04.708Z",
"radius": 25.0,
},
{
"id": 158301,
"created_at": "2019-07-29T22:56:02.069Z",
"updated_at": "2019-08-11T23:44:25.365Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:44:24.854660",
},
"x": 1030,
"y": 570,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:22:11.349Z",
"radius": 25.0,
},
{
"id": 157925,
"created_at": "2019-07-29T10:51:19.509Z",
"updated_at": "2019-08-11T23:44:37.385Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:44:32.785404",
},
"x": 1070,
"y": 730,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:22:17.832Z",
"radius": 25.0,
},
{
"id": 157914,
"created_at": "2019-07-29T10:49:03.187Z",
"updated_at": "2019-08-11T23:44:50.821Z",
"device_id": 5520,
"name": "Arugula",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:44:44.978458",
},
"x": 1070,
"y": 900,
"z": 0,
"openfarm_slug": "arugula",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:22:25.267Z",
"radius": 25.0,
},
{
"id": 157875,
"created_at": "2019-07-29T10:36:55.847Z",
"updated_at": "2019-08-11T23:45:15.049Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:45:14.360395",
},
"x": 1080,
"y": 110,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:22:42.793Z",
"radius": 25.0,
},
{
"id": 157890,
"created_at": "2019-07-29T10:41:20.941Z",
"updated_at": "2019-08-11T23:45:25.570Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:45:24.932770",
},
"x": 1080,
"y": 380,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:23:00.629Z",
"radius": 25.0,
},
{
"id": 158294,
"created_at": "2019-07-29T22:55:10.332Z",
"updated_at": "2019-08-11T23:45:33.500Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:45:32.223822",
},
"x": 1080,
"y": 510,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:23:09.488Z",
"radius": 25.0,
},
{
"id": 158303,
"created_at": "2019-07-29T22:56:13.059Z",
"updated_at": "2019-08-11T23:45:41.838Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:45:40.936701",
},
"x": 1080,
"y": 640,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:23:19.405Z",
"radius": 25.0,
},
{
"id": 158300,
"created_at": "2019-07-29T22:56:00.571Z",
"updated_at": "2019-08-11T23:45:47.503Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:45:46.838074",
},
"x": 1130,
"y": 570,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:23:29.996Z",
"radius": 25.0,
},
{
"id": 157905,
"created_at": "2019-07-29T10:47:18.363Z",
"updated_at": "2019-08-11T23:45:54.866Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:45:54.176231",
},
"x": 1140,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:23:38.721Z",
"radius": 25.0,
},
{
"id": 157931,
"created_at": "2019-07-29T10:54:10.383Z",
"updated_at": "2019-08-11T23:46:04.889Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:46:04.320081",
},
"x": 1170,
"y": 690,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:23:48.489Z",
"radius": 25.0,
},
{
"id": 157920,
"created_at": "2019-07-29T10:50:16.487Z",
"updated_at": "2019-08-11T23:46:11.323Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:46:10.764451",
},
"x": 1170,
"y": 790,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:23:55.964Z",
"radius": 25.0,
},
{
"id": 157881,
"created_at": "2019-07-29T10:37:07.845Z",
"updated_at": "2019-08-11T23:46:27.370Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:46:26.789071",
},
"x": 1180,
"y": 280,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:24:07.579Z",
"radius": 25.0,
},
{
"id": 158295,
"created_at": "2019-07-29T22:55:12.866Z",
"updated_at": "2019-08-11T23:46:37.193Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:46:36.391515",
},
"x": 1180,
"y": 510,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:24:15.809Z",
"radius": 25.0,
},
{
"id": 157906,
"created_at": "2019-07-29T10:47:19.292Z",
"updated_at": "2019-08-11T23:46:43.327Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {
"planted_at": "31-07-2019",
"last_watering_at": "2019-08-11 23:46:42.192686",
},
"x": 1220,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:24:27.984Z",
"radius": 25.0,
},
{
"id": 158299,
"created_at": "2019-07-29T22:55:57.898Z",
"updated_at": "2019-08-11T23:46:50.611Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:46:49.985861"},
"x": 1230,
"y": 570,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:27:10.319Z",
"radius": 25.0,
},
{
"id": 157926,
"created_at": "2019-07-29T10:51:21.285Z",
"updated_at": "2019-08-11T23:46:58.690Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:46:58.005311"},
"x": 1270,
"y": 730,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-07-31T00:37:18.229Z",
"radius": 25.0,
},
{
"id": 157915,
"created_at": "2019-07-29T10:49:05.664Z",
"updated_at": "2019-08-11T23:47:12.221Z",
"device_id": 5520,
"name": "Arugula",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:47:06.242892"},
"x": 1270,
"y": 900,
"z": 0,
"openfarm_slug": "arugula",
"plant_stage": "planted",
"planted_at": "2019-07-31T03:00:54.335Z",
"radius": 25.0,
},
{
"id": 157876,
"created_at": "2019-07-29T10:36:57.354Z",
"updated_at": "2019-08-11T23:47:35.432Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:47:34.248559"},
"x": 1280,
"y": 110,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-07-30T23:36:18.712Z",
"radius": 25.0,
},
{
"id": 157904,
"created_at": "2019-07-29T10:47:16.515Z",
"updated_at": "2019-08-11T23:47:46.757Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:47:45.320576"},
"x": 1280,
"y": 380,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:27:04.208Z",
"radius": 25.0,
},
{
"id": 158296,
"created_at": "2019-07-29T22:55:15.513Z",
"updated_at": "2019-08-11T23:47:54.703Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:47:53.446105"},
"x": 1280,
"y": 510,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:26:58.244Z",
"radius": 25.0,
},
{
"id": 158304,
"created_at": "2019-07-29T22:56:15.959Z",
"updated_at": "2019-08-11T23:48:03.347Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:48:01.366383"},
"x": 1280,
"y": 640,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:27:27.556Z",
"radius": 25.0,
},
{
"id": 158298,
"created_at": "2019-07-29T22:55:54.740Z",
"updated_at": "2019-08-11T23:48:10.152Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:48:09.353194"},
"x": 1330,
"y": 570,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:26:51.498Z",
"radius": 25.0,
},
{
"id": 157908,
"created_at": "2019-07-29T10:47:22.246Z",
"updated_at": "2019-08-11T23:48:17.918Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:48:16.813059"},
"x": 1340,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:26:45.844Z",
"radius": 25.0,
},
{
"id": 157932,
"created_at": "2019-07-29T10:54:12.378Z",
"updated_at": "2019-08-11T23:48:28.043Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:48:27.324033"},
"x": 1370,
"y": 690,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-07-31T00:37:16.821Z",
"radius": 25.0,
},
{
"id": 157921,
"created_at": "2019-07-29T10:50:18.007Z",
"updated_at": "2019-08-11T23:48:34.525Z",
"device_id": 5520,
"name": "Beets",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:48:33.962492"},
"x": 1370,
"y": 790,
"z": 0,
"openfarm_slug": "beets",
"plant_stage": "planted",
"planted_at": "2019-07-31T00:37:15.981Z",
"radius": 25.0,
},
{
"id": 157882,
"created_at": "2019-07-29T10:37:37.226Z",
"updated_at": "2019-08-11T23:48:50.913Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:48:50.020104"},
"x": 1380,
"y": 280,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-07-30T23:59:02.845Z",
"radius": 25.0,
},
{
"id": 158297,
"created_at": "2019-07-29T22:55:17.367Z",
"updated_at": "2019-08-11T23:49:00.663Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:48:59.923415"},
"x": 1380,
"y": 510,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planned",
"planted_at": None,
"radius": 25.0,
},
{
"id": 157909,
"created_at": "2019-07-29T10:47:23.057Z",
"updated_at": "2019-08-11T23:49:06.273Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:49:05.705351"},
"x": 1420,
"y": 440,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:26:27.053Z",
"radius": 25.0,
},
{
"id": 158305,
"created_at": "2019-07-29T22:58:10.330Z",
"updated_at": "2019-08-11T23:49:13.756Z",
"device_id": 5520,
"name": "Cherry Belle Radish",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:49:12.925629"},
"x": 1430,
"y": 570,
"z": 0,
"openfarm_slug": "cherry-belle-radish",
"plant_stage": "planted",
"planted_at": "2019-08-01T20:26:20.036Z",
"radius": 25.0,
},
{
"id": 158306,
"created_at": "2019-07-29T23:00:24.712Z",
"updated_at": "2019-08-11T23:49:25.964Z",
"device_id": 5520,
"name": "Arugula",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 23:49:25.090320"},
"x": 1470,
"y": 900,
"z": 0,
"openfarm_slug": "arugula",
"plant_stage": "planted",
"planted_at": "2019-07-31T03:00:55.008Z",
"radius": 25.0,
},
{
"id": 158307,
"created_at": "2019-07-29T23:00:44.093Z",
"updated_at": "2019-08-11T20:33:36.645Z",
"device_id": 5520,
"name": "Spinach",
"pointer_type": "Plant",
"meta": {"last_watering_at": "2019-08-11 20:33:36.044854"},
"x": 1480,
"y": 110,
"z": 0,
"openfarm_slug": "spinach",
"plant_stage": "planted",
"planted_at": "2019-07-30T23:59:01.902Z",
"radius": 25.0,
},
]
|
py | 1a377e11e958014b0fa92daed354701f254db566 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to the `/twist_cmd` message, which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important, or refer to the document for the list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator it's enabled all the time, in the real car that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle-specific values (like vehicle_mass,
wheel_base, etc.) should not be altered in these files.
We have also provided some reference implementations for a PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish them on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
brake_deadband = rospy.get_param('~brake_deadband', .1)
decel_limit = rospy.get_param('~decel_limit', -5)
accel_limit = rospy.get_param('~accel_limit', 1.)
wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
wheel_base = rospy.get_param('~wheel_base', 2.8498)
steer_ratio = rospy.get_param('~steer_ratio', 14.8)
max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
self.controller = Controller(vehicle_mass=vehicle_mass,
fuel_capacity=fuel_capacity,
brake_deadband=brake_deadband,
decel_limit=decel_limit,
accel_limit=accel_limit,
wheel_radius=wheel_radius,
wheel_base=wheel_base,
steer_ratio=steer_ratio,
max_lat_accel=max_lat_accel,
max_steer_angle=max_steer_angle)
# TODO: Subscribe to all the topics you need to
rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
self.current_vel = None
self.curr_ang_vel = None
self.dbw_enabled = None
self.linear_vel = None
self.angular_vel = None
self.throttle = self.steering = self.brake = None
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd', SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd', ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd', BrakeCmd, queue_size=1)
self.loop()
def loop(self):
rate = rospy.Rate(50) # 50Hz
while not rospy.is_shutdown():
# You should only publish the control commands if dbw is enabled
if None not in (self.current_vel, self.linear_vel, self.angular_vel):
self.throttle, self.brake, self.steering = self.controller.control(self.current_vel,
self.dbw_enabled, self.linear_vel, self.angular_vel)
if self.dbw_enabled:
self.publish(self.throttle, self.brake, self.steering)
rate.sleep()
def dbw_enabled_cb(self, msg):
        # msg type: Bool; the actual flag is carried in the message's data field
        self.dbw_enabled = msg.data
def twist_cb(self, msg):
# msg type: TwistStamped
self.linear_vel = msg.twist.linear.x
self.angular_vel = msg.twist.angular.z
def velocity_cb(self, msg):
# msg type: TwistStamped
self.current_vel = msg.twist.linear.x
self.curr_ang_vel = msg.twist.angular.z
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = True
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
scmd.enable = True
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = True
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
if __name__ == '__main__':
DBWNode()
|
py | 1a377e61edb2eea036bf0b730e26b646b1043034 | #!/usr/bin/env python3
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import namedtuple
import textwrap
import sys
SHARD_FILENAME_TEMPLATE = "test/mjsunit/compiler/inline-exception-{shard}.js"
# Generates 2 files. Found by trial and error.
SHARD_SIZE = 97
PREAMBLE = """
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --turbo --no-always-opt
// This test file was generated by tools/gen-inlining-tests.py .
// Global variables
var deopt = undefined; // either true or false
var counter = 0;
function resetState() {
counter = 0;
}
function warmUp(f) {
try {
f();
} catch (ex) {
// ok
}
try {
f();
} catch (ex) {
// ok
}
}
function resetOptAndAssertResultEquals(expected, f) {
warmUp(f);
resetState();
// %DebugPrint(f);
eval("'dont optimize this function itself please, but do optimize f'");
%OptimizeFunctionOnNextCall(f);
assertEquals(expected, f());
}
function resetOptAndAssertThrowsWith(expected, f) {
warmUp(f);
resetState();
// %DebugPrint(f);
eval("'dont optimize this function itself please, but do optimize f'");
%OptimizeFunctionOnNextCall(f);
try {
var result = f();
fail("resetOptAndAssertThrowsWith",
"exception: " + expected,
"result: " + result);
} catch (ex) {
assertEquals(expected, ex);
}
}
function increaseAndReturn15() {
if (deopt) %DeoptimizeFunction(f);
counter++;
return 15;
}
function increaseAndThrow42() {
if (deopt) %DeoptimizeFunction(f);
counter++;
throw 42;
}
function increaseAndReturn15_noopt_inner() {
if (deopt) %DeoptimizeFunction(f);
counter++;
return 15;
}
%NeverOptimizeFunction(increaseAndReturn15_noopt_inner);
function increaseAndThrow42_noopt_inner() {
if (deopt) %DeoptimizeFunction(f);
counter++;
throw 42;
}
%NeverOptimizeFunction(increaseAndThrow42_noopt_inner);
// Alternative 1
function returnOrThrow(doReturn) {
if (doReturn) {
return increaseAndReturn15();
} else {
return increaseAndThrow42();
}
}
// Alternative 2
function increaseAndReturn15_calls_noopt() {
return increaseAndReturn15_noopt_inner();
}
function increaseAndThrow42_calls_noopt() {
return increaseAndThrow42_noopt_inner();
}
// Alternative 3.
// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts
// as the other one.
function invertFunctionCall(f) {
var result;
try {
result = f();
} catch (ex) {
return ex - 27;
}
throw result + 27;
}
// Alternative 4: constructor
function increaseAndStore15Constructor() {
if (deopt) %DeoptimizeFunction(f);
++counter;
this.x = 15;
}
function increaseAndThrow42Constructor() {
if (deopt) %DeoptimizeFunction(f);
++counter;
this.x = 42;
throw this.x;
}
// Alternative 5: property
var magic = {};
Object.defineProperty(magic, 'prop', {
get: function () {
if (deopt) %DeoptimizeFunction(f);
return 15 + 0 * ++counter;
},
set: function(x) {
// argument should be 37
if (deopt) %DeoptimizeFunction(f);
counter -= 36 - x; // increments counter
throw 42;
}
})
// Generate type feedback.
assertEquals(15, increaseAndReturn15_calls_noopt());
assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42);
assertEquals(15, (new increaseAndStore15Constructor()).x);
assertThrowsEquals(function() {
return (new increaseAndThrow42Constructor()).x;
},
42);
function runThisShard() {
""".strip()
def booltuples(n):
"""booltuples(2) yields 4 tuples: (False, False), (False, True),
(True, False), (True, True)."""
assert isinstance(n, int)
if n <= 0:
yield ()
else:
for initial in booltuples(n-1):
yield initial + (False,)
yield initial + (True,)
def fnname(flags):
assert len(FLAGLETTERS) == len(flags)
return "f_" + ''.join(
FLAGLETTERS[i] if b else '_'
for (i, b) in enumerate(flags))
NUM_TESTS_PRINTED = 0
NUM_TESTS_IN_SHARD = 0
def printtest(flags):
"""Print a test case. Takes a couple of boolean flags, on which the
printed Javascript code depends."""
assert all(isinstance(flag, bool) for flag in flags)
# The alternative flags are in reverse order so that if we take all possible
# tuples, ordered lexicographically from false to true, we get first the
# default, then alternative 1, then 2, etc.
(
alternativeFn5, # use alternative #5 for returning/throwing:
# return/throw using property
alternativeFn4, # use alternative #4 for returning/throwing:
# return/throw using constructor
alternativeFn3, # use alternative #3 for returning/throwing:
# return/throw indirectly, based on function argument
alternativeFn2, # use alternative #2 for returning/throwing:
# return/throw indirectly in unoptimized code,
# no branching
alternativeFn1, # use alternative #1 for returning/throwing:
# return/throw indirectly, based on boolean arg
tryThrows, # in try block, call throwing function
tryReturns, # in try block, call returning function
tryFirstReturns, # in try block, returning goes before throwing
tryResultToLocal, # in try block, result goes to local variable
doCatch, # include catch block
catchReturns, # in catch block, return
catchWithLocal, # in catch block, modify or return the local variable
catchThrows, # in catch block, throw
doFinally, # include finally block
finallyReturns, # in finally block, return local variable
finallyThrows, # in finally block, throw
endReturnLocal, # at very end, return variable local
deopt, # deopt inside inlined function
) = flags
# BASIC RULES
# Only one alternative can be applied at any time.
if (alternativeFn1 + alternativeFn2 + alternativeFn3 + alternativeFn4
+ alternativeFn5 > 1):
return
# In try, return or throw, or both.
if not (tryReturns or tryThrows): return
# Either doCatch or doFinally.
if not doCatch and not doFinally: return
# Catch flags only make sense when catching
if not doCatch and (catchReturns or catchWithLocal or catchThrows):
return
# Finally flags only make sense when finallying
if not doFinally and (finallyReturns or finallyThrows):
return
# tryFirstReturns is only relevant when both tryReturns and tryThrows are
# true.
if tryFirstReturns and not (tryReturns and tryThrows): return
# From the try and finally block, we can return or throw, but not both.
if catchReturns and catchThrows: return
if finallyReturns and finallyThrows: return
# If at the end we return the local, we need to have touched it.
if endReturnLocal and not (tryResultToLocal or catchWithLocal): return
# PRUNING
anyAlternative = any([alternativeFn1, alternativeFn2, alternativeFn3,
alternativeFn4, alternativeFn5])
specificAlternative = any([alternativeFn2, alternativeFn3])
rareAlternative = not specificAlternative
# If try returns and throws, then don't catchWithLocal, endReturnLocal, or
# deopt, or do any alternative.
if (tryReturns and tryThrows and
(catchWithLocal or endReturnLocal or deopt or anyAlternative)):
return
# We don't do any alternative if we do a finally.
if doFinally and anyAlternative: return
# We only use the local variable if we do alternative #2 or #3.
if ((tryResultToLocal or catchWithLocal or endReturnLocal) and
not specificAlternative):
return
# We don't need to test deopting into a finally.
if doFinally and deopt: return
# We're only interested in alternative #2 if we have endReturnLocal, no
# catchReturns, and no catchThrows, and deopt.
if (alternativeFn2 and
(not endReturnLocal or catchReturns or catchThrows or not deopt)):
return
# Flag check succeeded.
trueFlagNames = [name for (name, value) in flags._asdict().items() if value]
flagsMsgLine = " // Variant flags: [{}]".format(', '.join(trueFlagNames))
write(textwrap.fill(flagsMsgLine, subsequent_indent=' // '))
write("")
if not anyAlternative:
fragments = {
'increaseAndReturn15': 'increaseAndReturn15()',
'increaseAndThrow42': 'increaseAndThrow42()',
}
elif alternativeFn1:
fragments = {
'increaseAndReturn15': 'returnOrThrow(true)',
'increaseAndThrow42': 'returnOrThrow(false)',
}
elif alternativeFn2:
fragments = {
'increaseAndReturn15': 'increaseAndReturn15_calls_noopt()',
'increaseAndThrow42': 'increaseAndThrow42_calls_noopt()',
}
elif alternativeFn3:
fragments = {
'increaseAndReturn15': 'invertFunctionCall(increaseAndThrow42)',
'increaseAndThrow42': 'invertFunctionCall(increaseAndReturn15)',
}
elif alternativeFn4:
fragments = {
'increaseAndReturn15': '(new increaseAndStore15Constructor()).x',
'increaseAndThrow42': '(new increaseAndThrow42Constructor()).x',
}
else:
assert alternativeFn5
fragments = {
'increaseAndReturn15': 'magic.prop /* returns 15 */',
'increaseAndThrow42': '(magic.prop = 37 /* throws 42 */)',
}
# As we print code, we also maintain what the result should be. Variable
# {result} can be one of three things:
#
# - None, indicating returning JS null
# - ("return", n) with n an integer
# - ("throw", n), with n an integer
result = None
# We also maintain what the counter should be at the end.
# The counter is reset just before f is called.
counter = 0
write( " f = function {} () {{".format(fnname(flags)))
write( " var local = 888;")
write( " deopt = {};".format("true" if deopt else "false"))
local = 888
write( " try {")
write( " counter++;")
counter += 1
resultTo = "local +=" if tryResultToLocal else "return"
if tryReturns and not (tryThrows and not tryFirstReturns):
write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
if result == None:
counter += 1
if tryResultToLocal:
local += 19
else:
result = ("return", 19)
if tryThrows:
write( " {} 4 + {increaseAndThrow42};".format(resultTo, **fragments))
if result == None:
counter += 1
result = ("throw", 42)
if tryReturns and tryThrows and not tryFirstReturns:
write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
if result == None:
counter += 1
if tryResultToLocal:
local += 19
else:
result = ("return", 19)
write( " counter++;")
if result == None:
counter += 1
if doCatch:
write( " } catch (ex) {")
write( " counter++;")
if isinstance(result, tuple) and result[0] == 'throw':
counter += 1
if catchThrows:
write(" throw 2 + ex;")
if isinstance(result, tuple) and result[0] == "throw":
result = ('throw', 2 + result[1])
elif catchReturns and catchWithLocal:
write(" return 2 + local;")
if isinstance(result, tuple) and result[0] == "throw":
result = ('return', 2 + local)
elif catchReturns and not catchWithLocal:
write(" return 2 + ex;");
if isinstance(result, tuple) and result[0] == "throw":
result = ('return', 2 + result[1])
elif catchWithLocal:
write(" local += ex;");
if isinstance(result, tuple) and result[0] == "throw":
local += result[1]
result = None
counter += 1
else:
if isinstance(result, tuple) and result[0] == "throw":
result = None
counter += 1
write( " counter++;")
if doFinally:
write( " } finally {")
write( " counter++;")
counter += 1
if finallyThrows:
write(" throw 25;")
result = ('throw', 25)
elif finallyReturns:
write(" return 3 + local;")
result = ('return', 3 + local)
elif not finallyReturns and not finallyThrows:
write(" local += 2;")
local += 2
counter += 1
else: assert False # unreachable
write( " counter++;")
write( " }")
write( " counter++;")
if result == None:
counter += 1
if endReturnLocal:
write( " return 5 + local;")
if result == None:
result = ('return', 5 + local)
write( " }")
if result == None:
write( " resetOptAndAssertResultEquals(undefined, f);")
else:
tag, value = result
if tag == "return":
write( " resetOptAndAssertResultEquals({}, f);".format(value))
else:
assert tag == "throw"
write( " resetOptAndAssertThrowsWith({}, f);".format(value))
write( " assertEquals({}, counter);".format(counter))
write( "")
global NUM_TESTS_PRINTED, NUM_TESTS_IN_SHARD
NUM_TESTS_PRINTED += 1
NUM_TESTS_IN_SHARD += 1
FILE = None # to be initialised to an open file
SHARD_NUM = 1
def write(*args):
return print(*args, file=FILE)
def rotateshard():
global FILE, NUM_TESTS_IN_SHARD, SHARD_SIZE
if MODE != 'shard':
return
if FILE != None and NUM_TESTS_IN_SHARD < SHARD_SIZE:
return
if FILE != None:
finishshard()
assert FILE == None
FILE = open(SHARD_FILENAME_TEMPLATE.format(shard=SHARD_NUM), 'w')
write_shard_header()
NUM_TESTS_IN_SHARD = 0
def finishshard():
global FILE, SHARD_NUM, MODE
assert FILE
write_shard_footer()
if MODE == 'shard':
print("Wrote shard {}.".format(SHARD_NUM))
FILE.close()
FILE = None
SHARD_NUM += 1
def write_shard_header():
if MODE == 'shard':
write("// Shard {}.".format(SHARD_NUM))
write("")
write(PREAMBLE)
write("")
def write_shard_footer():
write("}")
write("%NeverOptimizeFunction(runThisShard);")
write("")
write("// {} tests in this shard.".format(NUM_TESTS_IN_SHARD))
write("// {} tests up to here.".format(NUM_TESTS_PRINTED))
write("")
write("runThisShard();")
FLAGLETTERS="54321trflcrltfrtld"
flagtuple = namedtuple('flagtuple', (
"alternativeFn5",
"alternativeFn4",
"alternativeFn3",
"alternativeFn2",
"alternativeFn1",
"tryThrows",
"tryReturns",
"tryFirstReturns",
"tryResultToLocal",
"doCatch",
"catchReturns",
"catchWithLocal",
"catchThrows",
"doFinally",
"finallyReturns",
"finallyThrows",
"endReturnLocal",
"deopt"
))
emptyflags = flagtuple(*((False,) * len(flagtuple._fields)))
f1 = emptyflags._replace(tryReturns=True, doCatch=True)
# You can test function printtest with f1.
allFlagCombinations = [
flagtuple(*bools)
for bools in booltuples(len(flagtuple._fields))
]
if __name__ == '__main__':
global MODE
if sys.argv[1:] == []:
MODE = 'stdout'
print("// Printing all shards together to stdout.")
print("")
write_shard_header()
FILE = sys.stdout
elif sys.argv[1:] == ['--shard-and-overwrite']:
MODE = 'shard'
else:
print("Usage:")
print("")
print(" python {}".format(sys.argv[0]))
print(" print all tests to standard output")
print(" python {} --shard-and-overwrite".format(sys.argv[0]))
print(" print all tests to {}".format(SHARD_FILENAME_TEMPLATE))
print("")
print(sys.argv[1:])
print("")
sys.exit(1)
rotateshard()
for flags in allFlagCombinations:
printtest(flags)
rotateshard()
finishshard()
if MODE == 'shard':
print("Total: {} tests.".format(NUM_TESTS_PRINTED))
|
py | 1a377f54af92da1bc9425b8d7f61130de0f1acc4 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lint changed code in current branch."""
from __future__ import print_function
import os
import sys
import yaml
from local.butler import appengine
from local.butler import common
_GOLINT_EXCEPTIONS = [
'types.go' # Not all model names conform to Go naming conventions.
]
_LICENSE_CHECK_FILENAMES = ['Dockerfile']
_LICENSE_CHECK_EXTENSIONS = [
'.bash',
'.c',
'.cc',
'.cpp',
'.css',
'.h',
'.htm',
'.html',
'.js',
'.go',
'.proto',
'.ps1',
'.py',
'.sh',
'.yaml',
]
_LICENSE_CHECK_IGNORE_FILENAMES = ['technology.css']
_LICENSE_CHECK_IGNORE_DIRECTORIES = [
'third_party',
'templates', # Generated code.
]
_LICENSE_CHECK_STRING = 'http://www.apache.org/licenses/LICENSE-2.0'
_PY_TEST_SUFFIX = '_test.py'
_PY_INIT_FILENAME = '__init__.py'
_YAML_EXCEPTIONS = ['bad.yaml']
_error_occurred = False
def _error(message=None):
"""Print error and track state via a global."""
if message:
print(message)
global _error_occurred
_error_occurred = True
def _execute_command_and_track_error(command):
"""Executes command, tracks error state."""
returncode, output = common.execute(command, exit_on_error=False)
if returncode != 0:
_error()
return output
def license_validate(file_path):
"""Run license header validation."""
filename = os.path.basename(file_path)
extension = os.path.splitext(file_path)[1]
if (filename not in _LICENSE_CHECK_FILENAMES and
extension not in _LICENSE_CHECK_EXTENSIONS):
return
path_directories = file_path.split(os.sep)
if any(d in _LICENSE_CHECK_IGNORE_DIRECTORIES for d in path_directories):
return
source_filename = os.path.basename(file_path)
if source_filename in _LICENSE_CHECK_IGNORE_FILENAMES:
return
with open(file_path) as f:
if _LICENSE_CHECK_STRING in f.read():
return
_error('Failed: Missing license header for %s.' % file_path)
def py_import_order(file_path):
"""Validate that python imports are alphabetized."""
def _validate_block(import_block):
"""Ensure that a single block is ordered properly."""
if not import_block:
return []
sorted_import_block = sorted(import_block, key=lambda i: i.lower())
if sorted_import_block == import_block:
return []
return ['\n'.join(sorted_import_block)]
with open(file_path) as f:
file_content = f.read()
imports = []
from_imports = []
corrected_import_blocks = []
for line in file_content.splitlines():
if line.startswith('import '):
imports.append(line)
else:
corrected_import_blocks += _validate_block(imports)
imports = []
if line.startswith('from '):
from_imports.append(line)
else:
corrected_import_blocks += _validate_block(from_imports)
from_imports = []
# Though rare, if a file ends with an import we must still validate them.
corrected_import_blocks += _validate_block(imports)
corrected_import_blocks += _validate_block(from_imports)
if not corrected_import_blocks:
return
suggestions = '\n\n--------\n\n'.join(corrected_import_blocks)
_error(('Failed: File {filename} has non-alphabetized import blocks. '
'Suggested order:\n\n{suggestions}').format(
filename=file_path, suggestions=suggestions))
def py_test_init_check(file_path):
"""Check test directory has a __init__.py file. Otherwise, the test does not
execute at all."""
if not file_path.endswith(_PY_TEST_SUFFIX):
return
test_directory = os.path.dirname(file_path)
if _PY_INIT_FILENAME not in os.listdir(test_directory):
_error('Failed: Missing {filename} file in test directory {dir}.'.format(
filename=_PY_INIT_FILENAME, dir=test_directory))
def yaml_validate(file_path):
"""Run yaml validation."""
if os.path.basename(file_path) in _YAML_EXCEPTIONS:
return
try:
with open(file_path) as f:
yaml.safe_load(f.read())
except Exception as e:
_error('Failed: Invalid yaml file %s.\n\n%s' % (file_path, e))
def execute(_):
"""Lint changed code."""
pythonpath = os.getenv('PYTHONPATH', '')
os.environ['PYTHONPATH'] = appengine.find_sdk_path() + ':' + pythonpath
if 'GOOGLE_CLOUDBUILD' in os.environ:
# Explicitly compare against master if we're running on the CI
_, output = common.execute('git diff --name-only master FETCH_HEAD')
elif 'TRAVIS_BRANCH' in os.environ:
_, output = common.execute(
'git diff --name-only HEAD $(git merge-base HEAD FETCH_HEAD)')
else:
_, output = common.execute('git diff --name-only FETCH_HEAD')
file_paths = [f for f in output.splitlines() if os.path.exists(f)]
py_changed_file_paths = [
f for f in file_paths if f.endswith('.py') and
# Exclude auto-generated files.
not f.endswith('_pb2.py') and not f.endswith('_pb2_grpc.py')
]
go_changed_file_paths = [f for f in file_paths if f.endswith('.go')]
yaml_changed_file_paths = [f for f in file_paths if f.endswith('.yaml')]
for file_path in py_changed_file_paths:
_execute_command_and_track_error('pylint ' + file_path)
_execute_command_and_track_error('yapf -d ' + file_path)
py_import_order(file_path)
py_test_init_check(file_path)
golint_path = os.path.join('local', 'bin', 'golint')
for file_path in go_changed_file_paths:
if not os.path.basename(file_path) in _GOLINT_EXCEPTIONS:
_execute_command_and_track_error(golint_path + ' ' + file_path)
output = _execute_command_and_track_error('gofmt -d ' + file_path)
if output.strip():
_error()
for file_path in yaml_changed_file_paths:
yaml_validate(file_path)
for file_path in file_paths:
license_validate(file_path)
if _error_occurred:
print('Linting failed, see errors above.')
sys.exit(1)
else:
print('Linting passed.')
|
py | 1a377f820f751a924cc01dbfc18f27d6543b5fbb | #!/usr/bin/env python
# encoding=utf-8
"""
Copyright (c) 2021 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
from .storage_register import storage
from . import db
from . import file
from .path_exporter import PathExporter
__all__ = ['PathExporter', 'storage']
|
py | 1a377fbfab4126f97a73f23585b2d151f7560fe8 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The restores api."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from webob import exc
from karbor.api import common
from karbor.api.openstack import wsgi
from karbor.api.schemas import restores as restore_schema
from karbor.api import validation
from karbor.common import constants
from karbor.common import notification
from karbor.common.notification import StartNotification
from karbor import exception
from karbor.i18n import _
from karbor import objects
from karbor.objects import base as objects_base
from karbor.policies import restores as restore_policy
from karbor.services.protection import api as protection_api
from karbor import utils
import six
query_restore_filters_opt = cfg.ListOpt(
'query_restore_filters',
default=['status'],
help="Restore filter options which "
"non-admin user could use to "
"query restores. Default values "
"are: ['status']")
CONF = cfg.CONF
CONF.register_opt(query_restore_filters_opt)
LOG = logging.getLogger(__name__)
class RestoreViewBuilder(common.ViewBuilder):
"""Model a server API response as a python dictionary."""
_collection_name = "restores"
def detail(self, request, restore):
"""Detailed view of a single restore."""
restore_ref = {
'restore': {
'id': restore.get('id'),
'project_id': restore.get('project_id'),
'provider_id': restore.get('provider_id'),
'checkpoint_id': restore.get('checkpoint_id'),
'restore_target': restore.get('restore_target'),
'parameters': restore.get('parameters'),
'status': restore.get('status'),
'resources_status': restore.get('resources_status'),
'resources_reason': restore.get('resources_reason'),
}
}
return restore_ref
def detail_list(self, request, restores, restore_count=None):
"""Detailed view of a list of restores."""
return self._list_view(self.detail, request, restores,
restore_count,
self._collection_name)
def _list_view(self, func, request, restores, restore_count,
coll_name=_collection_name):
"""Provide a view for a list of restores.
:param func: Function used to format the restore data
:param request: API request
:param restores: List of restores in dictionary format
:param restore_count: Length of the original list of restores
:param coll_name: Name of collection, used to generate the next link
for a pagination query
:returns: restore data in dictionary format
"""
restores_list = [func(request, restore)['restore']
for restore in restores]
restores_links = self._get_collection_links(request,
restores,
coll_name,
restore_count)
restores_dict = {
'restores': restores_list
}
if restores_links:
restores_dict['restores_links'] = restores_links
return restores_dict
class RestoresController(wsgi.Controller):
"""The Restores API controller for the OpenStack API."""
_view_builder_class = RestoreViewBuilder
def __init__(self):
self.protection_api = protection_api.API()
super(RestoresController, self).__init__()
def show(self, req, id):
"""Return data about the given restore."""
context = req.environ['karbor.context']
LOG.info("Show restore with id: %s", id, context=context)
if not uuidutils.is_uuid_like(id):
msg = _("Invalid restore id provided.")
raise exc.HTTPBadRequest(explanation=msg)
try:
restore = self._restore_get(context, id)
except exception.RestoreNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
LOG.info("Show restore request issued successfully.",
resource={'id': restore.id})
return self._view_builder.detail(req, restore)
def index(self, req):
"""Returns a list of restores, transformed through view builder."""
context = req.environ['karbor.context']
LOG.info("Show restore list", context=context)
params = req.params.copy()
marker, limit, offset = common.get_pagination_params(params)
sort_keys, sort_dirs = common.get_sort_params(params)
filters = params
utils.remove_invalid_filter_options(
context,
filters,
self._get_restore_filter_options())
utils.check_filters(filters)
restores = self._get_all(context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
offset=offset)
retval_restores = self._view_builder.detail_list(req, restores)
LOG.info("Show restore list request issued successfully.")
return retval_restores
def _get_all(self, context, marker=None, limit=None, sort_keys=None,
sort_dirs=None, filters=None, offset=None):
context.can(restore_policy.GET_ALL_POLICY)
if filters is None:
filters = {}
all_tenants = utils.get_bool_param('all_tenants', filters)
if filters:
LOG.debug("Searching by: %s.", six.text_type(filters))
if context.is_admin and all_tenants:
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
restores = objects.RestoreList.get_all(
context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
offset=offset)
else:
restores = objects.RestoreList.get_all_by_project(
context, context.project_id, marker, limit,
sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters,
offset=offset)
LOG.info("Get all restores completed successfully.")
return restores
def _get_restore_filter_options(self):
"""Return restores search options allowed by non-admin."""
return CONF.query_restore_filters
@validation.schema(restore_schema.create)
def create(self, req, body):
"""Creates a new restore."""
LOG.debug('Create restore request body: %s', body)
context = req.environ['karbor.context']
context.can(restore_policy.CREATE_POLICY)
context.notification = notification.KarborRestoreCreate(
context, request=req)
restore = body['restore']
LOG.debug('Create restore request : %s', restore)
parameters = restore.get("parameters")
restore_auth = restore.get("restore_auth", None)
restore_properties = {
'project_id': context.project_id,
'provider_id': restore.get('provider_id'),
'checkpoint_id': restore.get('checkpoint_id'),
'restore_target': restore.get('restore_target'),
'parameters': parameters,
'status': constants.RESTORE_STATUS_IN_PROGRESS,
}
restoreobj = objects.Restore(context=context,
**restore_properties)
restoreobj.create()
LOG.debug('call restore RPC : restoreobj:%s', restoreobj)
# call restore rpc API of protection service
try:
with StartNotification(context, parameters=parameters):
self.protection_api.restore(context, restoreobj, restore_auth)
except exception.AccessCheckpointNotAllowed as error:
raise exc.HTTPForbidden(explanation=error.msg)
except Exception:
# update the status of restore
update_dict = {
"status": constants.RESTORE_STATUS_FAILURE
}
context.can(restore_policy.UPDATE_POLICY, restoreobj)
restoreobj = self._restore_update(context,
restoreobj.get("id"),
update_dict)
retval = self._view_builder.detail(req, restoreobj)
return retval
def _restore_get(self, context, restore_id):
if not uuidutils.is_uuid_like(restore_id):
msg = _("Invalid restore id provided.")
raise exc.HTTPBadRequest(explanation=msg)
restore = objects.Restore.get_by_id(context, restore_id)
try:
context.can(restore_policy.GET_POLICY, restore)
except exception.PolicyNotAuthorized:
# raise RestoreNotFound instead to make sure karbor behaves
# as it used to
raise exception.RestoreNotFound(restore_id=restore_id)
LOG.info("Restore info retrieved successfully.")
return restore
def _restore_update(self, context, restore_id, fields):
try:
restore = self._restore_get(context, restore_id)
except exception.RestoreNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
if isinstance(restore, objects_base.KarborObject):
restore.update(fields)
restore.save()
LOG.info("restore updated successfully.")
return restore
else:
msg = _("The parameter restore must be a object of "
"KarborObject class.")
raise exception.InvalidInput(reason=msg)
def create_resource():
return wsgi.Resource(RestoresController())
|
py | 1a3780bec0d719498045687d286969d78ae744b5 | from dltranz.data_load import augmentation_chain
def build_augmentations(conf):
def _chain():
from .all_time_shuffle import AllTimeShuffle
from .dropout_trx import DropoutTrx
from .random_slice import RandomSlice
from .seq_len_limit import SeqLenLimit
from .drop_day import DropDay
for cls_name, params in conf:
cls_f = locals().get(cls_name)
if cls_f is None:
raise AttributeError(f'Can not find augmentation for "{cls_name}"')
yield cls_f(**params)
return augmentation_chain(*_chain())
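# Illustrative usage (a sketch, not from the original file): `conf` is an
# iterable of (class_name, constructor_kwargs) pairs. The class names below are
# real modules in this package, but the keyword arguments are assumptions about
# their constructors.
#
#   augs = build_augmentations([
#       ('SeqLenLimit', {'max_seq_len': 1200}),
#       ('DropoutTrx', {'trx_dropout': 0.01}),
#   ])
#   rec = augs(rec)  # the resulting augmentation_chain applies each step in order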
|
py | 1a3781170cb72cc23d6c4e3d4ce85ec704e3e45b | ''' Header reading / writing functions for nifti1 image format
Author: Matthew Brett
'''
import numpy as np
import numpy.linalg as npl
from nifti.volumeutils import Recoder, make_dt_codes, \
HeaderDataError, HeaderTypeError, allopen
from nifti.batteryrunners import Report
from nifti.quaternions import fillpositive, quat2mat, mat2quat
from nifti import analyze # module import
from nifti.spm99analyze import SpmAnalyzeHeader
from nifti import filetuples # module import
from nifti.spatialimages import SpatialImage
from nifti.header_ufuncs import write_data, adapt_header
# nifti1 flat header definition for Analyze-like first 348 bytes
# first number in comments indicates offset in file header in bytes
header_dtd = [
('sizeof_hdr', 'i4'), # 0; must be 348
('data_type', 'S10'), # 4; unused
('db_name', 'S18'), # 14; unused
('extents', 'i4'), # 32; unused
('session_error', 'i2'), # 36; unused
('regular', 'S1'), # 38; unused
('dim_info', 'u1'), # 39; MRI slice ordering code
('dim', 'i2', 8), # 40; data array dimensions
('intent_p1', 'f4'), # 56; first intent parameter
('intent_p2', 'f4'), # 60; second intent parameter
('intent_p3', 'f4'), # 64; third intent parameter
('intent_code', 'i2'),# 68; NIFTI intent code
('datatype', 'i2'), # 70; it's the datatype
('bitpix', 'i2'), # 72; number of bits per voxel
('slice_start', 'i2'),# 74; first slice index
('pixdim', 'f4', 8), # 76; grid spacings (units below)
('vox_offset', 'f4'), # 108; offset to data in image file
('scl_slope', 'f4'), # 112; data scaling slope
('scl_inter', 'f4'), # 116; data scaling intercept
('slice_end', 'i2'), # 120; last slice index
('slice_code', 'u1'), # 122; slice timing order
    ('xyzt_units', 'u1'), # 123; units of pixdim[1..4]
('cal_max', 'f4'), # 124; max display intensity
('cal_min', 'f4'), # 128; min display intensity
('slice_duration', 'f4'), # 132; time for 1 slice
('toffset', 'f4'), # 136; time axis shift
('glmax', 'i4'), # 140; unused
('glmin', 'i4'), # 144; unused
('descrip', 'S80'), # 148; any text
('aux_file', 'S24'), # 228; auxiliary filename
('qform_code', 'i2'), # 252; xform code
('sform_code', 'i2'), # 254; xform code
('quatern_b', 'f4'), # 256; quaternion b param
('quatern_c', 'f4'), # 260; quaternion c param
('quatern_d', 'f4'), # 264; quaternion d param
('qoffset_x', 'f4'), # 268; quaternion x shift
('qoffset_y', 'f4'), # 272; quaternion y shift
('qoffset_z', 'f4'), # 276; quaternion z shift
('srow_x', 'f4', 4), # 280; 1st row affine transform
('srow_y', 'f4', 4), # 296; 2nd row affine transform
('srow_z', 'f4', 4), # 312; 3rd row affine transform
('intent_name', 'S16'), # 328; name or meaning of data
('magic', 'S4') # 344; must be 'ni1\0' or 'n+1\0'
]
# Full header numpy dtype
header_dtype = np.dtype(header_dtd)
# datatypes not in analyze format, with codes
try:
_float128t = np.float128
except AttributeError:
_float128t = np.void
try:
_complex256t = np.complex256
except AttributeError:
_complex256t = np.void
_added_dtdefs = ( # code, label, dtype definition
(256, 'int8', np.int8),
(512, 'uint16', np.uint16),
(768, 'uint32', np.uint32),
    (1024, 'int64', np.int64),
    (1280, 'uint64', np.uint64),
(1536, 'float128', _float128t), # Only numpy defined on 64 bit
(1792, 'complex128', np.complex128),
(2048, 'complex256', _complex256t), # 64 bit again
(2304, 'RGBA', np.dtype([('R','u1'),
('G', 'u1'),
('B', 'u1'),
('A', 'u1')]))
)
# Make full code alias bank, including dtype column
data_type_codes = make_dt_codes(analyze._dtdefs + _added_dtdefs)
# Transform (qform, sform) codes
xform_codes = Recoder(( # code, label
(0, 'unknown'), # Code for transform unknown or absent
(1, 'scanner'),
(2, 'aligned'),
(3, 'talairach'),
(4, 'mni')), fields=('code', 'label'))
# unit codes
unit_codes = Recoder(( # code, label
(0, 'unknown'),
(1, 'meter'),
(2, 'mm'),
(3, 'micron'),
(8, 'sec'),
(16, 'msec'),
(24, 'usec'),
(32, 'hz'),
(40, 'ppm'),
(48, 'rads')), fields=('code', 'label'))
slice_order_codes = Recoder(( # code, label
(0, 'unknown'),
(1, 'sequential increasing', 'seq inc'),
(2, 'sequential decreasing', 'seq dec'),
(3, 'alternating increasing', 'alt inc'),
(4, 'alternating decreasing', 'alt dec'),
(5, 'alternating increasing 2', 'alt inc 2'),
(6, 'alternating decreasing 2', 'alt dec 2')),
fields=('code', 'label'))
intent_codes = Recoder((
# code, label, parameters description tuple
(0, 'none', ()),
(2, 'correlation',('p1 = DOF',)),
(3, 't test', ('p1 = DOF',)),
(4, 'f test', ('p1 = numerator DOF', 'p2 = denominator DOF')),
(5, 'z score', ()),
(6, 'chi2', ('p1 = DOF',)),
(7, 'beta', ('p1=a', 'p2=b')), # two parameter beta distribution
(8, 'binomial', ('p1 = number of trials', 'p2 = probability per trial')),
# Prob(x) = (p1 choose x) * p2^x * (1-p2)^(p1-x), for x=0,1,...,p1
    (9, 'gamma', ('p1 = shape', 'p2 = scale')), # 2 parameter gamma
    # Density(x) proportional to x^(p1-1) * exp(-p2*x)
    (10, 'poisson', ('p1 = mean',)),
(11, 'normal', ('p1 = mean', 'p2 = standard deviation',)),
(12, 'non central f test', ('p1 = numerator DOF',
'p2 = denominator DOF',
'p3 = numerator noncentrality parameter',)),
(13, 'non central chi2', ('p1 = DOF', 'p2 = noncentrality parameter',)),
(14, 'logistic', ('p1 = location', 'p2 = scale',)),
(15, 'laplace', ('p1 = location', 'p2 = scale')),
(16, 'uniform', ('p1 = lower end', 'p2 = upper end')),
(17, 'non central t test', ('p1 = DOF', 'p2 = noncentrality parameter')),
(18, 'weibull', ('p1 = location', 'p2 = scale, p3 = power')),
(19, 'chi', ('p1 = DOF',)),
# p1 = 1 = 'half normal' distribution
# p1 = 2 = Rayleigh distribution
# p1 = 3 = Maxwell-Boltzmann distribution. */
    (20, 'inverse gaussian', ('p1 = mu', 'p2 = lambda')),
(21, 'extreme value 1', ('p1 = location', 'p2 = scale')),
(22, 'p value', ()),
(23, 'log p value', ()),
(24, 'log10 p value', ()),
(1001, 'estimate', ()),
(1002, 'label', ()),
(1003, 'neuroname', ()),
(1004, 'general matrix', ('p1 = M', 'p2 = N')),
(1005, 'symmetric matrix', ('p1 = M',)),
(1006, 'displacement vector', ()),
(1007, 'vector', ()),
    (1008, 'pointset', ()),
(1009, 'triangle', ()),
(1010, 'quaternion', ()),
(1011, 'dimensionless', ()),
(2001, 'time series', ()),
(2002, 'node index', ()),
(2003, 'rgb vector', ()),
(2004, 'rgba vector', ()),
(2005, 'shape', ())),
fields=('code', 'label', 'parameters'))
class Nifti1Extension(object):
"""Baseclass for NIfTI1 header extensions.
This class is sufficient to handle very simple text-based extensions, such
as `comment`. More sophisticated extensions should/will be supported by
dedicated subclasses.
"""
def __init__(self, code, content):
"""
Parameters
----------
code : int|str
Canonical extension code as defined in the NIfTI standard, given
either as integer or corresponding label
(see :data:`~nifti.nifti1.extension_codes`)
content : str
Extension content as read from the NIfTI file header. This content is
converted into a runtime representation.
"""
try:
self._code = extension_codes.code[code]
except KeyError:
# XXX or fail or at least complain?
self._code = code
self._content = self._unmangle(content)
def _unmangle(self, value):
"""Convert the extension content into its runtime representation.
The default implementation does nothing at all.
Parameters
----------
value : str
Extension content as read from file.
Returns
-------
The same object that was passed as `value`.
Notes
-----
Subclasses should reimplement this method to provide the desired
unmangling procedure and may return any type of object.
"""
return value
def _mangle(self, value):
"""Convert the extension content into NIfTI file header representation.
The default implementation does nothing at all.
Parameters
----------
value : str
Extension content in runtime form.
Returns
-------
str
Notes
-----
Subclasses should reimplement this method to provide the desired
mangling procedure.
"""
return value
def get_code(self):
"""Return the canonical extension type code."""
return self._code
def get_content(self):
"""Return the extension content in its runtime representation."""
return self._content
def get_sizeondisk(self):
"""Return the size of the extension in the NIfTI file.
"""
# need raw value size plus 8 bytes for esize and ecode
size = len(self._mangle(self._content))
size += 8
# extensions size has to be a multiple of 16 bytes
size += 16 - (size % 16)
return size
def __repr__(self):
try:
code = extension_codes.label[self._code]
except KeyError:
# deal with unknown codes
code = self._code
s = "Nifti1Extension('%s', '%s')" % (code, self._content)
return s
def __eq__(self, other):
if self._code != other._code \
or self._content != other._content:
return False
else:
return True
def write_to(self, fileobj):
''' Write header extensions to fileobj
Write starts at fileobj current file position.
Parameters
----------
fileobj : file-like object
Should implement ``write`` method
Returns
-------
None
'''
extstart = fileobj.tell()
rawsize = self.get_sizeondisk()
# write esize and ecode first
fileobj.write(np.array((rawsize, self._code),
dtype=np.int32).tostring())
# followed by the actual extension content
# XXX if mangling upon load is implemented, it should be reverted here
fileobj.write(self._mangle(self._content))
# be nice and zero out remaining part of the extension till the
# next 16 byte border
fileobj.write('\x00' * (extstart + rawsize - fileobj.tell()))
# NIfTI header extension type codes (ECODE)
# see nifti1_io.h for a complete list of all known extensions and
# references to their description or contacts of the respective
# initiators
extension_codes = Recoder((
(0, "ignore", Nifti1Extension),
(2, "dicom", Nifti1Extension),
(4, "afni", Nifti1Extension),
(6, "comment", Nifti1Extension),
(8, "xcede", Nifti1Extension),
(10, "jimdiminfo", Nifti1Extension),
(12, "workflow_fwds", Nifti1Extension),
(14, "freesurfer", Nifti1Extension),
(16, "pypickle", Nifti1Extension)
),
fields=('code', 'label', 'handler'))
class Nifti1Extensions(list):
"""Simple extension collection, implemented as a list-subclass.
"""
def count(self, ecode):
"""Returns the number of extensions matching a given *ecode*.
        Parameters
        ----------
        ecode : int | str
          The ecode can be specified either literally or as a numerical value.
"""
count = 0
code = extension_codes.code[ecode]
for e in self:
if e.get_code() == code:
count += 1
return count
def get_codes(self):
"""Return a list of the extension code of all available extensions"""
return [e.get_code() for e in self]
def get_sizeondisk(self):
"""Return the size of the complete header extensions in the NIfTI file.
"""
# add four bytes for the NIfTI extension flag!
return np.sum([e.get_sizeondisk() for e in self]) + 4
def __repr__(self):
s = "Nifti1Extensions(%s)" \
% ', '.join([str(e) for e in self])
return s
def __eq__(self, other):
for i, e in enumerate(self):
if not e == other[i]:
return False
return True
def write_to(self, fileobj):
''' Write header extensions to fileobj
Write starts at fileobj current file position.
Parameters
----------
fileobj : file-like object
Should implement ``write`` method
Returns
-------
None
'''
        # no extensions -> nothing to do
if not len(self):
return
# since we have extensions write the appropriate flag
fileobj.write(np.array((1,0,0,0), dtype=np.int8).tostring())
# and now each extension
for e in self:
e.write_to(fileobj)
@classmethod
def from_fileobj(klass, fileobj, size):
'''Read header extensions from a fileobj
Parameters
----------
fileobj : file-like object
          It is assumed to be positioned right after the NIfTI magic field.
size : int
Number of bytes to read. If negative, fileobj will be read till its
end.
Returns
-------
        An extension list. This list might be empty in case no extensions
        were present in fileobj.
'''
# make empty extension list
extensions = klass()
# assume the fileptr is just after header (magic field)
# try reading the next 4 bytes after the initial header
extension_status = fileobj.read(4)
if not len(extension_status):
            # if there is nothing, the NIfTI standard requires us to assume zeros
extension_status = np.zeros((4,), dtype=np.int8)
else:
extension_status = np.fromstring(extension_status, dtype=np.int8)
# NIfTI1 says: if first element is non-zero there are extensions present
# if not there is nothing left to do
if not extension_status[0]:
return extensions
# note that we read the extension flag
if not size < 0:
size = size - 4
# read until the whole header is parsed (each extension is a multiple
# of 16 bytes) or in case of a separate header file till the end
# (break inside the body)
# XXX not sure if the separate header behavior is sane
while size >= 16 or size < 0:
# the next 8 bytes should have esize and ecode
ext_def = fileobj.read(8)
            # nothing was read and we were instructed to read till the end
            # -> assume all extensions were parsed and break
if not len(ext_def) and size < 0:
break
# otherwise there should be a full extension header
if not len(ext_def) == 8:
raise HeaderDataError('failed to read extension header')
ext_def = np.fromstring(ext_def, dtype=np.int32)
# be extra verbose
ecode = ext_def[1]
esize = ext_def[0]
if esize % 16:
raise HeaderDataError(
'extension size is not a multiple of 16 bytes')
# read extension itself; esize includes the 8 bytes already read
evalue = fileobj.read(esize - 8)
if not len(evalue) == esize - 8:
raise HeaderDataError('failed to read extension content')
# note that we read a full extension
size -= esize
# store raw extension content, but strip trailing NULL chars
evalue = evalue.rstrip('\x00')
# 'extension_codes' also knows the best implementation to handle
# a particular extension type
try:
ext = extension_codes.handler[ecode](ecode, evalue)
except KeyError:
# unknown extension type
# XXX complain or fail or go with a generic extension
ext = Nifti1Extension(ecode, evalue)
extensions.append(ext)
return extensions
class Nifti1Header(SpmAnalyzeHeader):
''' Class for NIFTI1 header '''
# Copies of module level definitions
_dtype = header_dtype
_data_type_codes = data_type_codes
_xform_codes = xform_codes
_unit_codes = unit_codes
_intent_codes = intent_codes
_slice_order_codes = slice_order_codes
# data scaling capabilities
has_data_slope = True
has_data_intercept = True
def get_best_affine(self):
''' Select best of available transforms '''
hdr = self._header_data
if hdr['sform_code']:
return self.get_sform()
if hdr['qform_code']:
return self.get_qform()
return self.get_base_affine()
def _empty_headerdata(self, endianness=None):
''' Create empty header binary block with given endianness '''
hdr_data = analyze.AnalyzeHeader._empty_headerdata(self, endianness)
hdr_data['scl_slope'] = 1
hdr_data['magic'] = 'n+1'
hdr_data['vox_offset'] = 352
return hdr_data
def get_qform_quaternion(self):
''' Compute quaternion from b, c, d of quaternion
Fills a value by assuming this is a unit quaternion
'''
hdr = self._header_data
bcd = [hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d']]
return fillpositive(bcd)
def get_qform(self):
''' Return 4x4 affine matrix from qform parameters in header '''
hdr = self._header_data
quat = self.get_qform_quaternion()
R = quat2mat(quat)
vox = hdr['pixdim'][1:4].copy()
        if np.any(vox < 0):
            raise HeaderDataError('pixdims[1,2,3] should be positive')
qfac = hdr['pixdim'][0]
if qfac not in (-1,1):
raise HeaderDataError('qfac (pixdim[0]) should be 1 or -1')
vox[-1] *= qfac
S = np.diag(vox)
M = np.dot(R, S)
out = np.eye(4)
out[0:3,0:3] = M
out[0:3,3] = [hdr['qoffset_x'], hdr['qoffset_y'], hdr['qoffset_z']]
return out
def set_qform(self, affine, code=None):
''' Set qform header values from 4x4 affine
Parameters
----------
hdr : nifti1 header
affine : 4x4 array
affine transform to write into qform
code : None, string or integer
String or integer giving meaning of transform in *affine*.
The default is None. If code is None, then {if current
qform code is not 0, leave code as it is in the header; else
set to 1 ('scanner')}.
Notes
-----
The qform transform only encodes translations, rotations and
zooms. If there are shear components to the *affine* transform,
the written qform gives the closest approximation where the
rotation matrix is orthogonal. This is to allow quaternion
representation. The orthogonal representation enforces orthogonal
axes.
Examples
--------
>>> hdr = Nifti1Header()
>>> int(hdr['qform_code']) # gives 0 - unknown
0
>>> affine = np.diag([1,2,3,1])
>>> np.all(hdr.get_qform() == affine)
False
>>> hdr.set_qform(affine)
>>> np.all(hdr.get_qform() == affine)
True
>>> int(hdr['qform_code']) # gives 1 - scanner
1
>>> hdr.set_qform(affine, code='talairach')
>>> int(hdr['qform_code'])
3
>>> hdr.set_qform(affine, code=None)
>>> int(hdr['qform_code'])
3
>>> hdr.set_qform(affine, code='scanner')
>>> int(hdr['qform_code'])
1
'''
hdr = self._header_data
if code is None:
code = hdr['qform_code']
if code == 0:
hdr['qform_code'] = 1
else:
code = self._xform_codes[code]
hdr['qform_code'] = code
if not affine.shape == (4,4):
raise TypeError('Need 4x4 affine as input')
trans = affine[:3,3]
RZS = affine[:3,:3]
zooms = np.sqrt(np.sum(RZS * RZS, axis=0))
R = RZS / zooms
# Set qfac to make R determinant positive
if npl.det(R) > 0:
qfac = 1
else:
qfac = -1
R[:,-1] *= -1
# Make R orthogonal (to allow quaternion representation)
# The orthogonal representation enforces orthogonal axes
# (a subtle requirement of the NIFTI format qform transform)
# Transform below is polar decomposition, returning the closest
# orthogonal matrix PR, to input R
P, S, Qs = npl.svd(R)
PR = np.dot(P, Qs)
# Convert to quaternion
quat = mat2quat(PR)
# Set into header
hdr['qoffset_x'], hdr['qoffset_y'], hdr['qoffset_z'] = trans
hdr['pixdim'][0] = qfac
hdr['pixdim'][1:4] = zooms
hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d'] = quat[1:]
def get_sform(self):
''' Return sform 4x4 affine matrix from header '''
hdr = self._header_data
out = np.eye(4)
out[0,:] = hdr['srow_x'][:]
out[1,:] = hdr['srow_y'][:]
out[2,:] = hdr['srow_z'][:]
return out
def set_sform(self, affine, code=None):
''' Set sform transform from 4x4 affine
Parameters
----------
hdr : nifti1 header
affine : 4x4 array
affine transform to write into sform
code : None, string or integer
String or integer giving meaning of transform in *affine*.
The default is None. If code is None, then {if current
sform code is not 0, leave code as it is in the header; else
set to 1 ('scanner')}.
Examples
--------
>>> hdr = Nifti1Header()
>>> int(hdr['sform_code']) # gives 0 - unknown
0
>>> affine = np.diag([1,2,3,1])
>>> np.all(hdr.get_sform() == affine)
False
>>> hdr.set_sform(affine)
>>> np.all(hdr.get_sform() == affine)
True
>>> int(hdr['sform_code']) # gives 1 - scanner
1
>>> hdr.set_sform(affine, code='talairach')
>>> int(hdr['sform_code'])
3
>>> hdr.set_sform(affine, code=None)
>>> int(hdr['sform_code'])
3
>>> hdr.set_sform(affine, code='scanner')
>>> int(hdr['sform_code'])
1
'''
hdr = self._header_data
if code is None:
code = hdr['sform_code']
if code == 0:
hdr['sform_code'] = 1
else:
code = self._xform_codes[code]
hdr['sform_code'] = code
hdr['srow_x'][:] = affine[0,:]
hdr['srow_y'][:] = affine[1,:]
hdr['srow_z'][:] = affine[2,:]
def get_qform_code(self, code_repr='label'):
''' Return representation of qform code
Parameters
----------
code_repr : string
string giving output form of intent code representation.
Default is 'label'; use 'code' for integer representation.
Returns
-------
qform_code : string or integer
string label for qform code or code
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr['qform_code'] = 3
>>> hdr.get_qform_code()
'talairach'
'''
return self._get_code_field(
code_repr,
'qform_code',
self._xform_codes)
def get_sform_code(self, code_repr='label'):
''' Return representation of sform code
Parameters
----------
code_repr : string
string giving output form of intent code representation.
Default is 'label'; use 'code' for integer representation.
Returns
-------
sform_code : string or integer
string label for sform code or code
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr['sform_code'] = 3
>>> hdr.get_sform_code()
'talairach'
'''
return self._get_code_field(
code_repr,
'sform_code',
self._xform_codes)
def get_slope_inter(self):
''' Get data scaling (slope) and DC offset (intercept) from header data
Parameters
----------
self : header object
Should have fields (keys)
* scl_slope - slope
* scl_inter - intercept
Returns
-------
slope : None or float
scaling (slope). None if there is no valid scaling from
these fields
inter : None or float
offset (intercept). Also None if there is no valid scaling, offset
Examples
--------
>>> fields = {'scl_slope':1,'scl_inter':0}
>>> hdr = Nifti1Header()
>>> hdr.get_slope_inter()
(1.0, 0.0)
>>> hdr['scl_slope'] = 0
>>> hdr.get_slope_inter()
(None, None)
>>> hdr['scl_slope'] = np.nan
>>> hdr.get_slope_inter()
(None, None)
>>> hdr['scl_slope'] = 1
>>> hdr['scl_inter'] = 1
>>> hdr.get_slope_inter()
(1.0, 1.0)
>>> hdr['scl_inter'] = np.inf
>>> hdr.get_slope_inter()
(1.0, 0.0)
'''
scale = float(self['scl_slope'])
dc_offset = float(self['scl_inter'])
if not scale or not np.isfinite(scale):
return None, None
if not np.isfinite(dc_offset):
dc_offset = 0.0
return scale, dc_offset
def set_slope_inter(self, slope, inter):
self._header_data['scl_slope'] = slope
self._header_data['scl_inter'] = inter
def get_dim_info(self):
''' Gets nifti MRI slice etc dimension information
Returns
-------
freq : {None,0,1,2}
           Which data array axis is frequency encode direction
phase : {None,0,1,2}
Which data array axis is phase encode direction
slice : {None,0,1,2}
Which data array axis is slice encode direction
where ``data array`` is the array returned by ``get_data``
Because nifti1 files are natively Fortran indexed:
0 is fastest changing in file
1 is medium changing in file
2 is slowest changing in file
``None`` means the axis appears not to be specified.
Examples
--------
See set_dim_info function
'''
hdr = self._header_data
info = int(hdr['dim_info'])
freq = info & 3
phase = (info >> 2) & 3
slice = (info >> 4) & 3
return (freq-1 if freq else None,
phase-1 if phase else None,
slice-1 if slice else None)
def set_dim_info(self, freq=None, phase=None, slice=None):
''' Sets nifti MRI slice etc dimension information
Parameters
----------
hdr : nifti1 header
freq : {None, 0, 1, 2}
           axis of data array referring to frequency encoding
        phase : {None, 0, 1, 2}
           axis of data array referring to phase encoding
        slice : {None, 0, 1, 2}
           axis of data array referring to slice encoding
``None`` means the axis is not specified.
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_dim_info(1, 2, 0)
>>> hdr.get_dim_info()
(1, 2, 0)
>>> hdr.set_dim_info(freq=1, phase=2, slice=0)
>>> hdr.get_dim_info()
(1, 2, 0)
>>> hdr.set_dim_info()
>>> hdr.get_dim_info()
(None, None, None)
>>> hdr.set_dim_info(freq=1, phase=None, slice=0)
>>> hdr.get_dim_info()
(1, None, 0)
Notes
-----
This is stored in one byte in the header
'''
for inp in (freq, phase, slice):
if inp not in (None, 0, 1, 2):
raise HeaderDataError('Inputs must be in [None, 0, 1, 2]')
info = 0
if not freq is None:
info = info | ((freq+1) & 3)
if not phase is None:
info = info | (((phase+1) & 3) << 2)
if not slice is None:
info = info | (((slice+1) & 3) << 4)
self._header_data['dim_info'] = info
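    # Worked example of the packing above (derived from the code, for reference):
    # freq, phase and slice occupy bit pairs 0-1, 2-3 and 4-5 of dim_info, each
    # stored as axis+1 so that 0 means "unspecified". For example
    # set_dim_info(freq=1, phase=2, slice=0) packs to 0b011110 == 30.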
def get_intent_code(self, code_repr='label'):
''' Return representation of intent code
Parameters
----------
code_repr : string
string giving output form of intent code representation.
Default is 'label'; use 'code' for integer representation.
Returns
-------
intent_code : string or integer
string label for intent code or code
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_intent('t test', (10,), name='some score')
>>> hdr.get_intent_code()
't test'
'''
return self._get_code_field(
code_repr,
'intent_code',
self._intent_codes)
def get_intent(self, code_repr='label'):
''' Get intent code, parameters and name
Parameters
----------
code_repr : string
string giving output form of intent code representation.
Default is 'label'; use 'code' for integer representation.
Returns
-------
code : string or integer
intent code, or string describing code
parameters : tuple
parameters for the intent
name : string
intent name
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_intent('t test', (10,), name='some score')
>>> hdr.get_intent()
('t test', (10.0,), 'some score')
>>> hdr.get_intent('code')
(3, (10.0,), 'some score')
'''
hdr = self._header_data
code = int(hdr['intent_code'])
recode = self.get_intent_code(code_repr)
n_params = len(self._intent_codes.parameters[code])
params = (float(hdr['intent_p%d' % (i+1)]) for i in range(n_params))
return recode, tuple(params), str(hdr['intent_name'])
def set_intent(self, code, params=(), name=''):
''' Set the intent code, parameters and name
If parameters are not specified, assumed to be all zero. Each
intent code has a set number of parameters associated. If you
specify any parameters, then it will need to be the correct number
(e.g the "f test" intent requires 2). However, parameters can
also be set in the file data, so we also allow not setting any
parameters (empty parameter tuple).
Parameters
----------
code : integer or string
code specifying nifti intent
params : list, tuple of scalars
parameters relating to intent (see intent_codes)
defaults to (). Unspecified parameters are set to 0.0
name : string
intent name (description). Defaults to ''
Returns
-------
None
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_intent(0) # unknown code
>>> hdr.set_intent('z score')
>>> hdr.get_intent()
('z score', (), '')
>>> hdr.get_intent('code')
(5, (), '')
>>> hdr.set_intent('t test', (10,), name='some score')
>>> hdr.get_intent()
('t test', (10.0,), 'some score')
>>> hdr.set_intent('f test', (2, 10), name='another score')
>>> hdr.get_intent()
('f test', (2.0, 10.0), 'another score')
>>> hdr.set_intent('f test')
>>> hdr.get_intent()
('f test', (0.0, 0.0), '')
'''
hdr = self._header_data
icode = intent_codes.code[code]
p_descr = intent_codes.parameters[code]
if len(params) and len(params) != len(p_descr):
raise HeaderDataError('Need params of form %s, or empty' % (p_descr,))
all_params = [0] * 3
all_params[:len(params)] = params[:]
for i, param in enumerate(all_params):
hdr['intent_p%d' % (i+1)] = param
hdr['intent_code'] = icode
hdr['intent_name'] = name
def get_slice_duration(self):
''' Get slice duration
Returns
-------
slice_duration : float
time to acquire one slice
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_dim_info(slice=2)
>>> hdr.set_slice_duration(0.3)
>>> print "%0.1f" % hdr.get_slice_duration()
0.3
Notes
-----
The Nifti1 spec appears to require the slice dimension to be
defined for slice_duration to have meaning.
'''
_, _, slice_dim = self.get_dim_info()
if slice_dim is None:
raise HeaderDataError('Slice dimension must be set '
'for duration to be valid')
return float(self._header_data['slice_duration'])
def set_slice_duration(self, duration):
''' Set slice duration
Parameters
----------
duration : scalar
time to acquire one slice
Examples
--------
See ``get_slice_duration``
'''
_, _, slice_dim = self.get_dim_info()
if slice_dim is None:
raise HeaderDataError('Slice dimension must be set '
'for duration to be valid')
self._header_data['slice_duration'] = duration
def get_slice_code(self, code_repr='label'):
''' Return representation of slice order code
Parameters
----------
code_repr : string
string giving output form of slice order code representation.
Default is 'label'; use 'code' for integer representation.
Returns
-------
slice_code : string or integer
string label for slice ordering code or code
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr['slice_code'] = 4 # alternating decreasing
>>> hdr.get_slice_code()
'alternating decreasing'
'''
return self._get_code_field(
code_repr,
'slice_code',
self._slice_order_codes)
def get_slice_times(self):
''' Get slice times from slice timing information
Returns
-------
slice_times : tuple
Times of acquisition of slices, where 0 is the beginning of
the acquisition, ordered by position in file. nifti allows
slices at the top and bottom of the volume to be excluded from
the standard slice timing specification, and calls these
"padding slices". We give padding slices ``None`` as a time
of acquisition
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_dim_info(slice=2)
>>> hdr.set_data_shape((1, 1, 7))
>>> hdr.set_slice_duration(0.1)
We need a function to print out the Nones and floating point
values in a predictable way, for the tests below.
>>> _stringer = lambda val: val is not None and '%2.1f' % val or None
>>> _print_me = lambda s: map(_stringer, s)
The following examples are from the nifti1.h documentation.
>>> hdr['slice_code'] = slice_order_codes['sequential increasing']
>>> _print_me(hdr.get_slice_times())
['0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6']
>>> hdr['slice_start'] = 1
>>> hdr['slice_end'] = 5
>>> _print_me(hdr.get_slice_times())
[None, '0.0', '0.1', '0.2', '0.3', '0.4', None]
>>> hdr['slice_code'] = slice_order_codes['sequential decreasing']
>>> _print_me(hdr.get_slice_times())
[None, '0.4', '0.3', '0.2', '0.1', '0.0', None]
>>> hdr['slice_code'] = slice_order_codes['alternating increasing']
>>> _print_me(hdr.get_slice_times())
[None, '0.0', '0.3', '0.1', '0.4', '0.2', None]
>>> hdr['slice_code'] = slice_order_codes['alternating decreasing']
>>> _print_me(hdr.get_slice_times())
[None, '0.2', '0.4', '0.1', '0.3', '0.0', None]
>>> hdr['slice_code'] = slice_order_codes['alternating increasing 2']
>>> _print_me(hdr.get_slice_times())
[None, '0.2', '0.0', '0.3', '0.1', '0.4', None]
>>> hdr['slice_code'] = slice_order_codes['alternating decreasing 2']
>>> _print_me(hdr.get_slice_times())
[None, '0.4', '0.1', '0.3', '0.0', '0.2', None]
'''
hdr = self._header_data
_, _, slice_dim = self.get_dim_info()
shape = self.get_data_shape()
slice_len = shape[slice_dim]
duration = self.get_slice_duration()
slabel = self.get_slice_code()
if slabel == 'unknown':
raise HeaderDataError('Cannot get slice times when '
                                  'slice code is "unknown"')
slice_start, slice_end = (int(hdr['slice_start']),
int(hdr['slice_end']))
if slice_start < 0:
raise HeaderDataError('slice_start should be >= 0')
if slice_end == 0:
slice_end = slice_len-1
n_timed = slice_end - slice_start + 1
if n_timed < 1:
raise HeaderDataError('slice_end should be > slice_start')
st_order = self._slice_time_order(slabel, n_timed)
times = st_order * duration
return ((None,)*slice_start +
tuple(times) +
(None,)*(slice_len-slice_end-1))
def set_slice_times(self, slice_times):
''' Set slice times into *hdr*
Parameters
----------
slice_times : tuple
tuple of slice times, one value per slice
tuple can include None to indicate no slice time for that slice
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_dim_info(slice=2)
>>> hdr.set_data_shape([1, 1, 7])
>>> hdr.set_slice_duration(0.1)
>>> times = [None, 0.2, 0.4, 0.1, 0.3, 0.0, None]
>>> hdr.set_slice_times(times)
>>> hdr.get_slice_code()
'alternating decreasing'
>>> int(hdr['slice_start'])
1
>>> int(hdr['slice_end'])
5
'''
# Check if number of slices matches header
hdr = self._header_data
_, _, slice_dim = self.get_dim_info()
shape = self.get_data_shape()
slice_len = shape[slice_dim]
if slice_len != len(slice_times):
raise HeaderDataError('Number of slice times does not '
'match number of slices')
# Extract Nones at beginning and end. Check for others
for ind, time in enumerate(slice_times):
if time is not None:
slice_start = ind
break
else:
            raise HeaderDataError('Slice times cannot all be None')
for ind, time in enumerate(slice_times[::-1]):
if time is not None:
slice_end = slice_len-ind-1
break
timed = slice_times[slice_start:slice_end+1]
for time in timed:
if time is None:
raise HeaderDataError('Cannot have None in middle '
'of slice time vector')
# Find slice duration, check times are compatible with single
# duration
tdiffs = np.diff(np.sort(timed))
if not np.allclose(np.diff(tdiffs), 0):
raise HeaderDataError('Slice times not compatible with '
'single slice duration')
duration = np.mean(tdiffs)
# To slice time order
st_order = np.round(np.array(timed) / duration)
# Check if slice times fit known schemes
n_timed = len(timed)
labels = self._slice_order_codes.value_set('label')
labels.remove('unknown')
for label in labels:
if np.all(st_order == self._slice_time_order(
label,
n_timed)):
break
else:
raise HeaderDataError('slice ordering of %s fits '
'with no known scheme' % st_order)
# Set values into header
hdr['slice_start'] = slice_start
hdr['slice_end'] = slice_end
hdr['slice_duration'] = duration
hdr['slice_code'] = slice_order_codes.code[label]
def for_file_pair(self, is_pair=True):
''' Adapt header to separate or same image and header file
Parameters
----------
is_pair : bool, optional
True if adapting header to file pair state, False for single
Returns
-------
hdr : Nifti1Header
copied and possibly modified header
Examples
--------
The header starts off as being for a single file
>>> hdr = Nifti1Header()
>>> str(hdr['magic'])
'n+1'
>>> hdr.get_data_offset()
352
But we can switch it to be for two files (a pair)
>>> pair_hdr = hdr.for_file_pair()
>>> str(pair_hdr['magic'])
'ni1'
>>> pair_hdr.get_data_offset()
0
The original header is not affected (a copy is returned)
>>> hdr.get_data_offset()
352
Back to single again
>>> unpair_hdr = pair_hdr.for_file_pair(False)
>>> str(unpair_hdr['magic'])
'n+1'
>>> unpair_hdr.get_data_offset()
352
'''
hdr = self.copy()
if not is_pair:
# one file version
if hdr['magic'] == 'n+1':
if hdr['vox_offset'] < 352:
hdr['vox_offset'] = 352
return hdr
hdr['magic'] = 'n+1'
hdr['vox_offset'] = 352
return hdr
# two file version
if hdr['magic'] == 'ni1':
return hdr
hdr['magic'] = 'ni1'
hdr['vox_offset'] = 0
return hdr
def _slice_time_order(self, slabel, n_slices):
''' Supporting function to give time order of slices from label '''
if slabel == 'sequential increasing':
sp_ind_time_order = range(n_slices)
elif slabel == 'sequential decreasing':
sp_ind_time_order = range(n_slices)[::-1]
elif slabel == 'alternating increasing':
sp_ind_time_order = range(0,n_slices,2) + range(1, n_slices, 2)
elif slabel == 'alternating decreasing':
sp_ind_time_order = range(n_slices-1,-1,-2) + range(n_slices-2,-1,-2)
elif slabel == 'alternating increasing 2':
sp_ind_time_order = range(1,n_slices,2) + range(0, n_slices, 2)
elif slabel == 'alternating decreasing 2':
sp_ind_time_order = range(n_slices-2,-1,-2) + range(n_slices-1,-1,-2)
else:
raise HeaderDataError('We do not handle slice ordering "%s"'
% slabel)
return np.argsort(sp_ind_time_order)
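    # For example, 'alternating increasing' over five slices acquires spatial
    # slices in the order [0, 2, 4, 1, 3]; the argsort above converts that into
    # per-slice temporal positions [0, 3, 1, 4, 2], which get_slice_times then
    # scales by the slice duration (matching the doctest values shown earlier).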
''' Checks only below here '''
@classmethod
def _get_checks(klass):
# We need to return our own versions of - e.g. chk_datatype, to
# pick up the Nifti datatypes from our class
return (klass._chk_sizeof_hdr,
klass._chk_datatype,
klass._chk_bitpix,
klass._chk_pixdims,
klass._chk_scale_slope,
klass._chk_scale_inter,
klass._chk_qfac,
klass._chk_magic_offset,
klass._chk_qform_code,
klass._chk_sform_code)
@staticmethod
def _chk_scale_slope(hdr, fix=True):
ret = Report(hdr, HeaderDataError)
scale = hdr['scl_slope']
if scale and np.isfinite(scale):
return ret
        ret.problem_msg = '"scl_slope" is %s; should be non-zero and finite' % scale
if fix:
hdr['scl_slope'] = 1
ret.fix_msg = 'setting "scl_slope" to 1'
else:
ret.level = 30
return ret
@staticmethod
def _chk_scale_inter(hdr, fix=True):
ret = Report(hdr, HeaderDataError)
scale = hdr['scl_inter']
if np.isfinite(scale):
return ret
ret.problem_msg = '"scl_inter" is %s; should be finite' % scale
if fix:
hdr['scl_inter'] = 0
ret.fix_msg = 'setting "scl_inter" to 0'
else:
ret.level = 30
return ret
@staticmethod
def _chk_qfac(hdr, fix=True):
ret = Report(hdr, HeaderDataError)
if hdr['pixdim'][0] in (-1, 1):
return ret
ret.problem_msg = 'pixdim[0] (qfac) should be 1 (default) or -1'
if fix:
hdr['pixdim'][0] = 1
ret.fix_msg = 'setting qfac to 1'
else:
ret.level = 20
return ret
@staticmethod
def _chk_magic_offset(hdr, fix=True):
ret = Report(hdr, HeaderDataError)
magic = hdr['magic']
offset = hdr['vox_offset']
if magic == 'ni1': # two files
if offset == 0:
return ret
            ret.problem_msg = ('vox offset should be 0 (is %s) '
                               'with two-file nifti images' % offset)
ret.level = 40
if fix:
ret.fix_msg = 'leaving at current value'
elif magic == 'n+1': # one file
if offset >= 352:
if not offset % 16:
return ret
else:
                    # XXX Michael wonders whether this warning is really valid. NIfTI
# says that each extension's length has to be a multiple of
# 16, therefore the test should be (offset-352) % 16 and
                    # not offset % 16, or does SPM have additional artificial
# limitations?
ret.problem_msg = ('vox offset (=%s) not divisible '
'by 16, not SPM compatible' % offset)
ret.level = 30
if fix:
ret.fix_msg = 'leaving at current value'
return ret
ret.problem_msg = ('vox offset %d too low for '
'single file nifti1' % offset)
if fix:
hdr['vox_offset'] = 352
ret.fix_msg = 'setting to minimum value of 352'
else:
ret.level = 50
else: # unrecognized nii magic string, oh dear
ret.problem_msg = 'magic string %s is not valid' % magic
ret.level = 50
if fix:
ret.fix_msg = 'leaving as is, but future errors are likely'
return ret
@classmethod
def _chk_qform_code(klass, hdr, fix=True):
ret = Report(hdr, HeaderDataError)
code = int(hdr['qform_code'])
if int(hdr['qform_code']) in klass._xform_codes.value_set():
return ret
ret.problem_msg = 'qform code %d not valid' % code
if fix:
hdr['qform_code'] = 0
ret.fix_msg = 'setting to 0'
else:
ret.level = 30
return ret
@classmethod
def _chk_sform_code(klass, hdr, fix=True):
ret = Report(hdr, HeaderDataError)
code = int(hdr['sform_code'])
if int(hdr['sform_code']) in klass._xform_codes.value_set():
return ret
ret.problem_msg = 'sform code %d not valid' % code
if fix:
hdr['sform_code'] = 0
ret.fix_msg = 'setting to 0'
else:
ret.level = 30
return ret
class Nifti1Image(analyze.AnalyzeImage):
_header_maker = Nifti1Header
def _set_header(self, header=None):
SpatialImage._set_header(self, header)
@staticmethod
def filespec_to_files(filespec):
ft1 = filetuples.FileTuples(
(('header', '.nii'), ('image', '.nii')),
ignored_suffixes=('.gz', '.bz2')
)
ft2 = filetuples.FileTuples(
(('header', '.hdr'), ('image', '.img')),
ignored_suffixes=('.gz', '.bz2')
)
for ftups in (ft1, ft2):
try:
ftups.set_filenames(filespec)
except filetuples.FileTuplesError:
continue
break
else:
raise ValueError('Filespec "%s" does not '
'look like Nifti1' % filespec)
files = dict(zip(('header', 'image'), ftups.get_filenames()))
return files
@classmethod
def from_files(klass, files):
fname = files['header']
fileobj = allopen(fname)
header = klass._header_maker.from_fileobj(fileobj)
extra = None
# handle extensions
# assume the fileptr is just after header (magic field)
# determine how much to read when parsing the extensions
if header['vox_offset'] == 0:
# read till the end of the header
extsize = -1
else:
extsize = header['vox_offset'] - fileobj.tell()
extensions = Nifti1Extensions.from_fileobj(fileobj, extsize)
# XXX maybe always do that?
if len(extensions):
extra = {'extensions': extensions}
affine = header.get_best_affine()
ret = klass(None, affine, header=header, extra=extra)
ret._files = files
return ret
def to_files(self, files=None):
''' Write image to files passed, or self._files
'''
# XXX the whole method is candidate for refactoring, since it started as
# verbatim copy of AnalyzeImage.to_files()
if files is None:
files = self._files
if files is None:
raise ValueError('Need files to write data')
data = self.get_data()
# Adapt header to possible two<->one file difference
is_pair = files['header'] != files['image']
hdr = self.get_header().for_file_pair(is_pair)
# if any extensions, figure out necessary vox_offset for extensions to
# fit
if self.extra.has_key('extensions') and len(self.extra['extensions']):
hdr['vox_offset'] = len(hdr.binaryblock) \
+ self.extra['extensions'].get_sizeondisk()
slope, inter, mn, mx = adapt_header(hdr, data)
hdrf = allopen(files['header'], 'wb')
hdr.write_to(hdrf)
# write all extensions to file
# assumes that the file ptr is right after the magic string
if not self.extra.has_key('extensions'):
# no extensions: be nice and write appropriate flag
hdrf.write(np.array((0,0,0,0), dtype=np.int8).tostring())
else:
self.extra['extensions'].write_to(hdrf)
if is_pair:
imgf = allopen(files['image'], 'wb')
else: # single file for header and image
imgf = hdrf
# streams like bz2 do not allow seeks, even forward. We
# check where to go, and write zeros up until the data part
# of the file
offset = hdr.get_data_offset()
diff = offset-hdrf.tell()
if diff > 0:
hdrf.write('\x00' * diff)
write_data(hdr, data, imgf, inter, slope, mn, mx)
self._header = hdr
self._files = files
def _update_header(self):
''' Harmonize header with image data and affine
See AnalyzeImage._update_header for more examples
Examples
--------
>>> data = np.zeros((2,3,4))
>>> affine = np.diag([1.0,2.0,3.0,1.0])
>>> img = Nifti1Image(data, affine)
>>> hdr = img.get_header()
>>> np.all(hdr.get_qform() == affine)
True
>>> np.all(hdr.get_sform() == affine)
True
'''
super(Nifti1Image, self)._update_header()
hdr = self._header
if not self._affine is None:
hdr.set_sform(self._affine)
hdr.set_qform(self._affine)
load = Nifti1Image.load
save = Nifti1Image.save
|
py | 1a378154ad0f264f41f6e9c3989ed43aff7cce3c | """functions for evaluating embeddings and neighbor networks"""
import scanpy as sc
import numpy as np
import pandas as pd
def fidelity(adata, name, groupby='label'):
    """Look in .obsp['<name>_connectivities'] for the neighbor graph and return
    the fraction of its edges that connect cells sharing the same label."""
labels = adata.obs[groupby].values
adj = (adata.obsp[name+'_'*(len(name)>0)+'connectivities']>0)
edgelist = zip(*adj.nonzero())
good_edges=0
bad_edges=0
for i,j in edgelist:
if labels[i] == labels[j]:
good_edges += 1
else:
bad_edges += 1
return float(good_edges)/float(good_edges + bad_edges)
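# A minimal usage sketch (assumes `adata` already carries a neighbor graph,
# e.g. written by `sc.pp.neighbors(adata, use_rep='X_scvi', key_added='scvi')`,
# and a 'label' column in .obs; the key names here are illustrative only):
#
#   score = fidelity(adata, 'scvi', groupby='label')
#   # score is the fraction of graph edges whose endpoints share a label;
#   # passing name='' falls back to the default 'connectivities' slot.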
def group_fidelity(adata, name, groupby='label', pairwise=False):
"""matrix containing the fraction of edges that stay within each group
if pairwise=True, returns the fraction of edges between each pair of groups
i.e. position (i,j) is the proportion of edges that land in group j, among
all edges originating in group i"""
classes = adata.obs[groupby].values
G = (adata.obsp[name+'_'*(len(name)>0)+'connectivities']>0)
class_list = np.unique(classes)
result = np.zeros((len(class_list), len(class_list)))
for i, c in enumerate(class_list):
# print(i)
# print(c)
inds = np.where(np.array(classes) == c)[0]
G_sub = G[inds, :] # only look at vertices in c
class_freqs = [0] * len(class_list)
for j in range(len(inds)):
row = G_sub[j, :].todense().tolist()[0]
row = np.array(row).astype(int)
# print(G_sub[j,:].tolist()[0])
nbr_inds = np.where(row > 0)[0]
# print(nbr_inds)
nbr_classes = np.array([classes[x] for x in nbr_inds])
for k, c2 in enumerate(class_list):
class_freqs[k] += np.sum(nbr_classes == c2)
# print(class_freqs)
result[i, :] = class_freqs
result = result / result.sum(axis=1)[:, None]
result = pd.DataFrame(result)
result.columns = class_list
result.index = class_list
if pairwise:
return (result)
else:
diags = np.diag(result.values)
return pd.DataFrame({'cluster':class_list, 'fidelity':diags, 'method':name})
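# Usage sketch (same illustrative assumptions as above):
#
#   per_cluster = group_fidelity(adata, 'scvi', groupby='label')
#   pair_matrix = group_fidelity(adata, 'scvi', groupby='label', pairwise=True)
#
# `per_cluster` is a tidy per-cluster fidelity table; `pair_matrix` is the full
# cluster-by-cluster edge-destination matrix whose rows sum to 1.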
def plot_umap(adata, name, n_neighbors=10, rerun=False, **kwargs):
"""looks in .uns[name] for neighbors info, and uses that to store and plot a UMAP"""
if name not in adata.uns or rerun:
sc.pp.neighbors(adata, use_rep='X_'+name, key_added=name, n_neighbors=n_neighbors)
if 'X_'+name+'_umap' not in adata.obsm or rerun:
sc.tl.umap(adata, neighbors_key=name)
adata.obsm['X_{}_umap'.format(name)] = adata.obsm['X_umap']
sc.pl.embedding(adata, basis='{}_umap'.format(name), neighbors_key=name, **kwargs)
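# Usage sketch (illustrative): plot_umap(adata, 'scvi', color='label') builds a
# kNN graph on obsm['X_scvi'] if one is not cached, stores the embedding in
# obsm['X_scvi_umap'], and plots it colored by label.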
def good_bad_edges(adata, name, groupby='label'):
labels = adata.obs[groupby].values
adj = (adata.obsp[name + '_' * (len(name) > 0) + 'connectivities'] > 0)
edgelist = zip(*adj.nonzero())
good_edges = []
bad_edges = []
for i, j in edgelist:
if labels[i] == labels[j]:
good_edges.append((i,j))
else:
bad_edges.append((i,j))
return (good_edges, bad_edges) |
py | 1a3783242beccfce88b54caf24dba4be6c2a76f1 | import os
import time
import yaml
from platform import python_version
from unittest import skipIf
import bzt
from bzt.engine import EXEC
from bzt.modules import ConsolidatingAggregator
from bzt.modules._selenium import GeckoDriver
from bzt.modules.functional import FuncSamplesReader, LoadSamplesReader, FunctionalAggregator
from bzt.modules._apiritif import ApiritifNoseExecutor
from bzt.modules._pytest import PyTestExecutor
from bzt.modules.robot import RobotExecutor
from tests.unit import RESOURCES_DIR, ExecutorTestCase
from tests.unit.modules._selenium import SeleniumTestCase, MockPythonTool, MockDriver
from bzt.utils import EXE_SUFFIX, is_windows
class TestSeleniumApiritifRunner(SeleniumTestCase):
def obj_prepare(self):
tmp_tool = bzt.modules._apiritif.executor.Apiritif
try:
bzt.modules._apiritif.executor.Apiritif = MockPythonTool
self.obj.prepare()
finally:
bzt.modules._apiritif.executor.Apiritif = tmp_tool
def test_selenium_prepare_python_single(self):
"""
Check if script exists in working dir
:return:
"""
self.obj.execution.merge({"scenario": {
"script": RESOURCES_DIR + "selenium/python/test_blazemeter_fail.py"
}})
self.obj_prepare()
def test_selenium_prepare_python_folder(self):
"""
Check if scripts exist in working dir
:return:
"""
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "selenium/python/"}})
self.obj_prepare()
def test_selenium_startup_shutdown_python_single(self):
"""
run tests from .py file
:return:
"""
self.configure({
'execution': {
"iterations": 1,
'scenario': {'script': RESOURCES_DIR + 'selenium/python/'},
'executor': 'selenium'
},
'reporting': [{'module': 'junit-xml'}]
})
self.obj.execution.merge({"scenario": {
"script": RESOURCES_DIR + "selenium/python/test_blazemeter_fail.py"
}})
self.obj_prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "apiritif.0.csv")))
@skipIf(python_version() >= '3.8' and is_windows(), "Temporary disabled")
def test_selenium_startup_shutdown_python_folder(self):
"""
run tests from .py files
:return:
"""
self.configure({
'execution': {
'iterations': 1,
'scenario': {'script': RESOURCES_DIR + 'selenium/python/'},
'executor': 'selenium'
},
'reporting': [{'module': 'junit-xml'}]
})
self.obj_prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
api_log = os.path.join(self.obj.engine.artifacts_dir, "apiritif.0.csv")
nose_log = os.path.join(self.obj.engine.artifacts_dir, "apiritif.out")
self.assertTrue(os.path.exists(api_log))
with open(nose_log) as fds:
content = fds.read()
self.assertIn("Transaction started::", content)
self.assertIn("Transaction ended::", content)
def test_runner_fail_no_test_found(self):
"""
Check that Python Apiritif runner fails if no tests were found
:return:
"""
self.configure({
EXEC: {
"iterations": 1,
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/dummy.py"}
}
})
self.obj_prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
diagnostics = "\n".join(self.obj.get_error_diagnostics())
self.assertIn("Nothing to test.", diagnostics)
def test_resource_files_collection_remote_apiritif(self):
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "selenium/python/"}})
self.assertEqual(len(self.obj.resource_files()), 1)
def test_long_iterations_value(self):
self.engine.aggregator = ConsolidatingAggregator()
self.engine.aggregator.engine = self.engine
self.obj.execution.merge({
"iterations": 2 ** 64,
"scenario": {
"requests": [
"http://blazedemo.com/",
],
}
})
self.obj_prepare()
try:
self.obj.startup()
for _ in range(3):
self.assertFalse(self.obj.check())
self.engine.aggregator.check()
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
def test_check_tools_installed_conf(self):
self.obj.execution.merge({"scenario": {"requests": ["http://blazedemo.com/"]}})
self.obj_prepare()
self.assertTrue(self.obj.selenium.called)
self.assertTrue(self.obj.runner.selenium.called)
self.assertTrue(self.obj.runner.apiritif.called)
def test_check_tools_installed_script(self):
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "selenium/python/"}})
self.obj_prepare()
self.assertTrue(self.obj.selenium.called)
self.assertTrue(self.obj.runner.selenium.called)
self.assertTrue(self.obj.runner.apiritif.called)
class TestApiritifRunner(ExecutorTestCase):
EXECUTOR = ApiritifNoseExecutor
def obj_prepare(self):
tmp_tool = bzt.modules._apiritif.executor.Apiritif
try:
bzt.modules._apiritif.executor.Apiritif = MockPythonTool
self.obj.prepare()
finally:
bzt.modules._apiritif.executor.Apiritif = tmp_tool
def test_new_flow(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/",
{"set-variables": {"name1": "val1"}},
{
"transaction": "second",
"do": [
"/other.html",
"/reserve.php",
{
"transaction": "third",
"do": [
"/${name1}"
]
}
]}]}}]})
self.obj_prepare()
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "test_requests.py")))
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEquals(self.obj.process, None)
def test_apiritif_generated_requests(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/",
"/reserve.php"]}}]})
self.obj_prepare()
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "test_requests.py")))
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEquals(self.obj.process, None)
def test_apiritif_transactions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "apiritif/test_transactions.py"
}
}]
})
self.obj_prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEquals(self.obj.process, None)
def test_report_reading(self):
reader = FuncSamplesReader(RESOURCES_DIR + "apiritif/transactions.ldjson", self.obj.engine, self.obj.log)
items = list(reader.read(last_pass=True))
self.assertEqual(9, len(items))
self.assertEqual(items[0].get_short_name(), 'TestRequests.test_1_single_request')
self.assertEqual(items[1].get_short_name(), 'TestRequests.test_2_multiple_requests')
self.assertEqual(items[2].get_short_name(), 'test_3_toplevel_transaction.Transaction')
self.assertEqual(items[3].get_short_name(), 'test_4_mixed_transaction.Transaction')
self.assertEqual(items[4].get_short_name(), 'test_5_multiple_transactions.Transaction 1')
self.assertEqual(items[5].get_short_name(), 'test_5_multiple_transactions.Transaction 2')
self.assertEqual(items[6].get_short_name(), 'test_6_transaction_obj.Label')
self.assertEqual(items[7].get_short_name(), 'test_7_transaction_fail.Label')
self.assertEqual(items[8].get_short_name(), 'test_8_transaction_attach.Label')
def test_report_transactions_as_failed(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"default-address": "http://httpbin.org",
"requests": [{
"label": "failure by 404",
"url": "/status/404",
}]
}
}]
})
self.obj.engine.aggregator = FunctionalAggregator()
self.obj_prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEquals(self.obj.process, None)
reader = LoadSamplesReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif.0.ldjson"), self.obj.log)
samples = list(reader._read(last_pass=True))
self.assertEqual(len(samples), 1)
tstmp, label, concur, rtm, cnn, ltc, rcd, error, trname, byte_count = samples[0]
self.assertIsNotNone(error)
def test_status_skipped(self):
self.configure({
"execution": [{
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "functional/test_all.py"
}
}]
})
self.obj.engine.aggregator = FunctionalAggregator()
self.obj_prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
reader = FuncSamplesReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif.0.ldjson"),
self.obj.engine, self.obj.log)
samples = list(reader.read(last_pass=True))
self.assertEqual(len(samples), 4)
self.assertIsNotNone(samples[-1].status)
class TestPyTestExecutor(ExecutorTestCase):
EXECUTOR = PyTestExecutor
CMD_LINE = None
def setUp(self):
super().setUp()
bzt.modules._selenium.ChromeDriver = MockDriver
bzt.modules._selenium.GeckoDriver = MockDriver
def start_subprocess(self, args, **kwargs):
self.CMD_LINE = args
def obj_prepare(self):
tmp_tool = bzt.modules._pytest.PyTest
try:
bzt.modules._pytest.PyTest = MockPythonTool
self.obj.prepare()
finally:
bzt.modules._pytest.PyTest = tmp_tool
def full_run(self, config):
self.obj.execution.merge(config)
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
def test_report_file(self):
self.full_run({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.assertTrue('--report-file' in self.CMD_LINE)
val = self.CMD_LINE[self.CMD_LINE.index('--report-file') + 1]
self.assertTrue(val.endswith("PyTestExecutor.ldjson"))
def test_iterations(self):
self.full_run({
"iterations": 10,
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.assertTrue('-i 10' in ' '.join(self.CMD_LINE))
def test_hold(self):
self.full_run({
"hold-for": "3s",
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.assertTrue('-d 3.0' in ' '.join(self.CMD_LINE))
def test_script(self):
self.full_run({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.assertTrue(self.CMD_LINE[-1].endswith("test_single.py"))
def test_blazedemo(self):
self.obj.engine.check_interval = 0.1
self.obj.execution.merge({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_blazedemo.py"
}
})
self.obj_prepare()
driver = self.obj._get_tool(MockDriver, tool_path=self.obj.settings.get('geckodriver').get('path'))
if not driver.check_if_installed():
driver.install()
self.obj.env.add_path({"PATH": driver.get_driver_dir()})
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
def test_package(self):
self.obj.engine.check_interval = 0.1
self.obj.execution.merge({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/"
}
})
self.obj_prepare()
driver = self.obj._get_tool(MockDriver, tool_path=self.obj.settings.get('geckodriver').get('path'))
if not driver.check_if_installed():
driver.install()
self.obj.env.add_path({"PATH": driver.get_driver_dir()})
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
def test_additional_args(self):
additional_args = "--foo --bar"
self.obj.runner_path = RESOURCES_DIR + "selenium/pytest/bin/runner.py"
self.full_run({
"scenario": {
"additional-args": additional_args,
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.assertTrue(additional_args in " ".join(self.CMD_LINE))
class TestRobotExecutor(ExecutorTestCase):
EXECUTOR = RobotExecutor
CMD_LINE = None
def start_subprocess(self, args, **kwargs):
self.CMD_LINE = args
def test_full_single_script(self):
self.configure({
"execution": [{
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
tmp_tool = bzt.modules.robot.Robot
try:
bzt.modules.robot.Robot = MockPythonTool
self.obj.prepare()
self.obj.settings["interpreter"] = RESOURCES_DIR + "selenium/robot/robot-mock" + EXE_SUFFIX
self.obj.startup()
finally:
bzt.modules.robot.Robot = tmp_tool
self.obj.shutdown()
self.obj.post_process()
self.assertFalse(self.obj.has_results())
self.assertNotEquals(self.obj.process, None)
lines = open(self.obj.report_file).readlines()
self.assertEqual(1, len(lines))
def full_run(self, config):
self.configure(config)
tmp_tool = bzt.modules.robot.Robot
try:
bzt.modules.robot.Robot = MockPythonTool
self.obj.prepare()
finally:
bzt.modules.robot.Robot = tmp_tool
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
def test_hold(self):
self.full_run({
"execution": [{
"hold-for": "5s",
"iterations": 3,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.assertTrue('--duration' in self.CMD_LINE)
dur_val = self.CMD_LINE[self.CMD_LINE.index('--duration') + 1]
self.assertEqual(dur_val, '5.0')
def test_report_file(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.assertTrue('--report-file' in self.CMD_LINE)
report_file = self.CMD_LINE[self.CMD_LINE.index('--report-file') + 1]
self.assertTrue(report_file.endswith("RobotExecutor.ldjson"))
def test_iterations(self):
self.full_run({
"execution": [{
"iterations": 3,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.assertTrue('--iterations' in self.CMD_LINE)
iters_val = self.CMD_LINE[self.CMD_LINE.index('--iterations') + 1]
self.assertEqual(iters_val, '3')
def test_variables(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"variables": {
"USERNAME": "janedoe",
},
"script": RESOURCES_DIR + "selenium/robot/simple/test_novar.robot",
}
}]
})
self.assertTrue('--variablefile' in self.CMD_LINE)
var_file = self.CMD_LINE[self.CMD_LINE.index('--variablefile') + 1]
self.assertTrue(var_file.endswith("robot-vars.yaml"))
self.assertEqual('janedoe', yaml.full_load(open(var_file).read())['USERNAME'])
def test_variables_file(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"variables": RESOURCES_DIR + "selenium/robot/simple/vars.yaml",
"script": RESOURCES_DIR + "selenium/robot/simple/test_novar.robot",
}
}]
})
self.assertTrue('--variablefile' in self.CMD_LINE)
var_file = self.CMD_LINE[self.CMD_LINE.index('--variablefile') + 1]
self.assertEqual(var_file, os.path.normpath(RESOURCES_DIR + "selenium/robot/simple/vars.yaml"))
def test_output_file(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.assertTrue('--outputfile' in self.CMD_LINE)
out_file = self.CMD_LINE[self.CMD_LINE.index('--outputfile') + 1]
self.assertTrue(out_file.endswith("output.xml"))
def test_log_file(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.assertTrue('--logfile' in self.CMD_LINE)
log_file = self.CMD_LINE[self.CMD_LINE.index('--logfile') + 1]
self.assertTrue(log_file.endswith("log.html"))
def test_single_tag(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"tags": "create",
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot",
}
}]
})
self.assertTrue('--include' in self.CMD_LINE)
tags = self.CMD_LINE[self.CMD_LINE.index('--include') + 1]
self.assertEqual(tags, 'create')
def test_multiple_tags(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"tags": "create,database",
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot",
}
}]
})
self.assertTrue('--include' in self.CMD_LINE)
tags = self.CMD_LINE[self.CMD_LINE.index('--include') + 1]
self.assertEqual(tags, 'create,database')
|
py | 1a3784089863fdf78892d1ab6bf9d9f8685fcfc4 | from setuptools import find_packages
from setuptools import setup
package_name = 'ament_cppcheck'
setup(
name=package_name,
version='0.8.0',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['setuptools'],
zip_safe=True,
author='Dirk Thomas',
author_email='[email protected]',
maintainer='Dirk Thomas',
maintainer_email='[email protected]',
url='https://github.com/ament/ament_lint',
download_url='https://github.com/ament/ament_lint/releases',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Static code analysis on C/C++ code using Cppcheck.',
long_description="""\
The ability to perform static code analysis on C/C++ code using Cppcheck
and generate xUnit test result files.""",
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'ament_cppcheck = ament_cppcheck.main:main',
],
},
)
|
py | 1a3784346cfdf58ef37cb47ae1de3f62de68fe60 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
#
# mininode.py - Bitcoin P2P network half-a-node
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of https://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a bitcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# bitcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import time
import sys
import random
from binascii import hexlify
from io import BytesIO
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
from pyblake2 import blake2b
try:
import yescrypt
except ImportError as e:
exit("Please run 'sudo pip install https://github.com/wo01/yescrypt_python/archive/master.zip'")
BIP0031_VERSION = 60000
SPROUT_PROTO_VERSION = 170002 # past bip-31 for ping/pong
OVERWINTER_PROTO_VERSION = 170003
SAPLING_PROTO_VERSION = 170006
BLOSSOM_PROTO_VERSION = 170008
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
SPROUT_VERSION_GROUP_ID = 0x00000000
OVERWINTER_VERSION_GROUP_ID = 0x02E7D970
SAPLING_VERSION_GROUP_ID = 0x9023E50A
# No transaction format change in Blossom.
MAX_INV_SZ = 50000
COIN = 100000000 # 1 zec in zatoshis
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
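# A minimal sketch of the intended usage (the attribute names below are
# illustrative, not defined in this module): test logic reads state that the
# network thread updates, only while holding the lock, e.g.
#
#   with mininode_lock:
#       last_pong = test_node.last_pong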
# Serialization/deserialization tools
def bfh(x):
if sys.version_info[0] >= 3:
return bytes.fromhex(x)
else:
return x.decode("hex")
def rev_hex(s):
return bh2u(bfh(s)[::-1])
def bh2u(x):
    return hexlify(x).decode('ascii')  # hexlify is imported above; bare binascii is not
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def nuparams(branch_id, height):
return '-nuparams=%x:%d' % (branch_id, height)
def fundingstream(idx, start_height, end_height, addrs):
return '-fundingstream=%d:%d:%d:%s' % (idx, start_height, end_height, ",".join(addrs))
def ser_compactsize(n):
if n < 253:
return struct.pack("B", n)
elif n < 0x10000:
return struct.pack("<BH", 253, n)
elif n < 0x100000000:
return struct.pack("<BI", 254, n)
return struct.pack("<BQ", 255, n)
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return struct.pack("B", len(s)) + s
elif len(s) < 0x10000:
return struct.pack("<BH", 253, len(s)) + s
elif len(s) < 0x100000000:
return struct.pack("<BI", 254, len(s)) + s
return struct.pack("<BQ", 255, len(s)) + s
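# Round-trip sketch for the two helpers above (illustrative values):
#   ser_string(b"abc") -> b"\x03abc"            # one-byte compact-size length prefix
#   deser_string(BytesIO(b"\x03abc")) -> b"abc"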
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def block_work_from_compact(c):
target = uint256_from_compact(c)
return 2**256 // (target + 1)
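# Worked example of the compact "nBits" target encoding above (the value
# 0x1d00ffff is the well-known Bitcoin genesis difficulty, used here purely
# for illustration):
#   nbytes   = 0x1d = 29, mantissa = 0x00ffff
#   target   = 0x00ffff << (8 * (29 - 3))
#            = 0x00000000ffff0000000000000000000000000000000000000000000000000000
#   block_work_from_compact(0x1d00ffff) == 2**256 // (target + 1) == 0x100010001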
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += struct.pack("<i", i)
return r
def deser_char_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = struct.unpack("<B", f.read(1))[0]
r.append(t)
return r
def ser_char_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += struct.pack("B", i)
return r
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: b"Error",
1: b"TX",
2: b"Block"}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = SPROUT_PROTO_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%r)" \
            % (self.nVersion, self.vHave)
class SpendDescription(object):
def __init__(self):
self.cv = None
self.anchor = None
self.nullifier = None
self.rk = None
self.zkproof = None
self.spendAuthSig = None
def deserialize(self, f):
self.cv = deser_uint256(f)
self.anchor = deser_uint256(f)
self.nullifier = deser_uint256(f)
self.rk = deser_uint256(f)
self.zkproof = f.read(192)
self.spendAuthSig = f.read(64)
def serialize(self):
r = b""
r += ser_uint256(self.cv)
r += ser_uint256(self.anchor)
r += ser_uint256(self.nullifier)
r += ser_uint256(self.rk)
r += self.zkproof
r += self.spendAuthSig
return r
def __repr__(self):
        return "SpendDescription(cv=%064x, anchor=%064x, nullifier=%064x, rk=%064x, zkproof=%r, spendAuthSig=%r)" \
            % (self.cv, self.anchor, self.nullifier, self.rk, self.zkproof, self.spendAuthSig)
class OutputDescription(object):
def __init__(self):
self.cv = None
self.cmu = None
self.ephemeralKey = None
self.encCiphertext = None
self.outCiphertext = None
self.zkproof = None
def deserialize(self, f):
self.cv = deser_uint256(f)
self.cmu = deser_uint256(f)
self.ephemeralKey = deser_uint256(f)
self.encCiphertext = f.read(580)
self.outCiphertext = f.read(80)
self.zkproof = f.read(192)
def serialize(self):
r = b""
r += ser_uint256(self.cv)
r += ser_uint256(self.cmu)
r += ser_uint256(self.ephemeralKey)
r += self.encCiphertext
r += self.outCiphertext
r += self.zkproof
return r
def __repr__(self):
        return "OutputDescription(cv=%064x, cmu=%064x, ephemeralKey=%064x, encCiphertext=%r, outCiphertext=%r, zkproof=%r)" \
            % (self.cv, self.cmu, self.ephemeralKey, self.encCiphertext, self.outCiphertext, self.zkproof)
G1_PREFIX_MASK = 0x02
G2_PREFIX_MASK = 0x0a
class ZCProof(object):
def __init__(self):
self.g_A = None
self.g_A_prime = None
self.g_B = None
self.g_B_prime = None
self.g_C = None
self.g_C_prime = None
self.g_K = None
self.g_H = None
def deserialize(self, f):
        def deser_g1(f):  # nested helper, not a method; takes only the stream
leadingByte = struct.unpack("<B", f.read(1))[0]
return {
'y_lsb': leadingByte & 1,
'x': f.read(32),
}
        def deser_g2(f):
leadingByte = struct.unpack("<B", f.read(1))[0]
return {
'y_gt': leadingByte & 1,
'x': f.read(64),
}
self.g_A = deser_g1(f)
self.g_A_prime = deser_g1(f)
self.g_B = deser_g2(f)
self.g_B_prime = deser_g1(f)
self.g_C = deser_g1(f)
self.g_C_prime = deser_g1(f)
self.g_K = deser_g1(f)
self.g_H = deser_g1(f)
def serialize(self):
        def ser_g1(p):
            # prefix byte followed by the 32-byte x coordinate, kept as bytes
            return struct.pack("B", G1_PREFIX_MASK | p['y_lsb']) + p['x']
        def ser_g2(p):
            return struct.pack("B", G2_PREFIX_MASK | p['y_gt']) + p['x']
r = b""
r += ser_g1(self.g_A)
r += ser_g1(self.g_A_prime)
r += ser_g2(self.g_B)
r += ser_g1(self.g_B_prime)
r += ser_g1(self.g_C)
r += ser_g1(self.g_C_prime)
r += ser_g1(self.g_K)
r += ser_g1(self.g_H)
return r
def __repr__(self):
return "ZCProof(g_A=%r g_A_prime=%r g_B=%r g_B_prime=%r g_C=%r g_C_prime=%r g_K=%r g_H=%r)" \
% (self.g_A, self.g_A_prime,
self.g_B, self.g_B_prime,
self.g_C, self.g_C_prime,
self.g_K, self.g_H)
ZC_NUM_JS_INPUTS = 2
ZC_NUM_JS_OUTPUTS = 2
ZC_NOTEPLAINTEXT_LEADING = 1
ZC_V_SIZE = 8
ZC_RHO_SIZE = 32
ZC_R_SIZE = 32
ZC_MEMO_SIZE = 512
ZC_NOTEPLAINTEXT_SIZE = (
ZC_NOTEPLAINTEXT_LEADING +
ZC_V_SIZE +
ZC_RHO_SIZE +
ZC_R_SIZE +
ZC_MEMO_SIZE
)
NOTEENCRYPTION_AUTH_BYTES = 16
ZC_NOTECIPHERTEXT_SIZE = (
ZC_NOTEPLAINTEXT_SIZE +
NOTEENCRYPTION_AUTH_BYTES
)
class JSDescription(object):
def __init__(self):
self.vpub_old = 0
self.vpub_new = 0
self.anchor = 0
self.nullifiers = [0] * ZC_NUM_JS_INPUTS
self.commitments = [0] * ZC_NUM_JS_OUTPUTS
self.onetimePubKey = 0
self.randomSeed = 0
self.macs = [0] * ZC_NUM_JS_INPUTS
self.proof = None
self.ciphertexts = [None] * ZC_NUM_JS_OUTPUTS
def deserialize(self, f):
self.vpub_old = struct.unpack("<q", f.read(8))[0]
self.vpub_new = struct.unpack("<q", f.read(8))[0]
self.anchor = deser_uint256(f)
self.nullifiers = []
for i in range(ZC_NUM_JS_INPUTS):
self.nullifiers.append(deser_uint256(f))
self.commitments = []
for i in range(ZC_NUM_JS_OUTPUTS):
self.commitments.append(deser_uint256(f))
self.onetimePubKey = deser_uint256(f)
self.randomSeed = deser_uint256(f)
self.macs = []
for i in range(ZC_NUM_JS_INPUTS):
self.macs.append(deser_uint256(f))
self.proof = ZCProof()
self.proof.deserialize(f)
self.ciphertexts = []
for i in range(ZC_NUM_JS_OUTPUTS):
self.ciphertexts.append(f.read(ZC_NOTECIPHERTEXT_SIZE))
def serialize(self):
r = b""
r += struct.pack("<q", self.vpub_old)
r += struct.pack("<q", self.vpub_new)
r += ser_uint256(self.anchor)
for i in range(ZC_NUM_JS_INPUTS):
r += ser_uint256(self.nullifiers[i])
for i in range(ZC_NUM_JS_OUTPUTS):
r += ser_uint256(self.commitments[i])
r += ser_uint256(self.onetimePubKey)
r += ser_uint256(self.randomSeed)
for i in range(ZC_NUM_JS_INPUTS):
r += ser_uint256(self.macs[i])
r += self.proof.serialize()
for i in range(ZC_NUM_JS_OUTPUTS):
r += ser_uint256(self.ciphertexts[i])
return r
def __repr__(self):
return "JSDescription(vpub_old=%i vpub_new=%i anchor=%064x onetimePubKey=%064x randomSeed=%064x proof=%r)" \
% (self.vpub_old, self.vpub_new, self.anchor,
self.onetimePubKey, self.randomSeed, self.proof)
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
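# Serialization sketch for COutPoint (illustrative values): the encoding is the
# 32-byte little-endian txid followed by a 4-byte little-endian output index,
# 36 bytes in total, e.g.
#   COutPoint(hash=1, n=0).serialize() == b"\x01" + b"\x00" * 35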
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (self.prevout, hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // 100000000, self.nValue % 100000000,
hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.fOverwintered = True
self.nVersion = 4
self.nVersionGroupId = SAPLING_VERSION_GROUP_ID
self.vin = []
self.vout = []
self.nLockTime = 0
self.nExpiryHeight = 0
self.valueBalance = 0
self.shieldedSpends = []
self.shieldedOutputs = []
self.vJoinSplit = []
self.joinSplitPubKey = None
self.joinSplitSig = None
self.bindingSig = None
self.sha256 = None
self.hash = None
else:
self.fOverwintered = tx.fOverwintered
self.nVersion = tx.nVersion
self.nVersionGroupId = tx.nVersionGroupId
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.nExpiryHeight = tx.nExpiryHeight
self.valueBalance = tx.valueBalance
self.shieldedSpends = copy.deepcopy(tx.shieldedSpends)
self.shieldedOutputs = copy.deepcopy(tx.shieldedOutputs)
self.vJoinSplit = copy.deepcopy(tx.vJoinSplit)
self.joinSplitPubKey = tx.joinSplitPubKey
self.joinSplitSig = tx.joinSplitSig
self.bindingSig = tx.bindingSig
self.sha256 = None
self.hash = None
def deserialize(self, f):
header = struct.unpack("<I", f.read(4))[0]
self.fOverwintered = bool(header >> 31)
self.nVersion = header & 0x7FFFFFFF
self.nVersionGroupId = (struct.unpack("<I", f.read(4))[0]
if self.fOverwintered else 0)
isOverwinterV3 = (self.fOverwintered and
self.nVersionGroupId == OVERWINTER_VERSION_GROUP_ID and
self.nVersion == 3)
isSaplingV4 = (self.fOverwintered and
self.nVersionGroupId == SAPLING_VERSION_GROUP_ID and
self.nVersion == 4)
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
if isOverwinterV3 or isSaplingV4:
self.nExpiryHeight = struct.unpack("<I", f.read(4))[0]
if isSaplingV4:
self.valueBalance = struct.unpack("<q", f.read(8))[0]
self.shieldedSpends = deser_vector(f, SpendDescription)
self.shieldedOutputs = deser_vector(f, OutputDescription)
if self.nVersion >= 2:
self.vJoinSplit = deser_vector(f, JSDescription)
if len(self.vJoinSplit) > 0:
self.joinSplitPubKey = deser_uint256(f)
self.joinSplitSig = f.read(64)
if isSaplingV4 and not (len(self.shieldedSpends) == 0 and len(self.shieldedOutputs) == 0):
self.bindingSig = f.read(64)
self.sha256 = None
self.hash = None
def serialize(self):
header = (int(self.fOverwintered)<<31) | self.nVersion
isOverwinterV3 = (self.fOverwintered and
self.nVersionGroupId == OVERWINTER_VERSION_GROUP_ID and
self.nVersion == 3)
isSaplingV4 = (self.fOverwintered and
self.nVersionGroupId == SAPLING_VERSION_GROUP_ID and
self.nVersion == 4)
r = b""
r += struct.pack("<I", header)
if self.fOverwintered:
r += struct.pack("<I", self.nVersionGroupId)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
if isOverwinterV3 or isSaplingV4:
r += struct.pack("<I", self.nExpiryHeight)
if isSaplingV4:
r += struct.pack("<q", self.valueBalance)
r += ser_vector(self.shieldedSpends)
r += ser_vector(self.shieldedOutputs)
if self.nVersion >= 2:
r += ser_vector(self.vJoinSplit)
if len(self.vJoinSplit) > 0:
r += ser_uint256(self.joinSplitPubKey)
r += self.joinSplitSig
if isSaplingV4 and not (len(self.shieldedSpends) == 0 and len(self.shieldedOutputs) == 0):
r += self.bindingSig
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = hash256(self.serialize())[::-1].hex()
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 214160000 * 100000000:
return False
return True
def __repr__(self):
r = ("CTransaction(fOverwintered=%r nVersion=%i nVersionGroupId=0x%08x "
"vin=%r vout=%r nLockTime=%i nExpiryHeight=%i "
"valueBalance=%i shieldedSpends=%r shieldedOutputs=%r"
% (self.fOverwintered, self.nVersion, self.nVersionGroupId,
self.vin, self.vout, self.nLockTime, self.nExpiryHeight,
self.valueBalance, self.shieldedSpends, self.shieldedOutputs))
if self.nVersion >= 2:
r += " vJoinSplit=%r" % (self.vJoinSplit,)
if len(self.vJoinSplit) > 0:
                r += " joinSplitPubKey=%064x joinSplitSig=%r" \
                    % (self.joinSplitPubKey, self.joinSplitSig)
if len(self.shieldedSpends) > 0 or len(self.shieldedOutputs) > 0:
            r += " bindingSig=%r" % (self.bindingSig,)
r += ")"
return r
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.hashFinalSaplingRoot = header.hashFinalSaplingRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.hashFinalSaplingRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.hashFinalSaplingRoot = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.hashFinalSaplingRoot)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.hashFinalSaplingRoot)
self.sha256 = uint256_from_str(hash256(r))
self.hash = hash256(r)[::-1].hex()
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashFinalSaplingRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.hashFinalSaplingRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = b""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
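    # Note on the loop above: when a level holds an odd number of hashes the
    # last entry pairs with itself (i2 == i), e.g. three leaves [a, b, c]
    # reduce to [H(a+b), H(c+c)] and then to the root H(H(a+b) + H(c+c)).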
def solve(self):
target = uint256_from_compact(self.nBits)
self.nNonce = 0
while True:
_powhash = rev_hex(bh2u(yescrypt.getPoWHash(super(CBlock, self).serialize())))
pow = int('0x' + _powhash, 16)
if pow <= target:
self.rehash()
return
self.nNonce += 1
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashFinalSaplingRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%r)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
self.hashFinalSaplingRoot, time.ctime(self.nTime), self.nBits,
self.nNonce, self.vtx)
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self, protocol_version=SPROUT_PROTO_VERSION):
self.nVersion = protocol_version
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
self.addrTo, self.addrFrom, self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%r)" % (self.addrs,)
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_notfound(object):
command = b"notfound"
def __init__(self):
self.inv = []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_notfound(inv=%r)" % (self.inv,)
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_filteradd(object):
command = b"filteradd"
def __init__(self):
self.data = b""
def deserialize(self, f):
self.data = deser_string(f)
def serialize(self):
return ser_string(self.data)
def __repr__(self):
return "msg_filteradd(data=%r)" % (self.data,)
class msg_filterclear(object):
command = b"filterclear"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_filterclear()"
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# Derived classes should call this function once to set the message map
# which associates the derived classes' functions to incoming messages
def create_callback_map(self):
self.cbmap = {
b"version": self.on_version,
b"verack": self.on_verack,
b"addr": self.on_addr,
b"alert": self.on_alert,
b"inv": self.on_inv,
b"getdata": self.on_getdata,
b"notfound": self.on_notfound,
b"getblocks": self.on_getblocks,
b"tx": self.on_tx,
b"block": self.on_block,
b"getaddr": self.on_getaddr,
b"ping": self.on_ping,
b"pong": self.on_pong,
b"headers": self.on_headers,
b"getheaders": self.on_getheaders,
b"reject": self.on_reject,
b"mempool": self.on_mempool
}
def deliver(self, conn, message):
with mininode_lock:
try:
self.cbmap[message.command](conn, message)
except:
print("ERROR delivering %r (%s)" % (message,
sys.exc_info()[0]))
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(SPROUT_PROTO_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_notfound(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"notfound": msg_notfound,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool
}
MAGIC_BYTES = {
"mainnet": b"\x4b\x6f\x74\x6f", # mainnet
"testnet3": b"\x54\x6f\x6b\x6f", # testnet3
"regtest": b"\x52\x65\x6b\x6f" # regtest
}
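    # The magic values above decode to the ASCII strings "Koto", "Toko" and
    # "Reko"; got_data() compares the first four bytes of every received frame
    # against the entry for self.network to reject traffic from other networks.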
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", protocol_version=SAPLING_PROTO_VERSION):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
        self.state = b"connecting"  # kept as bytes to match the b"connected"/b"closed" states set below
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version(protocol_version)
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print('MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \
+ str(dstport) + ' using version ' + str(protocol_version))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = b"connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = b"closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %r" % (self.recvbuf,))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum %r" % (self.recvbuf,))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
                    self.show_debug_msg("Unknown command: %r %r" % (command, msg))
except Exception as e:
print('got_data:', repr(e))
# import traceback
# traceback.print_tb(sys.exc_info()[2])
def send_message(self, message, pushbuf=False):
if self.state != b"connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
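    # Frame layout used by send_message() above and parsed back in got_data():
    #   4-byte network magic | 12-byte command, NUL-padded | 4-byte little-endian
    #   payload length | 4-byte checksum (first four bytes of the double-SHA256
    #   of the payload, only when the negotiated version is >= 209) | payload.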
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
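# Minimal usage sketch (illustrative only): the address, port and polling loop
# below are assumptions about how callers typically drive this framework, not
# part of the original module.  The function is defined but never called here.
def _example_usage(dstaddr="127.0.0.1", dstport=18444):
    class ExampleCallbacks(NodeConnCB):
        def __init__(self):
            NodeConnCB.__init__(self)
            self.create_callback_map()
    callbacks = ExampleCallbacks()
    conn = NodeConn(dstaddr, dstport, rpc=None, callback=callbacks, net="regtest")
    NetworkThread().start()
    # NodeConnCB.on_version()/on_verack() complete the handshake for us.
    while not callbacks.verack_received:
        time.sleep(0.05)
    conn.send_message(msg_ping(nonce=1))
    conn.disconnect_node()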
|
py | 1a378442b3c1651f0d68efa9bd6bf6d792b28ffd | from FileSystem_Core.File_System_Core import File_System_Core
a=File_System_Core()
print(a.Os_Detail.Python_Ver()) |
py | 1a37851676b3230fb7b2a43a019a3d68c8a0a926 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineImagesOperations:
"""VirtualMachineImagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
location: str,
publisher_name: str,
offer: str,
skus: str,
version: str,
**kwargs: Any
) -> "_models.VirtualMachineImage":
"""Gets a virtual machine image.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:param skus: A valid image SKU.
:type skus: str
:param version: A valid image SKU version.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_12_01.models.VirtualMachineImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'offer': self._serialize.url("offer", offer, 'str'),
'skus': self._serialize.url("skus", skus, 'str'),
'version': self._serialize.url("version", version, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}'} # type: ignore
async def list(
self,
location: str,
publisher_name: str,
offer: str,
skus: str,
expand: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of all virtual machine image versions for the specified location, publisher, offer,
and SKU.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:param skus: A valid image SKU.
:type skus: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:param top:
:type top: int
:param orderby:
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2020_12_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'offer': self._serialize.url("offer", offer, 'str'),
'skus': self._serialize.url("skus", skus, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions'} # type: ignore
async def list_offers(
self,
location: str,
publisher_name: str,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of virtual machine image offers for the specified location and publisher.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2020_12_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.list_offers.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_offers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers'} # type: ignore
async def list_publishers(
self,
location: str,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of virtual machine image publishers for the specified Azure location.
:param location: The name of a supported Azure region.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2020_12_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.list_publishers.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_publishers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers'} # type: ignore
async def list_skus(
self,
location: str,
publisher_name: str,
offer: str,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of virtual machine image SKUs for the specified location, publisher, and offer.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2020_12_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.list_skus.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'offer': self._serialize.url("offer", offer, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus'} # type: ignore
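# Minimal usage sketch (illustrative only).  These operations are normally
# reached through the generated aio management client; the credential and
# client classes imported below are assumptions about the caller's environment
# and are not referenced anywhere else in this module.
async def _example_list_image_versions(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.compute.v2020_12_01.aio import ComputeManagementClient
    credential = DefaultAzureCredential()
    async with ComputeManagementClient(credential, subscription_id) as client:
        # Maps to VirtualMachineImagesOperations.list defined above.
        versions = await client.virtual_machine_images.list(
            location="westus2",
            publisher_name="Canonical",
            offer="UbuntuServer",
            skus="18.04-LTS")
        for image in versions:
            print(image.name)
    await credential.close()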
|
py | 1a3785bd339808a0b08877a979f9b678b941f65f | from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Car, Driver, Reporter
class ManyToOneNullTests(TestCase):
def setUp(self):
# Create a Reporter.
self.r = Reporter(name='John Smith')
self.r.save()
# Create an Article.
self.a = Article(headline="First", reporter=self.r)
self.a.save()
# Create an Article via the Reporter object.
self.a2 = self.r.article_set.create(headline="Second")
# Create an Article with no Reporter by passing "reporter=None".
self.a3 = Article(headline="Third", reporter=None)
self.a3.save()
# Create another article and reporter
self.r2 = Reporter(name='Paul Jones')
self.r2.save()
self.a4 = self.r2.article_set.create(headline='Fourth')
def test_get_related(self):
self.assertEqual(self.a.reporter.id, self.r.id)
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
def test_created_via_related_set(self):
self.assertEqual(self.a2.reporter.id, self.r.id)
def test_related_set(self):
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='Fir'),
['<Article: First>'])
self.assertEqual(self.r.article_set.count(), 2)
def test_created_without_related(self):
self.assertEqual(self.a3.reporter, None)
        # Need to re-fetch a3 to refresh the cache
a3 = Article.objects.get(pk=self.a3.pk)
with self.assertRaises(AttributeError):
getattr(a3.reporter, 'id')
# Accessing an article's 'reporter' attribute returns None
# if the reporter is set to None.
self.assertEqual(a3.reporter, None)
# To retrieve the articles with no reporters set, use "reporter__isnull=True".
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Third>'])
# We can achieve the same thing by filtering for the case where the
# reporter is None.
self.assertQuerysetEqual(Article.objects.filter(reporter=None),
['<Article: Third>'])
# Set the reporter for the Third article
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.r.article_set.add(a3)
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>', '<Article: Third>'])
# Remove an article from the set, and check that it was removed.
self.r.article_set.remove(a3)
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Third>'])
def test_remove_from_wrong_set(self):
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
# Try to remove a4 from a set it does not belong to
with self.assertRaises(Reporter.DoesNotExist):
self.r.article_set.remove(self.a4)
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
def test_set(self):
# Use manager.set() to allocate ForeignKey. Null is legal, so existing
# members of the set that are not in the assignment set are set to null.
self.r2.article_set.set([self.a2, self.a3])
self.assertQuerysetEqual(self.r2.article_set.all(),
['<Article: Second>', '<Article: Third>'])
# Use manager.set(clear=True)
self.r2.article_set.set([self.a3, self.a4], clear=True)
self.assertQuerysetEqual(self.r2.article_set.all(),
['<Article: Fourth>', '<Article: Third>'])
# Clear the rest of the set
self.r2.article_set.set([])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Fourth>', '<Article: Second>', '<Article: Third>'])
def test_assign_clear_related_set(self):
# Use descriptor assignment to allocate ForeignKey. Null is legal, so
# existing members of the set that are not in the assignment set are
# set to null.
self.r2.article_set.set([self.a2, self.a3])
self.assertQuerysetEqual(self.r2.article_set.all(),
['<Article: Second>', '<Article: Third>'])
# Clear the rest of the set
self.r.article_set.clear()
self.assertQuerysetEqual(self.r.article_set.all(), [])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: First>', '<Article: Fourth>'])
def test_assign_with_queryset(self):
# Ensure that querysets used in reverse FK assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# RelatedManager.set() (#19816).
self.r2.article_set.set([self.a2, self.a3])
qs = self.r2.article_set.filter(headline="Second")
self.r2.article_set.set(qs)
self.assertEqual(1, self.r2.article_set.count())
self.assertEqual(1, qs.count())
def test_add_efficiency(self):
r = Reporter.objects.create()
articles = []
for _ in range(3):
articles.append(Article.objects.create())
with self.assertNumQueries(1):
r.article_set.add(*articles)
self.assertEqual(r.article_set.count(), 3)
def test_clear_efficiency(self):
r = Reporter.objects.create()
for _ in range(3):
r.article_set.create()
with self.assertNumQueries(1):
r.article_set.clear()
self.assertEqual(r.article_set.count(), 0)
def test_related_null_to_field(self):
c1 = Car.objects.create()
d1 = Driver.objects.create()
self.assertIs(d1.car, None)
with self.assertNumQueries(0):
self.assertEqual(list(c1.drivers.all()), [])
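# Rough sketch of the models these tests assume (illustrative only; the real
# definitions live in the companion models.py, which is not part of this file):
#   Reporter: name = CharField(...)
#   Article: headline = CharField(...); reporter = ForeignKey(Reporter, null=True);
#            Meta.ordering = ('headline',) so the compared querysets are alphabetical
#   Car / Driver: Driver.car = ForeignKey(Car, null=True, related_name='drivers'),
#                 pointing at a nullable unique field via to_field (hence
#                 test_related_null_to_field).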
|
py | 1a378647628d2ad1e66d49e26d718e8c94e39d7b | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
from typing import Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'chicago_taxi_portable_beam'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
module_file: Text, serving_model_dir: Text,
metadata_path: Text,
worker_parallelism: int) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
examples = external_input(data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input=examples)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
# Performs anomaly detection based on statistics and data schema.
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=module_file)
# Uses user-provided Python function that implements a model using TF-Learn.
trainer = Trainer(
module_file=module_file,
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
# Get the latest blessed model for model validation.
model_resolver = ResolverNode(
instance_name='latest_blessed_model_resolver',
resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
# perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name='eval')],
slicing_specs=[
tfma.SlicingSpec(),
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
],
metrics_specs=[
tfma.MetricsSpec(
thresholds={
'binary_accuracy':
tfma.config.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.6}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
})
])
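  # With the threshold above, a candidate is blessed only if its binary_accuracy
  # is at least 0.6 and does not fall more than 1e-10 below the baseline model.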
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
# Change threshold will be ignored if there is no baseline (first run).
eval_config=eval_config)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen, statistics_gen, schema_gen, example_validator, transform,
trainer, model_resolver, evaluator, pusher
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
# LINT.IfChange
beam_pipeline_args=[
# -------------------------- Beam Args --------------------------.
'--runner=PortableRunner',
# Points to the job server started in
# setup_beam_on_{flink, spark}.sh
'--job_endpoint=localhost:8099',
'--environment_type=LOOPBACK',
'--sdk_worker_parallelism=%d' % worker_parallelism,
'--experiments=use_loopback_process_worker=True',
# Setting environment_cache_millis to practically infinity enables
# continual reuse of Beam SDK workers, improving performance.
'--environment_cache_millis=1000000',
# TODO(BEAM-7199): Obviate the need for setting pre_optimize=all. # pylint: disable=g-bad-todo
'--experiments=pre_optimize=all',
# Note; We use 100 worker threads to mitigate the issue with
# scheduling work between the Beam runner and SDK harness. Flink
# and Spark can process unlimited work items concurrently while
# SdkHarness can only process 1 work item per worker thread.
# Having 100 threads will let 100 tasks execute concurrently
# avoiding scheduling issue in most cases. In case the threads are
# exhausted, beam prints the relevant message in the log.
# TODO(BEAM-8151) Remove worker_threads=100 after we start using a # pylint: disable=g-bad-todo
# virtually unlimited thread pool by default.
'--experiments=worker_threads=100',
# ---------------------- End of Beam Args -----------------------.
# --------- Flink runner Args (ignored by Spark runner) ---------.
'--parallelism=%d' % worker_parallelism,
# TODO(FLINK-10672): Obviate setting BATCH_FORCED. # pylint: disable=g-bad-todo
'--execution_mode_for_batch=BATCH_FORCED',
# ------------------ End of Flink runner Args -------------------.
],
# LINT.ThenChange(setup/setup_beam_on_spark.sh)
# LINT.ThenChange(setup/setup_beam_on_flink.sh)
)
# To run this pipeline from the python CLI:
# $python taxi_pipeline_portable_beam.py
if __name__ == '__main__':
absl.logging.set_verbosity(absl.logging.INFO)
# LINT.IfChange
try:
parallelism = multiprocessing.cpu_count()
except NotImplementedError:
parallelism = 1
absl.logging.info('Using %d process(es) for Beam pipeline execution.' %
parallelism)
# LINT.ThenChange(setup/setup_beam_on_flink.sh)
BeamDagRunner().run(
_create_pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
data_root=_data_root,
module_file=_module_file,
serving_model_dir=_serving_model_dir,
metadata_path=_metadata_path,
worker_parallelism=parallelism))
|
py | 1a3787cacaaede952c6adbf6cf5e76b0c89b0f79 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .bounding_box import BoxList
from maskrcnn_benchmark.layers import nms as _box_nms
def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="score"):
"""
Performs non-maximum suppression on a boxlist, with scores specified
in a boxlist field via score_field.
Arguments:
boxlist(BoxList)
nms_thresh (float)
max_proposals (int): if > 0, then only the top max_proposals are kept
            after non-maximum suppression
score_field (str)
"""
if nms_thresh <= 0:
return boxlist
mode = boxlist.mode
boxlist = boxlist.convert("xyxy")
boxes = boxlist.bbox
score = boxlist.get_field(score_field)
keep = _box_nms(boxes, score, nms_thresh)
if max_proposals > 0:
keep = keep[:max_proposals]
boxlist = boxlist[keep]
return boxlist.convert(mode)
def remove_small_boxes(boxlist, min_size):
"""
Only keep boxes with both sides >= min_size
Arguments:
        boxlist (BoxList)
min_size (int)
"""
# TODO maybe add an API for querying the ws / hs
xywh_boxes = boxlist.convert("xywh").bbox
_, _, ws, hs = xywh_boxes.unbind(dim=1)
keep = ((ws >= min_size) & (hs >= min_size)).nonzero().squeeze(1)
return boxlist[keep]
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def boxlist_iou(boxlist1, boxlist2):
    """Compute the intersection over union of two sets of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Arguments:
box1: (BoxList) bounding boxes, sized [N,4].
box2: (BoxList) bounding boxes, sized [M,4].
Returns:
(tensor) iou, sized [N,M].
Reference:
https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
"""
if boxlist1.size != boxlist2.size:
raise RuntimeError(
"boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2)
)
N = len(boxlist1)
M = len(boxlist2)
area1 = boxlist1.area()
area2 = boxlist2.area()
box1, box2 = boxlist1.bbox, boxlist2.bbox
lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2]
rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2]
TO_REMOVE = 1
wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
iou = inter / (area1[:, None] + area2 - inter)
return iou
# TODO redundant, remove
def _cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def cat_boxlist(bboxes):
"""
Concatenates a list of BoxList (having the same image size) into a
single BoxList
Arguments:
bboxes (list[BoxList])
"""
assert isinstance(bboxes, (list, tuple))
assert all(isinstance(bbox, BoxList) for bbox in bboxes)
size = bboxes[0].size
assert all(bbox.size == size for bbox in bboxes)
mode = bboxes[0].mode
assert all(bbox.mode == mode for bbox in bboxes)
fields = set(bboxes[0].fields())
assert all(set(bbox.fields()) == fields for bbox in bboxes)
cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode)
for field in fields:
data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0)
cat_boxes.add_field(field, data)
return cat_boxes
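# Minimal usage sketch (illustrative only; the coordinates and image size are
# made up):
#   boxes_a = BoxList(torch.tensor([[0., 0., 10., 10.]]), (100, 100), mode="xyxy")
#   boxes_b = BoxList(torch.tensor([[5., 5., 15., 15.]]), (100, 100), mode="xyxy")
#   overlap = boxlist_iou(boxes_a, boxes_b)   # tensor of shape [1, 1]
#   merged = cat_boxlist([boxes_a, boxes_b])  # one BoxList holding both boxes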
|
py | 1a37899a2a1f8131e48c53bfac6d6d4ae4e0ce96 | from compiler import *
####################################################################################################################
# Each mesh record contains the following fields:
# 1) Mesh id: used for referencing meshes in other files. The prefix mesh_ is automatically added before each mesh id.
# 2) Mesh flags. See header_meshes.py for a list of available flags
# 3) Mesh resource name: Resource name of the mesh
# 4) Mesh translation on x axis: Will be done automatically when the mesh is loaded
# 5) Mesh translation on y axis: Will be done automatically when the mesh is loaded
# 6) Mesh translation on z axis: Will be done automatically when the mesh is loaded
# 7) Mesh rotation angle over x axis: Will be done automatically when the mesh is loaded
# 8) Mesh rotation angle over y axis: Will be done automatically when the mesh is loaded
# 9) Mesh rotation angle over z axis: Will be done automatically when the mesh is loaded
# 10) Mesh x scale: Will be done automatically when the mesh is loaded
# 11) Mesh y scale: Will be done automatically when the mesh is loaded
# 12) Mesh z scale: Will be done automatically when the mesh is loaded
####################################################################################################################
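# Example (illustrative): the first record below,
# ("pic_bandits", 0, "pic_bandits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
# defines a mesh referenced elsewhere as mesh_pic_bandits, with no flags,
# resource "pic_bandits", no translation or rotation, and a uniform scale of 1.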
meshes = [
("pic_bandits", 0, "pic_bandits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_mb_warrior_1", 0, "pic_mb_warrior_1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_messenger", 0, "pic_messenger", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_prisoner_man", 0, "pic_prisoner_man", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_prisoner_fem", 0, "pic_prisoner_fem", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_prisoner_wilderness", 0, "pic_prisoner_wilderness", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_siege_sighted", 0, "pic_siege_sighted", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_siege_sighted_fem", 0, "pic_siege_sighted_fem", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_camp", 0, "pic_camp", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_payment", 0, "pic_payment", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_escape_1", 0, "pic_escape_1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_escape_1_fem", 0, "pic_escape_1_fem", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_victory", 0, "pic_victory", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_defeat", 0, "pic_defeat", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_wounded", 0, "pic_wounded", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_wounded_fem", 0, "pic_wounded_fem", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_steppe_bandits", 0, "pic_steppe_bandits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_mountain_bandits", 0, "pic_mountain_bandits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_sea_raiders", 0, "pic_sea_raiders", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_deserters", 0, "pic_deserters", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_forest_bandits", 0, "pic_forest_bandits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_cattle", 0, "pic_cattle", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_looted_village", 0, "pic_looted_village", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_village_p", 0, "pic_village_p", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_village_s", 0, "pic_village_s", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_village_w", 0, "pic_village_w", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_recruits", 0, "pic_recruits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_arms_swadian", 0, "pic_arms_swadian", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_arms_vaegir", 0, "pic_arms_vaegir", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_arms_khergit", 0, "pic_arms_khergit", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_arms_nord", 0, "pic_arms_nord", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_arms_rhodok", 0, "pic_arms_rhodok", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_sarranid_arms", 0, "pic_sarranid_arms", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_castle1", 0, "pic_castle1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_castledes", 0, "pic_castledes", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_castlesnow", 0, "pic_castlesnow", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_charge", 0, "pic_charge", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_khergit", 0, "pic_khergit", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_nord", 0, "pic_nord", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_rhodock", 0, "pic_rhodock", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_sally_out", 0, "pic_sally_out", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_siege_attack", 0, "pic_siege_attack", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_swad", 0, "pic_swad", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_town1", 0, "pic_town1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_towndes", 0, "pic_towndes", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_townriot", 0, "pic_townriot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_townsnow", 0, "pic_townsnow", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_vaegir", 0, "pic_vaegir", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_villageriot", 0, "pic_villageriot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_sarranid_encounter", 0, "pic_sarranid_encounter", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_score_a", 0, "mp_score_a", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_score_b", 0, "mp_score_b", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("portrait_blend_out", 0, "portrait_blend_out", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("load_window", 0, "load_window", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("checkbox_off", render_order_plus_1, "checkbox_off", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("checkbox_on", render_order_plus_1, "checkbox_on", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("white_plane", 0, "white_plane", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("white_dot", 0, "white_dot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("player_dot", 0, "player_dot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_infantry", 0, "flag_infantry", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_archers", 0, "flag_archers", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_cavalry", 0, "flag_cavalry", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("inv_slot", 0, "inv_slot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ingame_menu", 0, "mp_ingame_menu", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_left", 0, "mp_inventory_left", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_right", 0, "mp_inventory_right", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_choose", 0, "mp_inventory_choose", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_glove", 0, "mp_inventory_slot_glove", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_horse", 0, "mp_inventory_slot_horse", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_armor", 0, "mp_inventory_slot_armor", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_helmet", 0, "mp_inventory_slot_helmet", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_boot", 0, "mp_inventory_slot_boot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_empty", 0, "mp_inventory_slot_empty", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_equip", 0, "mp_inventory_slot_equip", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_left_arrow", 0, "mp_inventory_left_arrow", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_right_arrow", 0, "mp_inventory_right_arrow", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_main", 0, "mp_ui_host_main", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_1", 0, "mp_ui_host_maps_a1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_2", 0, "mp_ui_host_maps_a2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_3", 0, "mp_ui_host_maps_c", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_4", 0, "mp_ui_host_maps_ruinedf", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_5", 0, "mp_ui_host_maps_a1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_6", 0, "mp_ui_host_maps_a1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_7", 0, "mp_ui_host_maps_fieldby", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_8", 0, "mp_ui_host_maps_castle2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_9", 0, "mp_ui_host_maps_snovyv", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_10", 0, "mp_ui_host_maps_castle3", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_11", 0, "mp_ui_host_maps_c1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_12", 0, "mp_ui_host_maps_c2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_13", 0, "mp_ui_host_maps_c3", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_randomp", 0, "mp_ui_host_maps_randomp", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_randoms", 0, "mp_ui_host_maps_randoms", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_command_panel", 0, "mp_ui_command_panel", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_command_border_l", 0, "mp_ui_command_border_l", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_command_border_r", 0, "mp_ui_command_border_r", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_welcome_panel", 0, "mp_ui_welcome_panel", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_sw", 0, "flag_project_sw", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_vg", 0, "flag_project_vg", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_kh", 0, "flag_project_kh", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_nd", 0, "flag_project_nd", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_rh", 0, "flag_project_rh", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_sr", 0, "flag_project_sr", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_projects_end", 0, "0", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_sw_miss", 0, "flag_project_sw_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_vg_miss", 0, "flag_project_vg_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_kh_miss", 0, "flag_project_kh_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_nd_miss", 0, "flag_project_nd_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_rh_miss", 0, "flag_project_rh_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_sr_miss", 0, "flag_project_sr_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_misses_end", 0, "0", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("color_picker", 0, "color_picker", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("custom_map_banner_01", 0, "custom_map_banner_01", 0, 0, 0, -90, 0, 90, 1, 1, 1),
("custom_map_banner_02", 0, "custom_map_banner_02", 0, 0, 0, -90, 0, 90, 1, 1, 1),
("custom_map_banner_03", 0, "custom_map_banner_03", 0, 0, 0, -90, 0, 90, 1, 1, 1),
("custom_banner_01", 0, "custom_banner_01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("custom_banner_02", 0, "custom_banner_02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("custom_banner_bg", 0, "custom_banner_bg", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg01", 0, "custom_banner_fg01", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg02", 0, "custom_banner_fg02", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg03", 0, "custom_banner_fg03", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg04", 0, "custom_banner_fg04", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg05", 0, "custom_banner_fg05", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg06", 0, "custom_banner_fg06", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg07", 0, "custom_banner_fg07", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg08", 0, "custom_banner_fg08", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg09", 0, "custom_banner_fg09", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg10", 0, "custom_banner_fg10", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg11", 0, "custom_banner_fg11", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg12", 0, "custom_banner_fg12", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg13", 0, "custom_banner_fg13", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg14", 0, "custom_banner_fg14", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg15", 0, "custom_banner_fg15", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg16", 0, "custom_banner_fg16", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg17", 0, "custom_banner_fg17", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg18", 0, "custom_banner_fg18", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg19", 0, "custom_banner_fg19", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg20", 0, "custom_banner_fg20", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg21", 0, "custom_banner_fg21", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg22", 0, "custom_banner_fg22", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg23", 0, "custom_banner_fg23", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_01", 0, "custom_banner_charge_01", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_02", 0, "custom_banner_charge_02", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_03", 0, "custom_banner_charge_03", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_04", 0, "custom_banner_charge_04", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_05", 0, "custom_banner_charge_05", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_06", 0, "custom_banner_charge_06", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_07", 0, "custom_banner_charge_07", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_08", 0, "custom_banner_charge_08", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_09", 0, "custom_banner_charge_09", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_10", 0, "custom_banner_charge_10", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_11", 0, "custom_banner_charge_11", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_12", 0, "custom_banner_charge_12", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_13", 0, "custom_banner_charge_13", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_14", 0, "custom_banner_charge_14", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_15", 0, "custom_banner_charge_15", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_16", 0, "custom_banner_charge_16", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_17", 0, "custom_banner_charge_17", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_18", 0, "custom_banner_charge_18", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_19", 0, "custom_banner_charge_19", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_20", 0, "custom_banner_charge_20", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_21", 0, "custom_banner_charge_21", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_22", 0, "custom_banner_charge_22", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_23", 0, "custom_banner_charge_23", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_24", 0, "custom_banner_charge_24", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_25", 0, "custom_banner_charge_25", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_26", 0, "custom_banner_charge_26", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_27", 0, "custom_banner_charge_27", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_28", 0, "custom_banner_charge_28", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_29", 0, "custom_banner_charge_29", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_30", 0, "custom_banner_charge_30", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_31", 0, "custom_banner_charge_31", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_32", 0, "custom_banner_charge_32", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_33", 0, "custom_banner_charge_33", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_34", 0, "custom_banner_charge_34", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_35", 0, "custom_banner_charge_35", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_36", 0, "custom_banner_charge_36", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_37", 0, "custom_banner_charge_37", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_38", 0, "custom_banner_charge_38", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_39", 0, "custom_banner_charge_39", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_40", 0, "custom_banner_charge_40", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_41", 0, "custom_banner_charge_41", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_42", 0, "custom_banner_charge_42", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_43", 0, "custom_banner_charge_43", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_44", 0, "custom_banner_charge_44", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_45", 0, "custom_banner_charge_45", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_46", 0, "custom_banner_charge_46", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_custom_banner", 0, "tableau_mesh_custom_banner", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_custom_banner_square", 0, "tableau_mesh_custom_banner_square", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_custom_banner_tall", 0, "tableau_mesh_custom_banner_tall", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_custom_banner_short", 0, "tableau_mesh_custom_banner_short", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_round_1", 0, "tableau_mesh_shield_round_1", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_round_2", 0, "tableau_mesh_shield_round_2", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_round_3", 0, "tableau_mesh_shield_round_3", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_round_4", 0, "tableau_mesh_shield_round_4", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_round_5", 0, "tableau_mesh_shield_round_5", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_small_round_1", 0, "tableau_mesh_shield_small_round_1", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_small_round_2", 0, "tableau_mesh_shield_small_round_2", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_small_round_3", 0, "tableau_mesh_shield_small_round_3", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_kite_1", 0, "tableau_mesh_shield_kite_1", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_kite_2", 0, "tableau_mesh_shield_kite_2", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_kite_3", 0, "tableau_mesh_shield_kite_3", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_kite_4", 0, "tableau_mesh_shield_kite_4", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_heater_1", 0, "tableau_mesh_shield_heater_1", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_heater_2", 0, "tableau_mesh_shield_heater_2", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_pavise_1", 0, "tableau_mesh_shield_pavise_1", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_pavise_2", 0, "tableau_mesh_shield_pavise_2", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("heraldic_armor_bg", 0, "heraldic_armor_bg", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_heraldic_armor_a", 0, "tableau_mesh_heraldic_armor_a", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("tableau_mesh_heraldic_armor_b", 0, "tableau_mesh_heraldic_armor_b", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("tableau_mesh_heraldic_armor_c", 0, "tableau_mesh_heraldic_armor_c", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("tableau_mesh_heraldic_armor_d", 0, "tableau_mesh_heraldic_armor_d", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("outer_terrain_plain_1", 0, "ter_border_a", -90, 0, 0, 0, 0, 0, 1, 1, 1),
("banner_a01", 0, "banner_a01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a02", 0, "banner_a02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a03", 0, "banner_a03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a04", 0, "banner_a04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a05", 0, "banner_a05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a06", 0, "banner_a06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a07", 0, "banner_a07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a08", 0, "banner_a08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a09", 0, "banner_a09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a10", 0, "banner_a10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a11", 0, "banner_a11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a12", 0, "banner_a12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a13", 0, "banner_a13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a14", 0, "banner_a14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a15", 0, "banner_f21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a16", 0, "banner_a16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a17", 0, "banner_a17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a18", 0, "banner_a18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a19", 0, "banner_a19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a20", 0, "banner_a20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a21", 0, "banner_a21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b01", 0, "banner_b01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b02", 0, "banner_b02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b03", 0, "banner_b03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b04", 0, "banner_b04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b05", 0, "banner_b05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b06", 0, "banner_b06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b07", 0, "banner_b07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b08", 0, "banner_b08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b09", 0, "banner_b09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b10", 0, "banner_b10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b11", 0, "banner_b11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b12", 0, "banner_b12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b13", 0, "banner_b13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b14", 0, "banner_b14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b15", 0, "banner_b15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b16", 0, "banner_b16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b17", 0, "banner_b17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b18", 0, "banner_b18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b19", 0, "banner_b19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b20", 0, "banner_b20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b21", 0, "banner_b21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c01", 0, "banner_c01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c02", 0, "banner_c02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c03", 0, "banner_c03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c04", 0, "banner_c04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c05", 0, "banner_c05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c06", 0, "banner_c06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c07", 0, "banner_c07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c08", 0, "banner_c08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c09", 0, "banner_c09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c10", 0, "banner_c10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c11", 0, "banner_c11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c12", 0, "banner_c12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c13", 0, "banner_c13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c14", 0, "banner_c14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c15", 0, "banner_c15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c16", 0, "banner_c16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c17", 0, "banner_c17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c18", 0, "banner_c18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c19", 0, "banner_c19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c20", 0, "banner_c20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c21", 0, "banner_c21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d01", 0, "banner_d01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d02", 0, "banner_d02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d03", 0, "banner_d03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d04", 0, "banner_d04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d05", 0, "banner_d05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d06", 0, "banner_d06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d07", 0, "banner_d07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d08", 0, "banner_d08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d09", 0, "banner_d09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d10", 0, "banner_d10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d11", 0, "banner_d11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d12", 0, "banner_d12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d13", 0, "banner_d13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d14", 0, "banner_d14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d15", 0, "banner_d15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d16", 0, "banner_d16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d17", 0, "banner_d17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d18", 0, "banner_d18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d19", 0, "banner_d19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d20", 0, "banner_d20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d21", 0, "banner_d21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e01", 0, "banner_e01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e02", 0, "banner_e02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e03", 0, "banner_e03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e04", 0, "banner_e04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e05", 0, "banner_e05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e06", 0, "banner_e06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e07", 0, "banner_e07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e08", 0, "banner_e08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e09", 0, "banner_e09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e10", 0, "banner_e10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e11", 0, "banner_e11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e12", 0, "banner_e12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e13", 0, "banner_e13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e14", 0, "banner_e14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e15", 0, "banner_e15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e16", 0, "banner_e16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e17", 0, "banner_e17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e18", 0, "banner_e18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e19", 0, "banner_e19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e20", 0, "banner_e20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e21", 0, "banner_e21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f01", 0, "banner_f01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f02", 0, "banner_f02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f03", 0, "banner_f03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f04", 0, "banner_f04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f05", 0, "banner_f05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f06", 0, "banner_f06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f07", 0, "banner_f07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f08", 0, "banner_f08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f09", 0, "banner_f09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f10", 0, "banner_f10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f11", 0, "banner_f11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f12", 0, "banner_f12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f13", 0, "banner_f13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f14", 0, "banner_f14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f15", 0, "banner_f15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f16", 0, "banner_f16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f17", 0, "banner_f17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f18", 0, "banner_f18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f19", 0, "banner_f19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f20", 0, "banner_f20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g01", 0, "banner_f01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g02", 0, "banner_f02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g03", 0, "banner_f03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g04", 0, "banner_f04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g05", 0, "banner_f05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g06", 0, "banner_f06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g07", 0, "banner_f07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g08", 0, "banner_f08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g09", 0, "banner_f09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g10", 0, "banner_f10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_a", 0, "banner_kingdom_a", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_b", 0, "banner_kingdom_b", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_c", 0, "banner_kingdom_c", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_d", 0, "banner_kingdom_d", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_e", 0, "banner_kingdom_e", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_f", 0, "banner_kingdom_f", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f21", 0, "banner_a15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a01", 0, "arms_a01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a02", 0, "arms_a02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a03", 0, "arms_a03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a04", 0, "arms_a04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a05", 0, "banner_a05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a06", 0, "arms_a06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a07", 0, "banner_a07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a08", 0, "arms_a08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a09", 0, "banner_a09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a10", 0, "banner_a10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a11", 0, "banner_a11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a12", 0, "arms_a12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a13", 0, "arms_a13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a14", 0, "banner_a14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a15", 0, "banner_f21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a16", 0, "arms_a16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a17", 0, "arms_a17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a18", 0, "arms_a18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a19", 0, "arms_a19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a20", 0, "arms_a20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a21", 0, "arms_a21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b01", 0, "arms_b01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b02", 0, "arms_b02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b03", 0, "banner_b03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b04", 0, "banner_b04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b05", 0, "arms_b05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b06", 0, "arms_b06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b07", 0, "arms_b07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b08", 0, "arms_b08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b09", 0, "arms_b09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b10", 0, "arms_b10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b11", 0, "banner_b11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b12", 0, "banner_b12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b13", 0, "banner_b13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b14", 0, "arms_b14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b15", 0, "arms_b15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b16", 0, "arms_b16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b17", 0, "banner_b17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b18", 0, "arms_b18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b19", 0, "banner_b19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b20", 0, "arms_b20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b21", 0, "banner_b21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c01", 0, "arms_c01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c02", 0, "banner_c02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c03", 0, "banner_c03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c04", 0, "arms_c04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c05", 0, "banner_c05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c06", 0, "arms_c06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c07", 0, "arms_c07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c08", 0, "banner_c08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c09", 0, "banner_c09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c10", 0, "arms_c10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c11", 0, "banner_c11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c12", 0, "arms_c12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c13", 0, "arms_c13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c14", 0, "arms_c14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c15", 0, "banner_c15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c16", 0, "arms_c16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c17", 0, "banner_c17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c18", 0, "banner_c18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c19", 0, "arms_c19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c20", 0, "banner_c20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c21", 0, "banner_c21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d01", 0, "banner_d01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d02", 0, "arms_d02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d03", 0, "arms_d03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d04", 0, "arms_d04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d05", 0, "banner_d05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d06", 0, "arms_d06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d07", 0, "arms_d07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d08", 0, "arms_d08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d09", 0, "arms_d09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d10", 0, "banner_d10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d11", 0, "arms_d11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d12", 0, "arms_d12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d13", 0, "arms_d13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d14", 0, "arms_d14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d15", 0, "arms_d15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d16", 0, "arms_d16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d17", 0, "arms_d17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d18", 0, "arms_d18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d19", 0, "arms_d19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d20", 0, "arms_d20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d21", 0, "arms_d21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e01", 0, "banner_e01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e02", 0, "arms_e02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e03", 0, "banner_e03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e04", 0, "banner_e04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e05", 0, "banner_e05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e06", 0, "banner_e06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e07", 0, "banner_e07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e08", 0, "banner_e08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e09", 0, "banner_e09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e10", 0, "banner_e10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e11", 0, "banner_e11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e12", 0, "banner_e12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e13", 0, "banner_e13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e14", 0, "banner_e14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e15", 0, "banner_e15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e16", 0, "banner_e16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e17", 0, "banner_e17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e18", 0, "banner_e18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e19", 0, "banner_e19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e20", 0, "banner_e20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e21", 0, "banner_e21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f01", 0, "banner_f01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f02", 0, "banner_f02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f03", 0, "banner_f03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f04", 0, "banner_f04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f05", 0, "banner_f05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f06", 0, "banner_f06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f07", 0, "banner_f07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f08", 0, "banner_f08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f09", 0, "banner_f09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f10", 0, "banner_f10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f11", 0, "banner_f11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f12", 0, "banner_f12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f13", 0, "banner_f13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f14", 0, "banner_f14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f15", 0, "banner_f15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f16", 0, "banner_f16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f17", 0, "banner_f17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f18", 0, "banner_f18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f19", 0, "banner_f19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f20", 0, "banner_f20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g01", 0, "banner_f01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g02", 0, "banner_f02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g03", 0, "banner_f03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g04", 0, "banner_f04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g05", 0, "banner_f05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g06", 0, "banner_f06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g07", 0, "banner_f07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g08", 0, "banner_f08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g09", 0, "banner_f09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g10", 0, "banner_f10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_a", 0, "banner_kingdom_a", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_b", 0, "banner_kingdom_b", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_c", 0, "banner_kingdom_c", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_d", 0, "banner_kingdom_d", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_e", 0, "banner_kingdom_e", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_f", 0, "banner_kingdom_f", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f21", 0, "banner_a15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banners_default_a", 0, "banners_default_a", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banners_default_b", 0, "banners_default_b", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banners_default_c", 0, "banners_default_c", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banners_default_d", 0, "banners_default_d", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banners_default_e", 0, "banners_default_e", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("troop_label_banner", 0, "troop_label_banner", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("ui_kingdom_shield_1", 0, "ui_kingdom_shield_1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_2", 0, "ui_kingdom_shield_2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_3", 0, "ui_kingdom_shield_3", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_4", 0, "ui_kingdom_shield_4", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_5", 0, "ui_kingdom_shield_5", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_6", 0, "ui_kingdom_shield_6", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#("flag_swadian", 0, "banner_a01", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#("flag_vaegir", 0, "banner_a02", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#("flag_khergit", 0, "banner_d01", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#("flag_nord", 0, "banner_a03", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#("flag_rhodok", 0, "banner_a04", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_arrow_down", 0, "mouse_arrow_down", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_arrow_right", 0, "mouse_arrow_right", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_arrow_left", 0, "mouse_arrow_left", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_arrow_up", 0, "mouse_arrow_up", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_arrow_plus", 0, "mouse_arrow_plus", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_left_click", 0, "mouse_left_click", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_right_click", 0, "mouse_right_click", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("status_ammo_ready", 0, "status_ammo_ready", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("main_menu_background", 0, "main_menu_nord", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("loading_background", 0, "load_screen_2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_quick_battle_a", 0, "ui_quick_battle_a", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("white_bg_plane_a", 0, "white_bg_plane_a", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_icon_infantry", 0, "cb_ui_icon_infantry", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_icon_archer", 0, "cb_ui_icon_archer", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_icon_horseman", 0, "cb_ui_icon_horseman", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_main", 0, "cb_ui_main", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_01", 0, "cb_ui_maps_scene_01", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_02", 0, "cb_ui_maps_scene_02", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_03", 0, "cb_ui_maps_scene_03", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_04", 0, "cb_ui_maps_scene_04", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_05", 0, "cb_ui_maps_scene_05", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_06", 0, "cb_ui_maps_scene_06", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_07", 0, "cb_ui_maps_scene_07", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_08", 0, "cb_ui_maps_scene_08", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_09", 0, "cb_ui_maps_scene_09", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_14", 0, "mp_ui_host_maps_c4", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_15", 0, "mp_ui_host_maps_c5", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("quit_adv", 0, "quit_adv", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("quit_adv_b", 0, "quit_adv_b", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_7", 0, "ui_kingdom_shield_7", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_rb", 0, "flag_project_rb", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_rb_miss", 0, "flag_project_rb_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_16", 0, "mp_ui_host_maps_d1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_17", 0, "mp_ui_host_maps_d2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_18", 0, "mp_ui_host_maps_d3", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_19", 0, "mp_ui_host_maps_e2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_20", 0, "mp_ui_host_maps_e1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_21", 0, "mp_ui_host_maps_cold_cost", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#INVASION MODE START
("incoming_enemy", 0, "cb_ui_icon_infantry", 0, 0, 0, 0, 0, 0, 2, 2, 2),
("prison_cart_pos", 0, "ccoop_prison_cart", 0, 0, 0, 0, 0, 0, 2, 2, 2),
("ccoop_drop_chest_top", 0, "ccoop_drop_chest_top", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ccoop_drop_chest_bottom", 0, "ccoop_drop_chest_bottom", 0, 0, 200, 0, 0, 0, 1, 1, 1),
("ccoop_random_class", 0, "ccoop_random_class", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ccoop_default_class", 0, "ccoop_default_class", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ccoop_melee_class", 0, "ccoop_melee_class", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ccoop_ranged_class", 0, "ccoop_ranged_class", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ccoop_mounted_class", 0, "ccoop_mounted_class", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#INVASION MODE END
]
#LWBR WarForge 2.0 --- BEGIN
if not IS_CLIENT:
for g in xrange(len(meshes)):
meshes[g] = (meshes[g][0],0,"pic_bandits",0,0,0,0,0,0,0,0,0)
#LWBR WarForge 2.0 --- END
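# Note: each mesh entry follows the module-system tuple layout
# (name, flags, mesh_resource, pos_x, pos_y, pos_z, rot_x, rot_y, rot_z, scale_x, scale_y, scale_z),
# so the dedicated-server branch above keeps every mesh name but re-points it at the
# lightweight "pic_bandits" resource with zeroed transforms, presumably so the server
# never has to load the UI artwork.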
|
py | 1a378a9835c7850c8d5fc052d1abc3beb861f804 | # -*- coding: utf-8 -*-#
'''
# Name: dnn_regression-keras
# Description:
# Author: super
# Date: 2020/6/2
'''
from HelperClass2.MnistImageDataReader import *
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def load_data():
train_file = "../data/ch09.train.npz"
test_file = "../data/ch09.test.npz"
dataReader = DataReader_2_0(train_file, test_file)
dataReader.ReadData()
# dr.NormalizeX()
# dr.NormalizeY(YNormalizationMethod.Regression)
dataReader.Shuffle()
dataReader.GenerateValidationSet()
x_train, y_train = dataReader.XTrain, dataReader.YTrain
x_test, y_test = dataReader.XTest, dataReader.YTest
x_val, y_val = dataReader.XDev, dataReader.YDev
return x_train, y_train, x_test, y_test, x_val, y_val
def build_model():
model = Sequential()
model.add(Dense(4, activation='sigmoid', input_shape=(1, )))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='Adam',
loss='mean_squared_error')
return model
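# build_model yields a minimal 1 -> 4 (sigmoid) -> 1 (linear) regressor for the ch09
# curve-fitting data; if training stalls, re-enabling the commented-out NormalizeX /
# NormalizeY calls in load_data is a reasonable first adjustment.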
# Plot the training and validation loss curves recorded in the Keras history object
def draw_train_history(history):
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
if __name__ == '__main__':
x_train, y_train, x_test, y_test, x_val, y_val = load_data()
# print(x_train.shape)
# print(x_test.shape)
# print(x_val.shape)
model = build_model()
history = model.fit(x_train, y_train, epochs=50, batch_size=10, validation_data=(x_val, y_val))
draw_train_history(history)
loss = model.evaluate(x_test, y_test)
print("test loss: {}".format(loss))
weights = model.get_weights()
print("weights: ", weights) |
py | 1a378b0ad7ab30fadb74c23730a06669673c1db7 | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class InvestigationTypeCode(GenericTypeCode):
"""
InvestigationType
From: http://hl7.org/fhir/ValueSet/investigation-sets in valuesets.xml
Example value set for investigation type.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://snomed.info/sct
"""
codeset: FhirUri = "http://snomed.info/sct"
class InvestigationTypeCodeValues:
"""
From: http://hl7.org/fhir/ValueSet/investigation-sets in valuesets.xml
"""
Examination_Signs = InvestigationTypeCode("271336007")
"""
From: http://hl7.org/fhir/ValueSet/investigation-sets in valuesets.xml
"""
History_symptoms = InvestigationTypeCode("160237006")
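    # Illustrative usage (assumes the GenericTypeCode base class exposes the wrapped
    # code string, which is defined outside this file):
    #   signs = InvestigationTypeCodeValues.Examination_Signs   # SNOMED CT 271336007
    #   custom = InvestigationTypeCode("160237006")             # same code as History_symptoms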
|
py | 1a378b1092d68314a9981691de8b948917d515bf | import os
'''
Plugin handler for the EarthData PS6-24 plugin.
'''
class SeedlinkPluginHandler:
# Create defaults
def __init__(self): pass
def push(self, seedlink):
# Check and set defaults
try: seedlink.param('sources.hrd24.comport')
except: seedlink.setParam('sources.hrd24.comport', '/dev/data')
try: seedlink.param('sources.hrd24.baudrate')
except: seedlink.setParam('sources.hrd24.baudrate', 19200)
try: seedlink.param('sources.hrd24.bundles')
except: seedlink.setParam('sources.hrd24.bundles', 59)
try: seedlink.param('sources.hrd24.proc')
except: seedlink.setParam('sources.hrd24.proc', 'hrd24_100')
return seedlink.net + "." + seedlink.sta
# Flush does nothing
def flush(self, seedlink):
pass
|
py | 1a378b4359e0fa3c8aff74df009d237e09e6f914 | from theano import function, config, shared, tensor
import numpy
import time
vlen = 10 * 30 * 768 # 10 x #cores x # threads per core
iters = 1000
rng = numpy.random.RandomState(22)
x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
f = function([], tensor.exp(x))
print(f.maker.fgraph.toposort())
t0 = time.time()
for i in range(iters):
r = f()
t1 = time.time()
print("Looping %d times took %f seconds" % (iters, t1 - t0))
print("Result is %s" % (r,))
if numpy.any([isinstance(x.op, tensor.Elemwise) and
('Gpu' not in type(x.op).__name__)
for x in f.maker.fgraph.toposort()]):
print('Used the cpu')
else:
print('Used the gpu')
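# Typical invocation (illustrative; the script name is a placeholder):
#   THEANO_FLAGS=device=cuda,floatX=float32 python gpu_check.py
# (older Theano releases use device=gpu); with the default flags the computation
# stays on the CPU and the check above reports 'Used the cpu'.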
|
py | 1a378b8b4f3d16d5edf7fccdfaf76e7568214fef | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
import random
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
class ProjectDoubanSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class ProjectDoubanDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
# Added by ARCSINX
class RandomUserAgentMiddleware(UserAgentMiddleware):
#the default user_agent_list composes chrome,I E,firefox,Mozilla,opera,netscape
#for more user agent strings,you can find it in http://www.useragentstring.com/pages/useragentstring.php
user_agent_list = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
def __init__(self, user_agent=''):
self.user_agent = user_agent
def process_request(self, request, spider):
ua = random.choice(self.user_agent_list)
if ua:
request.headers.setdefault('User-Agent', ua)
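    # To activate this middleware, register it in the project's settings.py
    # (illustrative; the dotted path and priority are assumptions and must match the
    # actual project layout):
    #   DOWNLOADER_MIDDLEWARES = {
    #       'Project_Douban.middlewares.RandomUserAgentMiddleware': 400,
    #       'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    #   }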
|
py | 1a378cad355b976be59070352830b0ab55cf5627 | # flake8: noqa: E402
import time
from kube_hunter.conf import Config, set_config
set_config(Config())
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import K8sVersionDisclosure
from kube_hunter.modules.hunting.cves import (
K8sClusterCveHunter,
ServerApiVersionEndPointAccessPE,
ServerApiVersionEndPointAccessDos,
CveUtils,
)
cve_counter = 0
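# The @handler.subscribe classes below increment cve_counter whenever the hunter
# publishes the matching CVE event, so the assertions in test_K8sCveHunter simply
# count how many vulnerability events a given Kubernetes version triggers.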
def test_K8sCveHunter():
global cve_counter
# because the hunter unregisters itself, we manually remove this option, so we can test it
K8sClusterCveHunter.__new__ = lambda self, cls: object.__new__(self)
e = K8sVersionDisclosure(version="1.10.1", from_endpoint="/version")
h = K8sClusterCveHunter(e)
h.execute()
time.sleep(0.01)
assert cve_counter == 2
cve_counter = 0
# test patched version
e = K8sVersionDisclosure(version="v1.13.6-gke.13", from_endpoint="/version")
h = K8sClusterCveHunter(e)
h.execute()
time.sleep(0.01)
assert cve_counter == 0
cve_counter = 0
@handler.subscribe(ServerApiVersionEndPointAccessPE)
class test_CVE_2018_1002105:
def __init__(self, event):
global cve_counter
cve_counter += 1
@handler.subscribe(ServerApiVersionEndPointAccessDos)
class test_CVE_2019_1002100:
def __init__(self, event):
global cve_counter
cve_counter += 1
class TestCveUtils:
def test_is_downstream(self):
test_cases = (
("1", False),
("1.2", False),
("1.2-3", True),
("1.2-r3", True),
("1.2+3", True),
("1.2~3", True),
("1.2+a3f5cb2", True),
("1.2-9287543", True),
("v1", False),
("v1.2", False),
("v1.2-3", True),
("v1.2-r3", True),
("v1.2+3", True),
("v1.2~3", True),
("v1.2+a3f5cb2", True),
("v1.2-9287543", True),
("v1.13.9-gke.3", True),
)
for version, expected in test_cases:
actual = CveUtils.is_downstream_version(version)
assert actual == expected
def test_ignore_downstream(self):
test_cases = (
("v2.2-abcd", ["v1.1", "v2.3"], False),
("v2.2-abcd", ["v1.1", "v2.2"], False),
("v1.13.9-gke.3", ["v1.14.8"], False),
)
for check_version, fix_versions, expected in test_cases:
actual = CveUtils.is_vulnerable(fix_versions, check_version, True)
assert actual == expected
|
py | 1a378da0bdbceb47481377206b3ffafe2c5505fe | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import abc
import os
import enum
import time
import logging
import shutil
import paddle
import paddle.fluid as fluid
from paddle.fluid.initializer import NumpyArrayInitializer
from paddle.fluid.core import PassVersionChecker
import paddle.fluid.core as core
from paddle import compat as cpt
import paddle.inference as paddle_infer
from typing import Optional, List, Callable, Dict, Any, Set
from program_config import TensorConfig, OpConfig, ProgramConfig, create_fake_model, create_quant_model
import hypothesis
from hypothesis import given, settings, seed, example, assume
logging.basicConfig(level=logging.INFO, format="%(message)s")
settings.register_profile(
"ci",
max_examples=100,
suppress_health_check=hypothesis.HealthCheck.all(),
deadline=None,
print_blob=True,
derandomize=True,
report_multiple_bugs=False)
settings.register_profile(
"dev",
max_examples=1000,
suppress_health_check=hypothesis.HealthCheck.all(),
deadline=None,
print_blob=True,
derandomize=True,
report_multiple_bugs=False)
if float(os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) < 1 or \
os.getenv('HYPOTHESIS_TEST_PROFILE', 'dev') == 'ci':
settings.load_profile("ci")
else:
settings.load_profile("dev")
class SkipReasons(enum.Enum):
# Paddle not support, but trt support, we need to add the feature.
TRT_NOT_IMPLEMENTED = 0
# TRT not support.
TRT_NOT_SUPPORT = 1
# Accuracy is abnormal after enabling pass.
PASS_ACCURACY_ERROR = 2
# Accuracy is abnormal after enabling mkldnn.
MKLDNN_ACCURACY_ERROR = 3
class AutoScanTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
np.random.seed(1024)
paddle.enable_static()
super(AutoScanTest, self).__init__(*args, **kwargs)
self.skip_cases = []
abs_dir = os.path.abspath(os.path.dirname(__file__))
self.cache_dir = os.path.join(abs_dir,
str(self.__module__) + '_cache_dir')
@abc.abstractmethod
def sample_program_configs(self):
'''
Generate all config with the combination of different Input tensor shape and
different Attr values.
'''
raise NotImplementedError
@abc.abstractmethod
def sample_predictor_configs(self):
raise NotImplementedError
@abc.abstractmethod
def add_skip_case(
self,
teller: [Callable[[ProgramConfig, paddle_infer.Config], bool]],
reason: SkipReasons,
note: str):
self.skip_cases.append((teller, reason, note))
@abc.abstractmethod
def is_program_valid(self, program_config: ProgramConfig) -> bool:
raise NotImplementedError
def run_test_config(self, model, params, prog_config, pred_config,
feed_data) -> Dict[str, np.ndarray]:
'''
Test a single case.
'''
pred_config.set_model_buffer(model, len(model), params, len(params))
predictor = paddle_infer.create_predictor(pred_config)
for name, _ in prog_config.inputs.items():
input_tensor = predictor.get_input_handle(name)
input_tensor.copy_from_cpu(feed_data[name]['data'])
if feed_data[name]['lod'] is not None:
input_tensor.set_lod(feed_data[name]['lod'])
predictor.run()
result = {}
for out_name, o_name in zip(prog_config.outputs,
predictor.get_output_names()):
result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu()
return result
@abc.abstractmethod
def assert_tensors_near(self,
atol: float,
rtol: float,
tensor: Dict[str, np.array],
baseline: Dict[str, np.array]):
for key, arr in tensor.items():
self.assertTrue(
baseline[key].shape == arr.shape,
"The output shapes are not equal, the baseline shape is " +
str(baseline[key].shape) + ', but got ' + str(arr.shape))
self.assertTrue(
np.allclose(
baseline[key], arr, atol=atol, rtol=rtol),
"Output has diff. ")
@abc.abstractmethod
def run_test(self, quant=False):
raise NotImplementedError
def generate_op_config(self,
ops_config: List[Dict[str, Any]]) -> List[OpConfig]:
ops = []
for i in range(len(ops_config)):
op_config = ops_config[i]
ops.append(
OpConfig(
type=op_config['op_type'],
inputs=op_config['op_inputs'],
outputs=op_config['op_outputs'],
attrs=op_config['op_attrs']))
return ops
@abc.abstractmethod
def skip_log(self, msg: str):
logging.warning("SKIP: " + msg)
@abc.abstractmethod
def fail_log(self, msg: str):
logging.error("FAILE: " + msg)
@abc.abstractmethod
def success_log(self, msg: str):
logging.info("SUCCESS: " + msg)
@abc.abstractmethod
def create_inference_config(self,
passes: Optional[List[str]]=None,
use_gpu: bool=False,
use_mkldnn: bool=False,
ir_optim: Optional[bool]=None):
config = paddle_infer.Config()
config.switch_ir_debug(True)
config.set_optim_cache_dir(self.cache_dir)
config.disable_glog_info()
if ir_optim is not None:
config.switch_ir_optim(ir_optim)
if use_gpu:
config.enable_use_gpu(100, 0)
if use_mkldnn:
config.enable_mkldnn()
if passes is not None:
config.pass_builder().set_passes(passes)
self.passes = passes
return config
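    # Illustrative subclass usage, matching the tuple shapes that run_test unpacks in
    # the subclasses below (the tolerances here are placeholder values):
    #   config = self.create_inference_config(use_mkldnn=True)
    #   yield config, (1e-5, 1e-5)            # MkldnnAutoScanTest style
    #   yield config, (1, 2), (1e-5, 1e-5)    # PassAutoScanTest style ((before, after) op counts)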
class MkldnnAutoScanTest(AutoScanTest):
def __init__(self, *args, **kwargs):
super(MkldnnAutoScanTest, self).__init__(*args, **kwargs)
def run_test(self, quant=False, *args, **kwargs):
status = True
for prog_config in self.sample_program_configs(*args, **kwargs):
# if program is invalid, we should skip that cases.
if not self.is_program_valid(prog_config):
continue
model, params = create_fake_model(prog_config)
if quant:
model, params = create_quant_model(model, params)
feed_data = {}
for name, tensor_config in prog_config.inputs.items():
feed_data[name] = {
'data': tensor_config.data,
'lod': tensor_config.lod
}
results: List[Dict[str, np.ndarray]] = []
# baseline: cpu no ir_optim run
base_config = self.create_inference_config(ir_optim=False)
logging.info('RUN program_config: ' + str(prog_config))
results.append(
self.run_test_config(model, params, prog_config, base_config,
feed_data))
self.success_log('RUN_CPU_BASELINE done')
for pred_config, (
atol, rtol) in self.sample_predictor_configs(prog_config):
# skip info
skip_flag = False
for skip_info in self.skip_cases:
if skip_info[0](prog_config, pred_config):
skip_flag = True
if skip_info[1] == SkipReasons.MKLDNN_ACCURACY_ERROR:
self.skip_log("[MKLDNN_ACCURACY_ERROR] " +
skip_info[2] + ' ' + ' vs ' + self.
inference_config_str(pred_config))
else:
raise NotImplementedError
break
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
try:
results.append(
self.run_test_config(model, params, prog_config,
pred_config, feed_data))
self.assert_tensors_near(atol, rtol, results[-1],
results[0])
except Exception as e:
self.fail_log(
self.inference_config_str(pred_config) +
'\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)))
if not skip_flag:
status = False
continue
self.success_log('RUN predictor_config ' + self.
inference_config_str(pred_config) + ' done')
self.assertTrue(status)
def inference_config_str(self, config) -> str:
dic = {}
enable_mkldnn = config.mkldnn_enabled()
dic['use_mkldnn'] = enable_mkldnn
enable_gpu = config.use_gpu()
dic['use_gpu'] = enable_gpu
return str(dic)
class PassAutoScanTest(AutoScanTest):
def __init__(self, *args, **kwargs):
super(PassAutoScanTest, self).__init__(*args, **kwargs)
self.passes = []
def check_op_version(self):
status = True
for pass_name in self.passes:
if not PassVersionChecker.IsCompatible(pass_name):
self.fail_log('{} version check failed.'.format(pass_name))
status = False
return status
def assert_op_size(self, fusion_before_num, fusion_after_num, origin_model):
if not self.passes:
raise ValueError(
'In PassAutoScan you should give a valid pass name.')
last_passed_program = os.path.join(self.cache_dir,
self.passes[-1] + '.pdmodel')
model_bytes = paddle.static.load_from_file(last_passed_program)
pg = paddle.static.deserialize_program(model_bytes)
main_block = pg.desc.block(0)
after_op_size = main_block.op_size()
pg = paddle.static.deserialize_program(origin_model)
main_block = pg.desc.block(0)
before_op_size = main_block.op_size()
self.assertTrue(before_op_size == fusion_before_num,
'before fusion op size is {}, but got {}!'.format(
before_op_size, fusion_before_num))
self.assertTrue(after_op_size == fusion_after_num,
'after fusion op size is {}, but got {}!'.format(
after_op_size, fusion_after_num))
def run_test(self, quant=False, *args, **kwargs):
status = True
for prog_config in self.sample_program_configs(*args, **kwargs):
# if program is invalid, we should skip that cases.
if not self.is_program_valid(prog_config):
continue
model, params = create_fake_model(prog_config)
if quant:
model, params = create_quant_model(model, params)
feed_data = {}
for name, tensor_config in prog_config.inputs.items():
feed_data[name] = {
'data': tensor_config.data,
'lod': tensor_config.lod
}
results: List[Dict[str, np.ndarray]] = []
# baseline: cpu no ir_optim run
base_config = self.create_inference_config(ir_optim=False)
logging.info('RUN program_config: ' + str(prog_config))
results.append(
self.run_test_config(model, params, prog_config, base_config,
feed_data))
self.success_log('RUN_CPU_BASELINE done')
for pred_config, nodes_num, (
atol, rtol) in self.sample_predictor_configs(prog_config):
# skip info
skip_flag = False
for skip_info in self.skip_cases:
if skip_info[0](prog_config, pred_config):
skip_flag = True
if skip_info[1] == SkipReasons.PASS_ACCURACY_ERROR:
self.skip_log("[PASS_ACCURACY_ERROR] " + skip_info[
2] + ' ' + ' vs ' + self.inference_config_str(
pred_config))
else:
raise NotImplementedError
break
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
try:
results.append(
self.run_test_config(model, params, prog_config,
pred_config, feed_data))
self.assert_tensors_near(atol, rtol, results[-1],
results[0])
if not skip_flag:
self.assert_op_size(nodes_num[0], nodes_num[1], model)
except Exception as e:
self.fail_log(
self.inference_config_str(pred_config) +
'\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)))
if not skip_flag:
status = False
continue
self.success_log('RUN predictor_config ' + self.
inference_config_str(pred_config) + ' done')
status = self.check_op_version() and status
self.assertTrue(status)
def inference_config_str(self, config) -> str:
dic = {}
enable_mkldnn = config.mkldnn_enabled()
dic['use_mkldnn'] = enable_mkldnn
enable_gpu = config.use_gpu()
dic['use_gpu'] = enable_gpu
        if self.passes:
dic['passes'] = self.passes
enable_trt = config.tensorrt_engine_enabled()
trt_precison = config.tensorrt_precision_mode()
trt_dynamic_shape = config.tensorrt_dynamic_shape_enabled()
if enable_trt:
dic['use_trt'] = True
dic['trt_precision'] = trt_precison
dic['use_dynamic_shape'] = trt_dynamic_shape
else:
dic['use_trt'] = False
return str(dic)
def create_trt_inference_config(self) -> paddle_infer.Config:
config = paddle_infer.Config()
config.disable_glog_info()
config.enable_use_gpu(100, 0)
config.set_optim_cache_dir(self.cache_dir)
config.switch_ir_debug()
# for assert_op_size.
self.passes = ['transpose_flatten_concat_fuse_pass']
return config
class TrtLayerAutoScanTest(AutoScanTest):
class TensorRTParam:
'''
TensorRT subgraph engine parameters.
'''
def __init__(self, workspace_size, max_batch_size, min_subgraph_size,
precision, use_static, use_calib_mode):
self.workspace_size = workspace_size
self.max_batch_size = max_batch_size
self.min_subgraph_size = min_subgraph_size
self.precision = precision
self.use_static = use_static
self.use_calib_mode = use_calib_mode
class DynamicShapeParam:
'''
Prepare TensorRT subgraph engine dynamic shape parameters.
'''
def __init__(self, min_input_shape, max_input_shape, opt_input_shape,
disable_trt_plugin_fp16):
self.min_input_shape = min_input_shape
self.max_input_shape = max_input_shape
self.opt_input_shape = opt_input_shape
self.disable_trt_plugin_fp16 = disable_trt_plugin_fp16
def __init__(self, *args, **kwargs):
super(TrtLayerAutoScanTest, self).__init__(*args, **kwargs)
self.trt_param = self.TensorRTParam(
workspace_size=1024,
max_batch_size=4,
min_subgraph_size=0,
precision=paddle_infer.PrecisionType.Float32,
use_static=True,
use_calib_mode=False)
self.dynamic_shape = self.DynamicShapeParam({}, {}, {}, False)
self.num_percent_cases = float(
os.getenv(
'TEST_NUM_PERCENT_CASES', default='1.0'))
# Choose different tests by week
np.random.seed(int(time.strftime("%W")))
def create_inference_config(self, use_trt=True) -> paddle_infer.Config:
config = paddle_infer.Config()
config.disable_glog_info()
config.enable_use_gpu(100, 0)
config.set_optim_cache_dir(self.cache_dir)
if use_trt:
config.switch_ir_debug()
config.enable_tensorrt_engine(
max_batch_size=self.trt_param.max_batch_size,
workspace_size=self.trt_param.workspace_size,
min_subgraph_size=self.trt_param.min_subgraph_size,
precision_mode=self.trt_param.precision,
use_static=self.trt_param.use_static,
use_calib_mode=self.trt_param.use_calib_mode)
if len(self.dynamic_shape.min_input_shape
) != 0 and self.dynamic_shape.min_input_shape.keys(
) == self.dynamic_shape.max_input_shape.keys(
) and self.dynamic_shape.min_input_shape.keys(
) == self.dynamic_shape.opt_input_shape.keys():
config.set_trt_dynamic_shape_info(
self.dynamic_shape.min_input_shape,
self.dynamic_shape.max_input_shape,
self.dynamic_shape.opt_input_shape,
self.dynamic_shape.disable_trt_plugin_fp16)
return config
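    # Illustrative sketch (added, not part of the original test framework): a
    # concrete subclass would typically tune self.trt_param / self.dynamic_shape
    # in its own __init__ before sample_predictor_configs yields configs, e.g.
    #
    #   self.trt_param.precision = paddle_infer.PrecisionType.Half
    #   self.dynamic_shape = self.DynamicShapeParam(
    #       {'x': [1, 3, 32, 32]},   # min_input_shape
    #       {'x': [4, 3, 64, 64]},   # max_input_shape
    #       {'x': [1, 3, 64, 64]},   # opt_input_shape
    #       False)                   # disable_trt_plugin_fp16
    #
    # The tensor name 'x' and the shapes above are hypothetical placeholders.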
def assert_op_size(self, trt_engine_num, paddle_op_num):
last_passed_program = os.path.join(
self.cache_dir, 'transpose_flatten_concat_fuse_pass.pdmodel')
model_bytes = paddle.static.load_from_file(last_passed_program)
pg = paddle.static.deserialize_program(model_bytes)
main_block = pg.desc.block(0)
op_size = main_block.op_size()
op_types = [
main_block.op(i).type() == 'tensorrt_engine' for i in range(op_size)
]
trt_engine_size = sum(op_types)
paddle_op_size = op_size - trt_engine_size
        self.assertTrue(trt_engine_size == trt_engine_num,
                        'expected trt_engine_num is {}, but got {}!'.format(
                            trt_engine_num, trt_engine_size))
        self.assertTrue(paddle_op_size == paddle_op_num,
                        'expected paddle_op_num is {}, but got {}!'.format(
                            paddle_op_num, paddle_op_size))
def inference_config_str(self, config: paddle_infer.Config) -> str:
dic = {}
enable_trt = config.tensorrt_engine_enabled()
trt_precison = config.tensorrt_precision_mode()
trt_dynamic_shape = config.tensorrt_dynamic_shape_enabled()
if enable_trt:
dic['use_trt'] = True
dic['trt_precision'] = trt_precison
dic['use_dynamic_shape'] = trt_dynamic_shape
else:
dic['use_trt'] = False
return str(dic)
def run_test(self, quant=False, *args, **kwargs):
status = True
run_flags = []
for prog_config in self.sample_program_configs(*args, **kwargs):
            # In CI, only run the TEST_NUM_PERCENT_CASES fraction of sampled cases
if np.random.rand() < self.num_percent_cases:
run_flags.append(True)
else:
run_flags.append(False)
        for prog_config, run_flag in zip(
                self.sample_program_configs(*args, **kwargs), run_flags):
            if not run_flag:
                continue
            # if the program is invalid, we should skip that case.
if not self.is_program_valid(prog_config):
continue
model, params = create_fake_model(prog_config)
if quant:
model, params = create_quant_model(model, params)
feed_data = {}
for name, tensor_config in prog_config.inputs.items():
feed_data[name] = {
'data': tensor_config.data,
'lod': tensor_config.lod
}
results: List[Dict[str, np.ndarray]] = []
# baseline: gpu run
logging.info('RUN program_config: ' + str(prog_config))
gpu_config = self.create_inference_config(use_trt=False)
results.append(
self.run_test_config(model, params, prog_config, gpu_config,
feed_data))
self.success_log('RUN_GPU_BASELINE done')
for pred_config, nodes_num, threshold in self.sample_predictor_configs(
prog_config):
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
if isinstance(threshold, float):
atol = threshold
rtol = 1e-8
elif isinstance(threshold, list) or isinstance(threshold,
tuple):
atol = threshold[0]
rtol = threshold[1]
else:
raise NotImplementedError
if quant and pred_config.tensorrt_precision_mode(
) != paddle_infer.PrecisionType.Int8:
continue
if pred_config.tensorrt_precision_mode(
) == paddle_infer.PrecisionType.Int8 and not quant:
continue
skip_flag = False
for skip_info in self.skip_cases:
if skip_info[0](prog_config, pred_config):
skip_flag = True
if skip_info[1] == SkipReasons.TRT_NOT_IMPLEMENTED:
self.skip_log("[TRT_NOT_IMPLEMENTED] " + skip_info[
2] + ' ' + ' vs ' + self.inference_config_str(
pred_config))
elif skip_info[1] == SkipReasons.TRT_NOT_SUPPORT:
self.skip_log("[TRT_NOT_SUPPORT] " + skip_info[
2] + ' ' + ' vs ' + self.inference_config_str(
pred_config))
else:
raise NotImplementedError
break
try:
pred_config_deserialize = paddle_infer.Config(pred_config)
results.append(
self.run_test_config(model, params, prog_config,
pred_config, feed_data))
self.assert_tensors_near(atol, rtol, results[-1],
results[0])
if not skip_flag:
self.assert_op_size(nodes_num[0], nodes_num[1])
# deserialize test
if nodes_num[0] > 0:
self.run_test_config(model, params, prog_config,
pred_config_deserialize, feed_data)
except Exception as e:
self.fail_log(
str(prog_config) + ' vs ' + self.inference_config_str(
pred_config) +
'\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)))
if not skip_flag:
status = False
continue
            self.success_log('RUN predictor_config ' +
                             self.inference_config_str(pred_config) + ' done')
self.assertTrue(status)
|
py | 1a378e03e1abb3a6cd9c877f3ca0829459255da9 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:owefsad
# datetime:2021/1/25 7:01 PM
# software: PyCharm
# project: lingzhi-engine
from django.urls import path
from vuln.views.health import HealthEndPoint
from vuln.views.strategy_run import StrategyRunEndPoint
urlpatterns = [
path('run', StrategyRunEndPoint.as_view()),
path('health', HealthEndPoint.as_view()),
]
|
py | 1a378e958a14e01fb16722bb5ad2b288d4534ec4 | from fhwebscrapers.curvasb3 import ScraperB3
__all__ = ['ScraperB3']
|
py | 1a378f12e98bac4e5e0d1938b64283996c19ee82 | from __future__ import print_function
import keras
from keras.layers import AveragePooling2D, Lambda
import keras.backend as K
from keras.layers import Input, MaxPooling2D, UpSampling2D, Dropout, Conv2D, Concatenate, Activation, Cropping2D, \
Flatten, Dense, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.models import Model, Sequential
from keras.optimizers import Adam
from keras.regularizers import l2, l1
from keras.activations import sigmoid, relu
img_rows = 512
img_cols = 512
save_path = 'save/'
num_epochs = 1
save_period = 10
show_groundtruth_flag = False
def _bn_relu(input):
norm = BatchNormalization(axis=-1)(input)
return Activation("relu")(norm)
def _bn_relu_conv(**conv_params):
"""Helper to build a BN -> relu -> conv block.
This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
activation = _bn_relu(input)
return Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(activation)
return f
def _double_bn_relu_conv(**conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
after_first_bn_relu_conv = _bn_relu_conv(filters=filters, kernel_size=kernel_size,
strides=strides, kernel_initializer=kernel_initializer,
padding=padding, kernel_regularizer=kernel_regularizer)(input)
return _bn_relu_conv(filters=filters, kernel_size=kernel_size,
strides=strides, kernel_initializer=kernel_initializer,
padding=padding, kernel_regularizer=kernel_regularizer)(after_first_bn_relu_conv)
return f
def res_block(**conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
after_double_bn_relu_conv = _double_bn_relu_conv(filters=filters, kernel_size=kernel_size,
strides=strides, kernel_initializer=kernel_initializer,
padding=padding, kernel_regularizer=kernel_regularizer)(input)
return add([input, after_double_bn_relu_conv])
return f
def conv_factory(x, concat_axis, nb_filter,
dropout_rate=None, weight_decay=1E-4):
"""Apply BatchNorm, Relu 3x3Conv2D, optional dropout
:param x: Input keras network
:param concat_axis: int -- index of contatenate axis
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras network with b_norm, relu and Conv2D added
:rtype: keras network
"""
x = BatchNormalization(axis=concat_axis,
gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter, (3, 3),
kernel_initializer="he_uniform",
padding="same",
kernel_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def conv_factory_DO(x, concat_axis, nb_filter,
dropout_rate=None, weight_decay=1E-4):
"""Apply BatchNorm, Relu 3x3Conv2D, optional dropout
:param x: Input keras network
:param concat_axis: int -- index of contatenate axis
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras network with b_norm, relu and Conv2D added
:rtype: keras network
"""
x = BatchNormalization(axis=concat_axis,
gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter, (3, 3),
kernel_initializer="he_uniform",
padding="same",
kernel_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x, training=True)
return x
def conv_factory_leaky(x, concat_axis, nb_filter,
dropout_rate=None, weight_decay=1E-4):
"""Apply BatchNorm, Relu 3x3Conv2D, optional dropout
:param x: Input keras network
:param concat_axis: int -- index of contatenate axis
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras network with b_norm, relu and Conv2D added
:rtype: keras network
"""
x = BatchNormalization(axis=concat_axis,
gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = LeakyReLU(0.2)(x)
x = Conv2D(nb_filter, (3, 3),
kernel_initializer="he_uniform",
padding="same",
kernel_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def denseblock(x, concat_axis, nb_layers, growth_rate,
dropout_rate=None, weight_decay=1E-4):
"""Build a denseblock where the output of each
conv_factory is fed to subsequent ones
:param x: keras model
:param concat_axis: int -- index of contatenate axis
:param nb_layers: int -- the number of layers of conv_
factory to append to the model.
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras model with nb_layers of conv_factory appended
:rtype: keras model
"""
list_feat = [x]
for i in range(nb_layers):
x = conv_factory(x, concat_axis, growth_rate,
dropout_rate, weight_decay)
list_feat.append(x)
x = Concatenate(axis=concat_axis)(list_feat)
return x
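# Illustrative helper (added sketch, not used by the original code): because each
# conv_factory output is appended to list_feat and the whole list is concatenated
# at the end, a denseblock grows the channel count by nb_layers * growth_rate.
def _denseblock_output_channels(in_channels, nb_layers, growth_rate):
    # e.g. 64 input channels, 3 layers, growth_rate 16 -> 64 + 3 * 16 = 112
    return in_channels + nb_layers * growth_rate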
def denseblock_DO(x, concat_axis, nb_layers, growth_rate,
dropout_rate=None, weight_decay=1E-4):
"""Build a denseblock where the output of each
conv_factory is fed to subsequent ones
:param x: keras model
:param concat_axis: int -- index of contatenate axis
:param nb_layers: int -- the number of layers of conv_
factory to append to the model.
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras model with nb_layers of conv_factory appended
:rtype: keras model
"""
list_feat = [x]
for i in range(nb_layers):
x = conv_factory_DO(x, concat_axis, growth_rate,
dropout_rate, weight_decay)
list_feat.append(x)
x = Concatenate(axis=concat_axis)(list_feat)
return x
def denseblock_leaky(x, concat_axis, nb_layers, growth_rate,
dropout_rate=None, weight_decay=1E-4):
"""Build a denseblock where the output of each
conv_factory is fed to subsequent ones
:param x: keras model
:param concat_axis: int -- index of contatenate axis
:param nb_layers: int -- the number of layers of conv_
factory to append to the model.
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras model with nb_layers of conv_factory appended
:rtype: keras model
"""
list_feat = [x]
for i in range(nb_layers):
x = conv_factory_leaky(x, concat_axis, growth_rate,
dropout_rate, weight_decay)
list_feat.append(x)
x = Concatenate(axis=concat_axis)(list_feat)
return x
def discriminator_96(input_shape):
img_shape = input_shape
model = Sequential()
model.add(Conv2D(64, kernel_size=5, strides=2, input_shape=img_shape, padding='valid',
kernel_initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02, seed=None)))
model.add(LeakyReLU(alpha=0.2))
model.add(Conv2D(64, kernel_size=5, strides=2, padding="valid",
kernel_initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02, seed=None)))
model.add(BatchNormalization(momentum=0.99))
model.add(LeakyReLU(alpha=0.2))
model.add(Flatten())
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(rate=0.4))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(rate=0.4))
model.add(Dense(1, activation='sigmoid'))
model.summary()
img = Input(shape=img_shape)
validity = model(img)
return Model(img, validity)
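# Usage sketch (added for illustration; the input channel count and optimizer
# settings below are assumptions, not taken from the original code): the
# discriminator ends in a single sigmoid unit, so binary cross-entropy is the
# natural loss.
#
#   d = discriminator_96((96, 96, 1))
#   d.compile(loss='binary_crossentropy', optimizer=Adam(0.0002, 0.5),
#             metrics=['accuracy'])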
def get_model_sigmoid_2out(input_shape, l2_weight_decay):
regularizer_func = l2(l2_weight_decay)
inputs = Input(input_shape)
print("inputs shape:", inputs.shape)
conv1 = Conv2D(64, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(inputs)
print("conv1 shape:", conv1.shape)
# conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
# print("conv1 shape:", conv1.shape)
# res1 = res_block(filters=64, kernel_size=3)(conv1)
# print("res1 shape:", res1.shape)
db1 = denseblock(x=conv1, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db1 shape:", db1.shape)
pool1 = MaxPooling2D(pool_size=(2, 2))(db1)
print("pool1 shape:", pool1.shape)
conv2 = Conv2D(128, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(pool1)
print("conv2 shape:", conv2.shape)
# conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
# print("conv2 shape:", conv2.shape)
# res2 = res_block(filters=128, kernel_size=3)(conv2)
# print("res2 shape:", res2.shape)
db2 = denseblock(x=conv2, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db2 shape:", db2.shape)
pool2 = MaxPooling2D(pool_size=(2, 2))(db2)
print("pool2 shape:", pool2.shape)
conv3 = Conv2D(256, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(pool2)
print("conv3 shape:", conv3.shape)
# conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
# print("conv3 shape:", conv3.shape)
# res3 = res_block(filters=256,kernel_size=3)(conv3)
# print("res3 shape:", res3.shape)
db3 = denseblock(x=conv3, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db3 shape:", db3.shape)
pool3 = MaxPooling2D(pool_size=(2, 2))(db3)
print("pool3 shape:", pool3.shape)
conv4 = Conv2D(512, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(pool3)
print("conv4 shape:", conv4.shape)
# conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
# print("conv4 shape:", conv4.shape)
# res4 = res_block(filters=512, kernel_size=3)(conv4)
# print("res4 shape:", res4.shape)
db4 = denseblock(x=conv4, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db4 shape:", db4.shape)
drop4 = Dropout(0.5)(db4)
print("drop4 shape:", drop4.shape)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
print("pool4 shape:", pool4.shape)
conv5 = Conv2D(1024, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(pool4)
print("conv5 shape:", conv5.shape)
# conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
# print("conv5 shape:", conv5.shape)
# res5 = res_block(filters=1024,kernel_size=3)(conv5)
# print("res5 shape:", res5.shape)
db5 = denseblock(x=conv5, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db5 shape:", db5.shape)
drop5 = Dropout(0.5)(db5)
print("drop5 shape:", drop5.shape)
up6 = Conv2D(512, 2, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(
UpSampling2D(size=(2, 2))(drop5))
print("up6 shape:", up6.shape)
merge6 = Concatenate(axis=3)([drop4, up6])
print("merge6 shape:", merge6.shape)
conv6 = Conv2D(512, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(merge6)
print("conv6 shape:", conv6.shape)
# conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
# print("conv6 shape:", conv6.shape)
# res5 = res_block(filters=512, kernel_size=3)(conv6)
# print("res5 shape:", res5.shape)
db6 = denseblock(x=conv6, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db6 shape:", db6.shape)
up7 = Conv2D(256, 2, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(
UpSampling2D(size=(2, 2))(db6))
print("up7 shape:", up7.shape)
merge7 = Concatenate(axis=3)([db3, up7])
print("merge7 shape:", merge7.shape)
conv7 = Conv2D(256, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(merge7)
print("conv7 shape:", conv7.shape)
# conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
# print("conv7 shape:", conv7.shape)
# res6 = res_block(filters=256, kernel_size=3)(conv7)
# print("res6 shape:", res6.shape)
db7 = denseblock(x=conv7, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db7 shape:", db7.shape)
up8 = Conv2D(128, 2, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(
UpSampling2D(size=(2, 2))(db7))
print("up8 shape:", up8.shape)
merge8 = Concatenate(axis=3)([db2, up8])
print("merge8 shape:", merge8.shape)
conv8 = Conv2D(128, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(merge8)
print("conv8 shape:", conv8.shape)
# conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
# print("conv8 shape:", conv8.shape)
# res7 = res_block(filters=128, kernel_size=3)(conv8)
# print("res7 shape:", res7.shape)
db8 = denseblock(x=conv8, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db8 shape:", db8.shape)
up9 = Conv2D(64, 2, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(
UpSampling2D(size=(2, 2))(db8))
print("up9 shape:", up9.shape)
merge9 = Concatenate(axis=3)([db1, up9]) ##res1 up9
print("merge9 shape:", merge9.shape)
conv9 = Conv2D(64, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(merge9)
print("conv9 shape:", conv9.shape)
# conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
# print("conv9 shape:", conv9.shape)
# res8 = res_block(filters=64, kernel_size=3)(conv9)
# print("res8 shape:", res8.shape)
db9 = denseblock(x=conv9, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db9 shape:", db9.shape)
conv10 = Conv2D(16, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(db9)
print("conv10 shape:", conv9.shape)
conv11 = Conv2D(2, 1, activation='sigmoid', kernel_regularizer=regularizer_func)(conv10)
print("conv11 shape:", conv11.shape)
model = Model(inputs=inputs, outputs=conv11)
return model
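# Usage sketch (added for illustration): build the network at the module's
# 512 x 512 resolution and compile it. The single input channel, optimizer and
# loss below are assumptions, not taken from the original code.
if __name__ == '__main__':
    unet = get_model_sigmoid_2out((img_rows, img_cols, 1), l2_weight_decay=1e-4)
    unet.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy')
    unet.summary()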
|
py | 1a379145ff877585aed6fa98b7aa8a4787892173 | import random
from toontown.battle.BattleBase import *
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPLocalizer
from toontown.toonbase import TTLocalizer
notify = DirectNotifyGlobal.directNotify.newCategory('SuitBattleGlobals')
debugAttackSequence = {}
def pickFromFreqList(freqList):
randNum = random.randint(0, 99)
count = 0
index = 0
level = None
for f in freqList:
count = count + f
if randNum < count:
level = index
break
index = index + 1
return level
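# Worked example (added comment, not original): with the common frequency list
# (50, 30, 10, 5, 5) the cumulative counts are 50, 80, 90, 95, 100, so a roll
# of 0-49 yields relative level 0, 50-79 -> 1, 80-89 -> 2, 90-94 -> 3 and
# 95-99 -> 4. If the list sums to less than 100, the function can return None.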
def getActualFromRelativeLevel(name, relLevel):
data = SuitAttributes[name]
actualLevel = data['level'] + relLevel
return actualLevel
def getSuitVitals(name, level = -1):
data = SuitAttributes[name]
if level == -1:
level = pickFromFreqList(data['freq'])
dict = {}
dict['level'] = getActualFromRelativeLevel(name, level)
if dict['level'] == 11:
level = 0
dict['hp'] = calculateHp(data, level)
dict['def'] = data['def'][level]
attacks = data['attacks']
alist = []
for a in attacks:
adict = {}
name = a[0]
adict['name'] = name
adict['animName'] = SuitAttacks[name][0]
adict['hp'] = a[1][level]
adict['acc'] = a[2][level]
adict['freq'] = a[3][level]
adict['group'] = SuitAttacks[name][1]
alist.append(adict)
dict['attacks'] = alist
return dict
def calculateHp(data, level):
return ((data['level'] + 1 + level) * (data['level'] + 2 + level))
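# Worked example (added comment, not original): for a suit whose base
# data['level'] is 0, relative level 0 gives (0+1+0)*(0+2+0) = 2 HP and
# relative level 4 gives (0+1+4)*(0+2+4) = 30 HP.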
def calculateDefense(suitLevel, levelOffset, boost=0):
defense = int(suitLevel * 5 + (levelOffset * 5))
if defense > 55:
defense = 55
elif defense <= 0:
defense = 2
defense += boost
return defense
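# Worked example (added comment, not original): calculateDefense(7, 0) gives
# int(7*5 + 0*5) = 35; calculateDefense(12, 0) first computes 60, which is
# clamped to the 55 cap; any non-positive result is raised to the floor of 2,
# and the optional boost is added after clamping.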
def pickSuitAttack(attacks, suitLevel):
attackNum = None
randNum = random.randint(0, 99)
notify.debug('pickSuitAttack: rolled %d' % randNum)
count = 0
index = 0
total = 0
for c in attacks:
total = total + c[3][suitLevel]
for c in attacks:
count = count + c[3][suitLevel]
if randNum < count:
attackNum = index
notify.debug('picking attack %d' % attackNum)
break
index = index + 1
configAttackName = simbase.config.GetString('attack-type', 'random')
if configAttackName == 'random':
return attackNum
elif configAttackName == 'sequence':
for i in xrange(len(attacks)):
if attacks[i] not in debugAttackSequence:
debugAttackSequence[attacks[i]] = 1
return i
return attackNum
else:
for i in xrange(len(attacks)):
if attacks[i][0] == configAttackName:
return i
return attackNum
def getSuitAttack(suitName, suitLevel, attackNum = -1):
attackChoices = SuitAttributes[suitName]['attacks']
if attackNum == -1:
notify.debug('getSuitAttack: picking attacking for %s' % suitName)
attackNum = pickSuitAttack(attackChoices, suitLevel)
attack = attackChoices[attackNum]
adict = {}
adict['suitName'] = suitName
name = attack[0]
adict['name'] = name
adict['id'] = SuitAttacks.keys().index(name)
adict['animName'] = SuitAttacks[name][0]
adict['hp'] = attack[1][suitLevel]
adict['acc'] = attack[2][suitLevel]
adict['freq'] = attack[3][suitLevel]
adict['group'] = SuitAttacks[name][1]
return adict
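# Usage sketch (added comment, not original): getSuitAttack('f', 2) rolls one of
# the Flunky's attacks at suit level index 2 and returns a dict with the keys
# 'suitName', 'name', 'id', 'animName', 'hp', 'acc', 'freq' and 'group';
# passing an explicit attackNum skips the random pick.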
SuitSizes = {
'f': 4.0,
'p': 3.35,
'ym': 4.125,
'mm': 2.5,
'ds': 4.5,
'hh': 6.5,
'cr': 6.75,
'tbc': 7.0,
'bf': 4.0,
'b': 4.375,
'dt': 4.25,
'ac': 4.35,
'bs': 4.5,
'sd': 5.65,
'le': 7.125,
'bw': 7.0,
'sc': 3.6,
'pp': 3.55,
'tw': 4.5,
'bc': 4.4,
'nc': 5.25,
'mb': 5.3,
'ls': 6.5,
'rb': 7.0,
'cc': 3.5,
'tm': 3.75,
'nd': 4.35,
'gh': 4.75,
'ms': 4.75,
'tf': 5.25,
'm': 5.75,
'mh': 7.0,
'ca': 4.0,
'cn': 3.75,
'sw': 4.35,
'mdm': 4.75,
'txm': 5.25,
'mg': 6.5,
'bfh': 7.8,
'hho': 7.0,
}
SuitAttributes = {'f': {'name': TTLocalizer.SuitFlunky,
'singularname': TTLocalizer.SuitFlunkyS,
'pluralname': TTLocalizer.SuitFlunkyP,
'level': 0,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('PoundKey',
(2,
2,
3,
4,
6),
(75,
75,
80,
80,
90),
(30,
35,
40,
45,
50)), ('Shred',
(3,
4,
5,
6,
7),
(50,
55,
60,
65,
70),
(10,
15,
20,
25,
30)), ('ClipOnTie',
(1,
1,
2,
2,
3),
(75,
80,
85,
90,
95),
(60,
50,
40,
30,
20)))},
'p': {'name': TTLocalizer.SuitPencilPusher,
'singularname': TTLocalizer.SuitPencilPusherS,
'pluralname': TTLocalizer.SuitPencilPusherP,
'level': 1,
'freq': (50,
30,
10,
5,
5),
'acc': (45,
50,
55,
60,
65),
'attacks': (('FountainPen',
(2,
3,
4,
6,
9),
(75,
75,
75,
75,
75),
(20,
20,
20,
20,
20)),
('RubOut',
(4,
5,
6,
8,
12),
(75,
75,
75,
75,
75),
(20,
20,
20,
20,
20)),
('FingerWag',
(1,
2,
2,
3,
4),
(75,
75,
75,
75,
75),
(15,
15,
15,
15,
15)),
('WriteOff',
(4,
6,
8,
10,
12),
(75,
75,
75,
75,
75),
(25,
25,
25,
25,
25)),
('FillWithLead',
(3,
4,
5,
6,
7),
(75,
75,
75,
75,
75),
(20,
20,
20,
20,
20)))},
'ym': {'name': TTLocalizer.SuitYesman,
'singularname': TTLocalizer.SuitYesmanS,
'pluralname': TTLocalizer.SuitYesmanP,
'level': 2,
'freq': (50,
30,
10,
5,
5),
'acc': (65,
70,
75,
80,
85),
'attacks': (('RubberStamp',
(2,
4,
6,
8,
10),
(75,
75,
75,
75,
75),
(35,
35,
35,
35,
35)),
('RazzleDazzle',
(7,
9,
11,
13,
15),
(50,
50,
50,
50,
50),
(25,
20,
15,
10,
5)),
('Synergy',
(4,
6,
8,
10,
12),
(50,
60,
70,
80,
90),
(5,
10,
15,
20,
25)),
('TeeOff',
(5,
7,
9,
11,
13),
(50,
60,
70,
80,
90),
(35,
35,
35,
35,
35)))},
'mm': {'name': TTLocalizer.SuitMicromanager,
'singularname': TTLocalizer.SuitMicromanagerS,
'pluralname': TTLocalizer.SuitMicromanagerP,
'level': 3,
'freq': (50,
30,
10,
5,
5),
'acc': (70,
75,
80,
82,
85),
'attacks': (('Demotion',
(6,
8,
12,
15,
18),
(50,
60,
70,
80,
90),
(30,
30,
30,
30,
30)),
('FingerWag',
(4,
6,
9,
12,
15),
(50,
60,
70,
80,
90),
(10,
10,
10,
10,
10)),
('FountainPen',
(3,
4,
6,
8,
10),
(50,
60,
70,
80,
90),
(15,
15,
15,
15,
15)),
('BrainStorm',
(4,
6,
9,
12,
15),
(50,
55,
65,
75,
85),
(25,
25,
25,
25,
25)),
('BuzzWord',
(4,
6,
9,
12,
15),
(50,
60,
70,
80,
90),
(20,
20,
20,
20,
20)))},
'ds': {'name': TTLocalizer.SuitDownsizer,
'singularname': TTLocalizer.SuitDownsizerS,
'pluralname': TTLocalizer.SuitDownsizerP,
'level': 4,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('Canned',
(5,
6,
8,
10,
12),
(60,
75,
80,
85,
90),
(25,
25,
25,
25,
25)),
('Downsize',
(8,
9,
11,
13,
15),
(50,
65,
70,
75,
80),
(35,
35,
35,
35,
35)),
('PinkSlip',
(4,
5,
6,
7,
8),
(60,
65,
75,
80,
85),
(25,
25,
25,
25,
25)),
('Sacked',
(5,
6,
7,
8,
9),
(50,
50,
50,
50,
50),
(15,
15,
15,
15,
15)))},
'hh': {'name': TTLocalizer.SuitHeadHunter,
'singularname': TTLocalizer.SuitHeadHunterS,
'pluralname': TTLocalizer.SuitHeadHunterP,
'level': 5,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('FountainPen',
(5,
6,
8,
10,
12),
(60,
75,
80,
85,
90),
(15,
15,
15,
15,
15)),
('GlowerPower',
(7,
8,
10,
12,
13),
(50,
60,
70,
80,
90),
(20,
20,
20,
20,
20)),
('HalfWindsor',
(8,
10,
12,
14,
16),
(60,
65,
70,
75,
80),
(20,
20,
20,
20,
20)),
('HeadShrink',
(10,
12,
15,
18,
21),
(65,
75,
80,
85,
95),
(20,
20,
20,
20,
20)),
('ReOrg',
(5,
8,
11,
13,
15),
(65,
75,
80,
85,
90),
(15,
15,
15,
15,
15)),
('Rolodex',
(6,
7,
8,
9,
10),
(60,
65,
70,
75,
80),
(10,
10,
10,
10,
10)))},
'cr': {'name': TTLocalizer.SuitCorporateRaider,
'singularname': TTLocalizer.SuitCorporateRaiderS,
'pluralname': TTLocalizer.SuitCorporateRaiderP,
'level': 6,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('Canned',
(6,
7,
8,
9,
10),
(60,
75,
80,
85,
90),
(20,
20,
20,
20,
20)),
('EvilEye',
(12,
15,
18,
21,
24),
(60,
70,
75,
80,
90),
(20,
20,
20,
20,
20)),
('PickPocket',
(9,
12,
13,
14,
15),
(55,
65,
75,
85,
95),
(20,
20,
20,
20,
20)),
('PlayHardball',
(7,
8,
12,
15,
16),
(60,
65,
70,
75,
80),
(20,
20,
20,
20,
20)),
('PowerTie',
(10,
12,
14,
16,
18),
(65,
75,
80,
85,
95),
(20,
20,
20,
20,
20)))},
'tbc': {'name': TTLocalizer.SuitTheBigCheese,
'singularname': TTLocalizer.SuitTheBigCheeseS,
'pluralname': TTLocalizer.SuitTheBigCheeseP,
'level': 7,
'freq': (50,
30,
15,
15,
10,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5),
'acc': (30,
35,
40,
45,
50, #12
50,
50,
50,
50,
50,
50,
50,
50, #20
50,
50,
50,
50,
50,
50,
50,
50,
50,
50, #30
50,
50,
50,
50,
50,
50,
50,
50,
50,
50, #40
50,
50,
50,
50,
50,
50,
50,
50,
50,
50), #50
'attacks': (('CigarSmoke',
(10,
12,
15,
18,
20,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61),
(55,
65,
70,
75,
80,
85,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)),
('SongAndDance',
(14,
15,
17,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(60,
65,
70,
75,
80,
85,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)),
('GlowerPower',
(8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
50,
51,
52,
53,
54),
(55,
65,
70,
75,
80,
85,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90),
(20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20)),
('TeeOff',
(8,
11,
14,
17,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(55,
65,
70,
75,
80,
85,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90),
(30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30,
30)))},
'ca': {'name': TTLocalizer.SuitConArtist,
'singularname': TTLocalizer.SuitConArtistS,
'pluralname': TTLocalizer.SuitConArtistP,
'level': 0,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('BounceCheck',
(2,
3,
5,
6,
9),
(75,
80,
85,
90,
95),
(20,
20,
20,
20,
20)),
('Schmooze',
(2,
4,
6,
8,
11),
(50,
55,
60,
65,
70),
(20,
20,
20,
20,
20)),
('FountainPen',
(1,
3,
5,
6,
8),
(50,
55,
60,
65,
70),
(10,
10,
10,
10,
10)),
('DoubleTalk',
(2,
4,
6,
8,
10),
(95,
95,
95,
95,
95),
(10,
10,
10,
10,
10)),
('PickPocket',
(1,
2,
3,
4,
5),
(25,
30,
35,
40,
45),
(40,
40,
40,
40,
40)))},
'cn': {'name': TTLocalizer.SuitConnoisseur,
'singularname': TTLocalizer.SuitConnoisseurS,
'pluralname': TTLocalizer.SuitConnoisseurP,
'level': 1,
'freq': (50,
30,
10,
5,
5),
'acc': (45,
50,
55,
60,
65),
'attacks': (('HalfWindsor',
(3,
6,
7,
10,
12),
(75,
75,
75,
75,
75),
(20,
20,
20,
20,
20)),
('ReOrg',
(5,
6,
7,
8,
9),
(75,
75,
75,
75,
75),
(20,
20,
20,
20,
20)),
('BrainStorm',
(5,
7,
9,
11,
12),
(75,
75,
75,
75,
75),
(20,
20,
20,
20,
20)),
('FingerWag',
(4,
5,
6,
7,
8),
(50,
50,
50,
50,
50),
(20,
20,
20,
20,
20)),
('FingerWag',
(4,
5,
7,
9,
10),
(60,
65,
70,
75,
80),
(15,
15,
15,
15,
15)),
('PoundKey',
(3,
4,
5,
6,
7),
(55,
65,
70,
75,
80),
(20,
20,
20,
20,
20)),
('DoubleTalk',
(4,
6,
7,
9,
12),
(75,
80,
85,
90,
95),
(20,
20,
20,
20,
20)))},
'sw': {'name': TTLocalizer.SuitSwindler,
'singularname': TTLocalizer.SuitSwindlerS,
'pluralname': TTLocalizer.SuitSwindlerP,
'level': 2,
'freq': (50,
30,
10,
5,
5),
'acc': (45,
50,
55,
60,
65),
'attacks': (('BounceCheck',
(4,
7,
9,
10,
12),
(35,
35,
35,
35,
35),
(25,
25,
25,
25,
25)),
('HangUp',
(6,
7,
9,
13,
14),
(35,
35,
35,
35,
35),
(25,
25,
25,
25,
25)),
('Rolodex',
(3,
6,
7,
11,
13),
(50,
50,
50,
50,
50),
(25,
25,
25,
25,
25)),
('PickPocket',
(2,
4,
7,
10,
14),
(75,
80,
85,
90,
95),
(25,
25,
25,
25,
25)))},
'mdm': {'name': TTLocalizer.SuitMiddleman,
'singularname': TTLocalizer.SuitMiddlemanS,
'pluralname': TTLocalizer.SuitMiddlemanP,
'level': 3,
'freq': (50,
30,
10,
5,
5),
'acc': (45,
50,
55,
60,
65),
'attacks': (('ReOrg',
(7,
7,
8,
10,
11),
(60,
75,
80,
85,
90),
(25,
25,
25,
25,
25)),
('Rolodex',
(6,
7,
10,
13,
15),
(60,
70,
75,
80,
90),
(25,
25,
25,
25,
25)),
('Synergy',
(7,
8,
9,
10,
12),
(55,
65,
75,
85,
95),
(25,
25,
25,
25,
25)),
('RazzleDazzle',
(8,
10,
12,
14,
16),
(60,
65,
70,
75,
80),
(25,
25,
25,
25,
25)))},
'txm': {'name': TTLocalizer.SuitToxicManager,
'singularname': TTLocalizer.SuitToxicManagerS,
'pluralname': TTLocalizer.SuitToxicManagerP,
'level': 4,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('AcidRain',
(9,
12,
14,
15,
17),
(55,
65,
75,
85,
95),
(20,
20,
20,
20,
20)),
('FountainPen',
(9,
10,
11,
12,
13),
(60,
75,
80,
85,
90),
(15,
15,
15,
15,
15)),
('GuiltTrip',
(7,
9,
11,
13,
15),
(60,
75,
80,
85,
90),
(35,
35,
35,
35,
35)),
('PowerTrip',
(6,
9,
12,
15,
18),
(60,
65,
70,
75,
80),
(15,
15,
15,
15,
15)),
('EvilEye',
(8,
10,
11,
13,
14),
(55,
65,
70,
75,
80),
(50,
50,
50,
50,
50)))},
'mg': {'name': TTLocalizer.SuitMagnate,
'singularname': TTLocalizer.SuitMagnateS,
'pluralname': TTLocalizer.SuitMagnateP,
'level': 5,
'freq': (50,
30,
10,
5,
5),
'acc': (45,
50,
55,
60,
65),
'attacks': (('EvilEye',
(8,
10,
12,
14,
16),
(75,
75,
75,
75,
75),
(15,
15,
15,
15,
15)),
('Jargon',
(9,
10,
11,
12,
13),
(75,
75,
75,
75,
75),
(15,
15,
15,
15,
15)),
('HangUp',
(7,
8,
10,
11,
                               12),
(75,
75,
75,
75,
75),
(15,
15,
15,
15,
15)),
('PeckingOrder',
(11,
13,
15,
16,
17),
(50,
55,
60,
65,
70),
(30,
30,
30,
30,
30)),
('PowerTrip',
(10,
12,
14,
16,
18),
(75,
80,
85,
90,
95),
(40,
40,
40,
40,
40)))},
'bfh': {'name': TTLocalizer.SuitBigFish,
'singularname': TTLocalizer.SuitBigFishS,
'pluralname': TTLocalizer.SuitBigFishP,
'level': 6,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
60),
'attacks': (('Liquidate',
(11,
15,
17,
21,
24),
(60,
75,
80,
85,
95),
(20,
20,
20,
20,
20)),
('PowerTie',
(13,
15,
17,
19,
21),
(60,
65,
70,
75,
80),
(20,
20,
20,
20,
20)),
('Bite',
(11,
13,
15,
17,
19),
(60,
75,
80,
85,
95),
(20,
20,
20,
20,
20)),
('Watercooler',
(10,
12,
13,
14,
16),
(50,
60,
70,
80,
90),
(20,
20,
20,
20,
20)),
('Canned',
(12,
13,
14,
15,
16),
(5,
5,
5,
5,
5),
(20,
20,
20,
20,
                      20)))},
'hho': {'name': TTLocalizer.SuitHeadHoncho,
'singularname': TTLocalizer.SuitHeadHonchoS,
'pluralname': TTLocalizer.SuitHeadHonchoP,
'level': 7,
'freq': (50,
30,
15,
15,
10,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5),
'acc': (30,
35,
40,
45,
50, #12
50,
50,
50,
50,
50,
50,
50,
50, #20
50,
50,
50,
50,
50,
50,
50,
50,
50,
50, #30
50,
50,
50,
50,
50,
50,
50,
50,
50,
50, #40
50,
50,
50,
50,
50,
50,
50,
50,
50,
50), #50
'attacks': (('CigarSmoke',
(12,
15,
18,
20,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60),
(55,
65,
75,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10)),
('Demotion',
(15,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(30,
35,
40,
45,
50,
55,
60,
65,
70,
75,
80,
85,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90),
(15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15)),
('PinkSlip',
(15,
17,
18,
20,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60),
(60,
65,
75,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20)),
('PowerTrip',
(15,
16,
18,
20,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60),
(60,
65,
70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15)),
('Fired',
(13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55),
(35,
35,
35,
45,
50,
55,
60,
65,
70,
75,
80,
85,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90),
(10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10)),
('ParadigmShift',
(13,
16,
19,
20,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60),
(60,
70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20)),
('TeeOff',
(8,
11,
14,
17,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(55,
65,
70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10)))},
'cc': {'name': TTLocalizer.SuitColdCaller,
'singularname': TTLocalizer.SuitColdCallerS,
'pluralname': TTLocalizer.SuitColdCallerP,
'level': 0,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('FreezeAssets',
(1,
1,
1,
1,
1),
(90,
90,
90,
90,
90),
(5,
10,
15,
20,
25)),
('PoundKey',
(2,
2,
3,
4,
5),
(75,
80,
85,
90,
95),
(25,
25,
25,
25,
25)),
('DoubleTalk',
(2,
3,
4,
6,
8),
(50,
55,
60,
65,
70),
(25,
25,
25,
25,
25)),
('HotAir',
(3,
4,
6,
8,
10),
(50,
50,
50,
50,
50),
(45,
40,
35,
30,
25)))},
'tm': {'name': TTLocalizer.SuitTelemarketer,
'singularname': TTLocalizer.SuitTelemarketerS,
'pluralname': TTLocalizer.SuitTelemarketerP,
'level': 1,
'freq': (50,
30,
10,
5,
5),
'acc': (45,
50,
55,
60,
65),
'attacks': (('ClipOnTie',
(2,
2,
3,
3,
4),
(75,
75,
75,
75,
75),
(15,
15,
15,
15,
15)),
('PickPocket',
(1,
1,
1,
1,
1),
(75,
75,
75,
75,
75),
(15,
15,
15,
15,
15)),
('Rolodex',
(4,
6,
7,
9,
12),
(50,
50,
50,
50,
50),
(20,
20,
20,
20,
20)),
('FingerWag',
(4,
5,
7,
9,
10),
(60,
65,
70,
75,
80),
(15,
15,
15,
15,
15)),
('PoundKey',
(3,
4,
5,
6,
7),
(55,
65,
70,
75,
80),
(20,
20,
20,
20,
20)),
('DoubleTalk',
(4,
6,
7,
9,
12),
(75,
80,
85,
90,
95),
(15,
15,
15,
15,
15)))},
'nd': {'name': TTLocalizer.SuitNameDropper,
'singularname': TTLocalizer.SuitNameDropperS,
'pluralname': TTLocalizer.SuitNameDropperP,
'level': 2,
'freq': (50,
30,
10,
5,
5),
'acc': (65,
70,
75,
80,
85),
'attacks': (('RazzleDazzle',
(4,
5,
6,
9,
12),
(75,
80,
85,
90,
95),
(30,
30,
30,
30,
30)),
('Rolodex',
(5,
6,
7,
10,
14),
(95,
95,
95,
95,
95),
(40,
40,
40,
40,
40)),
('Synergy',
(3,
4,
6,
9,
12),
(50,
50,
50,
50,
50),
(15,
15,
15,
15,
15)),
('PickPocket',
(2,
2,
2,
2,
2),
(95,
95,
95,
95,
95),
(15,
15,
15,
15,
15)))},
'gh': {'name': TTLocalizer.SuitGladHander,
'singularname': TTLocalizer.SuitGladHanderS,
'pluralname': TTLocalizer.SuitGladHanderP,
'level': 3,
'freq': (50,
30,
10,
5,
5),
'acc': (70,
75,
80,
82,
85),
'attacks': (('RubberStamp',
(4,
3,
3,
2,
1),
(90,
70,
50,
30,
10),
(40,
30,
20,
10,
5)),
('FountainPen',
(3,
3,
2,
1,
1),
(70,
60,
50,
40,
30),
(40,
30,
20,
10,
5)),
('Filibuster',
(4,
6,
9,
12,
15),
(30,
40,
50,
60,
70),
(10,
20,
30,
40,
45)),
('Schmooze',
(5,
7,
11,
15,
20),
(55,
65,
75,
85,
95),
(10,
20,
30,
40,
45)))},
'ms': {'name': TTLocalizer.SuitMoverShaker,
'singularname': TTLocalizer.SuitMoverShakerS,
'pluralname': TTLocalizer.SuitMoverShakerP,
'level': 4,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('BrainStorm',
(5,
6,
8,
10,
12),
(60,
75,
80,
85,
90),
(15,
15,
15,
15,
15)),
('HalfWindsor',
(6,
9,
11,
13,
16),
(50,
65,
70,
75,
80),
(20,
20,
20,
20,
20)),
('Quake',
(9,
12,
15,
18,
21),
(60,
65,
75,
80,
85),
(20,
20,
20,
20,
20)),
('Shake',
(6,
8,
10,
12,
14),
(70,
75,
80,
85,
90),
(25,
25,
25,
25,
25)),
('Tremor',
(5,
6,
7,
8,
9),
(50,
50,
50,
50,
50),
(20,
20,
20,
20,
20)))},
'tf': {'name': TTLocalizer.SuitTwoFace,
'singularname': TTLocalizer.SuitTwoFaceS,
'pluralname': TTLocalizer.SuitTwoFaceP,
'level': 5,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('EvilEye',
(10,
12,
14,
16,
18),
(60,
75,
80,
85,
90),
(25,
25,
25,
25,
25)),
('HangUp',
(7,
8,
10,
12,
13),
(50,
60,
70,
80,
90),
(15,
15,
15,
15,
15)),
('RazzleDazzle',
(8,
10,
12,
14,
16),
(60,
65,
70,
75,
80),
(25,
25,
25,
25,
25)),
('ReOrg',
(5,
8,
11,
13,
15),
(65,
75,
80,
85,
90),
(15,
15,
15,
15,
15)),
('RedTape',
(6,
7,
8,
9,
10),
(60,
65,
75,
85,
90),
(20,
20,
20,
20,
20)))},
'm': {'name': TTLocalizer.SuitTheMingler,
'singularname': TTLocalizer.SuitTheMinglerS,
'pluralname': TTLocalizer.SuitTheMinglerP,
'level': 6,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('BuzzWord',
(10,
11,
13,
15,
16),
(60,
75,
80,
85,
90),
(20,
20,
20,
20,
20)),
('ParadigmShift',
(12,
15,
18,
21,
24),
(60,
70,
75,
80,
90),
(25,
25,
25,
25,
25)),
('PowerTrip',
(10,
13,
14,
15,
18),
(60,
65,
70,
75,
80),
(15,
15,
15,
15,
15)),
('Schmooze',
(7,
8,
12,
15,
16),
(55,
65,
75,
85,
95),
(30,
30,
30,
30,
30)),
('TeeOff',
(8,
9,
10,
11,
12),
(70,
75,
80,
85,
95),
(10,
10,
10,
10,
10)))},
'mh': {'name': TTLocalizer.SuitMrHollywood,
'singularname': TTLocalizer.SuitMrHollywoodS,
'pluralname': TTLocalizer.SuitMrHollywoodP,
'level': 7,
'freq': (50,
30,
15,
15,
10,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5),
'acc': (30,
35,
40,
45,
50, #12
50,
50,
50,
50,
50,
50,
50,
50, #20
50,
50,
50,
50,
50,
50,
50,
50,
50,
50, #30
50,
50,
50,
50,
50,
50,
50,
50,
50,
50, #40
50,
50,
50,
50,
50,
50,
50,
50,
50,
50), #50
'attacks': (('PowerTrip',
(10,
12,
15,
18,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(55,
65,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)),
('SongAndDance',
(12,
14,
16,
18,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(60,
65,
70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10)),
('Schmooze',
(16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(55,
65,
70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15)),
('RazzleDazzle',
(8,
11,
14,
17,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)),
('SongAndDance',
(12,
14,
16,
18,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(60,
65,
70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10)),
('Schmooze',
(16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(55,
65,
70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15,
15)),
('RazzleDazzle',
(8,
11,
14,
17,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50,
50)))},
'sc': {'name': TTLocalizer.SuitShortChange,
'singularname': TTLocalizer.SuitShortChangeS,
'pluralname': TTLocalizer.SuitShortChangeP,
'level': 0,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('Watercooler',
(2,
2,
3,
4,
6),
(50,
50,
50,
50,
50),
(20,
20,
20,
20,
20)),
('BounceCheck',
(3,
5,
7,
9,
11),
(75,
80,
85,
90,
95),
(15,
15,
15,
15,
15)),
('ClipOnTie',
(1,
1,
2,
2,
3),
(50,
50,
50,
50,
50),
(25,
25,
25,
25,
25)),
('PickPocket',
(2,
2,
3,
4,
6),
(95,
95,
95,
95,
95),
(40,
40,
40,
40,
40)))},
'pp': {'name': TTLocalizer.SuitPennyPincher,
'singularname': TTLocalizer.SuitPennyPincherS,
'pluralname': TTLocalizer.SuitPennyPincherP,
'level': 1,
'freq': (50,
30,
10,
5,
5),
'acc': (45,
50,
55,
60,
65),
'attacks': (('BounceCheck',
(4,
5,
6,
8,
12),
(75,
75,
75,
75,
75),
(45,
45,
45,
45,
45)), ('FreezeAssets',
(2,
3,
4,
6,
9),
(75,
75,
75,
75,
75),
(20,
20,
20,
20,
20)), ('FingerWag',
(1,
2,
3,
4,
6),
(50,
50,
50,
50,
50),
(35,
35,
35,
35,
35)))},
'tw': {'name': TTLocalizer.SuitTightwad,
'singularname': TTLocalizer.SuitTightwadS,
'pluralname': TTLocalizer.SuitTightwadP,
'level': 2,
'freq': (50,
30,
10,
5,
5),
'acc': (65,
70,
75,
80,
85),
'attacks': (('Fired',
(3,
4,
5,
5,
6),
(75,
75,
75,
75,
75),
(75,
5,
5,
5,
5)),
('GlowerPower',
(3,
4,
6,
9,
12),
(95,
95,
95,
95,
95),
(10,
15,
20,
25,
30)),
('FingerWag',
(3,
3,
4,
4,
5),
(75,
75,
75,
75,
75),
(5,
70,
5,
5,
5)),
('FreezeAssets',
(3,
4,
6,
9,
12),
(75,
75,
75,
75,
75),
(5,
5,
65,
5,
30)),
('BounceCheck',
(5,
6,
9,
13,
18),
(75,
75,
75,
75,
75),
(5,
5,
5,
60,
30)))},
'bc': {'name': TTLocalizer.SuitBeanCounter,
'singularname': TTLocalizer.SuitBeanCounterS,
'pluralname': TTLocalizer.SuitBeanCounterP,
'level': 3,
'freq': (50,
30,
10,
5,
5),
'acc': (70,
75,
80,
82,
85),
'attacks': (('Audit',
(4,
6,
9,
12,
15),
(95,
95,
95,
95,
95),
(20,
20,
20,
20,
20)),
('Calculate',
(4,
6,
9,
12,
15),
(75,
75,
75,
75,
75),
(25,
25,
25,
25,
25)),
('Tabulate',
(4,
6,
9,
12,
15),
(75,
75,
75,
75,
75),
(25,
25,
25,
25,
25)),
('WriteOff',
(4,
6,
9,
12,
15),
(95,
95,
95,
95,
95),
(30,
30,
30,
30,
30)))},
'nc': {'name': TTLocalizer.SuitNumberCruncher,
'singularname': TTLocalizer.SuitNumberCruncherS,
'pluralname': TTLocalizer.SuitNumberCruncherP,
'level': 4,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('Audit',
(5,
6,
8,
10,
12),
(60,
75,
80,
85,
90),
(15,
15,
15,
15,
15)),
('Calculate',
(6,
7,
9,
11,
13),
(50,
65,
70,
75,
80),
(30,
30,
30,
30,
30)),
('Crunch',
(8,
9,
11,
13,
15),
(60,
65,
75,
80,
85),
(35,
35,
35,
35,
35)),
('Tabulate',
(5,
6,
7,
8,
9),
(50,
50,
50,
50,
50),
(20,
20,
20,
20,
20)))},
'mb': {'name': TTLocalizer.SuitMoneyBags,
'singularname': TTLocalizer.SuitMoneyBagsS,
'pluralname': TTLocalizer.SuitMoneyBagsP,
'level': 5,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('Liquidate',
(10,
12,
14,
16,
18),
(60,
75,
80,
85,
90),
(30,
30,
30,
30,
30)), ('MarketCrash',
(8,
10,
12,
14,
16),
(60,
65,
70,
75,
80),
(45,
45,
45,
45,
45)), ('PowerTie',
(6,
7,
8,
9,
10),
(60,
65,
75,
85,
90),
(25,
25,
25,
25,
25)))},
'ls': {'name': TTLocalizer.SuitLoanShark,
'singularname': TTLocalizer.SuitLoanSharkS,
'pluralname': TTLocalizer.SuitLoanSharkP,
'level': 6,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('Bite',
(10,
11,
13,
15,
16),
(60,
75,
80,
85,
90),
(30,
30,
30,
30,
                  30)),
('Chomp',
(12,
15,
18,
21,
24),
(60,
70,
75,
80,
90),
(35,
35,
35,
35,
35)),
('PlayHardball',
(9,
11,
12,
13,
15),
(55,
65,
75,
85,
95),
(10,
10,
10,
10,
10)),
('Overdraft',
(9,
11,
12,
13,
15),
(55,
65,
75,
85,
95),
(15,
15,
15,
15,
15)),
('WriteOff',
(6,
8,
10,
12,
14),
(70,
75,
80,
85,
95),
(10,
10,
10,
10,
10)))},
'rb': {'name': TTLocalizer.SuitRobberBaron,
'singularname': TTLocalizer.SuitRobberBaronS,
'pluralname': TTLocalizer.SuitRobberBaronP,
'level': 7,
'freq': (50,
30,
15,
15,
10,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5),
'acc': (30,
35,
40,
45,
50, #12
50,
50,
50,
50,
50,
50,
50,
50, #20
50,
50,
50,
50,
50,
50,
50,
50,
50,
50, #30
50,
50,
50,
50,
50,
50,
50,
50,
50,
50, #40
50,
50,
50,
50,
50,
50,
50,
50,
50,
50), #50
'attacks': (('Synergy',
(11,
14,
16,
18,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59),
(60,
65,
70,
75,
80,
85,
90,
90,
90,
90,
90,
90,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)),
('CigarSmoke',
(14,
15,
17,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58),
(60,
65,
70,
75,
80,
85,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)),
('PickPocket',
(8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50),
(55,
65,
70,
75,
80,
85,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)),
('TeeOff',
(10,
12,
14,
16,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56),
(60,
65,
75,
85,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90,
90),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)))},
'bf': {'name': TTLocalizer.SuitBottomFeeder,
'singularname': TTLocalizer.SuitBottomFeederS,
'pluralname': TTLocalizer.SuitBottomFeederP,
'level': 0,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('RubberStamp',
(2,
3,
4,
5,
6),
(75,
80,
85,
90,
95),
(20,
20,
20,
20,
20)),
('Shred',
(2,
4,
6,
8,
10),
(50,
55,
60,
65,
70),
(20,
20,
20,
20,
20)),
('Watercooler',
(3,
4,
5,
6,
7),
(95,
95,
95,
95,
95),
(10,
10,
10,
10,
10)),
('PickPocket',
(1,
1,
2,
2,
3),
(25,
30,
35,
40,
45),
(50,
50,
50,
50,
50)))},
'b': {'name': TTLocalizer.SuitBloodsucker,
'singularname': TTLocalizer.SuitBloodsuckerS,
'pluralname': TTLocalizer.SuitBloodsuckerP,
'level': 1,
'freq': (50,
30,
10,
5,
5),
'acc': (45,
50,
55,
60,
65),
'attacks': (('EvictionNotice',
(1,
2,
3,
3,
4),
(75,
75,
75,
75,
75),
(20,
20,
20,
20,
20)),
('RedTape',
(2,
3,
4,
6,
9),
(75,
75,
75,
75,
75),
(20,
20,
20,
20,
20)),
('Withdrawal',
(6,
8,
10,
12,
14),
(95,
95,
95,
95,
95),
(10,
10,
10,
10,
10)),
('Bite',
(5,
6,
7,
8,
10),
(60,
75,
80,
85,
90),
(10,
10,
10,
10,
10)),
('Liquidate',
(2,
3,
4,
6,
9),
(50,
60,
70,
80,
90),
(40,
40,
40,
40,
40)))},
'dt': {'name': TTLocalizer.SuitDoubleTalker,
'singularname': TTLocalizer.SuitDoubleTalkerS,
'pluralname': TTLocalizer.SuitDoubleTalkerP,
'level': 2,
'freq': (50,
30,
10,
5,
5),
'acc': (65,
70,
75,
80,
85),
'attacks': (('RubberStamp',
(1,
1,
1,
1,
1),
(50,
60,
70,
80,
90),
(5,
5,
5,
5,
5)),
('BounceCheck',
(1,
1,
1,
1,
1),
(50,
60,
70,
80,
90),
(5,
5,
5,
5,
5)),
('BuzzWord',
(1,
2,
3,
5,
6),
(50,
60,
70,
80,
90),
(20,
20,
20,
20,
20)),
('DoubleTalk',
(6,
6,
9,
13,
18),
(50,
60,
70,
80,
90),
(25,
25,
25,
25,
25)),
('Jargon',
(3,
4,
6,
9,
12),
(50,
60,
70,
80,
90),
(25,
25,
25,
25,
25)),
('MumboJumbo',
(3,
4,
6,
9,
12),
(50,
60,
70,
80,
90),
(20,
20,
20,
20,
20)))},
'ac': {'name': TTLocalizer.SuitAmbulanceChaser,
'singularname': TTLocalizer.SuitAmbulanceChaserS,
'pluralname': TTLocalizer.SuitAmbulanceChaserP,
'level': 3,
'freq': (50,
30,
10,
5,
5),
'acc': (65,
70,
75,
80,
85),
'attacks': (('Shake',
(4,
6,
9,
12,
15),
(75,
75,
75,
75,
75),
(15,
15,
15,
15,
15)),
('RedTape',
(6,
8,
12,
15,
19),
(75,
75,
75,
75,
75),
(30,
30,
30,
30,
30)),
('Rolodex',
(3,
4,
5,
6,
7),
(75,
75,
75,
75,
75),
(20,
20,
20,
20,
20)),
('HangUp',
(2,
3,
4,
5,
6),
(75,
75,
75,
75,
75),
(35,
35,
35,
35,
35)))},
'bs': {'name': TTLocalizer.SuitBackStabber,
'singularname': TTLocalizer.SuitBackStabberS,
'pluralname': TTLocalizer.SuitBackStabberP,
'level': 4,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('GuiltTrip',
(8,
11,
13,
15,
18),
(60,
75,
80,
85,
90),
(35,
35,
35,
35,
35)), ('RestrainingOrder',
(6,
7,
9,
11,
13),
(50,
65,
70,
75,
90),
(25,
25,
25,
25,
25)), ('CigarSmoke',
(10,
12,
15,
18,
20),
(55,
65,
75,
85,
95),
(15,
15,
15,
15,
15)), ('FingerWag',
(5,
6,
7,
8,
9),
(50,
55,
65,
75,
80),
(25,
25,
25,
25,
25)))},
'sd': {'name': TTLocalizer.SuitSpinDoctor,
'singularname': TTLocalizer.SuitSpinDoctorS,
'pluralname': TTLocalizer.SuitSpinDoctorP,
'level': 5,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('ParadigmShift',
(9,
10,
13,
16,
17),
(60,
75,
80,
85,
90),
(30,
30,
30,
30,
30)),
('Quake',
(8,
10,
12,
14,
16),
(60,
65,
70,
75,
80),
(20,
20,
20,
20,
20)),
('Spin',
(10,
12,
15,
18,
20),
(70,
75,
80,
85,
90),
(20,
20,
20,
20,
20)),
('ReOrg',
(5,
8,
11,
13,
15),
(65,
75,
80,
85,
90),
(15,
15,
15,
15,
15)),
('WriteOff',
(6,
7,
8,
9,
10),
(60,
65,
75,
85,
90),
(15,
15,
15,
15,
15)))},
'le': {'name': TTLocalizer.SuitLegalEagle,
'singularname': TTLocalizer.SuitLegalEagleS,
'pluralname': TTLocalizer.SuitLegalEagleP,
'level': 6,
'freq': (50,
30,
10,
5,
5),
'acc': (35,
40,
45,
50,
55),
'attacks': (('EvilEye',
(10,
11,
13,
15,
16),
(60,
75,
80,
85,
90),
(20,
20,
20,
20,
20)),
('Jargon',
(7,
9,
11,
13,
15),
(60,
70,
75,
80,
90),
(15,
15,
15,
15,
15)),
('Legalese',
(11,
13,
16,
19,
21),
(55,
65,
75,
85,
95),
(35,
35,
35,
35,
35)),
('PeckingOrder',
(12,
15,
17,
19,
22),
(70,
75,
80,
85,
95),
(30,
30,
30,
30,
30)))},
'bw': {'name': TTLocalizer.SuitBigWig,
'singularname': TTLocalizer.SuitBigWigS,
'pluralname': TTLocalizer.SuitBigWigP,
'level': 7,
'freq': (50,
30,
15,
15,
10,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5),
'acc': (30,
35,
40,
45,
50, #12
50,
50,
50,
50,
50,
50,
50,
50, #20
50,
50,
50,
50,
50,
50,
50,
50,
50,
50, #30
50,
50,
50,
50,
50,
50,
50,
50,
50,
50, #40
50,
50,
50,
50,
50,
50,
50,
50,
50,
50), #50
'attacks': (('GuiltTrip',
(10,
11,
13,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54),
(70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)),
('ThrowBook',
(14,
16,
18,
20,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60),
(70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)),
('CigarSmoke',
(10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52),
(70,
75,
80,
85,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)),
('FingerWag',
(13,
15,
17,
19,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59),
(80,
85,
85,
85,
90,
90,
90,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95,
95),
(25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25,
25)))}}
ATK_TGT_UNKNOWN = 1
ATK_TGT_SINGLE = 2
ATK_TGT_GROUP = 3
SuitAttacks = {'AcidRain': ('magic1', ATK_TGT_SINGLE),
'Audit': ('phone', ATK_TGT_SINGLE),
'Bite': ('throw-paper', ATK_TGT_SINGLE),
'BounceCheck': ('throw-paper', ATK_TGT_SINGLE),
'BrainStorm': ('effort', ATK_TGT_SINGLE),
'BuzzWord': ('speak', ATK_TGT_SINGLE),
'Calculate': ('phone', ATK_TGT_SINGLE),
'Canned': ('throw-paper', ATK_TGT_SINGLE),
'Chomp': ('throw-paper', ATK_TGT_SINGLE),
'CigarSmoke': ('cigar-smoke', ATK_TGT_SINGLE),
'ClipOnTie': ('throw-paper', ATK_TGT_SINGLE),
'Crunch': ('throw-object', ATK_TGT_SINGLE),
'Demotion': ('magic1', ATK_TGT_SINGLE),
'DoubleTalk': ('speak', ATK_TGT_SINGLE),
'Downsize': ('magic2', ATK_TGT_SINGLE),
'EvictionNotice': ('throw-paper', ATK_TGT_SINGLE),
'EvilEye': ('glower', ATK_TGT_SINGLE),
'Filibuster': ('speak', ATK_TGT_SINGLE),
'FillWithLead': ('pencil-sharpener', ATK_TGT_SINGLE),
'FingerWag': ('finger-wag', ATK_TGT_SINGLE),
'Fired': ('magic2', ATK_TGT_SINGLE),
'FiveOClockShadow': ('glower', ATK_TGT_SINGLE),
'FloodTheMarket': ('glower', ATK_TGT_SINGLE),
'FountainPen': ('pen-squirt', ATK_TGT_SINGLE),
'FreezeAssets': ('glower', ATK_TGT_SINGLE),
'Gavel': ('gavel', ATK_TGT_SINGLE),
'GlowerPower': ('glower', ATK_TGT_SINGLE),
'GuiltTrip': ('magic1', ATK_TGT_GROUP),
'HalfWindsor': ('throw-paper', ATK_TGT_SINGLE),
'HangUp': ('phone', ATK_TGT_SINGLE),
'HeadShrink': ('magic1', ATK_TGT_SINGLE),
'HotAir': ('speak', ATK_TGT_SINGLE),
'Jargon': ('speak', ATK_TGT_SINGLE),
'Legalese': ('speak', ATK_TGT_SINGLE),
'Liquidate': ('magic1', ATK_TGT_SINGLE),
'MarketCrash': ('throw-paper', ATK_TGT_SINGLE),
'MumboJumbo': ('speak', ATK_TGT_SINGLE),
'ParadigmShift': ('magic2', ATK_TGT_GROUP),
'PeckingOrder': ('throw-object', ATK_TGT_SINGLE),
'PickPocket': ('pickpocket', ATK_TGT_SINGLE),
'PinkSlip': ('throw-paper', ATK_TGT_SINGLE),
'PlayHardball': ('throw-paper', ATK_TGT_SINGLE),
'PoundKey': ('phone', ATK_TGT_SINGLE),
'PowerTie': ('throw-paper', ATK_TGT_SINGLE),
'PowerTrip': ('magic1', ATK_TGT_GROUP),
'Quake': ('quick-jump', ATK_TGT_GROUP),
'RazzleDazzle': ('smile', ATK_TGT_SINGLE),
'RedTape': ('throw-object', ATK_TGT_SINGLE),
'ReOrg': ('magic3', ATK_TGT_SINGLE),
'RestrainingOrder': ('throw-paper', ATK_TGT_SINGLE),
'Rolodex': ('roll-o-dex', ATK_TGT_SINGLE),
'RubberStamp': ('rubber-stamp', ATK_TGT_SINGLE),
'RubOut': ('hold-eraser', ATK_TGT_SINGLE),
'Sacked': ('throw-paper', ATK_TGT_SINGLE),
'SandTrap': ('golf-club-swing', ATK_TGT_SINGLE),
'Schmooze': ('speak', ATK_TGT_SINGLE),
'Shake': ('stomp', ATK_TGT_GROUP),
'Shred': ('shredder', ATK_TGT_SINGLE),
'SongAndDance': ('song-and-dance', ATK_TGT_GROUP),
'Spin': ('magic3', ATK_TGT_SINGLE),
'Synergy': ('magic3', ATK_TGT_GROUP),
'Tabulate': ('phone', ATK_TGT_SINGLE),
'TeeOff': ('golf-club-swing', ATK_TGT_SINGLE),
'ThrowBook': ('throw-object', ATK_TGT_SINGLE),
'Tremor': ('stomp', ATK_TGT_GROUP),
'Watercooler': ('watercooler', ATK_TGT_SINGLE),
'Withdrawal': ('magic1', ATK_TGT_SINGLE),
'WriteOff': ('hold-pencil', ATK_TGT_SINGLE),
'Overdraft': ('hold-pencil', ATK_TGT_SINGLE)}
ACID_RAIN = SuitAttacks.keys().index('AcidRain')
AUDIT = SuitAttacks.keys().index('Audit')
BITE = SuitAttacks.keys().index('Bite')
BOUNCE_CHECK = SuitAttacks.keys().index('BounceCheck')
BRAIN_STORM = SuitAttacks.keys().index('BrainStorm')
BUZZ_WORD = SuitAttacks.keys().index('BuzzWord')
CALCULATE = SuitAttacks.keys().index('Calculate')
CANNED = SuitAttacks.keys().index('Canned')
CHOMP = SuitAttacks.keys().index('Chomp')
CIGAR_SMOKE = SuitAttacks.keys().index('CigarSmoke')
CLIPON_TIE = SuitAttacks.keys().index('ClipOnTie')
CRUNCH = SuitAttacks.keys().index('Crunch')
DEMOTION = SuitAttacks.keys().index('Demotion')
DOWNSIZE = SuitAttacks.keys().index('Downsize')
DOUBLE_TALK = SuitAttacks.keys().index('DoubleTalk')
EVICTION_NOTICE = SuitAttacks.keys().index('EvictionNotice')
EVIL_EYE = SuitAttacks.keys().index('EvilEye')
FILIBUSTER = SuitAttacks.keys().index('Filibuster')
FILL_WITH_LEAD = SuitAttacks.keys().index('FillWithLead')
FINGER_WAG = SuitAttacks.keys().index('FingerWag')
FIRED = SuitAttacks.keys().index('Fired')
FIVE_O_CLOCK_SHADOW = SuitAttacks.keys().index('FiveOClockShadow')
FLOOD_THE_MARKET = SuitAttacks.keys().index('FloodTheMarket')
FOUNTAIN_PEN = SuitAttacks.keys().index('FountainPen')
FREEZE_ASSETS = SuitAttacks.keys().index('FreezeAssets')
GAVEL = SuitAttacks.keys().index('Gavel')
GLOWER_POWER = SuitAttacks.keys().index('GlowerPower')
GUILT_TRIP = SuitAttacks.keys().index('GuiltTrip')
HALF_WINDSOR = SuitAttacks.keys().index('HalfWindsor')
HANG_UP = SuitAttacks.keys().index('HangUp')
HEAD_SHRINK = SuitAttacks.keys().index('HeadShrink')
HOT_AIR = SuitAttacks.keys().index('HotAir')
JARGON = SuitAttacks.keys().index('Jargon')
LEGALESE = SuitAttacks.keys().index('Legalese')
LIQUIDATE = SuitAttacks.keys().index('Liquidate')
MARKET_CRASH = SuitAttacks.keys().index('MarketCrash')
MUMBO_JUMBO = SuitAttacks.keys().index('MumboJumbo')
PARADIGM_SHIFT = SuitAttacks.keys().index('ParadigmShift')
PECKING_ORDER = SuitAttacks.keys().index('PeckingOrder')
PICK_POCKET = SuitAttacks.keys().index('PickPocket')
PINK_SLIP = SuitAttacks.keys().index('PinkSlip')
PLAY_HARDBALL = SuitAttacks.keys().index('PlayHardball')
POUND_KEY = SuitAttacks.keys().index('PoundKey')
POWER_TIE = SuitAttacks.keys().index('PowerTie')
POWER_TRIP = SuitAttacks.keys().index('PowerTrip')
QUAKE = SuitAttacks.keys().index('Quake')
RAZZLE_DAZZLE = SuitAttacks.keys().index('RazzleDazzle')
RED_TAPE = SuitAttacks.keys().index('RedTape')
RE_ORG = SuitAttacks.keys().index('ReOrg')
RESTRAINING_ORDER = SuitAttacks.keys().index('RestrainingOrder')
ROLODEX = SuitAttacks.keys().index('Rolodex')
RUBBER_STAMP = SuitAttacks.keys().index('RubberStamp')
RUB_OUT = SuitAttacks.keys().index('RubOut')
SACKED = SuitAttacks.keys().index('Sacked')
SANDTRAP = SuitAttacks.keys().index('SandTrap')
SCHMOOZE = SuitAttacks.keys().index('Schmooze')
SHAKE = SuitAttacks.keys().index('Shake')
SHRED = SuitAttacks.keys().index('Shred')
SONG_AND_DANCE = SuitAttacks.keys().index('SongAndDance')
SPIN = SuitAttacks.keys().index('Spin')
SYNERGY = SuitAttacks.keys().index('Synergy')
TABULATE = SuitAttacks.keys().index('Tabulate')
TEE_OFF = SuitAttacks.keys().index('TeeOff')
THROW_BOOK = SuitAttacks.keys().index('ThrowBook')
TREMOR = SuitAttacks.keys().index('Tremor')
WATERCOOLER = SuitAttacks.keys().index('Watercooler')
WITHDRAWAL = SuitAttacks.keys().index('Withdrawal')
WRITE_OFF = SuitAttacks.keys().index('WriteOff')
OVERDRAFT = SuitAttacks.keys().index('Overdraft')
def getFaceoffTaunt(suitName, doId):
if suitName in SuitFaceoffTaunts:
taunts = SuitFaceoffTaunts[suitName]
else:
taunts = TTLocalizer.SuitFaceoffDefaultTaunts
return taunts[doId % len(taunts)]
SuitFaceoffTaunts = OTPLocalizer.SuitFaceoffTaunts
def getAttackTauntIndexFromIndex(suit, attackIndex):
adict = getSuitAttack(suit.getStyleName(), suit.getLevel(), attackIndex)
return getAttackTauntIndex(adict['name'])
def getAttackTauntIndex(attackName):
if attackName in SuitAttackTaunts:
taunts = SuitAttackTaunts[attackName]
return random.randint(0, len(taunts) - 1)
else:
return 1
def getAttackTaunt(attackName, index = None):
if attackName in SuitAttackTaunts:
taunts = SuitAttackTaunts[attackName]
else:
taunts = TTLocalizer.SuitAttackDefaultTaunts
    if index is not None:
        if index >= len(taunts):
            notify.warning('index exceeds length of taunts list in getAttackTaunt')
            return TTLocalizer.SuitAttackDefaultTaunts[0]
        return taunts[index]
    else:
        return random.choice(taunts)
SuitAttackTaunts = TTLocalizer.SuitAttackTaunts
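# Example (illustrative, not part of the original data): looking up taunt lines.
# 'bf' and 'PickPocket' are names defined above; 123456 is an arbitrary avatar
# doId and only fixes which faceoff line is deterministically picked.
# faceoffLine = getFaceoffTaunt('bf', 123456)
# attackLine = getAttackTaunt('PickPocket')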
DisabledAttacks = ('Gavel', 'SandTrap', 'FloodTheMarket', 'FiveOClockShadow')
def getAttacksByType(attributes):
groupAttacks = []
singleAttacks = []
for attack in sorted(attributes['attacks'], key=lambda x: x[0]):
if attack[0] in DisabledAttacks:
continue
if SuitAttacks[attack[0]][1] == ATK_TGT_GROUP:
groupAttacks.append(attack)
else:
singleAttacks.append(attack)
return groupAttacks, singleAttacks |
py | 1a37917e5b1d1a11875c8a39f2ad774503ef9755 | ### Simulate a large number of coin flips using Python ###
from random import randint
def coingame(numflips: int, gamenum: int):
flips = []
for _ in range(0, numflips):
flips.append(randint(0, 1))
heads = flips.count(0)
tails = flips.count(1)
    # Print the results of this game (heads/tails counts and total flips)
print(f"Game {gamenum + 1} | Heads: {heads:,} | Tails: {tails:,} | Total: {heads + tails:,}")
if __name__ == '__main__':
# Call the function with the number of games and flips
games = 5
flips = 1000000
print("< Python >")
for i in range(0, games):
coingame(flips, i)
|
py | 1a3792d36f02e17e4558e276864dba9f5e0ebc91 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add master_addresses to bay
Revision ID: 6f21dc998bb
Revises: 421102d1f2d2
Create Date: 2015-08-20 13:57:14.863292
"""
# revision identifiers, used by Alembic.
revision = '6f21dc998bb'
down_revision = '421102d1f2d2'
from alembic import op # noqa: E402
from magnum.db.sqlalchemy import models # noqa: E402
import sqlalchemy as sa # noqa: E402
def upgrade():
op.add_column(
'bay',
sa.Column('master_addresses',
models.JSONEncodedList(),
nullable=True)
)
|
py | 1a3794503522218756f28bd873733f86dc333c84 | """portfolio_site URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('projects.urls')),
path('admin/', admin.site.urls),
]
urlpatterns += static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
py | 1a37946f62e46dfbffb3c4206130d6199a60d2f5 | class RebarShapeConstraint180DegreeBendRadius(RebarShapeConstraint, IDisposable):
"""
A constraint which can be applied to a RebarShapeSegment,and causes the segment
to be replaced with a 180-degree arc. The associated parameter drives
the radius of the arc.
RebarShapeConstraint180DegreeBendRadius(paramId: ElementId,refType: RebarShapeArcReferenceType)
"""
def Dispose(self):
""" Dispose(self: RebarShapeConstraint,A_0: bool) """
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: RebarShapeConstraint,disposing: bool) """
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, paramId, refType):
""" __new__(cls: type,paramId: ElementId,refType: RebarShapeArcReferenceType) """
pass
ArcReferenceType = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""A choice of rule for measuring the radius.
Get: ArcReferenceType(self: RebarShapeConstraint180DegreeBendRadius) -> RebarShapeArcReferenceType
"""
|
py | 1a37949d4a51191eccfc854fbb3002f6ca36a8fd | import mock
import csv
import furl
import pytz
import pytest
from datetime import datetime, timedelta
from nose import tools as nt
from django.test import RequestFactory
from django.http import Http404
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import Permission
from tests.base import AdminTestCase
from website import settings
from framework.auth import Auth
from osf.models.user import OSFUser
from osf.models.tag import Tag
from osf_tests.factories import (
UserFactory,
AuthUserFactory,
ProjectFactory,
TagFactory,
UnconfirmedUserFactory
)
from admin_tests.utilities import setup_view, setup_log_view, setup_form_view
from admin.users import views
from admin.users.forms import WorkshopForm, UserSearchForm
from osf.models.admin_log_entry import AdminLogEntry
pytestmark = pytest.mark.django_db
class TestUserView(AdminTestCase):
def test_no_guid(self):
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request)
with nt.assert_raises(AttributeError):
view.get_object()
def test_load_data(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request, guid=guid)
res = view.get_object()
nt.assert_is_instance(res, dict)
def test_name_data(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request, guid=guid)
temp_object = view.get_object()
view.object = temp_object
res = view.get_context_data()
nt.assert_equal(res[views.UserView.context_object_name], temp_object)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:user', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
views.UserView.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
view_permission = Permission.objects.get(codename='view_osfuser')
user.user_permissions.add(view_permission)
user.save()
request = RequestFactory().get(reverse('users:user', kwargs={'guid': guid}))
request.user = user
response = views.UserView.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestResetPasswordView(AdminTestCase):
def setUp(self):
super(TestResetPasswordView, self).setUp()
self.user = UserFactory()
self.request = RequestFactory().get('/fake_path')
self.request.user = self.user
self.plain_view = views.ResetPasswordView
self.view = setup_view(self.plain_view(), self.request, guid=self.user._id)
def test_get_initial(self):
self.view.user = self.user
self.view.get_initial()
res = self.view.initial
nt.assert_is_instance(res, dict)
nt.assert_equal(res['guid'], self.user._id)
nt.assert_equal(res['emails'], [(r, r) for r in self.user.emails.values_list('address', flat=True)])
def test_reset_password_context(self):
self.view.user = self.user
res = self.view.get_context_data()
nt.assert_is_instance(res, dict)
nt.assert_in((self.user.emails.first().address, self.user.emails.first().address), self.view.initial['emails'])
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:reset_password', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
views.ResetPasswordView.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:reset_password', kwargs={'guid': guid}))
request.user = user
response = views.ResetPasswordView.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestDisableUser(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.UserDeleteView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_disable_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
nt.assert_true(self.user.is_disabled)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_reactivate_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
self.view().delete(self.request)
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
nt.assert_false(self.user.is_disabled)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_no_user(self):
view = setup_view(views.UserDeleteView(), self.request, guid='meh')
with nt.assert_raises(Http404):
view.delete(self.request)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:disable', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:disable', kwargs={'guid': guid}))
request.user = user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestHamUserRestore(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.HamUserRestoreView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
self.spam_confirmed, created = Tag.objects.get_or_create(name='spam_confirmed')
self.ham_confirmed, created = Tag.objects.get_or_create(name='ham_confirmed')
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_enable_user(self):
self.user.disable_account()
self.user.save()
nt.assert_true(self.user.is_disabled)
self.view().delete(self.request)
self.user.reload()
nt.assert_false(self.user.is_disabled)
nt.assert_false(self.user.all_tags.filter(name=self.spam_confirmed.name).exists())
nt.assert_true(self.user.all_tags.filter(name=self.ham_confirmed.name).exists())
class TestDisableSpamUser(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.public_node = ProjectFactory(creator=self.user, is_public=True)
self.private_node = ProjectFactory(creator=self.user, is_public=False)
self.request = RequestFactory().post('/fake_path')
self.view = views.SpamUserDeleteView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_disable_spam_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
self.public_node.reload()
nt.assert_true(self.user.is_disabled)
nt.assert_true(self.user.all_tags.filter(name='spam_confirmed').exists())
nt.assert_false(self.public_node.is_public)
nt.assert_equal(AdminLogEntry.objects.count(), count + 3)
def test_no_user(self):
view = setup_view(self.view(), self.request, guid='meh')
with nt.assert_raises(Http404):
view.delete(self.request)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:spam_disable', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:spam_disable', kwargs={'guid': guid}))
request.user = user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class SpamUserListMixin(object):
def setUp(self):
spam_flagged = TagFactory(name='spam_flagged')
spam_confirmed = TagFactory(name='spam_confirmed')
ham_confirmed = TagFactory(name='ham_confirmed')
self.flagged_user = UserFactory()
self.flagged_user.tags.add(spam_flagged)
self.flagged_user.save()
self.spam_user = UserFactory()
self.spam_user.tags.add(spam_confirmed)
self.spam_user.save()
self.ham_user = UserFactory()
self.ham_user.tags.add(ham_confirmed)
self.ham_user.save()
self.request = RequestFactory().post('/fake_path')
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(self.url)
request.user = user
with self.assertRaises(PermissionDenied):
self.plain_view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
view_permission = Permission.objects.get(codename='view_osfuser')
spam_permission = Permission.objects.get(codename='view_spam')
user.user_permissions.add(view_permission)
user.user_permissions.add(spam_permission)
user.save()
request = RequestFactory().get(self.url)
request.user = user
response = self.plain_view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestFlaggedSpamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestFlaggedSpamUserList, self).setUp()
self.plain_view = views.UserFlaggedSpamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:flagged-spam')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.flagged_user._id)
class TestConfirmedSpamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestConfirmedSpamUserList, self).setUp()
self.plain_view = views.UserKnownSpamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:known-spam')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.spam_user._id)
class TestConfirmedHamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestConfirmedHamUserList, self).setUp()
self.plain_view = views.UserKnownHamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:known-ham')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.ham_user._id)
class TestRemove2Factor(AdminTestCase):
def setUp(self):
super(TestRemove2Factor, self).setUp()
self.user = AuthUserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.User2FactorDeleteView
self.setup_view = setup_log_view(self.view(), self.request, guid=self.user._id)
self.url = reverse('users:remove2factor', kwargs={'guid': self.user._id})
@mock.patch('osf.models.user.OSFUser.delete_addon')
def test_remove_two_factor_get(self, mock_delete_addon):
self.setup_view.delete(self.request)
mock_delete_addon.assert_called_with('twofactor')
def test_integration_delete_two_factor(self):
user_addon = self.user.get_or_add_addon('twofactor')
nt.assert_not_equal(user_addon, None)
user_settings = self.user.get_addon('twofactor')
nt.assert_not_equal(user_settings, None)
count = AdminLogEntry.objects.count()
self.setup_view.delete(self.request)
post_addon = self.user.get_addon('twofactor')
nt.assert_equal(post_addon, None)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_no_user_permissions_raises_error(self):
guid = self.user._id
request = RequestFactory().get(self.url)
request.user = self.user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
guid = self.user._id
change_permission = Permission.objects.get(codename='change_osfuser')
self.user.user_permissions.add(change_permission)
self.user.save()
request = RequestFactory().get(self.url)
request.user = self.user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestUserWorkshopFormView(AdminTestCase):
def setUp(self):
self.user_1 = AuthUserFactory()
self.auth_1 = Auth(self.user_1)
self.view = views.UserWorkshopFormView()
self.workshop_date = timezone.now()
self.data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, None, self.user_1.username, None],
]
self.user_exists_by_name_data = [
['number', 'date', 'location', 'topic', 'name', 'email', 'other'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, self.user_1.fullname, '[email protected]', None],
]
self.user_not_found_data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, None, '[email protected]', None],
]
self.mock_data = mock.patch.object(
csv,
'reader',
# parse data into the proper format handling None values as csv reader would
side_effect=(lambda values: [[item or '' for item in value] for value in values])
)
self.mock_data.start()
def tearDown(self):
self.mock_data.stop()
def _create_and_parse_test_file(self, data):
result_csv = self.view.parse(data)
return result_csv
def _create_nodes_and_add_logs(self, first_activity_date, second_activity_date=None):
node_one = ProjectFactory(creator=self.user_1)
node_one.date_created = first_activity_date
node_one.add_log(
'log_added', params={'project': node_one._id}, auth=self.auth_1, log_date=first_activity_date, save=True
)
if second_activity_date:
node_two = ProjectFactory(creator=self.user_1)
node_two.date_created = second_activity_date
node_two.add_log(
'log_added', params={'project': node_two._id}, auth=self.auth_1, log_date=second_activity_date, save=True
)
def test_correct_number_of_columns_added(self):
added_columns = ['OSF ID', 'Logs Since Workshop', 'Nodes Created Since Workshop', 'Last Log Data']
result_csv = self._create_and_parse_test_file(self.data)
nt.assert_equal(len(self.data[0]) + len(added_columns), len(result_csv[0]))
def test_user_activity_day_of_workshop_only(self):
self._create_nodes_and_add_logs(first_activity_date=self.workshop_date)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_activity_before_workshop_only(self):
activity_date = timezone.now() - timedelta(days=1)
self._create_nodes_and_add_logs(first_activity_date=activity_date)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_activity_after_workshop_only(self):
activity_date = timezone.now() + timedelta(hours=25)
self._create_nodes_and_add_logs(first_activity_date=activity_date)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 1)
nt.assert_equal(user_nodes_created_since_workshop, 1)
def test_user_activity_day_of_workshop_and_before(self):
activity_date = timezone.now() - timedelta(days=1)
self._create_nodes_and_add_logs(
first_activity_date=self.workshop_date,
second_activity_date=activity_date
)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_activity_day_of_workshop_and_after(self):
activity_date = timezone.now() + timedelta(hours=25)
self._create_nodes_and_add_logs(
first_activity_date=self.workshop_date,
second_activity_date=activity_date
)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 1)
nt.assert_equal(user_nodes_created_since_workshop, 1)
def test_user_activity_before_workshop_and_after(self):
before_activity_date = timezone.now() - timedelta(days=1)
after_activity_date = timezone.now() + timedelta(hours=25)
self._create_nodes_and_add_logs(
first_activity_date=before_activity_date,
second_activity_date=after_activity_date
)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
# One log before workshop, one after, only should show the one after
nt.assert_equal(user_logs_since_workshop, 1)
nt.assert_equal(user_nodes_created_since_workshop, 1)
def test_user_osf_account_not_found(self):
result_csv = self._create_and_parse_test_file(self.user_not_found_data)
user_id = result_csv[1][-4]
last_log_date = result_csv[1][-1]
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_id, '')
nt.assert_equal(last_log_date, '')
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_found_by_name(self):
result_csv = self._create_and_parse_test_file(self.user_exists_by_name_data)
user_id = result_csv[1][-4]
last_log_date = result_csv[1][-1]
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_id, self.user_1.id)
nt.assert_equal(last_log_date, '')
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_form_valid(self):
request = RequestFactory().post('/fake_path')
data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, '9/1/16', None, None, None, self.user_1.username, None],
]
uploaded = SimpleUploadedFile('test_name', bytes(csv.reader(data)), content_type='text/csv')
form = WorkshopForm(data={'document': uploaded})
form.is_valid()
form.cleaned_data['document'] = uploaded
setup_form_view(self.view, request, form)
class TestUserSearchView(AdminTestCase):
def setUp(self):
self.user_1 = AuthUserFactory(fullname='Broken Matt Hardy')
self.user_2 = AuthUserFactory(fullname='Jeff Hardy')
self.user_3 = AuthUserFactory(fullname='Reby Sky')
self.user_4 = AuthUserFactory(fullname='King Maxel Hardy')
self.user_2_alternate_email = '[email protected]'
self.user_2.emails.create(address=self.user_2_alternate_email)
self.user_2.save()
self.request = RequestFactory().get('/fake_path')
self.view = views.UserFormView()
self.view = setup_form_view(self.view, self.request, form=UserSearchForm())
def test_search_user_by_guid(self):
form_data = {
'guid': self.user_1.guids.first()._id
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_1.guids.first()._id))
def test_search_user_by_name(self):
form_data = {
'name': 'Hardy'
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/search/Hardy/')
def test_search_user_by_username(self):
form_data = {
'email': self.user_1.username
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_1.guids.first()._id))
def test_search_user_by_alternate_email(self):
form_data = {
'email': self.user_2_alternate_email
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_2.guids.first()._id))
def test_search_user_list(self):
view = views.UserSearchList()
view = setup_view(view, self.request)
view.kwargs = {'name': 'Hardy'}
results = view.get_queryset()
nt.assert_equal(len(results), 3)
for user in results:
nt.assert_in('Hardy', user.fullname)
def test_search_user_list_case_insensitive(self):
view = views.UserSearchList()
view = setup_view(view, self.request)
view.kwargs = {'name': 'hardy'}
results = view.get_queryset()
nt.assert_equal(len(results), 3)
for user in results:
nt.assert_in('Hardy', user.fullname)
class TestGetLinkView(AdminTestCase):
def test_get_user_confirmation_link(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetUserConfirmationLink()
view = setup_view(view, request, guid=user._id)
user_token = user.email_verifications.keys()[0]
ideal_link_path = '/confirm/{}/{}/'.format(user._id, user_token)
link = view.get_link(user)
link_path = str(furl.furl(link).path)
nt.assert_equal(link_path, ideal_link_path)
def test_get_user_confirmation_link_with_expired_token(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetUserConfirmationLink()
view = setup_view(view, request, guid=user._id)
old_user_token = user.email_verifications.keys()[0]
user.email_verifications[old_user_token]['expiration'] = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(hours=24)
user.save()
link = view.get_link(user)
new_user_token = user.email_verifications.keys()[0]
link_path = str(furl.furl(link).path)
ideal_link_path = '/confirm/{}/{}/'.format(user._id, new_user_token)
nt.assert_equal(link_path, ideal_link_path)
def test_get_password_reset_link(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetPasswordResetLink()
view = setup_view(view, request, guid=user._id)
link = view.get_link(user)
user_token = user.verification_key_v2.get('token')
nt.assert_is_not_none(user_token)
ideal_link_path = '/resetpassword/{}/{}'.format(user._id, user_token)
link_path = str(furl.furl(link).path)
nt.assert_equal(link_path, ideal_link_path)
def test_get_unclaimed_node_links(self):
project = ProjectFactory()
unregistered_contributor = project.add_unregistered_contributor(fullname='Brother Nero', email='[email protected]', auth=Auth(project.creator))
project.save()
request = RequestFactory().get('/fake_path')
view = views.GetUserClaimLinks()
view = setup_view(view, request, guid=unregistered_contributor._id)
links = view.get_claim_links(unregistered_contributor)
unclaimed_records = unregistered_contributor.unclaimed_records
nt.assert_equal(len(links), 1)
nt.assert_equal(len(links), len(unclaimed_records.keys()))
link = links[0]
nt.assert_in(project._id, link)
nt.assert_in(unregistered_contributor.unclaimed_records[project._id]['token'], link)
class TestUserReindex(AdminTestCase):
def setUp(self):
super(TestUserReindex, self).setUp()
self.request = RequestFactory().post('/fake_path')
self.user = AuthUserFactory()
@mock.patch('website.search.search.update_user')
def test_reindex_user_elastic(self, mock_reindex_elastic):
count = AdminLogEntry.objects.count()
view = views.UserReindexElastic()
view = setup_log_view(view, self.request, guid=self.user._id)
view.delete(self.request)
nt.assert_true(mock_reindex_elastic.called)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
|
py | 1a37951822465b18a41fd7cb4f8f0aa50d3049b4 | #!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# test_cursor08.py
# Log cursors with compression
#
import fnmatch, os, shutil, run, time
from suite_subprocess import suite_subprocess
from wiredtiger import stat, WiredTigerError
from wtscenario import multiply_scenarios, number_scenarios, check_scenarios
import wttest
class test_cursor08(wttest.WiredTigerTestCase, suite_subprocess):
logmax = "100K"
tablename = 'test_cursor08'
uri = 'table:' + tablename
nkeys = 500
reopens = check_scenarios([
('regular', dict(reopen=False)),
('reopen', dict(reopen=True))
])
compress = check_scenarios([
('nop', dict(compress='nop')),
('snappy', dict(compress='snappy')),
('zlib', dict(compress='zlib')),
('none', dict(compress='none')),
])
scenarios = number_scenarios(multiply_scenarios('.', reopens, compress))
# Load the compression extension, and enable it for logging.
def conn_config(self, dir):
return 'log=(archive=false,enabled,file_max=%s,' % self.logmax + \
'compressor=%s),' % self.compress + \
'transaction_sync="(method=dsync,enabled)",' + \
self.extensionArg(self.compress)
# Return the wiredtiger_open extension argument for a shared library.
def extensionArg(self, name):
if name == None or name == 'none':
return ''
testdir = os.path.dirname(__file__)
extdir = os.path.join(run.wt_builddir, 'ext/compressors')
extfile = os.path.join(
extdir, name, '.libs', 'libwiredtiger_' + name + '.so')
if not os.path.exists(extfile):
self.skipTest('compression extension "' + extfile + '" not built')
return ',extensions=["' + extfile + '"]'
def test_log_cursor(self):
# print "Creating %s with config '%s'" % (self.uri, self.create_params)
create_params = 'key_format=i,value_format=S'
self.session.create(self.uri, create_params)
c = self.session.open_cursor(self.uri, None)
# A binary value.
value = u'\u0001\u0002abcd\u0003\u0004'
self.session.begin_transaction()
for k in range(self.nkeys):
c[k] = value
self.session.commit_transaction()
c.close()
if self.reopen:
self.reopen_conn()
# Check for these values via a log cursor
c = self.session.open_cursor("log:", None)
count = 0
while c.next() == 0:
# lsn.file, lsn.offset, opcount
keys = c.get_key()
# txnid, rectype, optype, fileid, logrec_key, logrec_value
values = c.get_value()
try:
if value in str(values[5]): # logrec_value
count += 1
except:
pass
c.close()
self.assertEqual(count, self.nkeys)
if __name__ == '__main__':
wttest.run()
|
py | 1a379523acc99a0938d33135ad5137a0fc496229 | """
pyEngine_problem
"""
# =============================================================================
# Imports
# =============================================================================
from .pyAero_problem import AeroProblem
class EngineProblem(AeroProblem):
"""
The EngineProblem class inherits from the AeroProblem class so that
aerodynamic solvers (AeroSolver) and engine models (EngineModelSMT) can
reference the same flight condition without needing to define redundant
information. The EngineProblem layer simply adds a few possible design
    variables and handles the associated derivative bookkeeping.
Parameters
----------
name : str
Name of this Engine problem.
evalFuncs : iterable object containing strings
The names of the functions the user wants evaluated for this
engineProblem.
throttle : float
Initial value for throttle variable
ISA : float
Initial value for ISA temperature variable"""
def __init__(self, name, throttle=1.0, ISA=0.0, **kwargs):
# Initialize AeroProblem
super().__init__(name, **kwargs)
# Set initial throttle or ISA
self.throttle = throttle
self.ISA = ISA
# Update AeroProblem variable sets with possible engine variables
newVars = ["throttle", "ISA"]
self.allVarFuncs += newVars
self.possibleDVs.update(newVars)
self.possibleFunctions.update(newVars)
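# Example (sketch; the flight-condition keywords shown are illustrative AeroProblem
# arguments, not values required by EngineProblem itself):
# ep = EngineProblem('cruise', throttle=0.8, ISA=0.0, mach=0.78, altitude=10668.0)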
|
py | 1a37955c52ed515cd72506cd08484d4069052bf3 | import mock
import pytest
from ocflib.ucb.cas import verify_ticket
@pytest.yield_fixture
def mock_get():
with mock.patch('requests.get') as mock_get:
yield mock_get
GOOD_RESPONSE = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>1034192</cas:user>
</cas:authenticationSuccess>
</cas:serviceResponse>"""
BAD_RESPONSE = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationFailure code='INVALID_TICKET'>
ticket 'ST-832595-ZOm6NYCTBJO0d41jjL6l-ncas-p3.calnet.berkeley.edu' not recognized
</cas:authenticationFailure>
</cas:serviceResponse>"""
class TestVerifyTicket:
def test_good_ticket(self, mock_get):
mock_get.return_value.text = GOOD_RESPONSE
assert verify_ticket(
'some-ticket',
'https://accounts.ocf.berkeley.edu/',
) == '1034192'
called_url = mock_get.call_args[0][0]
start = 'https://auth.berkeley.edu/cas/serviceValidate?'
assert called_url.startswith(start)
params = called_url[len(start):].split('&')
assert sorted(params) == [
'service=https%3A%2F%2Faccounts.ocf.berkeley.edu%2F',
'ticket=some-ticket',
]
@pytest.mark.parametrize('response', [
BAD_RESPONSE,
'',
'hello world',
])
def test_bad_ticket(self, response, mock_get):
mock_get.return_value.text = response
assert verify_ticket(
'some-ticket',
'https://accounts.ocf.berkeley.edu/',
) is None
|
py | 1a37971956862889932df6ef28237c950891098f | # -*- coding:utf-8 -*-
"""
上海银行间同业拆放利率(Shibor)数据接口
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
from tushare.util import dateu as du
from tushare.util.netbase import Client
from pandas.compat import StringIO
def shibor_data(year=None):
"""
    Fetch the Shanghai Interbank Offered Rate (Shibor)
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    ON: overnight rate
    1W: 1-week rate
    2W: 2-week rate
    1M: 1-month rate
    3M: 3-month rate
    6M: 6-month rate
    9M: 9-month rate
    1Y: 1-year rate
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Shibor']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
clt = Client(url=ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor',
year, lab,
year))
content = clt.gvalue()
df = pd.read_excel(StringIO(content))
df.columns = ct.SHIBOR_COLS
df['date'] = df['date'].map(lambda x: x.date())
if pd.__version__ < '0.21':
df['date'] = df['date'].astype(np.datetime64)
else:
df['date'] = df['date'].astype('datetime64[D]')
return df
except:
return None
def shibor_quote_data(year=None):
"""
    Fetch Shibor bank quote data
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    bank: quoting bank name
    ON: overnight offered rate
    ON_B: overnight bid
    ON_A: overnight ask
    1W_B: 1-week bid
    1W_A: 1-week ask
    2W_B: 2-week bid
    2W_A: 2-week ask
    1M_B: 1-month bid
    1M_A: 1-month ask
    3M_B: 3-month bid
    3M_A: 3-month ask
    6M_B: 6-month bid
    6M_A: 6-month ask
    9M_B: 9-month bid
    9M_A: 9-month ask
    1Y_B: 1-year bid
    1Y_A: 1-year ask
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Quote']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
clt = Client(url=ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Quote',
year, lab,
year))
content = clt.gvalue()
df = pd.read_excel(StringIO(content), skiprows=[0])
# df.columns = ct.QUOTE_COLS
df.columns = ct.SHIBOR_Q_COLS
df['date'] = df['date'].map(lambda x: x.date())
if pd.__version__ < '0.21':
df['date'] = df['date'].astype(np.datetime64)
else:
df['date'] = df['date'].astype('datetime64[D]')
return df
except:
return None
def shibor_ma_data(year=None):
"""
    Fetch Shibor moving-average data
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    remaining columns are the 5-, 10- and 20-day moving averages for each tenor
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
clt = Client(url=ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor_Tendency',
year, lab,
year))
content = clt.gvalue()
df = pd.read_excel(StringIO(content), skiprows=[0])
df.columns = ct.SHIBOR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
if pd.__version__ < '0.21':
df['date'] = df['date'].astype(np.datetime64)
else:
df['date'] = df['date'].astype('datetime64[D]')
return df
except:
return None
def lpr_data(year=None):
"""
    Fetch the Loan Prime Rate (LPR)
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    1Y: 1-year loan prime rate
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
clt = Client(url=ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR',
year, lab,
year))
content = clt.gvalue()
df = pd.read_excel(StringIO(content), skiprows=[0])
df.columns = ct.LPR_COLS
df['date'] = df['date'].map(lambda x: x.date())
if pd.__version__ < '0.21':
df['date'] = df['date'].astype(np.datetime64)
else:
df['date'] = df['date'].astype('datetime64[D]')
return df
except:
return None
def lpr_ma_data(year=None):
"""
    Fetch Loan Prime Rate moving-average data
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    1Y_5: 5-day average
    1Y_10: 10-day average
    1Y_20: 20-day average
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR_Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
clt = Client(url=ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR_Tendency',
year, lab,
year))
content = clt.gvalue()
df = pd.read_excel(StringIO(content), skiprows=[0])
df.columns = ct.LPR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
if pd.__version__ < '0.21':
df['date'] = df['date'].astype(np.datetime64)
else:
df['date'] = df['date'].astype('datetime64[D]')
return df
except:
return None
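# Example usage (sketch): each call downloads an Excel sheet from www.shibor.org,
# so network access is required; None is returned on any failure.
# df = shibor_data(2014)            # daily Shibor fixings
# quotes = shibor_quote_data(2014)  # per-bank bid/ask quotes
# lpr = lpr_data(2014)              # 1-year loan prime rate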
|
py | 1a3797269cc9f510ddd449f32834670da5b034b5 | from .train import main
|
py | 1a3799bce702e00ab405a21a33102ca115f0c862 | import logging
from Request import *
from RandomUtil import *
# Extremely basic check to determine if a post is what we are looking for
def determineExchangeType(submission):
opFlair = submission.link_flair_text
opTitle = submission.title.lower()
opTitle = opTitle.split("[w]")[0]
# Check to ensure the exchange hasn't already been completed
if opFlair is not None and opFlair.lower() == "closed":
return ""
for cardType in searchDict:
if len(searchDict[cardType]) > 0:
if any(string in opTitle for string in hitWordDict[cardType]):
logging.info("Found a valid %s post: %s", cardType, opTitle)
return cardType
return "" |
py | 1a3799e6a025a1bd54f969d2ff3e625d7b25ea6b | from models.network import Net
from learning.learning import create_learners, train_model, test_model, Trainer
from learning.testing import CorrelationMatrix, ResidualStatistics
from data.load_data import load_synth_spectra, split_data
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable
#from utils.errorfuncs import corr_matrix_relresids
plt.rcParams["font.family"] = "serif"
# first train on the npca=6 set
# then test on the npca=15 set
# also perform the splitting for better comparison
wave_grid, qso_cont_npca6, qso_flux_npca6 = load_synth_spectra(small=False, npca=6)
X_train6, X_valid6, X_test6, y_train6, y_valid6, y_test6 = split_data(qso_flux_npca6, qso_cont_npca6)
wave_grid15, qso_cont_npca15, qso_flux_npca15 = load_synth_spectra(small=False, npca=15)
X_train15, X_valid15, X_test15, y_train15, y_valid15, y_test15 = split_data(qso_flux_npca15, qso_cont_npca15)
n_feature = len(X_train6[1])
n_output = len(y_train6[1])
net = Net(n_feature, 100, n_output)
optimizer, criterion = create_learners(net.parameters())
trainer = Trainer(net, optimizer, criterion, batch_size=1000, num_epochs=400)
trainer.train(wave_grid, X_train6, y_train6, X_valid6, y_valid6)
#running_loss, mse_loss_valid, scaler_X, scaler_y = train_model(wave_grid, X_train6, y_train6,\
# X_valid6, y_valid6, net, optimizer,\
# criterion, batch_size=1000, num_epochs=400)
#epochs = np.arange(1, len(running_loss)+1)
# plot the test statistics as a function of wavelength
Stats = ResidualStatistics(X_test15, y_test15, trainer.scaler_X, trainer.scaler_y, net)
fig0, ax0 = Stats.plot_means(wave_grid)
fig0.show()
# test the final model and print the result
#mse_test, corr_matrix = test_model(X_test, y_test, scaler_X, scaler_y, net)
#print ("MSE on test set:", mse_test)
fig, ax = trainer.plot_loss()
#ax.plot(epochs, running_loss, label="Training set")
#ax.plot(epochs, mse_loss_valid, label="Validation set")
#ax.legend()
#ax.set_xlabel("Epoch number")
#ax.set_ylabel("MSE")
#ax.set_yscale("log")
#ax.set_title("Mean squared error on the normalised spectra")
fig.show()
# now plot an example result on the npca = 15 TEST set
rand_indx = np.random.randint(len(X_test15))
rescaled_result = net.full_predict(X_test15[rand_indx], trainer.scaler_X, trainer.scaler_y)
#test_input_normed = normalise(scaler_X, X_test[rand_indx])
#test_input_normed_var = Variable(torch.FloatTensor(test_input_normed.numpy()))
#normed_result = net(test_input_normed_var)
#rescaled_result = scaler_y.backward(normed_result)
fig2, ax2 = plt.subplots(figsize=(7,5), dpi=320)
ax2.plot(wave_grid, X_test15[rand_indx], alpha=0.8, lw=2, label="Input")
ax2.plot(wave_grid, y_test15[rand_indx], alpha=0.8, lw=2, label="Target")
ax2.plot(wave_grid, rescaled_result, alpha=0.8, lw=2, label="Output")
ax2.set_xlabel("Rest-frame wavelength ($\AA$)")
ax2.set_ylabel("Flux (a.u.)")
ax2.legend()
ax2.grid()
ax2.set_title("Example of a predicted quasar spectrum")
fig2.show()
# visualise the correlation matrix for the npca = 15 TEST set
CorrMat = CorrelationMatrix(X_test15, y_test15, trainer.scaler_X, trainer.scaler_y, net)
CorrMat.show(wave_grid)
#fig3, ax3 = plt.subplots()
#im = ax3.pcolormesh(wave_grid, wave_grid, corr_matrix)
#fig3.show() |
py | 1a379a31c709ca4ee95458f585f556009b6ed3f2 | import tkinter as tk
import tkinter.messagebox as msg
import os
import sqlite3
class Todo(tk.Tk):
def __init__(self, tasks=None):
super().__init__()
if not tasks:
self.tasks = []
else:
self.tasks = tasks
self.tasks_canvas = tk.Canvas(self)
self.tasks_frame = tk.Frame(self.tasks_canvas)
self.text_frame = tk.Frame(self)
self.scrollbar = tk.Scrollbar(self.tasks_canvas, orient="vertical", command=self.tasks_canvas.yview)
self.tasks_canvas.configure(yscrollcommand=self.scrollbar.set)
self.title("To-Do App v3")
self.geometry("300x400")
self.task_create = tk.Text(self.text_frame, height=3, bg="white", fg="black")
self.tasks_canvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.canvas_frame = self.tasks_canvas.create_window((0, 0), window=self.tasks_frame, anchor="n")
self.task_create.pack(side=tk.BOTTOM, fill=tk.X)
self.text_frame.pack(side=tk.BOTTOM, fill=tk.X)
self.task_create.focus_set()
self.colour_schemes = [{"bg": "lightgrey", "fg": "black"}, {"bg": "grey", "fg": "white"}]
current_tasks = self.load_tasks()
for task in current_tasks:
task_text = task[0]
self.add_task(None, task_text, True)
self.bind("<Return>", self.add_task)
self.bind("<Configure>", self.on_frame_configure)
self.bind_all("<MouseWheel>", self.mouse_scroll)
self.bind_all("<Button-4>", self.mouse_scroll)
self.bind_all("<Button-5>", self.mouse_scroll)
self.tasks_canvas.bind("<Configure>", self.task_width)
def add_task(self, event=None, task_text=None, from_db=False):
if not task_text:
task_text = self.task_create.get(1.0, tk.END).strip()
if len(task_text) > 0:
new_task = tk.Label(self.tasks_frame, text=task_text, pady=10)
self.set_task_colour(len(self.tasks), new_task)
new_task.bind("<Button-1>", self.remove_task)
new_task.pack(side=tk.TOP, fill=tk.X)
self.tasks.append(new_task)
if not from_db:
self.save_task(task_text)
self.task_create.delete(1.0, tk.END)
def remove_task(self, event):
task = event.widget
if msg.askyesno("Really Delete?", "Delete " + task.cget("text") + "?"):
self.tasks.remove(event.widget)
delete_task_query = "DELETE FROM tasks WHERE task=?"
delete_task_data = (task.cget("text"),)
self.runQuery(delete_task_query, delete_task_data)
event.widget.destroy()
self.recolour_tasks()
def recolour_tasks(self):
for index, task in enumerate(self.tasks):
self.set_task_colour(index, task)
def set_task_colour(self, position, task):
_, task_style_choice = divmod(position, 2)
my_scheme_choice = self.colour_schemes[task_style_choice]
task.configure(bg=my_scheme_choice["bg"])
task.configure(fg=my_scheme_choice["fg"])
def on_frame_configure(self, event=None):
self.tasks_canvas.configure(scrollregion=self.tasks_canvas.bbox("all"))
def task_width(self, event):
canvas_width = event.width
self.tasks_canvas.itemconfig(self.canvas_frame, width = canvas_width)
def mouse_scroll(self, event):
if event.delta:
self.tasks_canvas.yview_scroll(int(-1*(event.delta/120)), "units")
else:
if event.num == 5:
move = 1
else:
move = -1
self.tasks_canvas.yview_scroll(move, "units")
def save_task(self, task):
insert_task_query = "INSERT INTO tasks VALUES (?)"
insert_task_data = (task,)
self.runQuery(insert_task_query, insert_task_data)
def load_tasks(self):
load_tasks_query = "SELECT task FROM tasks"
my_tasks = self.runQuery(load_tasks_query, receive=True)
return my_tasks
@staticmethod
def runQuery(sql, data=None, receive=False):
conn = sqlite3.connect("tasks.db")
cursor = conn.cursor()
if data:
cursor.execute(sql, data)
else:
cursor.execute(sql)
        if receive:
            results = cursor.fetchall()
        else:
            conn.commit()
            results = None
        # Always close the connection, including when results were fetched
        # (the previous early return leaked the connection on reads).
        conn.close()
        return results
@staticmethod
def firstTimeDB():
create_tables = "CREATE TABLE tasks (task TEXT)"
Todo.runQuery(create_tables)
default_task_query = "INSERT INTO tasks VALUES (?)"
default_task_data = ("--- Add Items Here ---",)
Todo.runQuery(default_task_query, default_task_data)
if __name__ == "__main__":
if not os.path.isfile("tasks.db"):
Todo.firstTimeDB()
todo = Todo()
todo.mainloop() |
py | 1a379a6c02b2c77916071498615a2d5915b92596 | from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="notify_py",
version="0.3.2",
author="Mustafa Mohamed",
author_email="[email protected]",
description="Cross-platform desktop notification library for Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ms7m/notify-py",
python_requires=">=3.6.0",
packages=find_packages(
exclude=["testing", "*.testing", "*.testing.*", "testing.*", "tests"]
),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={"console_scripts": ["notifypy = notifypy.cli:entry"]},
include_package_data=True,
install_requires=["loguru", "jeepney ; platform_system=='Linux'"],
)
|
py | 1a379a8b77499efa78f3fbe4c82b0d3b11eb26e2 | # (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
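# Named subsets of kcu codes: each key maps a subset label to the kcu values it contains.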
KCU__IN_SUBSETS = {
'1D': [0, 1, 2, 3, 4, 5],
'1D2D': [51, 52, 53, 54, 55, 56, 57, 58],
'2D_OPEN_WATER': [100, 101, 200, 300, 400, 500],
'LONG_CRESTED_STRUCTURES': [3],
'SHORT_CRESTED_STRUCTURES': [4],
'2D_OPEN_WATER_OBSTACLES': [101],
'2D_VERTICAL_INFILTRATION': [150],
'2D_GROUNDWATER': [-150],
'ACTIVE_BREACH': [56],
'POTENTIAL_BREACH': [55]
}
KCU__IN_SUBSETS['1D_ALL'] = KCU__IN_SUBSETS['1D'] + KCU__IN_SUBSETS['1D2D']
KCU__IN_SUBSETS['GROUNDWATER_ALL'] = (
KCU__IN_SUBSETS['2D_GROUNDWATER'] + KCU__IN_SUBSETS[
'2D_VERTICAL_INFILTRATION']
)
KCU__IN_SUBSETS['2D_ALL'] = (
KCU__IN_SUBSETS['GROUNDWATER_ALL'] +
KCU__IN_SUBSETS['2D_OPEN_WATER_OBSTACLES'] +
KCU__IN_SUBSETS['2D_OPEN_WATER']
)
|
py | 1a379c37711ac29bfd495cddbe6218ee3fafc0c3 | from .na_gragas_top import *
from .na_gragas_jng import *
from .na_gragas_mid import *
from .na_gragas_bot import *
from .na_gragas_sup import *
|
py | 1a379d2292bbb56e56d349781b3595a090ee539d | # Generated by Django 2.2 on 2021-01-12 09:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects_api', '0037_auto_20210112_0253'),
]
operations = [
migrations.RenameModel(
old_name='User',
new_name='UserPlatform',
),
migrations.RenameField(
model_name='project',
old_name='user_id',
new_name='user_platform_id',
),
]
|
py | 1a379d93883966d682b191aaf0648dcba6d221f7 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import re
import unittest
from datetime import timedelta
from unittest import mock
from urllib.parse import quote_plus
from airflow import settings
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag, DagRun, Pool, TaskInstance
from airflow.models.serialized_dag import SerializedDagModel
from airflow.settings import Session
from airflow.utils.timezone import datetime, parse as parse_datetime, utcnow
from airflow.version import version
from airflow.www import app as application
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_pools
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir, os.pardir)
)
class TestBase(unittest.TestCase):
@conf_vars({('api', 'enable_experimental_api'): 'true'})
def setUp(self):
self.app = application.create_app(testing=True)
self.appbuilder = self.app.appbuilder # pylint: disable=no-member
self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'
self.app.config['SECRET_KEY'] = 'secret_key'
self.app.config['CSRF_ENABLED'] = False
self.app.config['WTF_CSRF_ENABLED'] = False
self.client = self.app.test_client()
settings.configure_orm()
self.session = Session
def assert_deprecated(self, resp):
assert 'true' == resp.headers['Deprecation']
assert re.search(
r'\<.+/stable-rest-api/migration.html\>; ' 'rel="deprecation"; type="text/html"',
resp.headers['Link'],
)
class TestApiExperimental(TestBase):
@classmethod
def setUpClass(cls):
super().setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
dagbag = DagBag(include_examples=True)
for dag in dagbag.dags.values():
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
def tearDown(self):
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
super().tearDown()
def test_info(self):
url = '/api/experimental/info'
resp_raw = self.client.get(url)
resp = json.loads(resp_raw.data.decode('utf-8'))
assert version == resp['version']
self.assert_deprecated(resp_raw)
def test_task_info(self):
url_template = '/api/experimental/dags/{}/tasks/{}'
response = self.client.get(url_template.format('example_bash_operator', 'runme_0'))
self.assert_deprecated(response)
assert '"email"' in response.data.decode('utf-8')
assert 'error' not in response.data.decode('utf-8')
assert 200 == response.status_code
response = self.client.get(url_template.format('example_bash_operator', 'DNE'))
assert 'error' in response.data.decode('utf-8')
assert 404 == response.status_code
response = self.client.get(url_template.format('DNE', 'DNE'))
assert 'error' in response.data.decode('utf-8')
assert 404 == response.status_code
def test_get_dag_code(self):
url_template = '/api/experimental/dags/{}/code'
response = self.client.get(url_template.format('example_bash_operator'))
self.assert_deprecated(response)
assert 'BashOperator(' in response.data.decode('utf-8')
assert 200 == response.status_code
response = self.client.get(url_template.format('xyz'))
assert 404 == response.status_code
def test_dag_paused(self):
pause_url_template = '/api/experimental/dags/{}/paused/{}'
paused_url_template = '/api/experimental/dags/{}/paused'
paused_url = paused_url_template.format('example_bash_operator')
response = self.client.get(pause_url_template.format('example_bash_operator', 'true'))
self.assert_deprecated(response)
assert 'ok' in response.data.decode('utf-8')
assert 200 == response.status_code
paused_response = self.client.get(paused_url)
assert 200 == paused_response.status_code
assert {"is_paused": True} == paused_response.json
response = self.client.get(pause_url_template.format('example_bash_operator', 'false'))
assert 'ok' in response.data.decode('utf-8')
assert 200 == response.status_code
paused_response = self.client.get(paused_url)
assert 200 == paused_response.status_code
assert {"is_paused": False} == paused_response.json
def test_trigger_dag(self):
url_template = '/api/experimental/dags/{}/dag_runs'
run_id = 'my_run' + utcnow().isoformat()
response = self.client.post(
url_template.format('example_bash_operator'),
data=json.dumps({'run_id': run_id}),
content_type="application/json",
)
self.assert_deprecated(response)
assert 200 == response.status_code
response_execution_date = parse_datetime(json.loads(response.data.decode('utf-8'))['execution_date'])
assert 0 == response_execution_date.microsecond
# Check execution_date is correct
response = json.loads(response.data.decode('utf-8'))
dagbag = DagBag()
dag = dagbag.get_dag('example_bash_operator')
dag_run = dag.get_dagrun(response_execution_date)
dag_run_id = dag_run.run_id
assert run_id == dag_run_id
assert dag_run_id == response['run_id']
# Test error for nonexistent dag
response = self.client.post(
url_template.format('does_not_exist_dag'), data=json.dumps({}), content_type="application/json"
)
assert 404 == response.status_code
def test_trigger_dag_for_date(self):
url_template = '/api/experimental/dags/{}/dag_runs'
dag_id = 'example_bash_operator'
execution_date = utcnow() + timedelta(hours=1)
datetime_string = execution_date.isoformat()
# Test correct execution with execution date
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json",
)
self.assert_deprecated(response)
assert 200 == response.status_code
assert datetime_string == json.loads(response.data.decode('utf-8'))['execution_date']
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date)
assert dag_run, f'Dag Run not found for execution date {execution_date}'
# Test correct execution with execution date and microseconds replaced
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string, 'replace_microseconds': 'true'}),
content_type="application/json",
)
assert 200 == response.status_code
response_execution_date = parse_datetime(json.loads(response.data.decode('utf-8'))['execution_date'])
assert 0 == response_execution_date.microsecond
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(response_execution_date)
assert dag_run, f'Dag Run not found for execution date {execution_date}'
# Test error for nonexistent dag
response = self.client.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json",
)
assert 404 == response.status_code
# Test error for bad datetime format
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': 'not_a_datetime'}),
content_type="application/json",
)
assert 400 == response.status_code
def test_task_instance_info(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
dag_id = 'example_bash_operator'
task_id = 'also_run_this'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(datetime(1990, 1, 1, 1, 1, 1).isoformat())
# Create DagRun
trigger_dag(dag_id=dag_id, run_id='test_task_instance_info_run', execution_date=execution_date)
# Test Correct execution
response = self.client.get(url_template.format(dag_id, datetime_string, task_id))
self.assert_deprecated(response)
assert 200 == response.status_code
assert 'state' in response.data.decode('utf-8')
assert 'error' not in response.data.decode('utf-8')
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string, task_id),
)
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for nonexistent task
response = self.client.get(url_template.format(dag_id, datetime_string, 'does_not_exist_task'))
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(url_template.format(dag_id, wrong_datetime_string, task_id))
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for bad datetime format
response = self.client.get(url_template.format(dag_id, 'not_a_datetime', task_id))
assert 400 == response.status_code
assert 'error' in response.data.decode('utf-8')
def test_dagrun_status(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}'
dag_id = 'example_bash_operator'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(datetime(1990, 1, 1, 1, 1, 1).isoformat())
# Create DagRun
trigger_dag(dag_id=dag_id, run_id='test_task_instance_info_run', execution_date=execution_date)
# Test Correct execution
response = self.client.get(url_template.format(dag_id, datetime_string))
self.assert_deprecated(response)
assert 200 == response.status_code
assert 'state' in response.data.decode('utf-8')
assert 'error' not in response.data.decode('utf-8')
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string),
)
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(url_template.format(dag_id, wrong_datetime_string))
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for bad datetime format
response = self.client.get(url_template.format(dag_id, 'not_a_datetime'))
assert 400 == response.status_code
assert 'error' in response.data.decode('utf-8')
class TestLineageApiExperimental(TestBase):
PAPERMILL_EXAMPLE_DAGS = os.path.join(ROOT_FOLDER, "airflow", "providers", "papermill", "example_dags")
@classmethod
def setUpClass(cls):
super().setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
dagbag = DagBag(include_examples=False, dag_folder=cls.PAPERMILL_EXAMPLE_DAGS)
for dag in dagbag.dags.values():
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
@mock.patch("airflow.settings.DAGS_FOLDER", PAPERMILL_EXAMPLE_DAGS)
def test_lineage_info(self):
url_template = '/api/experimental/lineage/{}/{}'
dag_id = 'example_papermill_operator'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(datetime(1990, 1, 1, 1, 1, 1).isoformat())
# create DagRun
trigger_dag(dag_id=dag_id, run_id='test_lineage_info_run', execution_date=execution_date)
# test correct execution
response = self.client.get(url_template.format(dag_id, datetime_string))
self.assert_deprecated(response)
assert 200 == response.status_code
assert 'task_ids' in response.data.decode('utf-8')
assert 'error' not in response.data.decode('utf-8')
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string),
)
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(url_template.format(dag_id, wrong_datetime_string))
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for bad datetime format
response = self.client.get(url_template.format(dag_id, 'not_a_datetime'))
assert 400 == response.status_code
assert 'error' in response.data.decode('utf-8')
class TestPoolApiExperimental(TestBase):
USER_POOL_COUNT = 2
TOTAL_POOL_COUNT = USER_POOL_COUNT + 1 # including default_pool
@classmethod
def setUpClass(cls):
super().setUpClass()
def setUp(self):
super().setUp()
clear_db_pools()
self.pools = [Pool.get_default_pool()]
for i in range(self.USER_POOL_COUNT):
name = f'experimental_{i + 1}'
pool = Pool(
pool=name,
slots=i,
description=name,
)
self.session.add(pool)
self.pools.append(pool)
self.session.commit()
self.pool = self.pools[-1]
def _get_pool_count(self):
response = self.client.get('/api/experimental/pools')
assert response.status_code == 200
return len(json.loads(response.data.decode('utf-8')))
def test_get_pool(self):
response = self.client.get(
f'/api/experimental/pools/{self.pool.pool}',
)
self.assert_deprecated(response)
assert response.status_code == 200
assert json.loads(response.data.decode('utf-8')) == self.pool.to_json()
def test_get_pool_non_existing(self):
response = self.client.get('/api/experimental/pools/foo')
assert response.status_code == 404
assert json.loads(response.data.decode('utf-8'))['error'] == "Pool 'foo' doesn't exist"
def test_get_pools(self):
response = self.client.get('/api/experimental/pools')
self.assert_deprecated(response)
assert response.status_code == 200
pools = json.loads(response.data.decode('utf-8'))
assert len(pools) == self.TOTAL_POOL_COUNT
for i, pool in enumerate(sorted(pools, key=lambda p: p['pool'])):
assert pool == self.pools[i].to_json()
def test_create_pool(self):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps(
{
'name': 'foo',
'slots': 1,
'description': '',
}
),
content_type='application/json',
)
self.assert_deprecated(response)
assert response.status_code == 200
pool = json.loads(response.data.decode('utf-8'))
assert pool['pool'] == 'foo'
assert pool['slots'] == 1
assert pool['description'] == ''
assert self._get_pool_count() == self.TOTAL_POOL_COUNT + 1
def test_create_pool_with_bad_name(self):
for name in ('', ' '):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps(
{
'name': name,
'slots': 1,
'description': '',
}
),
content_type='application/json',
)
assert response.status_code == 400
assert json.loads(response.data.decode('utf-8'))['error'] == "Pool name shouldn't be empty"
assert self._get_pool_count() == self.TOTAL_POOL_COUNT
def test_delete_pool(self):
response = self.client.delete(
f'/api/experimental/pools/{self.pool.pool}',
)
self.assert_deprecated(response)
assert response.status_code == 200
assert json.loads(response.data.decode('utf-8')) == self.pool.to_json()
assert self._get_pool_count() == self.TOTAL_POOL_COUNT - 1
def test_delete_pool_non_existing(self):
response = self.client.delete(
'/api/experimental/pools/foo',
)
assert response.status_code == 404
assert json.loads(response.data.decode('utf-8'))['error'] == "Pool 'foo' doesn't exist"
def test_delete_default_pool(self):
clear_db_pools()
response = self.client.delete(
'/api/experimental/pools/default_pool',
)
assert response.status_code == 400
assert json.loads(response.data.decode('utf-8'))['error'] == "default_pool cannot be deleted"
|
py | 1a379dfd2aa03b284a48df04cd2987101aa3d98c | # Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" Feature module
Provides:
o Feature - class to wrap Bio.SeqFeature objects with drawing information
For drawing capabilities, this module uses reportlab to define colors:
http://www.reportlab.com
For dealing with biological information, the package uses BioPython:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.lib import colors
# GenomeDiagram imports
from ._Colors import ColorTranslator
class Feature(object):
""" Class to wrap Bio.SeqFeature objects for GenomeDiagram
Provides:
Methods:
o __init__(self, parent=None, feature_id=None, feature=None,
color=colors.lightgreen) Called when the feature is
instantiated
o set_feature(self, feature) Wrap the passed feature
o get_feature(self) Return the unwrapped Bio.SeqFeature object
o set_color(self, color) Set the color in which the feature will
be drawn (accepts multiple formats: reportlab color.Color()
tuple and color.name, or integer representing Artemis color
o get_color(self) Returns color.Color tuple of the feature's color
o __getattr__(self, name) Catches attribute requests and passes them to
the wrapped Bio.SeqFeature object
Attributes:
o parent FeatureSet, container for the object
o id Unique id
o color color.Color, color to draw the feature
o hide Boolean for whether the feature will be drawn or not
o sigil String denoting the type of sigil to use for the feature.
Currently either "BOX" or "ARROW" are supported.
o arrowhead_length Float denoting length of the arrow head to be drawn,
relative to the bounding box height. The arrow shaft
takes up the remainder of the bounding box's length.
o arrowshaft_height Float denoting length of the representative arrow
shaft to be drawn, relative to the bounding box height.
The arrow head takes the full height of the bound box.
o name_qualifiers List of Strings, describes the qualifiers that may
contain feature names in the wrapped Bio.SeqFeature object
o label Boolean, 1 if the label should be shown
o label_font String describing the font to use for the feature label
o label_size Int describing the feature label font size
o label_color color.Color describing the feature label color
o label_angle Float describing the angle through which to rotate the
feature label in degrees (default = 45, linear only)
o label_position String, 'start', 'end' or 'middle' denoting where
to place the feature label. Leave as None for the default
which is 'start' for linear diagrams, and at the bottom of
the feature as drawn on circular diagrams.
o label_strand Integer -1 or +1 to explicitly place the label on the
                        forward or reverse strand. Default (None) follows the
feature's strand. Use -1 to put labels under (linear) or
inside (circular) the track, +1 to put them above (linear)
or outside (circular) the track.
o locations List of tuples of (start, end) ints describing where the
feature and any subfeatures start and end
o type String denoting the feature type
o name String denoting the feature name
o strand Int describing the strand on which the feature is found
"""
def __init__(self, parent=None, feature_id=None, feature=None,
color=colors.lightgreen, label=0, border=None, colour=None):
""" __init__(self, parent=None, feature_id=None, feature=None,
color=colors.lightgreen, label=0)
o parent FeatureSet containing the feature
o feature_id Unique id for the feature
o feature Bio.SeqFeature object to be wrapped
o color color.Color Color to draw the feature (overridden
by backwards compatible argument with UK spelling,
colour). Either argument is overridden if 'color'
is found in feature qualifiers
o border color.Color Color to draw the feature border, use
None for the same as the fill color, False for no border.
o label Boolean, 1 if the label should be shown
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
self._colortranslator = ColorTranslator()
# Initialise attributes
self.parent = parent
self.id = feature_id
self.color = color # default color to draw the feature
self.border = border
self._feature = None # Bio.SeqFeature object to wrap
self.hide = 0 # show by default
self.sigil = 'BOX'
self.arrowhead_length = 0.5 # 50% of the box height
self.arrowshaft_height = 0.4 # 40% of the box height
self.name_qualifiers = ['gene', 'label', 'name', 'locus_tag', 'product']
self.label = label
self.label_font = 'Helvetica'
self.label_size = 6
self.label_color = colors.black
self.label_angle = 45
self.label_position = None #Expect 'start', 'middle', or 'end' (plus aliases)
self.label_strand = None #Expect +1 or -1 if overriding this
if feature is not None:
self.set_feature(feature)
def set_feature(self, feature):
""" set_feature(self, feature)
o feature Bio.SeqFeature object to be wrapped
Defines the Bio.SeqFeature object to be wrapped
"""
self._feature = feature
self.__process_feature()
def __process_feature(self):
""" __process_feature(self)
Examine the feature to be wrapped, and set some of the Feature's
properties accordingly
"""
self.locations = []
bounds = []
#This will be a list of length one for simple FeatureLocation:
for location in self._feature.location.parts:
start = location.nofuzzy_start
end = location.nofuzzy_end
#if start > end and self.strand == -1:
# start, end = end, start
self.locations.append((start, end))
bounds += [start, end]
self.type = str(self._feature.type) # Feature type
#TODO - Strand can vary with subfeatures (e.g. mixed strand tRNA)
if self._feature.strand is None:
#This is the SeqFeature default (None), but the drawing code
#only expects 0, +1 or -1.
self.strand = 0
else:
self.strand = int(self._feature.strand) # Feature strand
if 'color' in self._feature.qualifiers: # Artemis color (if present)
self.color = self._colortranslator.artemis_color(
self._feature.qualifiers['color'][0])
self.name = self.type
for qualifier in self.name_qualifiers:
if qualifier in self._feature.qualifiers:
self.name = self._feature.qualifiers[qualifier][0]
break
#Note will be 0 to N for origin wrapping feature on genome of length N
self.start, self.end = min(bounds), max(bounds)
def get_feature(self):
""" get_feature(self) -> Bio.SeqFeature
Returns the unwrapped Bio.SeqFeature object
"""
return self._feature
def set_colour(self, colour):
"""Backwards compatible variant of set_color(self, color) using UK spelling."""
color = self._colortranslator.translate(colour)
self.color = color
def set_color(self, color):
""" set_color(self, color)
o color The color to draw the feature - either a colors.Color
object, an RGB tuple of floats, or an integer
corresponding to colors in colors.txt
Set the color in which the feature will be drawn
"""
#TODO - Make this into the set method for a color property?
color = self._colortranslator.translate(color)
self.color = color
def __getattr__(self, name):
""" __getattr__(self, name) -> various
If the Feature class doesn't have the attribute called for,
check in self._feature for it
"""
return getattr(self._feature, name) # try to get the attribute from the feature
################################################################################
# RUN AS SCRIPT
################################################################################
if __name__ == '__main__':
# Test code
gdf = Feature()
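    # Minimal usage sketch (illustrative only, not part of the library's tests):
    # wrap a Bio.SeqFeature and recolour it. Assumes Biopython's SeqFeature and
    # FeatureLocation are importable here; the coordinates and type are made up.
    from Bio.SeqFeature import SeqFeature, FeatureLocation
    example_seqfeature = SeqFeature(FeatureLocation(10, 200, strand=+1), type="CDS")
    wrapped = Feature(feature=example_seqfeature, label=1)
    wrapped.set_color(colors.red)
    print(wrapped.name, wrapped.start, wrapped.end, wrapped.strand)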
|
py | 1a379e25d97bf570a01bb8b335f25bbabc7e498d | """
Functions to construct models that use LSTM layers to process prong inputs.
"""
from keras.layers import LSTM, Concatenate
from keras.models import Model
from lstm_ee.consts import DEF_MASK
from .funcs import (
get_inputs, get_outputs, modify_layer, modify_series_layer,
add_hidden_layers, add_hidden_series_layers, add_resblocks,
add_stack_of_lstms
)
def make_standard_lstm_branch(
branch_label, input_layer, hidden_layers_spec, lstm_units, batchnorm,
dropout, reg, lstm_kwargs = None, mask_value = DEF_MASK
):
"""Create the default block of layers to process sequential inputs.
Parameters
----------
branch_label : str
Suffix that will be added to layer names.
input_layer : keras.Layer
Layer on top of which new will be added.
hidden_layers_spec : list of int
List of Dense layer sizes that will be used to preprocess inputs before
feeding them to the LSTM layer.
lstm_units : int
Number of units that the LSTM layer will have.
batchnorm : bool or None
If True then the BatchNorm layers will be used to normalize
activations.
dropout : float or None
If not None then Dropout layers with `dropout` value of dropout will
be added to regularize activations.
reg : keras.Regularizer or None
`keras` regularizer to use.
lstm_kwargs : dict or None, optional
Additional arguments to be passed to the LSTM layer constructor.
mask_value : float, optional
In sequential data missing values were padded by some value.
The `mask_value` parameter specified that value.
Default: DEF_MASK
Returns
-------
keras.Layer
Last layer that was added on top of `input_layer`
See Also
--------
make_stacked_lstm_branch
add_hidden_series_layers
"""
lstm_kwargs = lstm_kwargs or {}
input_layer = modify_series_layer(
input_layer, 'input_%s' % (branch_label),
mask = True, batchnorm = batchnorm, mask_value = mask_value
)
layer_hidden_pre = add_hidden_series_layers(
input_layer, hidden_layers_spec, "hidden_pre_%s" % (branch_label),
batchnorm, dropout,
activation = 'relu',
kernel_regularizer = reg,
)
layer_lstm = LSTM(
lstm_units, kernel_regularizer = reg, recurrent_regularizer = reg,
**lstm_kwargs
)(layer_hidden_pre)
return layer_lstm
def make_stacked_lstm_branch(
branch_label, input_layer, hidden_layers_spec, lstm_spec, batchnorm,
dropout, reg, lstm_kwargs = None, mask_value = DEF_MASK
):
"""Create a stack of LSTMs to process sequential inputs.
Parameters
----------
branch_label : str
Suffix that will be added to layer names.
input_layer : keras.Layer
Layer on top of which new will be added.
hidden_layers_spec : list of int
List of Dense layer sizes that will be used to preprocess inputs before
feeding them to the LSTM layer.
lstm_spec : list of (int, str)
List of pairs that specify number of units and directions of LSTM
layers to be added. C.f. `add_stack_of_lstms`
batchnorm : bool or None
If True then the BatchNorm layers will be used to normalize
activations.
dropout : float or None
If not None then Dropout layers with `dropout` value of dropout will
be added to regularize activations.
reg : keras.Regularizer or None
`keras` regularizer to use.
lstm_kwargs : dict or None
Additional arguments to be passed to the LSTM layers constructors.
mask_value : float, optional
In sequential data missing values were padded by some value.
The `mask_value` parameter specified that value.
Default: DEF_MASK
Returns
-------
keras.Layer
Last layer that was added on top of `input_layer`
See Also
--------
make_standard_lstm_branch
add_stack_of_lstms
"""
lstm_kwargs = lstm_kwargs or {}
input_layer = modify_series_layer(
input_layer, 'input_%s' % (branch_label),
mask = True, batchnorm = batchnorm, mask_value = mask_value
)
layer_hidden_pre = add_hidden_series_layers(
input_layer, hidden_layers_spec, "hidden_pre_%s" % (branch_label),
batchnorm, dropout,
activation = 'relu',
kernel_regularizer = reg,
)
layer_lstm = add_stack_of_lstms(
layer_hidden_pre, lstm_spec,
'lstm_%s' % (branch_label), batchnorm, dropout,
recurrent_regularizer = reg, kernel_regularizer = reg,
**lstm_kwargs
)
return layer_lstm
def make_standard_postprocess_branch(
input_layer, hidden_layers_spec, batchnorm, dropout, reg, n_resblocks
):
"""Create the default postprocessing block of layers.
Parameters
----------
input_layer : keras.Layer
Layer on top of which new will be added.
hidden_layers_spec : list of int
List of Dense layer sizes that will be used to postprocess LSTM layer
outputs.
batchnorm : bool or None
If True then the BatchNorm layers will be used to normalize
activations.
dropout : float or None
If not None then Dropout layers with `dropout` value of dropout will
be added to regularize activations.
reg : keras.Regularizer or None
`keras` regularizer to use.
n_resblocks : int or None
Number of Dense residual block layers to be added after the last Dense
layer.
Returns
-------
keras.Layer
Last layer that was added on top of `input_layer`
See Also
--------
make_standard_lstm_branch
make_stacked_lstm_branch
add_hidden_layers
add_resblocks
"""
layer_hidden_post = add_hidden_layers(
input_layer, hidden_layers_spec, "hidden_post", batchnorm, dropout,
activation = 'relu', kernel_regularizer = reg,
)
layer_resnet = add_resblocks(
layer_hidden_post, n_resblocks, 'resblocks', kernel_regularizer = reg
)
return layer_resnet
def model_lstm_v1(
lstm_units = 16,
max_prongs = 5,
reg = None,
batchnorm = False,
vars_input_slice = None,
vars_input_png3d = None,
vars_input_png2d = None,
var_target_total = None,
var_target_primary = None
):
"""Create version 1 LSTM network.
This is the vanilla network that Alexander Radovic used.
This network uses only 3D prong and slice level inputs and limits the
number of prongs to 5.
No input preprocessing or output postprocessing is done.
Parameters
----------
lstm_units : int, optional
Number of units that LSTM layer will have. Default: 16.
max_prongs : int or None, optional
Limit on the number of prongs that will be used. Default: 5.
reg : keras.Regularizer or None, optional
Regularization to use. Default: None
batchnorm : bool or None, optional
Whether to use Batch Normalization. Default: False.
vars_input_slice : list of str or None
List of slice level input variable names.
vars_input_png3d : list of str or None
List of 3D prong level input variable names.
vars_input_png2d : None
List of 2D prong level input variable names.
        This is a dummy input variable and MUST be None.
var_target_total : str or None
Name of the variable that defines true total energy.
var_target_primary : str or None
Name of the variable that defines true primary energy.
Returns
-------
keras.Model
Model that defines the network.
See Also
--------
model_lstm_v2
model_lstm_v3
"""
assert(vars_input_png2d is None)
inputs = get_inputs(vars_input_slice, vars_input_png3d, None, max_prongs)
# pylint: disable=unbalanced-tuple-unpacking
input_slc, input_png = inputs
input_png = modify_series_layer(
input_png, 'input_png', mask = True, batchnorm = batchnorm
)
layer_png_1 = LSTM(
lstm_units, kernel_regularizer = reg, recurrent_regularizer = reg,
)(input_png)
layer_merged = Concatenate()([ layer_png_1, input_slc ])
layer_merged = modify_layer(layer_merged, 'layer_merged', batchnorm)
outputs = get_outputs(
var_target_total, var_target_primary, reg, layer_merged
)
model = Model(inputs = inputs, outputs = outputs)
return model
def model_lstm_v2(
lstm_units = 16,
layers_pre = [],
layers_post = [],
n_resblocks = None,
max_prongs = 5,
reg = None,
batchnorm = False,
dropout = None,
vars_input_slice = None,
vars_input_png3d = None,
vars_input_png2d = None,
var_target_total = None,
var_target_primary = None
):
"""Create version 2 LSTM network.
This is a modification of the vanilla network that Alexander Radovic used.
This network also uses only 3D prong and slice level inputs.
However, it does LSTM input preprocessing and output postprocessing.
Parameters
----------
lstm_units : int, optional
Number of units that LSTM layer will have. Default: 16.
layers_pre : list of int
List of Dense layer sizes that will be used to preprocess prong inputs.
layers_post : list of int
List of Dense layer sizes that will be used to postprocess LSTM
outputs.
n_resblocks : int or None, optional
Number of the fully connected residual blocks to be added before the
output layer. Default: None
max_prongs : int or None, optional
Limit on the number of prongs that will be used. Default: 5.
reg : keras.Regularizer or None, optional
Regularization to use. Default: None
batchnorm : bool or None, optional
Whether to use Batch Normalization. Default: False.
dropout : float or None
If not None then Dropout layers with `dropout` value of dropout will
be added to regularize activations.
vars_input_slice : list of str or None
List of slice level input variable names.
vars_input_png3d : list of str or None
List of 3D prong level input variable names.
vars_input_png2d : None
List of 2D prong level input variable names.
        This is a dummy input variable and MUST be None.
var_target_total : str or None
Name of the variable that defines true total energy.
var_target_primary : str or None
Name of the variable that defines true primary energy.
Returns
-------
keras.Model
Model that defines the network.
See Also
--------
model_lstm_v1
model_lstm_v3
"""
# pylint: disable=dangerous-default-value
assert(vars_input_png2d is None)
inputs = get_inputs(
vars_input_slice, vars_input_png3d, vars_input_png2d, max_prongs
)
# pylint: disable=unbalanced-tuple-unpacking
input_slc, input_png = inputs
layer_png_1 = make_standard_lstm_branch(
'png', input_png, layers_pre, lstm_units, batchnorm, dropout, reg
)
layer_merged = Concatenate()([ layer_png_1, input_slc ])
layer_merged = modify_layer(layer_merged, 'layer_merged', batchnorm)
layer_post = make_standard_postprocess_branch(
layer_merged, layers_post, batchnorm, dropout, reg, n_resblocks
)
outputs = get_outputs(
var_target_total, var_target_primary, reg, layer_post
)
return Model(inputs = inputs, outputs = outputs)
def model_lstm_v3(
lstm_units3d = 16,
lstm_units2d = 16,
layers_pre = [],
layers_post = [],
n_resblocks = 0,
max_prongs = None,
reg = None,
batchnorm = False,
dropout = None,
vars_input_slice = None,
vars_input_png3d = None,
vars_input_png2d = None,
var_target_total = None,
var_target_primary = None,
lstm_kwargs = None
):
"""Create version 3 LSTM network.
This is the latest revision of the LSTM network:
- It uses both 2D and 3D prong level inputs
- It relies on a heavy input preprocessing and postprocessing.
Parameters
----------
lstm_units3d : int
Number of units that LSTM layer that processes 3D prongs will have.
Default: 16.
lstm_units2d : int
Number of units that LSTM layer that processes 2D prongs will have.
Default: 16.
layers_pre : list of int
List of Dense layer sizes that will be used to preprocess prong inputs.
Same Dense layer configuration will be used for 2D and 3D level
prong inputs.
layers_post : list of int
List of Dense layer sizes that will be used to postprocess LSTM
outputs.
n_resblocks : int or None, optional
Number of the fully connected residual blocks to be added before the
output layer. Default: None
max_prongs : int or None, optional
Limit on the number of prongs that will be used. Default: None.
reg : keras.Regularizer or None, optional
Regularization to use. Default: None
batchnorm : bool or None, optional
Whether to use Batch Normalization. Default: False.
dropout : float or None
If not None then Dropout layers with `dropout` value of dropout will
be added to regularize activations.
vars_input_slice : list of str or None
List of slice level input variable names.
vars_input_png3d : list of str or None
List of 3D prong level input variable names.
    vars_input_png2d : list of str or None
        List of 2D prong level input variable names.
var_target_total : str or None
Name of the variable that defines true total energy.
var_target_primary : str or None
Name of the variable that defines true primary energy.
lstm_kwargs : dict or None, optional
Extra arguments that will be passed to the LSTM layer constructors.
Default: None
Returns
-------
keras.Model
Model that defines the network.
See Also
--------
model_lstm_v1
model_lstm_v2
model_lstm_v3_stack
"""
# pylint: disable=dangerous-default-value
inputs = get_inputs(
vars_input_slice, vars_input_png3d, vars_input_png2d, max_prongs
)
# pylint: disable=unbalanced-tuple-unpacking
input_slice, input_png3d, input_png2d = inputs
layer_lstm_png3d = make_standard_lstm_branch(
'png3d', input_png3d, layers_pre, lstm_units3d,
batchnorm, dropout, reg, lstm_kwargs
)
layer_lstm_png2d = make_standard_lstm_branch(
'png2d', input_png2d, layers_pre, lstm_units2d,
batchnorm, dropout, reg, lstm_kwargs
)
layer_merged = Concatenate()([
layer_lstm_png3d, layer_lstm_png2d, input_slice
])
layer_merged = modify_layer(layer_merged, 'layer_merged', batchnorm)
layer_post = make_standard_postprocess_branch(
layer_merged, layers_post, batchnorm, dropout, reg, n_resblocks
)
outputs = get_outputs(
var_target_total, var_target_primary, reg, layer_post
)
return Model(inputs = inputs, outputs = outputs)
def model_lstm_v3_stack(
lstm3d_spec = [ (32, 'forward'), ],
lstm2d_spec = [ (32, 'forward'), ],
layers_pre = [],
layers_post = [],
n_resblocks = 0,
max_prongs = None,
reg = None,
batchnorm = False,
dropout = None,
vars_input_slice = None,
vars_input_png3d = None,
vars_input_png2d = None,
var_target_total = None,
var_target_primary = None,
lstm_kwargs = None
):
"""Create version 3 LSTM network that supports stacks of LSTM layers.
Parameters
----------
lstm3d_spec : list of (int, str)
List of pairs that specify number of units and directions of LSTM
layers that will be used to process 3D prongs.
C.f. `add_stack_of_lstms`
lstm2d_spec : list of (int, str)
List of pairs that specify number of units and directions of LSTM
layers that will be used to process 2D prongs.
C.f. `add_stack_of_lstms`
layers_pre : list of int
List of Dense layer sizes that will be used to preprocess prong inputs.
Same Dense layer configuration will be used for 2D and 3D level
prong inputs.
layers_post : list of int
List of Dense layer sizes that will be used to postprocess LSTM
outputs.
n_resblocks : int or None, optional
Number of the fully connected residual blocks to be added before the
output layer. Default: None
max_prongs : int or None, optional
Limit on the number of prongs that will be used. Default: None.
reg : keras.Regularizer or None, optional
Regularization to use. Default: None
batchnorm : bool or None, optional
Whether to use Batch Normalization. Default: False.
dropout : float or None
If not None then Dropout layers with `dropout` value of dropout will
be added to regularize activations.
vars_input_slice : list of str or None
List of slice level input variable names.
vars_input_png3d : list of str or None
List of 3D prong level input variable names.
    vars_input_png2d : list of str or None
        List of 2D prong level input variable names.
var_target_total : str or None
Name of the variable that defines true total energy.
var_target_primary : str or None
Name of the variable that defines true primary energy.
lstm_kwargs : dict or None, optional
Extra arguments that will be passed to the LSTM layer constructors.
Default: None
Returns
-------
keras.Model
Model that defines the network.
See Also
--------
model_lstm_v3
"""
# pylint: disable=dangerous-default-value
inputs = get_inputs(
vars_input_slice, vars_input_png3d, vars_input_png2d, max_prongs
)
# pylint: disable=unbalanced-tuple-unpacking
input_slice, input_png3d, input_png2d = inputs
layer_lstm_png3d = make_stacked_lstm_branch(
'png3d', input_png3d, layers_pre, lstm3d_spec,
batchnorm, dropout, reg, lstm_kwargs
)
layer_lstm_png2d = make_stacked_lstm_branch(
'png2d', input_png2d, layers_pre, lstm2d_spec,
batchnorm, dropout, reg, lstm_kwargs
)
layer_merged = Concatenate()([
layer_lstm_png3d, layer_lstm_png2d, input_slice
])
layer_merged = modify_layer(layer_merged, 'layer_merged', batchnorm)
layer_post = make_standard_postprocess_branch(
layer_merged, layers_post, batchnorm, dropout, reg, n_resblocks
)
outputs = get_outputs(
var_target_total, var_target_primary, reg, layer_post
)
return Model(inputs = inputs, outputs = outputs)
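# Illustrative usage sketch (not part of the module's API): one way the factory
# above might be invoked. The variable-name lists and target names below are
# hypothetical placeholders, not real lstm_ee configuration values.
def _example_build_lstm_v3():
    model = model_lstm_v3(
        lstm_units3d = 32,
        lstm_units2d = 32,
        layers_pre   = [128, 128],
        layers_post  = [128, 128],
        vars_input_slice   = ['slice.calE', 'slice.nHit'],   # hypothetical names
        vars_input_png3d   = ['png.calE', 'png.len'],        # hypothetical names
        vars_input_png2d   = ['png2d.calE'],                 # hypothetical names
        var_target_total   = 'trueE',                        # hypothetical name
        var_target_primary = 'trueLepE',                     # hypothetical name
    )
    model.summary()
    return model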
|
py | 1a379e54e17d534df1be8c11db3f46edc28d3c23 | # -*- coding: utf-8 -*-
"""
Setup
-----
Install troposphere in the current python environment.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
# ---- Future
from __future__ import print_function
from __future__ import with_statement
# ---- System
import os
from setuptools import setup
# ----------------------------------------------------------------------------
# Helper Functions
# ----------------------------------------------------------------------------
def file_contents(file_name):
"""Given a file name to a valid file returns the file object."""
curr_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(curr_dir, file_name)) as the_file:
contents = the_file.read()
return contents
def get_version():
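    """Extract the __version__ string from troposphere/__init__.py."""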
curr_dir = os.path.abspath(os.path.dirname(__file__))
with open(curr_dir + "/troposphere/__init__.py", "r") as init_version:
for line in init_version:
if "__version__" in line:
return str(line.split("=")[-1].strip(" ")[1:-2])
# ----------------------------------------------------------------------------
# Setup
# ----------------------------------------------------------------------------
setup(
name='troposphere',
version=get_version(),
description="AWS CloudFormation creation library",
long_description=file_contents("README.rst"),
long_description_content_type='text/x-rst',
author="Mark Peek",
author_email="[email protected]",
license="New BSD license",
url="https://github.com/cloudtools/troposphere",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: BSD",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 2.7",
],
packages=[
'troposphere',
'troposphere.openstack',
'troposphere.helpers'
],
scripts=[
'scripts/cfn',
'scripts/cfn2py'
],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
install_requires=file_contents("requirements.txt"),
test_suite="tests",
tests_require=["awacs>=0.8"],
extras_require={'policy': ['awacs>=0.8']},
use_2to3=True,
)
|
py | 1a379eba622c15fe049a7f49d2678b500eb6f5b6 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
from pyspark import RDD, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc, JavaModelWrapper
from pyspark.mllib.linalg import _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = [
"DecisionTreeModel",
"DecisionTree",
"RandomForestModel",
"RandomForest",
"GradientBoostedTreesModel",
"GradientBoostedTrees",
]
class TreeEnsembleModel(JavaModelWrapper, JavaSaveable):
"""TreeEnsembleModel
.. versionadded:: 1.3.0
"""
def predict(self, x):
"""
Predict values for a single data point or an RDD of points using
the model trained.
.. versionadded:: 1.3.0
Notes
-----
In Python, predict cannot currently be used within an RDD
transformation or action.
Call predict directly on the RDD instead.
"""
if isinstance(x, RDD):
return self.call("predict", x.map(_convert_to_vector))
else:
return self.call("predict", _convert_to_vector(x))
@since("1.3.0")
def numTrees(self):
"""
Get number of trees in ensemble.
"""
return self.call("numTrees")
@since("1.3.0")
def totalNumNodes(self):
"""
Get total number of nodes, summed over all trees in the ensemble.
"""
return self.call("totalNumNodes")
def __repr__(self):
"""Summary of model"""
return self._java_model.toString()
@since("1.3.0")
def toDebugString(self):
"""Full model"""
return self._java_model.toDebugString()
class DecisionTreeModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""
A decision tree model for classification or regression.
.. versionadded:: 1.1.0
"""
def predict(self, x):
"""
Predict the label of one or more examples.
.. versionadded:: 1.1.0
Parameters
----------
x : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
Data point (feature vector), or an RDD of data points (feature
vectors).
Notes
-----
In Python, predict cannot currently be used within an RDD
transformation or action.
Call predict directly on the RDD instead.
"""
if isinstance(x, RDD):
return self.call("predict", x.map(_convert_to_vector))
else:
return self.call("predict", _convert_to_vector(x))
@since("1.1.0")
def numNodes(self):
"""Get number of nodes in tree, including leaf nodes."""
return self._java_model.numNodes()
@since("1.1.0")
def depth(self):
"""
Get depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
"""
return self._java_model.depth()
def __repr__(self):
"""summary of model."""
return self._java_model.toString()
@since("1.2.0")
def toDebugString(self):
"""full model."""
return self._java_model.toDebugString()
@classmethod
def _java_loader_class(cls):
return "org.apache.spark.mllib.tree.model.DecisionTreeModel"
class DecisionTree(object):
"""
Learning algorithm for a decision tree model for classification or
regression.
.. versionadded:: 1.1.0
"""
@classmethod
def _train(
cls,
data,
type,
numClasses,
features,
impurity="gini",
maxDepth=5,
maxBins=32,
minInstancesPerNode=1,
minInfoGain=0.0,
):
first = data.first()
assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
model = callMLlibFunc(
"trainDecisionTreeModel",
data,
type,
numClasses,
features,
impurity,
maxDepth,
maxBins,
minInstancesPerNode,
minInfoGain,
)
return DecisionTreeModel(model)
@classmethod
def trainClassifier(
cls,
data,
numClasses,
categoricalFeaturesInfo,
impurity="gini",
maxDepth=5,
maxBins=32,
minInstancesPerNode=1,
minInfoGain=0.0,
):
"""
Train a decision tree model for classification.
.. versionadded:: 1.1.0
Parameters
----------
data : :py:class:`pyspark.RDD`
Training data: RDD of LabeledPoint. Labels should take values
{0, 1, ..., numClasses-1}.
numClasses : int
Number of classes for classification.
categoricalFeaturesInfo : dict
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
impurity : str, optional
Criterion used for information gain calculation.
Supported values: "gini" or "entropy".
(default: "gini")
maxDepth : int, optional
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 5)
maxBins : int, optional
Number of bins used for finding splits at each node.
(default: 32)
minInstancesPerNode : int, optional
Minimum number of instances required at child nodes to create
the parent split.
(default: 1)
minInfoGain : float, optional
Minimum info gain required to create a split.
(default: 0.0)
Returns
-------
:py:class:`DecisionTreeModel`
Examples
--------
>>> from numpy import array
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import DecisionTree
>>>
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> model = DecisionTree.trainClassifier(sc.parallelize(data), 2, {})
>>> print(model)
DecisionTreeModel classifier of depth 1 with 3 nodes
>>> print(model.toDebugString())
DecisionTreeModel classifier of depth 1 with 3 nodes
If (feature 0 <= 0.5)
Predict: 0.0
Else (feature 0 > 0.5)
Predict: 1.0
<BLANKLINE>
>>> model.predict(array([1.0]))
1.0
>>> model.predict(array([0.0]))
0.0
>>> rdd = sc.parallelize([[1.0], [0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(
data,
"classification",
numClasses,
categoricalFeaturesInfo,
impurity,
maxDepth,
maxBins,
minInstancesPerNode,
minInfoGain,
)
@classmethod
@since("1.1.0")
def trainRegressor(
cls,
data,
categoricalFeaturesInfo,
impurity="variance",
maxDepth=5,
maxBins=32,
minInstancesPerNode=1,
minInfoGain=0.0,
):
"""
Train a decision tree model for regression.
Parameters
----------
data : :py:class:`pyspark.RDD`
Training data: RDD of LabeledPoint. Labels are real numbers.
categoricalFeaturesInfo : dict
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
impurity : str, optional
Criterion used for information gain calculation.
The only supported value for regression is "variance".
(default: "variance")
maxDepth : int, optional
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 5)
maxBins : int, optional
Number of bins used for finding splits at each node.
(default: 32)
minInstancesPerNode : int, optional
Minimum number of instances required at child nodes to create
the parent split.
(default: 1)
minInfoGain : float, optional
Minimum info gain required to create a split.
(default: 0.0)
Returns
-------
:py:class:`DecisionTreeModel`
Examples
--------
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import DecisionTree
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {})
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {1: 0.0}))
0.0
>>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(
data,
"regression",
0,
categoricalFeaturesInfo,
impurity,
maxDepth,
maxBins,
minInstancesPerNode,
minInfoGain,
)
@inherit_doc
class RandomForestModel(TreeEnsembleModel, JavaLoader):
"""
Represents a random forest model.
.. versionadded:: 1.2.0
"""
@classmethod
def _java_loader_class(cls):
return "org.apache.spark.mllib.tree.model.RandomForestModel"
class RandomForest(object):
"""
Learning algorithm for a random forest model for classification or
regression.
.. versionadded:: 1.2.0
"""
supportedFeatureSubsetStrategies = ("auto", "all", "sqrt", "log2", "onethird")
@classmethod
def _train(
cls,
data,
algo,
numClasses,
categoricalFeaturesInfo,
numTrees,
featureSubsetStrategy,
impurity,
maxDepth,
maxBins,
seed,
):
first = data.first()
assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
if featureSubsetStrategy not in cls.supportedFeatureSubsetStrategies:
raise ValueError("unsupported featureSubsetStrategy: %s" % featureSubsetStrategy)
if seed is None:
seed = random.randint(0, 1 << 30)
model = callMLlibFunc(
"trainRandomForestModel",
data,
algo,
numClasses,
categoricalFeaturesInfo,
numTrees,
featureSubsetStrategy,
impurity,
maxDepth,
maxBins,
seed,
)
return RandomForestModel(model)
@classmethod
def trainClassifier(
cls,
data,
numClasses,
categoricalFeaturesInfo,
numTrees,
featureSubsetStrategy="auto",
impurity="gini",
maxDepth=4,
maxBins=32,
seed=None,
):
"""
Train a random forest model for binary or multiclass
classification.
.. versionadded:: 1.2.0
Parameters
----------
data : :py:class:`pyspark.RDD`
Training dataset: RDD of LabeledPoint. Labels should take values
{0, 1, ..., numClasses-1}.
numClasses : int
Number of classes for classification.
categoricalFeaturesInfo : dict
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
numTrees : int
Number of trees in the random forest.
featureSubsetStrategy : str, optional
Number of features to consider for splits at each node.
Supported values: "auto", "all", "sqrt", "log2", "onethird".
If "auto" is set, this parameter is set based on numTrees:
if numTrees == 1, set to "all";
            if numTrees > 1 (forest), set to "sqrt".
(default: "auto")
impurity : str, optional
Criterion used for information gain calculation.
Supported values: "gini" or "entropy".
(default: "gini")
maxDepth : int, optional
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 4)
maxBins : int, optional
Maximum number of bins used for splitting features.
(default: 32)
        seed : int, optional
Random seed for bootstrapping and choosing feature subsets.
Set as None to generate seed based on system time.
(default: None)
Returns
-------
:py:class:`RandomForestModel`
that can be used for prediction.
Examples
--------
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import RandomForest
>>>
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(0.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42)
>>> model.numTrees()
3
>>> model.totalNumNodes()
7
>>> print(model)
TreeEnsembleModel classifier with 3 trees
<BLANKLINE>
>>> print(model.toDebugString())
TreeEnsembleModel classifier with 3 trees
<BLANKLINE>
Tree 0:
Predict: 1.0
Tree 1:
If (feature 0 <= 1.5)
Predict: 0.0
Else (feature 0 > 1.5)
Predict: 1.0
Tree 2:
If (feature 0 <= 1.5)
Predict: 0.0
Else (feature 0 > 1.5)
Predict: 1.0
<BLANKLINE>
>>> model.predict([2.0])
1.0
>>> model.predict([0.0])
0.0
>>> rdd = sc.parallelize([[3.0], [1.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(
data,
"classification",
numClasses,
categoricalFeaturesInfo,
numTrees,
featureSubsetStrategy,
impurity,
maxDepth,
maxBins,
seed,
)
@classmethod
def trainRegressor(
cls,
data,
categoricalFeaturesInfo,
numTrees,
featureSubsetStrategy="auto",
impurity="variance",
maxDepth=4,
maxBins=32,
seed=None,
):
"""
Train a random forest model for regression.
.. versionadded:: 1.2.0
Parameters
----------
data : :py:class:`pyspark.RDD`
Training dataset: RDD of LabeledPoint. Labels are real numbers.
categoricalFeaturesInfo : dict
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
numTrees : int
Number of trees in the random forest.
featureSubsetStrategy : str, optional
Number of features to consider for splits at each node.
Supported values: "auto", "all", "sqrt", "log2", "onethird".
If "auto" is set, this parameter is set based on numTrees:
- if numTrees == 1, set to "all";
            - if numTrees > 1 (forest), set to "onethird" for regression.
(default: "auto")
impurity : str, optional
Criterion used for information gain calculation.
The only supported value for regression is "variance".
(default: "variance")
maxDepth : int, optional
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 4)
maxBins : int, optional
Maximum number of bins used for splitting features.
(default: 32)
seed : int, optional
Random seed for bootstrapping and choosing feature subsets.
Set as None to generate seed based on system time.
(default: None)
Returns
-------
:py:class:`RandomForestModel`
that can be used for prediction.
Examples
--------
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import RandomForest
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> model = RandomForest.trainRegressor(sc.parallelize(sparse_data), {}, 2, seed=42)
>>> model.numTrees()
2
>>> model.totalNumNodes()
4
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {0: 1.0}))
0.5
>>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.5]
"""
return cls._train(
data,
"regression",
0,
categoricalFeaturesInfo,
numTrees,
featureSubsetStrategy,
impurity,
maxDepth,
maxBins,
seed,
)
@inherit_doc
class GradientBoostedTreesModel(TreeEnsembleModel, JavaLoader):
"""
Represents a gradient-boosted tree model.
.. versionadded:: 1.3.0
"""
@classmethod
def _java_loader_class(cls):
return "org.apache.spark.mllib.tree.model.GradientBoostedTreesModel"
class GradientBoostedTrees(object):
"""
Learning algorithm for a gradient boosted trees model for
classification or regression.
.. versionadded:: 1.3.0
"""
@classmethod
def _train(
cls,
data,
algo,
categoricalFeaturesInfo,
loss,
numIterations,
learningRate,
maxDepth,
maxBins,
):
first = data.first()
assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
model = callMLlibFunc(
"trainGradientBoostedTreesModel",
data,
algo,
categoricalFeaturesInfo,
loss,
numIterations,
learningRate,
maxDepth,
maxBins,
)
return GradientBoostedTreesModel(model)
@classmethod
def trainClassifier(
cls,
data,
categoricalFeaturesInfo,
loss="logLoss",
numIterations=100,
learningRate=0.1,
maxDepth=3,
maxBins=32,
):
"""
Train a gradient-boosted trees model for classification.
.. versionadded:: 1.3.0
Parameters
----------
data : :py:class:`pyspark.RDD`
Training dataset: RDD of LabeledPoint. Labels should take values
{0, 1}.
categoricalFeaturesInfo : dict
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
loss : str, optional
Loss function used for minimization during gradient boosting.
Supported values: "logLoss", "leastSquaresError",
"leastAbsoluteError".
(default: "logLoss")
numIterations : int, optional
Number of iterations of boosting.
(default: 100)
learningRate : float, optional
Learning rate for shrinking the contribution of each estimator.
            The learning rate should be in the interval (0, 1].
(default: 0.1)
maxDepth : int, optional
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 3)
maxBins : int, optional
Maximum number of bins used for splitting features. DecisionTree
requires maxBins >= max categories.
(default: 32)
Returns
-------
:py:class:`GradientBoostedTreesModel`
that can be used for prediction.
Examples
--------
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import GradientBoostedTrees
>>>
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(0.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>>
>>> model = GradientBoostedTrees.trainClassifier(sc.parallelize(data), {}, numIterations=10)
>>> model.numTrees()
10
>>> model.totalNumNodes()
30
>>> print(model) # it already has newline
TreeEnsembleModel classifier with 10 trees
<BLANKLINE>
>>> model.predict([2.0])
1.0
>>> model.predict([0.0])
0.0
>>> rdd = sc.parallelize([[2.0], [0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(
data,
"classification",
categoricalFeaturesInfo,
loss,
numIterations,
learningRate,
maxDepth,
maxBins,
)
@classmethod
def trainRegressor(
cls,
data,
categoricalFeaturesInfo,
loss="leastSquaresError",
numIterations=100,
learningRate=0.1,
maxDepth=3,
maxBins=32,
):
"""
Train a gradient-boosted trees model for regression.
.. versionadded:: 1.3.0
Parameters
----------
        data : :py:class:`pyspark.RDD`
Training dataset: RDD of LabeledPoint. Labels are real numbers.
categoricalFeaturesInfo : dict
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
loss : str, optional
Loss function used for minimization during gradient boosting.
Supported values: "logLoss", "leastSquaresError",
"leastAbsoluteError".
(default: "leastSquaresError")
numIterations : int, optional
Number of iterations of boosting.
(default: 100)
learningRate : float, optional
Learning rate for shrinking the contribution of each estimator.
            The learning rate should be in the interval (0, 1].
(default: 0.1)
maxDepth : int, optional
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 3)
maxBins : int, optional
Maximum number of bins used for splitting features. DecisionTree
requires maxBins >= max categories.
(default: 32)
Returns
-------
:py:class:`GradientBoostedTreesModel`
that can be used for prediction.
Examples
--------
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import GradientBoostedTrees
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> data = sc.parallelize(sparse_data)
>>> model = GradientBoostedTrees.trainRegressor(data, {}, numIterations=10)
>>> model.numTrees()
10
>>> model.totalNumNodes()
12
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {0: 1.0}))
0.0
>>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(
data,
"regression",
categoricalFeaturesInfo,
loss,
numIterations,
learningRate,
maxDepth,
maxBins,
)
def _test():
import doctest
globs = globals().copy()
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local[4]").appName("mllib.tree tests").getOrCreate()
globs["sc"] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
py | 1a379f6c8aeaf2934bbdfb982f4fcfa5fc932656 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, gamma=0, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha,(float,int)): self.alpha = torch.Tensor([alpha,1-alpha])
if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim()>2:
input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W
input = input.transpose(1,2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1,1)
        logpt = F.log_softmax(input, dim=1)  # class dimension after the reshape above
logpt = logpt.gather(1,target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp())
if self.alpha is not None:
if self.alpha.type()!=input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0,target.data.view(-1))
logpt = logpt * Variable(at)
loss = -1 * (1-pt)**self.gamma * logpt
if self.size_average: return loss.mean()
else: return loss.sum()
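
# --- Illustrative usage sketch (added example; not part of the original module) ---
# Shows how the FocalLoss module above might be called on a batch of class
# logits. The tensor shapes, number of classes and gamma value are assumptions
# chosen only for this illustration, not values required by the class.
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(8, 5)            # batch of 8 samples, 5 classes
    targets = torch.randint(0, 5, (8,))   # integer class labels in [0, 5)
    criterion = FocalLoss(gamma=2)        # alpha left as None: no class weighting
    loss = criterion(logits, targets)     # mean focal loss over the batch
    print("focal loss:", loss.item())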
|
py | 1a379f8d9d2dcb3f9ad74646b3c92a7124125aa2 | subBloco = [[],[],[],[],[],[],[],[],[]]
for l in range(9):
for c in range(9):
n = int(input(f"Digite o valor [{l}][{c}] "))
if n >= 0 and n < 10:
if l <= 2:
if c >= 0 and c <= 2:
subBloco[0].append(n)
if c >= 3 and c <= 5:
subBloco[1].append(n)
if c >= 6:
subBloco[2].append(n)
elif l > 2 and l <= 5:
if c >= 0 and c <= 2:
subBloco[3].append(n)
if c >= 3 and c <= 5:
subBloco[4].append(n)
if c >= 6:
subBloco[5].append(n)
if l > 5 and l <= 8:
if c >= 0 and c <= 2:
subBloco[6].append(n)
if c >= 3 and c <= 5:
subBloco[7].append(n)
if c >= 6:
subBloco[8].append(n)
for i in range(len(subBloco)):
print(subBloco[i])
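
# --- Added note (illustrative sketch, not part of the original exercise) ---
# Assuming the intent above is the standard Sudoku 3x3 grouping, the sub-block
# index of a cell can also be computed directly from its row and column,
# replacing the chained if/elif tests inside the input loop:
#
#     bloco = (l // 3) * 3 + (c // 3)   # yields 0..8 with the same ordering
#     subBloco[bloco].append(n)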
# ============ Part 02 =============
celula = []
while True:
    # Stop once no sub-block still contains an empty cell (marked with 0).
    if not any(0 in bloco for bloco in subBloco):
        break
    else:
        for cont in range(9):
            for i in range(1, 10):
                if i in subBloco[cont]:
                    print(f'{i} is part of sub-block {cont}')
                else:
                    # Note: subBloco only stores the 3x3 groups, so these
                    # checks report presence in the other sub-blocks rather
                    # than true row/column membership.
                    for l in range(9):
                        for c in range(9):
                            if i not in subBloco[c]:
                                print(f'{i} is NOT in the row')
                            if i not in subBloco[l]:
                                print(f'{i} is not in the column')
        break  # single pass; the grid is not modified inside this loop |
py | 1a379ffe4852a2cc4294ad8b82ab999dd6f79262 | # Copyright 2019-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions for constructing/calculating the means, variances and covariances of
Gaussian states.
"""
from itertools import product
from scipy.special import factorial
import numpy as np
from .._hafnian import hafnian, reduction
from .._torontonian import threshold_detection_prob
from .conversions import (
reduced_gaussian,
Qmat,
Xmat,
complex_to_real_displacements
)
def photon_number_mean(mu, cov, j, hbar=2):
r""" Calculate the mean photon number of mode j of a Gaussian state.
Args:
mu (array): vector of means of the Gaussian state using the ordering
:math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
cov (array): the covariance matrix of the Gaussian state
j (int): the j :sup:`th` mode
hbar (float): the ``hbar`` convention used in the commutation
relation :math:`[q, p]=i\hbar`
Returns:
float: the mean photon number in mode :math:`j`.
"""
num_modes = len(mu) // 2
return (
mu[j] ** 2
+ mu[j + num_modes] ** 2
+ cov[j, j]
+ cov[j + num_modes, j + num_modes]
- hbar
) / (2 * hbar)
def photon_number_mean_vector(mu, cov, hbar=2):
r""" Calculate the mean photon number of each of the modes in a Gaussian state
Args:
mu (array): vector of means of the Gaussian state using the ordering
:math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
cov (array): the covariance matrix of the Gaussian state
hbar (float): the ``hbar`` convention used in the commutation
relation :math:`[q, p]=i\hbar`
Returns:
array: the vector of means of the photon number distribution
"""
N = len(mu) // 2
return np.array([photon_number_mean(mu, cov, j, hbar=hbar) for j in range(N)])
def photon_number_covar(mu, cov, j, k, hbar=2):
r""" Calculate the variance/covariance of the photon number distribution
of a Gaussian state.
Implements the covariance matrix of the photon number distribution of a
    Gaussian state according to the last two equations of Part II in
`'Multidimensional Hermite polynomials and photon distribution for polymode
mixed light', Dodonov et al. <https://journals.aps.org/pra/abstract/10.1103/PhysRevA.50.813>`_
.. math::
\sigma_{n_j n_j} &= \frac{1}{2}\left(T_j^2 - 2d_j - \frac{1}{2}\right)
+ \left<\mathbf{Q}_j\right>\mathcal{M}_j\left<\mathbf{Q}_j\right>, \\
\sigma_{n_j n_k} &= \frac{1}{2}\mathrm{Tr}\left(\Lambda_j \mathbf{M} \Lambda_k \mathbf{M}\right)
+ \left<\mathbf{Q}\right>\Lambda_j \mathbf{M} \Lambda_k\left<\mathbf{Q}\right>,
where :math:`T_j` and :math:`d_j` are the trace and the determinant of
:math:`2 \times 2` matrix :math:`\mathcal{M}_j` whose elements coincide
with the nonzero elements of matrix :math:`\mathbf{M}_j = \Lambda_j \mathbf{M} \Lambda_k`
while the two-vector :math:`\mathbf{Q}_j` has the components :math:`(q_j, p_j)`.
:math:`2N \times 2N` projector matrix :math:`\Lambda_j` has only two nonzero
elements: :math:`\left(\Lambda_j\right)_{jj} = \left(\Lambda_j\right)_{j+N,j+N} = 1`.
    Note that the convention for ``mu`` used here differs from the one used in Dodonov et al.;
    both conventions give the same results in this particular case.
Also note that the original reference of Dodonov et al. has an incorrect prefactor of 1/2
in the last terms of the last equation above.
Args:
mu (array): vector of means of the Gaussian state using the ordering
:math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
cov (array): the covariance matrix of the Gaussian state
j (int): the j :sup:`th` mode
k (int): the k :sup:`th` mode
hbar (float): the ``hbar`` convention used in the commutation
relation :math:`[q, p]=i\hbar`
Returns:
float: the covariance for the photon numbers at modes :math:`j` and :math:`k`.
"""
if j == k:
mu, cov = reduced_gaussian(mu, cov, [j])
term_1 = 0.5 * np.trace(cov) ** 2 - np.linalg.det(cov)
term_2 = mu @ cov @ mu
return ((term_1 + term_2) / hbar ** 2) - 0.25
mu, cov = reduced_gaussian(mu, cov, [j, k])
term_1 = cov[0, 1] ** 2 + cov[0, 3] ** 2 + cov[2, 1] ** 2 + cov[2, 3] ** 2
term_2 = (
cov[0, 1] * mu[0] * mu[1]
+ cov[2, 1] * mu[1] * mu[2]
+ cov[0, 3] * mu[0] * mu[3]
+ cov[2, 3] * mu[2] * mu[3]
)
return (term_1 + 2 * term_2) / (2 * hbar ** 2)
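
# --- Added worked check (illustration, not part of the original module) ---
# Sanity check of the formulas above on the simplest case: for a vacuum state
# with hbar = 2 (mu = 0, cov = identity), the single-mode reduction gives
# term_1 = 0.5 * Tr(cov)**2 - det(cov) = 0.5 * 4 - 1 = 1 and term_2 = 0, so
# photon_number_covar returns 1 / hbar**2 - 0.25 = 0: the photon-number
# variance of the vacuum vanishes, as expected.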
def photon_number_covmat(mu, cov, hbar=2):
r""" Calculate the covariance matrix of the photon number distribution of a
Gaussian state.
Args:
mu (array): vector of means of the Gaussian state using the ordering
:math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
cov (array): the covariance matrix of the Gaussian state
hbar (float): the ``hbar`` convention used in the commutation
relation :math:`[q, p]=i\hbar`
Returns:
array: the covariance matrix of the photon number distribution
"""
N = len(mu) // 2
pnd_cov = np.zeros((N, N))
for i in range(N):
for j in range(i+1):
pnd_cov[i][j] = photon_number_covar(mu, cov, i, j, hbar=hbar)
pnd_cov[j][i] = pnd_cov[i][j]
return pnd_cov
def photon_number_expectation(mu, cov, modes, hbar=2):
r"""Calculates the expectation value of the product of the number operator of the modes in a Gaussian state.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
modes (list): list of modes
hbar (float): value of hbar in the uncertainty relation.
Returns:
(float): expectation value of the product of the number operators of the modes.
"""
n, _ = cov.shape
n_modes = n // 2
rpt = np.zeros([n], dtype=int)
for i in modes:
rpt[i] = 1
rpt[i + n_modes] = 1
return normal_ordered_expectation(mu, cov, rpt, hbar=hbar)
def photon_number_squared_expectation(mu, cov, modes, hbar=2):
r"""Calculates the expectation value of the square of the product of the number operator of the modes in
a Gaussian state.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
modes (list): list of modes
hbar (float): value of hbar in the uncertainty relation.
Returns:
(float): expectation value of the square of the product of the number operator of the modes.
"""
n_modes = len(modes)
mu_red, cov_red = reduced_gaussian(mu, cov, modes)
result = 0
for item in product([1, 2], repeat=n_modes):
rpt = item + item
term = normal_ordered_expectation(mu_red, cov_red, rpt, hbar=hbar)
result += term
return result
def normal_ordered_expectation(mu, cov, rpt, hbar=2):
r"""Calculates the expectation value of the normal ordered product
:math:`\prod_{i=0}^{N-1} a_i^{\dagger n_i} \prod_{j=0}^{N-1} a_j^{m_j}` with respect to an N-mode Gaussian state,
where :math:`\text{rpt}=(n_0, n_1, \ldots, n_{N-1}, m_0, m_1, \ldots, m_{N-1})`.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
rpt (list): integers specifying the terms to calculate.
hbar (float): value of hbar in the uncertainty relation.
Returns:
(float): expectation value of the normal ordered product of operators
"""
return s_ordered_expectation(mu, cov, rpt, hbar, s=1)
def s_ordered_expectation(mu, cov, rpt, hbar=2, s=0):
r"""Calculates the expectation value of the s-ordered product
    obtained by taking derivatives of the characteristic function of a Gaussian state.
    Here, :math:`\text{rpt}=(n_0, n_1, \ldots, n_{N-1}, m_0, m_1, \ldots, m_{N-1})`
    indicates how many derivatives are taken with respect to the complex argument and its
conjugate.
The values :math:`s=\{1,0,-1\}` correspond respectively to normal, symmetric and antinormal order.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
rpt (list): integers specifying the terms to calculate.
hbar (float): value of hbar in the uncertainty relation.
        s (float): value setting the ordering; it must be between -1 and 1.
Returns:
(float): expectation value of the normal ordered product of operators
"""
# The following seven lines are written so that we remove from the calculation the
# modes k that we don't care about. These modes have rpt[k] = rpt[k+M] = 0
if np.allclose(rpt, 0):
return 1.0
M = len(cov) // 2
modes = np.where(np.array(rpt[0:M]) + np.array(rpt[M : 2 * M]) != 0)[0]
mu, cov = reduced_gaussian(mu, cov, list(modes))
ind = list(modes) + list(modes + M)
rpt = list(np.array(rpt)[np.array(ind)])
alpha = complex_to_real_displacements(mu, hbar=hbar)
n = len(cov)
V = (Qmat(cov, hbar=hbar) - 0.5 * (s + 1) * np.identity(n)) @ Xmat(n // 2)
A = reduction(V, rpt)
if np.allclose(mu, 0):
return hafnian(A)
np.fill_diagonal(A, reduction(np.conj(alpha), rpt))
return hafnian(A, loop=True)
def mean_clicks(cov, hbar=2):
r""" Calculates the total mean number of clicks when a zero-mean gaussian state
is measured using threshold detectors.
    Args:
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering
hbar (float): the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar`
    Returns:
float: mean number of clicks
"""
n, _ = cov.shape
nmodes = n // 2
Q = Qmat(cov, hbar=hbar)
meanc = 1.0 * nmodes
for i in range(nmodes):
det_val = np.real(Q[i, i] * Q[i + nmodes, i + nmodes] - Q[i + nmodes, i] * Q[i, i + nmodes])
meanc -= 1.0 / np.sqrt(det_val)
return meanc
def variance_clicks(cov, hbar=2):
r""" Calculates the variance of the total number of clicks when a zero-mean gaussian state
is measured using threshold detectors.
    Args:
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering
hbar (float): the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar`
    Returns:
float: variance in the total number of clicks
"""
n, _ = cov.shape
means = np.zeros([n])
nmodes = n // 2
Q = Qmat(cov, hbar=hbar)
vac_probs = np.array(
[
np.real(Q[i, i] * Q[i + nmodes, i + nmodes] - Q[i + nmodes, i] * Q[i, i + nmodes])
for i in range(nmodes)
]
)
vac_probs = np.sqrt(vac_probs)
vac_probs = 1 / vac_probs
term1 = np.sum(vac_probs * (1 - vac_probs))
term2 = 0
for i in range(nmodes):
for j in range(i):
_, Qij = reduced_gaussian(means, Q, [i, j])
prob_vac_ij = np.linalg.det(Qij).real
prob_vac_ij = 1.0 / np.sqrt(prob_vac_ij)
term2 += prob_vac_ij - vac_probs[i] * vac_probs[j]
return term1 + 2 * term2
def _coeff_normal_ordered(m, k):
r"""Returns the coefficients giving the expansion of a photon number power in terms of normal ordered power of creation
and annihilation operators. The coefficient is given by :math:`\sum_{\mu=0}^k \frac{(-1)^{k-\mu} \mu^m}{\mu!(k-\mu)!}`.
Args:
        m (int): power of the photon number operator, :math:`(a^\dagger a)^m`.
        k (int): power of the normal ordered term, :math:`a^{\dagger k} a^k`.
Returns:
(float): expansion coefficient
"""
return sum(
[
(1 / (factorial(mu) * factorial(k - mu)))
* ((-1) ** (k - mu) * (mu ** m))
for mu in range(0, k + 1)
]
)
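
# --- Added worked example (illustration, not part of the original module) ---
# The coefficients computed above are the Stirling numbers of the second kind
# S(m, k). For instance, (a^dagger a)^2 = a^dagger a + a^dagger^2 a^2, so for
# m = 2 both coefficients equal one:
#
#     _coeff_normal_ordered(2, 1)   # -> 1.0
#     _coeff_normal_ordered(2, 2)   # -> 1.0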
def photon_number_moment(mu, cov, indices, hbar=2):
r"""Calculates the expectation value of product of powers of photon number operators of a Gaussian state.
The powers are specified by a dictionary with modes as keys and powers as values.
The calculation is performed by first writing any power of the photon number as
:math:`(a^\dagger a)^m = \sum_{k=1}^m c_k a^{\dagger k} a^k`
where the coefficients :math:`c_i` are provided by the function `_coeff_normal_ordered`.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
        indices (dictionary): mapping from each mode (key) to the power of its photon number operator (value)
hbar (float): value of hbar in the uncertainty relation.
Returns:
float: the expectation value of the photon number powers.
"""
N = len(cov) // 2
list_indices = [indices[key] for key in indices]
modes = list(indices)
# Find the expansion coefficients of all the different powers
expansion_coeff = [
[_coeff_normal_ordered(indices[key], i) for i in range(1, 1 + indices[key])]
for key in indices
]
values = [list(range(i)) for i in list_indices]
net_sum = 0.0
# Construct the product of each possible term appearing in the normal ordered expansion
for item in product(*values):
rpt = [0] * N
for i, key in enumerate(modes):
rpt[key] = item[i] + 1
rpt = rpt + rpt
prod_coeff = np.prod([expansion_coeff[i][coeff] for i, coeff in enumerate(item)])
net_sum += prod_coeff * s_ordered_expectation(mu, cov, rpt, s=1, hbar=hbar)
return np.real_if_close(net_sum)
def partition(collection):
"""Generate all set partitions of a collection.
Taken from: https://stackoverflow.com/a/30134039
Args:
collection (sequence): set to find partitions of
Yields:
list[list]: set partition of collection
"""
if len(collection) == 1:
yield [collection]
return
first = collection[0]
for smaller in partition(collection[1:]):
for n, subset in enumerate(smaller):
yield smaller[:n] + [[first] + subset] + smaller[n+1:]
yield [[first]] + smaller
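
# --- Added example (illustration, not part of the original module) ---
# For instance, list(partition([1, 2, 3])) produces the 5 set partitions of
# {1, 2, 3} (the Bell number B_3 = 5): [[1, 2, 3]], [[1], [2, 3]],
# [[1, 2], [3]], [[2], [1, 3]] and [[1], [2], [3]].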
def _list_to_freq_dict(words):
"""Convert between a list which of "words" and a dictionary
which shows how many times each word appears in word
Args:
words (list): list of words
Returns:
dict : how many times a word appears. key is word, value is multiplicity
"""
return {i : words.count(i) for i in set(words)}
def photon_number_cumulant(mu, cov, modes, hbar=2):
r"""Calculates the photon-number cumulant of the modes in the Gaussian state.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
modes (list or array): list of modes. Note that it can have repetitions.
hbar (float): value of hbar in the uncertainty relation.
Returns:
(float): the cumulant
"""
modes = list(modes) # turns modes from array to list if passed in as array
kappa = 0
for pi in partition(modes):
size = len(pi)
term = factorial(size - 1) * (-1) ** (size - 1)
for B in pi:
indices = _list_to_freq_dict(B)
term *= photon_number_moment(mu, cov, indices, hbar=hbar)
kappa += term
return kappa
def click_cumulant(mu, cov, modes, hbar=2):
r"""Calculates the click cumulant of the modes in the Gaussian state.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
modes (list or array): list of modes.
hbar (float): value of hbar in the uncertainty relation.
Returns:
(float): the cumulant
"""
modes = list(modes) # turns modes from array to list if passed in as array
kappa = 0
for pi in partition(modes):
size = len(pi)
term = factorial(size - 1) * (-1) ** (size - 1)
for B in pi:
B = list(set(B)) # remove repetitions
pattern = np.ones_like(B)
mu_red, cov_red = reduced_gaussian(mu, cov, B)
summand = threshold_detection_prob(mu_red, cov_red, pattern, hbar=hbar)
term *= summand
kappa += term
return kappa
|
py | 1a37a01da4360126eb2d383d34c4dfa9e66ec1bf | from __future__ import absolute_import
try:
import holoviews as hv
except ImportError:
hv = None
import pytest
from bokeh.plotting import figure
from panel.layout import Row
from panel.links import Link
from panel.pane import Bokeh, HoloViews
from panel.widgets import FloatSlider, RangeSlider, ColorPicker, TextInput, DatetimeInput
from panel.tests.util import hv_available
def test_widget_link_bidirectional():
t1 = TextInput()
t2 = TextInput()
t1.link(t2, value='value', bidirectional=True)
t1.value = 'ABC'
assert t1.value == 'ABC'
assert t2.value == 'ABC'
t2.value = 'DEF'
assert t1.value == 'DEF'
assert t2.value == 'DEF'
def test_widget_jslink_bidirectional(document, comm):
t1 = TextInput()
t2 = TextInput()
t1.jslink(t2, value='value', bidirectional=True)
row = Row(t1, t2)
model = row.get_root(document, comm)
tm1, tm2 = model.children
link1_customjs = tm1.js_property_callbacks['change:value'][-1]
link2_customjs = tm2.js_property_callbacks['change:value'][-1]
assert link1_customjs.args['source'] is tm1
assert link2_customjs.args['source'] is tm2
assert link1_customjs.args['target'] is tm2
assert link2_customjs.args['target'] is tm1
def test_widget_link_source_param_not_found():
t1 = TextInput()
t2 = TextInput()
with pytest.raises(ValueError) as excinfo:
t1.jslink(t2, value1='value')
assert "Could not jslink \'value1\' parameter" in str(excinfo)
def test_widget_link_target_param_not_found():
t1 = TextInput()
t2 = TextInput()
with pytest.raises(ValueError) as excinfo:
t1.jslink(t2, value='value1')
assert "Could not jslink \'value1\' parameter" in str(excinfo)
def test_widget_link_no_transform_error():
t1 = DatetimeInput()
t2 = TextInput()
with pytest.raises(ValueError) as excinfo:
t1.jslink(t2, value='value')
assert "Cannot jslink \'value\' parameter on DatetimeInput object" in str(excinfo)
def test_widget_link_no_target_transform_error():
t1 = DatetimeInput()
t2 = TextInput()
with pytest.raises(ValueError) as excinfo:
t2.jslink(t1, value='value')
assert ("Cannot jslink \'value\' parameter on TextInput object "
"to \'value\' parameter on DatetimeInput object") in str(excinfo)
@hv_available
def test_pnwidget_hvplot_links(document, comm):
size_widget = FloatSlider(value=5, start=1, end=10)
points1 = hv.Points([1, 2, 3])
size_widget.jslink(points1, value='glyph.size')
row = Row(points1, size_widget)
model = row.get_root(document, comm=comm)
hv_views = row.select(HoloViews)
widg_views = row.select(FloatSlider)
assert len(hv_views) == 1
assert len(widg_views) == 1
slider = widg_views[0]._models[model.ref['id']][0]
scatter = hv_views[0]._plots[model.ref['id']][0].handles['glyph']
link_customjs = slider.js_property_callbacks['change:value'][-1]
assert link_customjs.args['source'] is slider
assert link_customjs.args['target'] is scatter
code = """
var value = source['value'];
value = value;
value = value;
try {
var property = target.properties['size'];
if (property !== undefined) { property.validate(value); }
} catch(err) {
console.log('WARNING: Could not set size on target, raised error: ' + err);
return;
}
try {
target['size'] = value;
} catch(err) {
console.log(err)
}
"""
assert link_customjs.code == code
@hv_available
def test_bkwidget_hvplot_links(document, comm):
from bokeh.models import Slider
bokeh_widget = Slider(value=5, start=1, end=10, step=1e-1)
points1 = hv.Points([1, 2, 3])
Link(bokeh_widget, points1, properties={'value': 'glyph.size'})
row = Row(points1, bokeh_widget)
model = row.get_root(document, comm=comm)
hv_views = row.select(HoloViews)
assert len(hv_views) == 1
slider = bokeh_widget
scatter = hv_views[0]._plots[model.ref['id']][0].handles['glyph']
link_customjs = slider.js_property_callbacks['change:value'][-1]
assert link_customjs.args['source'] is slider
assert link_customjs.args['target'] is scatter
code = """
var value = source['value'];
value = value;
value = value;
try {
var property = target.properties['size'];
if (property !== undefined) { property.validate(value); }
} catch(err) {
console.log('WARNING: Could not set size on target, raised error: ' + err);
return;
}
try {
target['size'] = value;
} catch(err) {
console.log(err)
}
"""
assert link_customjs.code == code
def test_bkwidget_bkplot_links(document, comm):
from bokeh.models import Slider
bokeh_widget = Slider(value=5, start=1, end=10, step=1e-1)
bokeh_fig = figure()
scatter = bokeh_fig.scatter([1, 2, 3], [1, 2, 3])
Link(bokeh_widget, scatter, properties={'value': 'glyph.size'})
row = Row(bokeh_fig, bokeh_widget)
row.get_root(document, comm=comm)
slider = bokeh_widget
link_customjs = slider.js_property_callbacks['change:value'][-1]
assert link_customjs.args['source'] is slider
assert link_customjs.args['target'] is scatter.glyph
code = """
var value = source['value'];
value = value;
value = value;
try {
var property = target.properties['size'];
if (property !== undefined) { property.validate(value); }
} catch(err) {
console.log('WARNING: Could not set size on target, raised error: ' + err);
return;
}
try {
target['size'] = value;
} catch(err) {
console.log(err)
}
"""
assert link_customjs.code == code
def test_widget_bkplot_link(document, comm):
widget = ColorPicker(value='#ff00ff')
bokeh_fig = figure()
scatter = bokeh_fig.scatter([1, 2, 3], [1, 2, 3])
widget.jslink(scatter.glyph, value='fill_color')
row = Row(bokeh_fig, widget)
model = row.get_root(document, comm=comm)
link_customjs = model.children[1].js_property_callbacks['change:color'][-1]
assert link_customjs.args['source'] is model.children[1]
assert link_customjs.args['target'] is scatter.glyph
assert scatter.glyph.fill_color == '#ff00ff'
code = """
var value = source['color'];
value = value;
value = value;
try {
var property = target.properties['fill_color'];
if (property !== undefined) { property.validate(value); }
} catch(err) {
console.log('WARNING: Could not set fill_color on target, raised error: ' + err);
return;
}
try {
target['fill_color'] = value;
} catch(err) {
console.log(err)
}
"""
assert link_customjs.code == code
def test_bokeh_figure_jslink(document, comm):
fig = figure()
pane = Bokeh(fig)
t1 = TextInput()
pane.jslink(t1, **{'x_range.start': 'value'})
row = Row(pane, t1)
model = row.get_root(document, comm)
link_customjs = fig.x_range.js_property_callbacks['change:start'][-1]
assert link_customjs.args['source'] == fig.x_range
assert link_customjs.args['target'] == model.children[1]
assert link_customjs.code == """
var value = source['start'];
value = value;
value = value;
try {
var property = target.properties['value'];
if (property !== undefined) { property.validate(value); }
} catch(err) {
console.log('WARNING: Could not set value on target, raised error: ' + err);
return;
}
try {
target['value'] = value;
} catch(err) {
console.log(err)
}
"""
def test_widget_jscallback(document, comm):
widget = ColorPicker(value='#ff00ff')
widget.jscallback(value='some_code')
model = widget.get_root(document, comm=comm)
customjs = model.js_property_callbacks['change:color'][-1]
assert customjs.args['source'] is model
assert customjs.code == "try { some_code } catch(err) { console.log(err) }"
def test_widget_jscallback_args_scalar(document, comm):
widget = ColorPicker(value='#ff00ff')
widget.jscallback(value='some_code', args={'scalar': 1})
model = widget.get_root(document, comm=comm)
customjs = model.js_property_callbacks['change:color'][-1]
assert customjs.args['scalar'] == 1
def test_widget_jscallback_args_model(document, comm):
widget = ColorPicker(value='#ff00ff')
widget2 = ColorPicker(value='#ff00ff')
widget.jscallback(value='some_code', args={'widget': widget2})
model = Row(widget, widget2).get_root(document, comm=comm)
customjs = model.children[0].js_property_callbacks['change:color'][-1]
assert customjs.args['source'] is model.children[0]
assert customjs.args['widget'] is model.children[1]
assert customjs.code == "try { some_code } catch(err) { console.log(err) }"
@hv_available
def test_hvplot_jscallback(document, comm):
points1 = hv.Points([1, 2, 3])
hvplot = HoloViews(points1)
hvplot.jscallback(**{'x_range.start': "some_code"})
model = hvplot.get_root(document, comm=comm)
x_range = hvplot._plots[model.ref['id']][0].handles['x_range']
customjs = x_range.js_property_callbacks['change:start'][-1]
assert customjs.args['source'] is x_range
assert customjs.code == "try { some_code } catch(err) { console.log(err) }"
@hv_available
def test_link_with_customcode(document, comm):
range_widget = RangeSlider(start=0., end=1.)
curve = hv.Curve([])
code = """
x_range.start = source.value[0]
x_range.end = source.value[1]
"""
range_widget.jslink(curve, code={'value': code})
row = Row(curve, range_widget)
range_widget.value = (0.5, 0.7)
model = row.get_root(document, comm=comm)
hv_views = row.select(HoloViews)
widg_views = row.select(RangeSlider)
assert len(hv_views) == 1
assert len(widg_views) == 1
range_slider = widg_views[0]._models[model.ref['id']][0]
x_range = hv_views[0]._plots[model.ref['id']][0].handles['x_range']
link_customjs = range_slider.js_property_callbacks['change:value'][-1]
assert link_customjs.args['source'] is range_slider
assert link_customjs.args['x_range'] is x_range
assert link_customjs.code == "try { %s } catch(err) { console.log(err) }" % code
|
py | 1a37a1aa1ba96b0e47fad28fe4db1e8dcec1307e |
import uuid
from datetime import datetime
from flasgger import swag_from
from flask import Blueprint, jsonify, request
from cloudinary.uploader import upload
from src.models import Author, Book, UserProfile, db
from src.google import get_user_info
from src.constants.http_status_codes import HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_500_INTERNAL_SERVER_ERROR, HTTP_404_NOT_FOUND
book_bp = Blueprint('book', __name__, url_prefix='/api')
@book_bp.post('/books')
@swag_from('../docs/book/create.yml')
def create_book():
file = request.files['image']
# Author
author_first_name = request.form['author_first_name']
author_last_name = request.form['author_last_name']
# Book
title = request.form['title']
isbn = request.form['isbn']
language = request.form['language']
year_of_publication = request.form['year_of_publication']
category = request.form['category']
owner_id = request.form['owner_id']
if not title:
return jsonify({
'error': "book title is required"
}), HTTP_400_BAD_REQUEST
if not language:
return jsonify({
'error': "book language is required"
}), HTTP_400_BAD_REQUEST
if not owner_id:
return jsonify({
'error': "book owner is required"
}), HTTP_400_BAD_REQUEST
owner = UserProfile.query.filter_by(id=owner_id).first()
if not owner:
return jsonify({
'error':f"user with id {owner_id} not found"
}), HTTP_404_NOT_FOUND
if not year_of_publication:
return jsonify({
'error': "year of publication is required"
}), HTTP_400_BAD_REQUEST
if not (author_first_name and author_last_name):
return jsonify({
'error': "author's first and last name is required"
}), HTTP_400_BAD_REQUEST
try:
# Upload image to cloudinary server
cloudinary_response = upload(file, folder="bookie-books")
    except Exception:
        return jsonify({'error': "error uploading image to cloudinary"}), HTTP_500_INTERNAL_SERVER_ERROR
if not cloudinary_response:
return jsonify({
'error': "error uploading image"
}), HTTP_400_BAD_REQUEST
author = Author(
id=uuid.uuid4(),
first_name=author_first_name,
last_name=author_last_name)
book = Book(
id=uuid.uuid4(),
name=title,
isbn=isbn,
language=language,
year_of_publication=year_of_publication,
category=category,
author_id=author.id,
owner_id=uuid.UUID(owner.id),
image_url = cloudinary_response['secure_url'], # from cloudinary response after successful upload
cld_asset_id=cloudinary_response['asset_id'],
cld_public_id=cloudinary_response['public_id'],
is_available=True,
created_at=datetime.now(),
borrowed=False # Not borrowed on creation
)
db.session.add(author)
db.session.add(book)
db.session.commit()
return {'message':"book created"}, HTTP_201_CREATED
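
# --- Illustrative client call (added sketch, not part of the original module) ---
# A hedged example of how a client could exercise the endpoint above with
# multipart form data using the `requests` library. The base URL, file name and
# field values are assumptions made only for this illustration.
#
#     import requests
#     with open("cover.jpg", "rb") as image:
#         response = requests.post(
#             "http://localhost:5000/api/books",
#             files={"image": image},
#             data={
#                 "author_first_name": "Jane",
#                 "author_last_name": "Doe",
#                 "title": "Example Book",
#                 "isbn": "978-0-000000-00-0",
#                 "language": "English",
#                 "year_of_publication": "2020",
#                 "category": "Fiction",
#                 "owner_id": "<existing user profile id>",
#             },
#         )
#     print(response.status_code, response.json())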
|
py | 1a37a1b00cf5b4d8b0dec10992264c74ab385d10 | # -*- coding: utf-8 -*-
#
# Django RPC documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 6 13:02:10 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
def rel(*x):
return os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
sys.path.insert(0, rel('../example'))
sys.path.insert(0, rel('..'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django import setup as django_setup
django_setup()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.viewcode', 'sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django RPC'
copyright = u'2012, Dmitriy Kostochko'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoRPCdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoRPC.tex', u'Django RPC Documentation',
u'Dmitriy Kostochko', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangorpc', u'Django RPC Documentation',
[u'Dmitriy Kostochko'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DjangoRPC', u'Django RPC Documentation',
u'Dmitriy Kostochko', 'DjangoRPC', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
autoclass_content = 'init'
|
py | 1a37a202a2658419ee096dc3ce52787f41c92e34 | import argparse
import optparse
import sys
import turtle
from turtle import *
import numpy as np
parser = optparse.OptionParser(description='paint')
parser.add_option('--name', type=str, default='circle',
help='file name')
parser.add_option('--start_length', type=int, default=0, help='number of forwards')
parser.add_option('--end_length', type=int, default=120, help='number of forwards')
parser.add_option('--n_edges', type=int, default=6, help='number_of_edges')
parser.add_option('--pattern', type=int, default=-2, help='index of pattern, e.g. -2, -1, 0, 1, 2, ...')
parser.add_option('--color', type=str, default='monocolor', help='color, e.g. monocolor, red, ...')
parser.add_option('--n_circle', type=int, default=10, help='number of circle')
(opts, args) = parser.parse_args()
argvs = sys.argv
if opts.color == 'colorful':
colors = ['red', 'orange', 'yellow', 'green', 'blue', 'purple']
else:
colors = [opts.color] * 6
speed(0)
# bgcolor("Black")
def rotation(start_length, end_length, n_edges, pattern, color):
n = 0
c = 0
colormode(255)
for count in range(start_length, end_length):
# decide color
if color == 'monocolor':
c += int(255 / end_length)
# pencolor(np.uint8(-c), np.uint8(-c), np.uint8(-c))
pencolor(np.uint8(c), np.uint8(c), np.uint8(c))
else:
# you can change color as you like here
start = 255
c += int(start / end_length)
# pencolor(np.uint8(start - c), np.uint8(start - c), np.uint8(c))
# pencolor(np.uint8(c), np.uint8(start - c), np.uint8(start - c))
pencolor(np.uint8(c), np.uint8(0), np.uint8(0))
for i in range(n_edges):
if color == 'colorful':
pencolor(colors[i % 6])
forward(count)
left(int(360 / n_edges) + pattern)
n += 1
left(3)
print(count)
window = turtle.Screen()
window.setup(width=600, height=600, startx=10, starty=0.5)
position_list = np.random.randint(-300, 300, size=(opts.n_circle, 2))
for ii in range(opts.n_circle):
penup()
goto(position_list[ii, 0], position_list[ii, 1])
pendown()
rotation(np.random.randint(10) * 4, np.random.randint(10, 30) * 4, np.random.randint(4, 5), np.random.choice([-1, 1]) * np.random.randint(0, 3), opts.color)
hideturtle()
ts = getscreen()
bgcolor("Black")
ts.getcanvas().postscript(file=f"{opts.name}_{opts.end_length}_{opts.n_edges}_{opts.pattern}_{opts.color}.eps")
print('end')
exit()
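
# --- Added usage note (assumed script name, for illustration only) ---
# The options defined above can be combined on the command line, for example:
#
#     python paint.py --n_circle 5 --end_length 80 --n_edges 6 --color colorful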
|
py | 1a37a24ec9eacf31322bf3d19647b1aa34c39819 | # Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ROS2 Tesla driver."""
import os
import cv2
import numpy as np
import rclpy
from sensor_msgs.msg import Image
from ackermann_msgs.msg import AckermannDrive
from rclpy.qos import qos_profile_sensor_data, QoSReliabilityPolicy
from rclpy.node import Node
CONTROL_COEFFICIENT = 0.0005
class LaneFollower(Node):
def __init__(self):
super().__init__('lane_follower')
# ROS interface
self.__ackermann_publisher = self.create_publisher(AckermannDrive, 'cmd_ackermann', 1)
qos_camera_data = qos_profile_sensor_data
# In case ROS_DISTRO is Rolling or Galactic the QoSReliabilityPolicy is strict.
if ('ROS_DISTRO' in os.environ and (os.environ['ROS_DISTRO'] == 'rolling' or os.environ['ROS_DISTRO'] == 'galactic')):
qos_camera_data.reliability = QoSReliabilityPolicy.RELIABLE
self.create_subscription(Image, 'vehicle/camera', self.__on_camera_image, qos_camera_data)
def __on_camera_image(self, message):
img = message.data
img = np.frombuffer(img, dtype=np.uint8).reshape((message.height, message.width, 4))
img = img[380:420, :]
# Segment the image by color in HSV color space
img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(img, np.array([50, 110, 150]), np.array([120, 255, 255]))
# Find the largest segmented contour
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
command_message = AckermannDrive()
command_message.speed = 50.0
command_message.steering_angle = 0.0
if contours:
largest_contour = max(contours, key=cv2.contourArea)
largest_contour_center = cv2.moments(largest_contour)
if largest_contour_center['m00'] != 0:
center_x = int(largest_contour_center['m10'] / largest_contour_center['m00'])
# Find error (the lane distance from the target distance)
error = center_x - 190
command_message.steering_angle = error*CONTROL_COEFFICIENT
self.__ackermann_publisher.publish(command_message)
def main(args=None):
rclpy.init(args=args)
follower = LaneFollower()
rclpy.spin(follower)
rclpy.shutdown()
if __name__ == '__main__':
main()
|
py | 1a37a28f189cbb3c5f2dfb93f77906c90a6fd390 | __author__ = 'vlad' |
py | 1a37a3700d2c2b6890083e5e62c9051202824bc2 | from swsscommon import swsscommon
import time
import json
import random
import time
from pprint import pprint
def create_entry(tbl, key, pairs):
fvs = swsscommon.FieldValuePairs(pairs)
tbl.set(key, fvs)
time.sleep(1)
def create_entry_tbl(db, table, separator, key, pairs):
tbl = swsscommon.Table(db, table)
create_entry(tbl, key, pairs)
def create_entry_pst(db, table, separator, key, pairs):
tbl = swsscommon.ProducerStateTable(db, table)
create_entry(tbl, key, pairs)
def how_many_entries_exist(db, table):
tbl = swsscommon.Table(db, table)
return len(tbl.getKeys())
def entries(db, table):
tbl = swsscommon.Table(db, table)
return set(tbl.getKeys())
def get_exist_entries(dvs, table):
db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
tbl = swsscommon.Table(db, table)
return set(tbl.getKeys())
def get_created_entry(db, table, existed_entries):
tbl = swsscommon.Table(db, table)
entries = set(tbl.getKeys())
new_entries = list(entries - existed_entries)
assert len(new_entries) == 1, "Wrong number of created entries."
return new_entries[0]
def get_created_entries(db, table, existed_entries, count):
tbl = swsscommon.Table(db, table)
entries = set(tbl.getKeys())
new_entries = list(entries - existed_entries)
assert len(new_entries) == count, "Wrong number of created entries."
new_entries.sort()
return new_entries
def get_default_vr_id(dvs):
db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
table = 'ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER'
tbl = swsscommon.Table(db, table)
keys = tbl.getKeys()
assert len(keys) == 1, "Wrong number of virtual routers found"
return keys[0]
def check_object(db, table, key, expected_attributes):
tbl = swsscommon.Table(db, table)
keys = tbl.getKeys()
assert key in keys, "The desired key is not presented"
status, fvs = tbl.get(key)
assert status, "Got an error when get a key"
assert len(fvs) >= len(expected_attributes), "Incorrect attributes"
attr_keys = {entry[0] for entry in fvs}
for name, value in fvs:
if name in expected_attributes:
assert expected_attributes[name] == value, "Wrong value %s for the attribute %s = %s" % \
(value, name, expected_attributes[name])
def create_vnet_local_routes(dvs, prefix, vnet_name, ifname):
app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
create_entry_pst(
app_db,
"VNET_ROUTE_TABLE", ':', "%s:%s" % (vnet_name, prefix),
[
("ifname", ifname),
]
)
time.sleep(2)
def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0):
app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
attrs = [
("endpoint", endpoint),
]
if vni:
attrs.append(('vni', vni))
if mac:
attrs.append(('mac_address', mac))
create_entry_pst(
app_db,
"VNET_ROUTE_TUNNEL_TABLE", ':', "%s:%s" % (vnet_name, prefix),
attrs,
)
time.sleep(2)
def create_vlan(dvs, vlan_name, vlan_ids):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
vlan_id = vlan_name[4:]
# create vlan
create_entry_tbl(
conf_db,
"VLAN", '|', vlan_name,
[
("vlanid", vlan_id),
],
)
time.sleep(1)
vlan_oid = get_created_entry(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_ids)
check_object(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_oid,
{
"SAI_VLAN_ATTR_VLAN_ID": vlan_id,
}
)
return vlan_oid
def create_vlan_interface(dvs, vlan_name, ifname, vnet_name, ipaddr):
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
vlan_ids = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
vlan_oid = create_vlan (dvs, vlan_name, vlan_ids)
# create a vlan member in config db
create_entry_tbl(
conf_db,
"VLAN_MEMBER", '|', "%s|%s" % (vlan_name, ifname),
[
("tagging_mode", "untagged"),
],
)
time.sleep(1)
# create vlan interface in config db
create_entry_tbl(
conf_db,
"VLAN_INTERFACE", '|', vlan_name,
[
("vnet_name", vnet_name),
],
)
#FIXME - This is created by IntfMgr
app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
create_entry_pst(
app_db,
"INTF_TABLE", ':', vlan_name,
[
("vnet_name", vnet_name),
],
)
time.sleep(2)
create_entry_tbl(
conf_db,
"VLAN_INTERFACE", '|', "%s|%s" % (vlan_name, ipaddr),
[
("family", "IPv4"),
],
)
time.sleep(2)
return vlan_oid
def create_phy_interface(dvs, ifname, vnet_name, ipaddr):
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
exist_rifs = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE")
# create vlan interface in config db
create_entry_tbl(
conf_db,
"INTERFACE", '|', ifname,
[
("vnet_name", vnet_name),
],
)
#FIXME - This is created by IntfMgr
app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
create_entry_pst(
app_db,
"INTF_TABLE", ':', ifname,
[
("vnet_name", vnet_name),
],
)
time.sleep(2)
create_entry_tbl(
conf_db,
"INTERFACE", '|', "%s|%s" % (ifname, ipaddr),
[
("family", "IPv4"),
],
)
def create_vnet_entry(dvs, name, tunnel, vni, peer_list):
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
attrs = [
("vxlan_tunnel", tunnel),
("vni", vni),
("peer_list", peer_list),
]
# create the VXLAN tunnel Term entry in Config DB
create_entry_tbl(
conf_db,
"VNET", '|', name,
attrs,
)
time.sleep(2)
def create_vxlan_tunnel(dvs, name, src_ip):
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
attrs = [
("src_ip", src_ip),
]
# create the VXLAN tunnel Term entry in Config DB
create_entry_tbl(
conf_db,
"VXLAN_TUNNEL", '|', name,
attrs,
)
def get_lo(dvs):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
vr_id = get_default_vr_id(dvs)
tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE')
entries = tbl.getKeys()
lo_id = None
for entry in entries:
status, fvs = tbl.get(entry)
assert status, "Got an error when getting a key"
for key, value in fvs:
if key == 'SAI_ROUTER_INTERFACE_ATTR_TYPE' and value == 'SAI_ROUTER_INTERFACE_TYPE_LOOPBACK':
lo_id = entry
break
else:
assert False, 'Loopback id not found'
return lo_id
def get_switch_mac(dvs):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_SWITCH')
entries = tbl.getKeys()
mac = None
for entry in entries:
status, fvs = tbl.get(entry)
assert status, "Got an error when getting a key"
for key, value in fvs:
if key == 'SAI_SWITCH_ATTR_SRC_MAC_ADDRESS':
mac = value
break
else:
assert False, 'Switch mac not found'
return mac
loopback_id = 0
def_vr_id = 0
switch_mac = None
class VnetVxlanVrfTunnel(object):
ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL"
ASIC_TUNNEL_MAP = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP"
ASIC_TUNNEL_MAP_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY"
ASIC_TUNNEL_TERM_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY"
ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE"
ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER"
ASIC_ROUTE_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY"
ASIC_NEXT_HOP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP"
tunnel_map_ids = set()
tunnel_map_entry_ids = set()
tunnel_ids = set()
tunnel_term_ids = set()
tunnel_map_map = {}
tunnel = {}
vnet_vr_ids = set()
vr_map = {}
nh_ids = {}
def fetch_exist_entries(self, dvs):
self.vnet_vr_ids = get_exist_entries(dvs, self.ASIC_VRF_TABLE)
self.tunnel_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TABLE)
self.tunnel_map_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP)
self.tunnel_map_entry_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP_ENTRY)
self.tunnel_term_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TERM_ENTRY)
self.rifs = get_exist_entries(dvs, self.ASIC_RIF_TABLE)
self.routes = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY)
self.nhops = get_exist_entries(dvs, self.ASIC_NEXT_HOP)
global loopback_id, def_vr_id, switch_mac
if not loopback_id:
loopback_id = get_lo(dvs)
if not def_vr_id:
def_vr_id = get_default_vr_id(dvs)
if switch_mac is None:
switch_mac = get_switch_mac(dvs)
def check_vxlan_tunnel(self, dvs, tunnel_name, src_ip):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
global loopback_id, def_vr_id
tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 2)
tunnel_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids)
tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids)
# check that the vxlan tunnel and termination entries are there
assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 2), "The TUNNEL_MAP wasn't created"
assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "The TUNNEL_MAP_ENTRY was created too early"
assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TABLE) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created"
assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TERM_ENTRY) == (len(self.tunnel_term_ids) + 1), "The TUNNEL_TERM_TABLE_ENTRY wasn't created"
check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[0],
{
'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID',
}
)
check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[1],
{
'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI',
}
)
check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id,
{
'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_VXLAN',
'SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE': loopback_id,
'SAI_TUNNEL_ATTR_DECAP_MAPPERS': '1:%s' % tunnel_map_id[0],
'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': '1:%s' % tunnel_map_id[1],
'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip,
}
)
expected_attributes = {
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP',
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID': def_vr_id,
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': src_ip,
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_VXLAN',
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': tunnel_id,
}
check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, expected_attributes)
self.tunnel_map_ids.update(tunnel_map_id)
self.tunnel_ids.add(tunnel_id)
self.tunnel_term_ids.add(tunnel_term_id)
self.tunnel_map_map[tunnel_name] = tunnel_map_id
self.tunnel[tunnel_name] = tunnel_id
def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
time.sleep(2)
if (self.tunnel_map_map.get(tunnel_name) is None):
tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 2)
else:
tunnel_map_id = self.tunnel_map_map[tunnel_name]
tunnel_map_entry_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 2)
# check that the vxlan tunnel map entries are there
assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 2), "The TUNNEL_MAP_ENTRY wasn't created"
check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[0],
{
'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI',
'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[1],
'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_KEY': self.vr_map[vnet_name].get('ing'),
'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_VALUE': vni_id,
}
)
check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[1],
{
'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID',
'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[0],
'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vni_id,
'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_VALUE': self.vr_map[vnet_name].get('egr'),
}
)
self.tunnel_map_entry_ids.update(tunnel_map_entry_id)
def check_vnet_entry(self, dvs, name, peer_list=[]):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
#Check virtual router objects
assert how_many_entries_exist(asic_db, self.ASIC_VRF_TABLE) == (len(self.vnet_vr_ids) + 2),\
"The VR objects are not created"
new_vr_ids = get_created_entries(asic_db, self.ASIC_VRF_TABLE, self.vnet_vr_ids, 2)
self.vnet_vr_ids.update(new_vr_ids)
self.vr_map[name] = { 'ing':new_vr_ids[0], 'egr':new_vr_ids[1], 'peer':peer_list }
def vnet_route_ids(self, dvs, name, local=False):
vr_set = set()
if local:
vr_set.add(self.vr_map[name].get('egr'))
else:
vr_set.add(self.vr_map[name].get('ing'))
try:
for peer in self.vr_map[name].get('peer'):
vr_set.add(self.vr_map[peer].get('ing'))
except IndexError:
pass
return vr_set
def check_router_interface(self, dvs, name, vlan_oid=0):
# Check RIF in ingress VRF
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
global switch_mac
expected_attr = {
"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": self.vr_map[name].get('ing'),
"SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": switch_mac,
"SAI_ROUTER_INTERFACE_ATTR_MTU": "9100",
}
if vlan_oid:
expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_VLAN'})
expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_VLAN_ID': vlan_oid})
else:
expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_PORT'})
new_rif = get_created_entry(asic_db, self.ASIC_RIF_TABLE, self.rifs)
check_object(asic_db, self.ASIC_RIF_TABLE, new_rif, expected_attr)
#IP2ME and subnet routes will be created with every router interface
new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, 2)
self.rifs.add(new_rif)
self.routes.update(new_route)
def check_vnet_local_routes(self, dvs, name):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
vr_ids = self.vnet_route_ids(dvs, name, True)
count = len(vr_ids)
new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count)
#Check if the route is duplicated to egress VRF
asic_vrs = set()
for idx in range(count):
rt_key = json.loads(new_route[idx])
asic_vrs.add(rt_key['vr'])
assert asic_vrs == vr_ids
self.routes.update(new_route)
def check_vnet_routes(self, dvs, name, endpoint, tunnel, mac="", vni=0):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
vr_ids = self.vnet_route_ids(dvs, name)
count = len(vr_ids)
# Check routes in ingress VRF
expected_attr = {
"SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP",
"SAI_NEXT_HOP_ATTR_IP": endpoint,
"SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel],
}
if vni:
expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni})
if mac:
expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac})
if endpoint in self.nh_ids:
new_nh = self.nh_ids[endpoint]
else:
new_nh = get_created_entry(asic_db, self.ASIC_NEXT_HOP, self.nhops)
self.nh_ids[endpoint] = new_nh
self.nhops.add(new_nh)
check_object(asic_db, self.ASIC_NEXT_HOP, new_nh, expected_attr)
new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count)
#Check if the route is in expected VRF
asic_vrs = set()
for idx in range(count):
check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx],
{
"SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nh,
}
)
rt_key = json.loads(new_route[idx])
asic_vrs.add(rt_key['vr'])
assert asic_vrs == vr_ids
self.routes.update(new_route)
class TestVnetOrch(object):
'''
Test 1 - Create Vlan Interface, Tunnel and Vnet
'''
def test_vnet_orch_1(self, dvs, testlog):
vnet_obj = VnetVxlanVrfTunnel()
tunnel_name = 'tunnel_1'
vnet_obj.fetch_exist_entries(dvs)
create_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10')
create_vnet_entry(dvs, 'Vnet_2000', tunnel_name, '2000', "")
vnet_obj.check_vnet_entry(dvs, 'Vnet_2000')
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2000', '2000')
vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10')
vid = create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vnet_2000", "100.100.3.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_2000', vid)
vid = create_vlan_interface(dvs, "Vlan101", "Ethernet28", "Vnet_2000", "100.100.4.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_2000', vid)
create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000', '10.10.10.1')
vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.1', tunnel_name)
create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet_2000', 'Vlan100')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000')
create_vnet_local_routes(dvs, "100.100.4.0/24", 'Vnet_2000', 'Vlan101')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000')
#Create Physical Interface in another Vnet
create_vnet_entry(dvs, 'Vnet_2001', tunnel_name, '2001', "")
vnet_obj.check_vnet_entry(dvs, 'Vnet_2001')
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2001', '2001')
create_phy_interface(dvs, "Ethernet4", "Vnet_2001", "100.102.1.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_2001')
create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001', '10.10.10.2', "00:12:34:56:78:9A")
vnet_obj.check_vnet_routes(dvs, 'Vnet_2001', '10.10.10.2', tunnel_name, "00:12:34:56:78:9A")
create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet_2001', 'Ethernet4')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2001')
'''
Test 2 - Two VNets, One HSM per VNet
'''
def test_vnet_orch_2(self, dvs, testlog):
vnet_obj = VnetVxlanVrfTunnel()
tunnel_name = 'tunnel_2'
vnet_obj.fetch_exist_entries(dvs)
create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6')
create_vnet_entry(dvs, 'Vnet_1', tunnel_name, '1111', "")
vnet_obj.check_vnet_entry(dvs, 'Vnet_1')
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_1', '1111')
tun_id = vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6')
vid = create_vlan_interface(dvs, "Vlan1001", "Ethernet0", "Vnet_1", "1.1.10.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_1', vid)
create_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1', '100.1.1.10')
vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name)
create_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1', '100.1.1.10')
vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name)
create_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1', '200.200.1.200')
vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.200', tunnel_name)
create_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1', '200.200.1.201')
vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.201', tunnel_name)
create_vnet_local_routes(dvs, "1.1.10.0/24", 'Vnet_1', 'Vlan1001')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_1')
create_vnet_entry(dvs, 'Vnet_2', tunnel_name, '2222', "")
vnet_obj.check_vnet_entry(dvs, 'Vnet_2')
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2', '2222')
vid = create_vlan_interface(dvs, "Vlan1002", "Ethernet4", "Vnet_2", "2.2.10.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_2', vid)
create_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2', '100.1.1.20')
vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name)
create_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2', '100.1.1.20')
vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name)
create_vnet_local_routes(dvs, "2.2.10.0/24", 'Vnet_2', 'Vlan1002')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2')
'''
Test 3 - Two VNets, One HSM per VNet, Peering
'''
def test_vnet_orch_3(self, dvs, testlog):
vnet_obj = VnetVxlanVrfTunnel()
tunnel_name = 'tunnel_3'
vnet_obj.fetch_exist_entries(dvs)
create_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7')
create_vnet_entry(dvs, 'Vnet_10', tunnel_name, '1111', "Vnet_20")
vnet_obj.check_vnet_entry(dvs, 'Vnet_10', ['Vnet_20'])
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_10', '1111')
create_vnet_entry(dvs, 'Vnet_20', tunnel_name, '2222', "Vnet_10")
vnet_obj.check_vnet_entry(dvs, 'Vnet_20', ['Vnet_10'])
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_20', '2222')
tun_id = vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7')
vid = create_vlan_interface(dvs, "Vlan2001", "Ethernet8", "Vnet_10", "5.5.10.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_10', vid)
vid = create_vlan_interface(dvs, "Vlan2002", "Ethernet12", "Vnet_20", "8.8.10.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_20', vid)
create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10', '50.1.1.10')
vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '50.1.1.10', tunnel_name)
create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20', '80.1.1.20')
vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '80.1.1.20', tunnel_name)
create_vnet_local_routes(dvs, "5.5.10.0/24", 'Vnet_10', 'Vlan2001')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_10')
create_vnet_local_routes(dvs, "8.8.10.0/24", 'Vnet_20', 'Vlan2002')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_20')
|
py | 1a37a3d46d1ef5e91dd5dab197265e18cc1378a7 | """
ASGI config for dutchmemocards project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dutchmemocards.settings")
application = get_asgi_application()
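# Hedged usage note (the server command below is an assumption for illustration,
# not part of this project): any ASGI server can serve this callable, e.g.
#   uvicorn dutchmemocards.asgi:application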
|
py | 1a37a7b184c67b334f81332cbd5876fd8d9dd361 | def get_parent_index(h, idx):
## calculate the maximum index first
## if the input is too large, return -1
max_idx = 2**h - 1
if max_idx < idx:
return -1
# otherwise, carry on
else:
node_offset = 0
continue_flag = True
subtree_size = max_idx
result = -1 # default result
while continue_flag:
if subtree_size == 0:
continue_flag = False
# right shift is equivalent to dividing by 2 and discarding the remainder.
subtree_size = subtree_size >> 1
# predict the left node
left_node = node_offset + subtree_size
# predict the right node
right_node = left_node + subtree_size
# calculate my node value
my_node = right_node + 1
# if either child is a match, return my node value
if (left_node == idx) or (right_node == idx):
result = my_node
continue_flag = False
# Make the current left child the offset if the index is greater than the left.
# This effectively searches down the right subtree.
if (idx > left_node):
node_offset = left_node
return result
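# Worked example (my own illustration of the post-order numbering assumed above):
# for height h=3 the perfect tree has nodes 1..7 with root 7, so
#   get_parent_index(3, 7) == -1   # the root has no parent
#   get_parent_index(3, 3) == 7    # 3 is the root's left child
#   get_parent_index(3, 5) == 6    # 5 is the right child of node 6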
def solution(h, q):
return [ get_parent_index(h, x) for x in q ] |
py | 1a37a80961919d7f77c035ec75c84784fdf28248 | # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from minorminer.utils.pegasus import *
|
py | 1a37a963b5315cb790866dcc88a6e148b5736334 | from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import pandas as pd
import os
import argparse
def create_folder(parent_path, folder):
if not parent_path.endswith('/'):
parent_path += '/'
folder_path = parent_path + folder
if not os.path.exists(folder_path):
os.makedirs(folder_path)
return folder_path
def shuffle_stays(stays, seed=9):
return shuffle(stays, random_state=seed)
def process_table(table_name, table, stays, folder_path):
table = table.loc[stays].copy()
table.to_csv('{}/{}.csv'.format(folder_path, table_name))
return
def split_train_test(path, is_test=True, seed=9, cleanup=True, MIMIC=False):
labels = pd.read_csv(path + 'preprocessed_labels.csv')
labels.set_index('patient', inplace=True)
# we split by unique patient identifier to make sure there are no patients
# that cross into both the train and the test sets
patients = labels.uniquepid.unique()
train, test = train_test_split(patients, test_size=0.2, random_state=seed)
train, val = train_test_split(train, test_size=0.1/0.8, random_state=seed)
print('==> Loading data for splitting...')
if is_test:
timeseries = pd.read_csv(
path + 'preprocessed_timeseries.csv', nrows=999999)
else:
timeseries = pd.read_csv(path + 'preprocessed_timeseries.csv')
timeseries.set_index('patient', inplace=True)
if not MIMIC:
diagnoses = pd.read_csv(path + 'preprocessed_diagnoses.csv')
diagnoses.set_index('patient', inplace=True)
flat_features = pd.read_csv(path + 'preprocessed_flat.csv')
flat_features.set_index('patient', inplace=True)
# delete the source files, as they won't be needed anymore
if is_test is False and cleanup:
print('==> Removing the unsorted data...')
os.remove(path + 'preprocessed_timeseries.csv')
if not MIMIC:
os.remove(path + 'preprocessed_diagnoses.csv')
os.remove(path + 'preprocessed_labels.csv')
os.remove(path + 'preprocessed_flat.csv')
for partition_name, partition in zip(['train', 'val', 'test'], [train, val, test]):
print('==> Preparing {} data...'.format(partition_name))
stays = labels.loc[labels['uniquepid'].isin(partition)].index
folder_path = create_folder(path, partition_name)
with open(folder_path + '/stays.txt', 'w') as f:
for stay in stays:
f.write("%s\n" % stay)
stays = shuffle_stays(stays, seed=9)
if MIMIC:
for table_name, table in zip(['labels', 'flat', 'timeseries'],
[labels, flat_features, timeseries]):
process_table(table_name, table, stays, folder_path)
else:
for table_name, table in zip(['labels', 'flat', 'diagnoses', 'timeseries'],
[labels, flat_features, diagnoses, timeseries]):
process_table(table_name, table, stays, folder_path)
return
if __name__ == '__main__':
from eICU_preprocessing.run_all_preprocessing import eICU_path
parser = argparse.ArgumentParser()
parser.add_argument('--cleanup', action='store_true')
args = parser.parse_args()
split_train_test(eICU_path, is_test=False, cleanup=args.cleanup)
|
py | 1a37aa1ad4e07180337844d68720e9095cb159c3 | """
Support for HomematicIP sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/sensor.homematicip_cloud/
"""
import logging
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.components.homematicip_cloud import (
HomematicipGenericDevice, DOMAIN, EVENT_HOME_CHANGED,
ATTR_HOME_LABEL, ATTR_HOME_ID, ATTR_LOW_BATTERY, ATTR_RSSI)
from homeassistant.const import TEMP_CELSIUS, STATE_OK
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['homematicip_cloud']
ATTR_VALVE_STATE = 'valve_state'
ATTR_VALVE_POSITION = 'valve_position'
ATTR_TEMPERATURE_OFFSET = 'temperature_offset'
HMIP_UPTODATE = 'up_to_date'
HMIP_VALVE_DONE = 'adaption_done'
HMIP_SABOTAGE = 'sabotage'
STATE_LOW_BATTERY = 'low_battery'
STATE_SABOTAGE = 'sabotage'
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the HomematicIP sensor devices."""
# pylint: disable=import-error, no-name-in-module
from homematicip.device import (
HeatingThermostat, TemperatureHumiditySensorWithoutDisplay,
TemperatureHumiditySensorDisplay)
homeid = discovery_info['homeid']
home = hass.data[DOMAIN][homeid]
devices = [HomematicipAccesspoint(home)]
for device in home.devices:
devices.append(HomematicipDeviceStatus(home, device))
if isinstance(device, HeatingThermostat):
devices.append(HomematicipHeatingThermostat(home, device))
if isinstance(device, TemperatureHumiditySensorWithoutDisplay):
devices.append(HomematicipSensorThermometer(home, device))
devices.append(HomematicipSensorHumidity(home, device))
if isinstance(device, TemperatureHumiditySensorDisplay):
devices.append(HomematicipSensorThermometer(home, device))
devices.append(HomematicipSensorHumidity(home, device))
if home.devices:
add_devices(devices)
class HomematicipAccesspoint(Entity):
"""Representation of an HomeMaticIP access point."""
def __init__(self, home):
"""Initialize the access point sensor."""
self._home = home
_LOGGER.debug('Setting up access point %s', home.label)
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(
self.hass, EVENT_HOME_CHANGED, self._home_changed)
@callback
def _home_changed(self, deviceid):
"""Handle device state changes."""
if deviceid is None or deviceid == self._home.id:
_LOGGER.debug('Event home %s', self._home.label)
self.async_schedule_update_ha_state()
@property
def name(self):
"""Return the name of the access point device."""
if self._home.label == '':
return 'Access Point Status'
return '{} Access Point Status'.format(self._home.label)
@property
def icon(self):
"""Return the icon of the access point device."""
return 'mdi:access-point-network'
@property
def state(self):
"""Return the state of the access point."""
return self._home.dutyCycle
@property
def available(self):
"""Device available."""
return self._home.connected
@property
def device_state_attributes(self):
"""Return the state attributes of the access point."""
return {
ATTR_HOME_LABEL: self._home.label,
ATTR_HOME_ID: self._home.id,
}
class HomematicipDeviceStatus(HomematicipGenericDevice):
"""Representation of an HomematicIP device status."""
def __init__(self, home, device):
"""Initialize the device."""
super().__init__(home, device)
_LOGGER.debug('Setting up sensor device status: %s', device.label)
@property
def name(self):
"""Return the name of the device."""
return self._name('Status')
@property
def icon(self):
"""Return the icon of the status device."""
if (hasattr(self._device, 'sabotage') and
self._device.sabotage == HMIP_SABOTAGE):
return 'mdi:alert'
elif self._device.lowBat:
return 'mdi:battery-outline'
elif self._device.updateState.lower() != HMIP_UPTODATE:
return 'mdi:refresh'
return 'mdi:check'
@property
def state(self):
"""Return the state of the generic device."""
if (hasattr(self._device, 'sabotage') and
self._device.sabotage == HMIP_SABOTAGE):
return STATE_SABOTAGE
elif self._device.lowBat:
return STATE_LOW_BATTERY
elif self._device.updateState.lower() != HMIP_UPTODATE:
return self._device.updateState.lower()
return STATE_OK
class HomematicipHeatingThermostat(HomematicipGenericDevice):
"""HomematicIP heating thermostat representation."""
def __init__(self, home, device):
"""Initialize heating thermostat."""
super().__init__(home, device)
_LOGGER.debug('Setting up heating thermostat device: %s', device.label)
@property
def icon(self):
"""Return the icon."""
if self._device.valveState.lower() != HMIP_VALVE_DONE:
return 'mdi:alert'
return 'mdi:radiator'
@property
def state(self):
"""Return the state of the radiator valve."""
if self._device.valveState.lower() != HMIP_VALVE_DONE:
return self._device.valveState.lower()
return round(self._device.valvePosition*100)
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return '%'
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_VALVE_STATE: self._device.valveState.lower(),
ATTR_TEMPERATURE_OFFSET: self._device.temperatureOffset,
ATTR_LOW_BATTERY: self._device.lowBat,
ATTR_RSSI: self._device.rssiDeviceValue
}
class HomematicipSensorHumidity(HomematicipGenericDevice):
"""HomematicIP humidity sensor device."""
def __init__(self, home, device):
"""Initialize the humidity sensor device."""
super().__init__(home, device)
_LOGGER.debug('Setting up humidity device: %s', device.label)
@property
def name(self):
"""Return the name of the device."""
return self._name('Humidity')
@property
def icon(self):
"""Return the icon."""
return 'mdi:water'
@property
def state(self):
"""Return the state."""
return self._device.humidity
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return '%'
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_LOW_BATTERY: self._device.lowBat,
ATTR_RSSI: self._device.rssiDeviceValue,
}
class HomematicipSensorThermometer(HomematicipGenericDevice):
"""HomematicIP thermometer device."""
def __init__(self, home, device):
"""Initialize the thermometer device."""
super().__init__(home, device)
_LOGGER.debug('Setting up thermometer device: %s', device.label)
@property
def name(self):
"""Return the name of the device."""
return self._name('Temperature')
@property
def icon(self):
"""Return the icon."""
return 'mdi:thermometer'
@property
def state(self):
"""Return the state."""
return self._device.actualTemperature
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return TEMP_CELSIUS
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_TEMPERATURE_OFFSET: self._device.temperatureOffset,
ATTR_LOW_BATTERY: self._device.lowBat,
ATTR_RSSI: self._device.rssiDeviceValue,
}
|
py | 1a37aa2a1b15c9a2b389a87b1f6dc041188ce62e | from flask_restful import reqparse
from huginn.cli import argtypes
# the waypoint request parser is used to parse the waypoint data from a web
# request
waypoint = reqparse.RequestParser()
waypoint.add_argument("latitude", required=True,
location="json", type=argtypes.latitude)
waypoint.add_argument("longitude", required=True,
location="json", type=argtypes.longitude)
waypoint.add_argument("altitude", required=True,
location="json", type=argtypes.altitude)
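# Hedged usage sketch (the surrounding Flask-RESTful resource is assumed, not
# part of this module): inside a resource method handling a JSON POST,
#   args = waypoint.parse_args()
# yields a dict with validated "latitude", "longitude" and "altitude" values.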
|
py | 1a37ab538a9e95f46fbafd94343e1c3bd99615f4 | from devito.ir.clusters.queue import QueueStateful
from devito.ir.support import (SEQUENTIAL, PARALLEL, PARALLEL_INDEP, PARALLEL_IF_ATOMIC,
AFFINE, ROUNDABLE, TILABLE, Forward)
from devito.tools import as_tuple, flatten, timed_pass
__all__ = ['analyze']
@timed_pass()
def analyze(clusters):
state = QueueStateful.State()
# Collect properties
clusters = Parallelism(state).process(clusters)
clusters = Affiness(state).process(clusters)
clusters = Tiling(state).process(clusters)
clusters = Rounding(state).process(clusters)
# Reconstruct Clusters attaching the discovered properties
processed = [c.rebuild(properties=state.properties.get(c)) for c in clusters]
return processed
class Detector(QueueStateful):
def process(self, elements):
return self._process_fatd(elements, 1)
def callback(self, clusters, prefix):
if not prefix:
return clusters
# The analyzed Dimension
d = prefix[-1].dim
# Apply the actual callback
retval = self._callback(clusters, d, prefix)
# Normalize retval
retval = set(as_tuple(retval))
# Update `self.state`
if retval:
for c in clusters:
properties = self.state.properties.setdefault(c, {})
properties.setdefault(d, set()).update(retval)
return clusters
class Parallelism(Detector):
"""
Detect SEQUENTIAL, PARALLEL, PARALLEL_INDEP and PARALLEL_IF_ATOMIC Dimensions.
Consider an IterationSpace over `n` Dimensions. Let `(d_1, ..., d_n)` be the
distance vector of a dependence. Let `i` be the `i-th` Dimension of the
IterationSpace. Then:
* `i` is PARALLEL_INDEP if all dependences have distance vectors:
(d_1, ..., d_i) = 0
* `i` is PARALLEL if all dependences have distance vectors:
(d_1, ..., d_i) = 0, OR
(d_1, ..., d_{i-1}) > 0
* `i` is PARALLEL_IF_ATOMIC if all dependences have distance vectors:
(d_1, ..., d_i) = 0, OR
(d_1, ..., d_{i-1}) > 0, OR
the 'write' is known to be an associative and commutative increment
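A small illustrative example (not from the original source): in a 2D
IterationSpace (x, y), a dependence with distance vector (0, 0) keeps both
Dimensions PARALLEL_INDEP; a distance vector (1, 0) still leaves `y` PARALLEL
(the outer distance is positive) but makes `x` SEQUENTIAL, unless the write is
an associative and commutative increment, in which case `x` becomes
PARALLEL_IF_ATOMIC.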
"""
def _callback(self, clusters, d, prefix):
# Rule out if non-unitary increment Dimension (e.g., `t0=(time+1)%2`)
if any(c.sub_iterators.get(d) for c in clusters):
return SEQUENTIAL
# All Dimensions up to and including `i-1`
prev = flatten(i.dim._defines for i in prefix[:-1])
is_parallel_indep = True
is_parallel_atomic = False
scope = self._fetch_scope(clusters)
for dep in scope.d_all_gen():
test00 = dep.is_indep(d) and not dep.is_storage_related(d)
test01 = all(dep.is_reduce_atmost(i) for i in prev)
if test00 and test01:
continue
test1 = len(prev) > 0 and any(dep.is_carried(i) for i in prev)
if test1:
is_parallel_indep &= (dep.distance_mapper.get(d.root) == 0)
continue
if dep.function in scope.initialized:
# False alarm, the dependence is over a locally-defined symbol
continue
if dep.is_increment:
is_parallel_atomic = True
continue
return SEQUENTIAL
if is_parallel_atomic:
return PARALLEL_IF_ATOMIC
elif is_parallel_indep:
return {PARALLEL, PARALLEL_INDEP}
else:
return PARALLEL
class Rounding(Detector):
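"""
Detect the ROUNDABLE Dimensions, i.e. (as the checks below suggest) Forward,
PARALLEL Dimensions whose accessed Functions are all auto-padded and share a
single dtype, so that the trip count can safely be rounded up.
"""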
def _callback(self, clusters, d, prefix):
itinterval = prefix[-1]
# The iteration direction must be Forward -- ROUNDABLE is for rounding *up*
if itinterval.direction is not Forward:
return
properties = self._fetch_properties(clusters, prefix)
if PARALLEL not in properties[d]:
return
scope = self._fetch_scope(clusters)
# All accessed Functions must have enough room in the PADDING region
# so that `i`'s trip count can safely be rounded up
# Note: autopadding guarantees that the padding size along the
# Fastest Varying Dimension is a multiple of the SIMD vector length
functions = [f for f in scope.functions if f.is_Tensor]
if any(not f._honors_autopadding for f in functions):
return
# Mixed data types (e.g., float and double) is unsupported
if len({f.dtype for f in functions}) > 1:
return
return ROUNDABLE
class Affiness(Detector):
"""
Detect the AFFINE Dimensions.
"""
def _callback(self, clusters, d, prefix):
scope = self._fetch_scope(clusters)
accesses = [a for a in scope.accesses if not a.is_scalar]
if all(a.is_regular and a.affine_if_present(d._defines) for a in accesses):
return AFFINE
class Tiling(Detector):
"""
Detect the TILABLE Dimensions.
"""
def process(self, elements):
return self._process_fdta(elements, 1)
def _callback(self, clusters, d, prefix):
# A Dimension is TILABLE only if it's PARALLEL and AFFINE
properties = self._fetch_properties(clusters, prefix)
if not {PARALLEL, AFFINE} <= properties[d]:
return
# In addition, we use the heuristic that we do not consider
# TILABLE a Dimension that is not embedded in at least one
# SEQUENTIAL Dimension. This is to rule out tiling when the
# computation is not expected to be expensive
if not any(SEQUENTIAL in properties[i.dim] for i in prefix[:-1]):
return
# Likewise, it won't be marked TILABLE if there's at least one
# local SubDimension in all Clusters
if all(any(i.dim.is_Sub and i.dim.local for i in c.itintervals)
for c in clusters):
return
# If it induces dynamic bounds, then it's ruled out too
scope = self._fetch_scope(clusters)
if any(i.is_lex_non_stmt for i in scope.d_all_gen()):
return
return TILABLE
|
py | 1a37ac1e518e1ed305f2de6692952a54918713d7 | from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.models import User
from selectable import forms as selectable
from timepiece.utils.search import SearchForm
from timepiece.crm.lookups import (BusinessLookup, ProjectLookup, UserLookup,
QuickLookup)
from timepiece.crm.models import (Attribute, Business, Project,
ProjectRelationship)
class CreateEditBusinessForm(forms.ModelForm):
class Meta:
model = Business
fields = ('name', 'short_name', 'email', 'description', 'notes')
class CreateEditProjectForm(forms.ModelForm):
business = selectable.AutoCompleteSelectField(BusinessLookup)
business.widget.attrs['placeholder'] = 'Search'
class Meta:
model = Project
fields = ('name', 'business', 'tracker_url', 'point_person', 'type',
'status', 'activity_group', 'description')
class CreateUserForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'is_active',
'is_staff', 'groups')
def __init__(self, *args, **kwargs):
super(CreateUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].widget = forms.CheckboxSelectMultiple()
self.fields['groups'].help_text = None
def save(self, commit=True):
user = super(CreateUserForm, self).save(commit)
if commit:
self.save_m2m()
return user
class EditProjectRelationshipForm(forms.ModelForm):
class Meta:
model = ProjectRelationship
fields = ('types',)
def __init__(self, *args, **kwargs):
super(EditProjectRelationshipForm, self).__init__(*args, **kwargs)
self.fields['types'].widget = forms.CheckboxSelectMultiple(
choices=self.fields['types'].choices)
class EditUserForm(UserChangeForm):
password1 = forms.CharField(required=False, max_length=36,
label='Password', widget=forms.PasswordInput(render_value=False))
password2 = forms.CharField(required=False, max_length=36,
label='Repeat Password',
widget=forms.PasswordInput(render_value=False))
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'is_active',
'is_staff', 'groups')
def __init__(self, *args, **kwargs):
super(EditUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].widget = forms.CheckboxSelectMultiple()
self.fields['groups'].help_text = None
# In 1.4 this field is created even if it is excluded in Meta.
if 'password' in self.fields:
del(self.fields['password'])
def clean(self):
super(EditUserForm, self).clean()
password1 = self.cleaned_data.get('password1', None)
password2 = self.cleaned_data.get('password2', None)
if password1 and password1 != password2:
raise forms.ValidationError('Passwords must match.')
return self.cleaned_data
def save(self, commit=True):
instance = super(EditUserForm, self).save(commit=False)
password1 = self.cleaned_data.get('password1', None)
if password1:
instance.set_password(password1)
if commit:
instance.save()
self.save_m2m()
return instance
class EditUserSettingsForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
def __init__(self, *args, **kwargs):
super(EditUserSettingsForm, self).__init__(*args, **kwargs)
for name in self.fields:
self.fields[name].required = True
class ProjectSearchForm(SearchForm):
status = forms.ChoiceField(required=False, choices=[], label='')
def __init__(self, *args, **kwargs):
super(ProjectSearchForm, self).__init__(*args, **kwargs)
statuses = Attribute.statuses.all()
choices = [('', 'Any Status')] + [(a.pk, a.label) for a in statuses]
self.fields['status'].choices = choices
class QuickSearchForm(forms.Form):
quick_search = selectable.AutoCompleteSelectField(QuickLookup, required=False)
quick_search.widget.attrs['placeholder'] = 'Search'
def clean_quick_search(self):
item = self.cleaned_data['quick_search']
if not item:
msg = 'No user, business, or project matches your query.'
raise forms.ValidationError(msg)
return item
def get_result(self):
return self.cleaned_data['quick_search'].get_absolute_url()
class SelectProjectForm(forms.Form):
project = selectable.AutoCompleteSelectField(ProjectLookup, label='')
project.widget.attrs['placeholder'] = 'Add Project'
def get_project(self):
return self.cleaned_data['project'] if self.is_valid() else None
class SelectUserForm(forms.Form):
user = selectable.AutoCompleteSelectField(UserLookup, label='')
user.widget.attrs['placeholder'] = 'Add User'
def get_user(self):
return self.cleaned_data['user'] if self.is_valid() else None
|
py | 1a37ac6649d18d12f77b6ecd7285186111c49e73 | """
Functions connecting the whole process. 'visual_from_signal' should be run when visualization of a specific stored signal is requested;
'visual_from_data' should be run when visualization of an arbitrary point on Earth is requested.
Miha Lotric, April 2020
"""
import io
from signal_visualizer import getters as gt
def visual_from_signal(signal_id, show=0, save_as=None, return_bytes=0):
"""Save/Show static Mapbox map with dome representing reach and position of the signal.
Args:
signal_id [int]: Unique identifier of a signal.
show [bool]: If True, the final image is shown in the default OS image viewer.
save_as [None/str]: Path to the location where the image is stored. If left None, the image is not stored.
return_bytes [bool]: If True, the image is returned as bytes.
Return:
BytesIO: Bytes image of Mapbox static map with dome on it.
"""
signal_info = gt.get_signal_info(signal_id)
coordinates = float(signal_info['coordinates'][0]), float(signal_info['coordinates'][1])
radius_meters = signal_info['radius']
map_bytes = visual_from_data(coordinates, radius_meters, save_as=save_as, show=show, return_bytes=return_bytes)
return map_bytes if return_bytes else None
def visual_from_data(coordinates, radius_meters, show=1, save_as=None, return_bytes=0):
"""Save/Show static Mapbox map with dome representing specified radius and coordinates.
Args:
coordinates [tuple]: Coordinates of the signal position - (latitude,longitude).
radius_meters [float]: Radius of the dome in meters.
show [bool]: If True, the final image is shown in the default OS image viewer.
save_as [None/str]: Path to the location where the image is stored. If left None, the image is not stored.
return_bytes [bool]: If True, the image is returned as bytes.
Return:
BytesIO: Bytes image of Mapbox static map with dome on it.
"""
radius_px = gt.get_radius_px(radius_meters)
zoom = gt.get_zoom(coordinates[0], radius_px, radius_meters)
map_img = gt.get_map(radius_px, coordinates, zoom)
if show: map_img.show()
if save_as: map_img.save(save_as)
if return_bytes:
map_bytes = io.BytesIO()
map_img.save(map_bytes, format='PNG')
return io.BytesIO(map_bytes.getvalue())
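# Hedged usage sketch (coordinates and radius are illustrative values, not from
# the original project):
#   img_bytes = visual_from_data((46.05, 14.51), 500, show=0, return_bytes=1)
# returns a BytesIO PNG of the map with a 500 m dome around the given point.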
|
py | 1a37acee31b403138c07319f4f18321d610bc9d5 | from flask import render_template, redirect, url_for, flash, Blueprint, request
from flask_security import login_required, current_user
from flask_babelex import _
from app import db
from app.forms import EditProfileForm, NewPassword
from app.models import Barista
user = Blueprint('user', __name__, url_prefix='/user')
@user.route('/<user_name>')
@login_required
def profile(user_name):
user_obj = Barista.query.filter_by(name=user_name).first_or_404()
return render_template('user/user.html', user=user_obj)
@user.route('/edit_profile', methods=('GET', 'POST'))
@login_required
def edit():
form = EditProfileForm(current_user.name, current_user.phone_number, current_user.email)
if form.validate_on_submit():
current_user.name = form.name.data
current_user.phone_number = form.phone_number.data
current_user.email = form.email.data
db.session.commit()
flash(_('Your changes have been saved.'))
return redirect(url_for('user.profile', user_name=current_user))
elif request.method == 'GET':
form.name.data = current_user.name
form.phone_number.data = current_user.phone_number
form.email.data = current_user.email
return render_template('user/user_edit.html', user=current_user, form=form)
@user.route('/new_password', methods=('POST', 'GET'))
@login_required
def change_password():
form = NewPassword()
if form.validate_on_submit():
current_user.password = form.password.data
db.session.commit()
flash(_('Your changes have been saved.'))
return redirect(url_for('user.profile', user_name=current_user))
return render_template('user/user_password.html', user=current_user, form=form) |
py | 1a37af30e2b2d80bc011a52ef27b9e3f9bf5dfd7 | """Support for Tuya switches."""
from __future__ import annotations
from typing import Any
from tuya_iot import TuyaDevice, TuyaDeviceManager
from homeassistant.components.switch import (
DEVICE_CLASS_OUTLET,
SwitchEntity,
SwitchEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ENTITY_CATEGORY_CONFIG
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import HomeAssistantTuyaData
from .base import TuyaEntity
from .const import DOMAIN, TUYA_DISCOVERY_NEW, DPCode
# All descriptions can be found here. Mostly the Boolean data types in the
# default instruction set of each category end up being a Switch.
# https://developer.tuya.com/en/docs/iot/standarddescription?id=K9i5ql6waswzq
SWITCHES: dict[str, tuple[SwitchEntityDescription, ...]] = {
# Smart Kettle
# https://developer.tuya.com/en/docs/iot/fbh?id=K9gf484m21yq7
"bh": (
SwitchEntityDescription(
key=DPCode.START,
name="Start",
icon="mdi:kettle-steam",
),
SwitchEntityDescription(
key=DPCode.WARM,
name="Heat preservation",
entity_category=ENTITY_CATEGORY_CONFIG,
),
),
# Pet Water Feeder
# https://developer.tuya.com/en/docs/iot/f?id=K9gf46aewxem5
"cwysj": (
SwitchEntityDescription(
key=DPCode.FILTER_RESET,
name="Filter reset",
icon="mdi:filter",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.PUMP_RESET,
name="Water pump reset",
icon="mdi:pump",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.SWITCH,
name="Power",
),
SwitchEntityDescription(
key=DPCode.WATER_RESET,
name="Reset of water usage days",
icon="mdi:water-sync",
entity_category=ENTITY_CATEGORY_CONFIG,
),
),
# Light
# https://developer.tuya.com/en/docs/iot/f?id=K9i5ql3v98hn3
"dj": (
# There are sockets available with an RGB light
# that advertise as `dj`, but provide an additional
# switch to control the plug.
SwitchEntityDescription(
key=DPCode.SWITCH,
name="Plug",
),
),
# Circuit Breaker
"dlq": (
SwitchEntityDescription(
key=DPCode.CHILD_LOCK,
name="Child Lock",
icon="mdi:account-lock",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.SWITCH_1,
name="Switch",
),
),
# Switch
# https://developer.tuya.com/en/docs/iot/s?id=K9gf7o5prgf7s
"kg": (
SwitchEntityDescription(
key=DPCode.CHILD_LOCK,
name="Child Lock",
icon="mdi:account-lock",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.SWITCH_1,
name="Switch 1",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_2,
name="Switch 2",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_3,
name="Switch 3",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_4,
name="Switch 4",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_5,
name="Switch 5",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_6,
name="Switch 6",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB1,
name="USB 1",
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB2,
name="USB 2",
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB3,
name="USB 3",
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB4,
name="USB 4",
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB5,
name="USB 5",
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB6,
name="USB 6",
),
SwitchEntityDescription(
key=DPCode.SWITCH,
name="Switch",
device_class=DEVICE_CLASS_OUTLET,
),
),
# Air Purifier
# https://developer.tuya.com/en/docs/iot/f?id=K9gf46h2s6dzm
"kj": (
SwitchEntityDescription(
key=DPCode.ANION,
name="Ionizer",
icon="mdi:minus-circle-outline",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.FILTER_RESET,
name="Filter cartridge reset",
icon="mdi:filter",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.LOCK,
name="Child lock",
icon="mdi:account-lock",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.SWITCH,
name="Power",
),
SwitchEntityDescription(
key=DPCode.WET,
name="Humidification",
icon="mdi:water-percent",
entity_category=ENTITY_CATEGORY_CONFIG,
),
),
# Power Socket
# https://developer.tuya.com/en/docs/iot/s?id=K9gf7o5prgf7s
"pc": (
SwitchEntityDescription(
key=DPCode.CHILD_LOCK,
name="Child Lock",
icon="mdi:account-lock",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.SWITCH_1,
name="Socket 1",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_2,
name="Socket 2",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_3,
name="Socket 3",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_4,
name="Socket 4",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_5,
name="Socket 5",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_6,
name="Socket 6",
device_class=DEVICE_CLASS_OUTLET,
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB1,
name="USB 1",
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB2,
name="USB 2",
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB3,
name="USB 3",
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB4,
name="USB 4",
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB5,
name="USB 5",
),
SwitchEntityDescription(
key=DPCode.SWITCH_USB6,
name="USB 6",
),
SwitchEntityDescription(
key=DPCode.SWITCH,
name="Socket",
device_class=DEVICE_CLASS_OUTLET,
),
),
# Heater
# https://developer.tuya.com/en/docs/iot/categoryqn?id=Kaiuz18kih0sm
"qn": (
SwitchEntityDescription(
key=DPCode.ANION,
name="Ionizer",
icon="mdi:minus-circle-outline",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.LOCK,
name="Child Lock",
icon="mdi:account-lock",
entity_category=ENTITY_CATEGORY_CONFIG,
),
),
# Siren Alarm
# https://developer.tuya.com/en/docs/iot/categorysgbj?id=Kaiuz37tlpbnu
"sgbj": (
SwitchEntityDescription(
key=DPCode.MUFFLING,
name="Mute",
entity_category=ENTITY_CATEGORY_CONFIG,
),
),
# Smart Camera
# https://developer.tuya.com/en/docs/iot/categorysp?id=Kaiuz35leyo12
"sp": (
SwitchEntityDescription(
key=DPCode.WIRELESS_BATTERYLOCK,
name="Battery Lock",
icon="mdi:battery-lock",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.CRY_DETECTION_SWITCH,
icon="mdi:emoticon-cry",
name="Cry Detection",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.DECIBEL_SWITCH,
icon="mdi:microphone-outline",
name="Sound Detection",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.RECORD_SWITCH,
icon="mdi:record-rec",
name="Video Recording",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.MOTION_RECORD,
icon="mdi:record-rec",
name="Motion Recording",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.BASIC_PRIVATE,
icon="mdi:eye-off",
name="Privacy Mode",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.BASIC_FLIP,
icon="mdi:flip-horizontal",
name="Flip",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.BASIC_OSD,
icon="mdi:watermark",
name="Time Watermark",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.BASIC_WDR,
icon="mdi:watermark",
name="Wide Dynamic Range",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.MOTION_TRACKING,
icon="mdi:motion-sensor",
name="Motion Tracking",
entity_category=ENTITY_CATEGORY_CONFIG,
),
SwitchEntityDescription(
key=DPCode.MOTION_SWITCH,
icon="mdi:motion-sensor",
name="Motion Alarm",
entity_category=ENTITY_CATEGORY_CONFIG,
),
),
# Solar Light
# https://developer.tuya.com/en/docs/iot/tynd?id=Kaof8j02e1t98
"tyndj": (
SwitchEntityDescription(
key=DPCode.SWITCH_SAVE_ENERGY,
name="Energy Saving",
icon="mdi:leaf",
entity_category=ENTITY_CATEGORY_CONFIG,
),
),
# Ceiling Light
# https://developer.tuya.com/en/docs/iot/ceiling-light?id=Kaiuz03xxfc4r
"xdd": (
SwitchEntityDescription(
key=DPCode.DO_NOT_DISTURB,
name="Do not disturb",
icon="mdi:minus-circle-outline",
entity_category=ENTITY_CATEGORY_CONFIG,
),
),
# Diffuser
# https://developer.tuya.com/en/docs/iot/categoryxxj?id=Kaiuz1f9mo6bl
"xxj": (
SwitchEntityDescription(
key=DPCode.SWITCH,
name="Power",
),
SwitchEntityDescription(
key=DPCode.SWITCH_SPRAY,
name="Spray",
icon="mdi:spray",
),
SwitchEntityDescription(
key=DPCode.SWITCH_VOICE,
name="Voice",
icon="mdi:account-voice",
entity_category=ENTITY_CATEGORY_CONFIG,
),
),
}
# Socket (duplicate of `pc`)
# https://developer.tuya.com/en/docs/iot/s?id=K9gf7o5prgf7s
SWITCHES["cz"] = SWITCHES["pc"]
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Tuya switches dynamically through Tuya discovery."""
hass_data: HomeAssistantTuyaData = hass.data[DOMAIN][entry.entry_id]
@callback
def async_discover_device(device_ids: list[str]) -> None:
"""Discover and add a discovered Tuya switch."""
entities: list[TuyaSwitchEntity] = []
for device_id in device_ids:
device = hass_data.device_manager.device_map[device_id]
if descriptions := SWITCHES.get(device.category):
for description in descriptions:
if (
description.key in device.function
or description.key in device.status
):
entities.append(
TuyaSwitchEntity(
device, hass_data.device_manager, description
)
)
async_add_entities(entities)
async_discover_device([*hass_data.device_manager.device_map])
entry.async_on_unload(
async_dispatcher_connect(hass, TUYA_DISCOVERY_NEW, async_discover_device)
)
class TuyaSwitchEntity(TuyaEntity, SwitchEntity):
"""Tuya Switch Device."""
def __init__(
self,
device: TuyaDevice,
device_manager: TuyaDeviceManager,
description: SwitchEntityDescription,
) -> None:
"""Init TuyaHaSwitch."""
super().__init__(device, device_manager)
self.entity_description = description
self._attr_unique_id = f"{super().unique_id}{description.key}"
@property
def is_on(self) -> bool:
"""Return true if switch is on."""
return self.device.status.get(self.entity_description.key, False)
def turn_on(self, **kwargs: Any) -> None:
"""Turn the switch on."""
self._send_command([{"code": self.entity_description.key, "value": True}])
def turn_off(self, **kwargs: Any) -> None:
"""Turn the switch off."""
self._send_command([{"code": self.entity_description.key, "value": False}])
|
py | 1a37afa9ea37b0eb1245e352e6be3eb4503d8299 | from srcs.interpretator.interpretator_callback import InterpretatorCallback
from srcs.interpretator.context import Context
class Interpretator:
def __init__(self, runner, meta=None):
self.runner = runner
self.meta = meta
self.callbacks = []
self.context = Context()
self.current_string_index = 0
self.strings = []
def __call__(self, filter=None):
def decorator(function):
self.callbacks.append(InterpretatorCallback(function, filter))
return function
return decorator
def execute(self, string):
result = []
for callback in self.callbacks:
single_output = callback(string, self.current_string_index, self.context)
if single_output is not None:
result.append(single_output)
self.current_string_index += 1
self.strings.append(string)
return '\n'.join(result)
def run(self):
self.runner(self)
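# Hedged usage sketch (assumes InterpretatorCallback simply forwards its
# arguments to the wrapped function; that class is defined elsewhere):
#
#   interp = Interpretator(runner=lambda it: None)
#
#   @interp()
#   def echo(string, index, context):
#       return '%d: %s' % (index, string)
#
#   print(interp.execute('hello'))   # expected: "0: hello"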
|
py | 1a37afd4bf5b987a164525fb59232455d7bb87b4 | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from structlog import get_logger
from covidfaq import config, routers
# from covidfaq.evaluating.model.bert_plus_ood import BertPlusOODEn, BertPlusOODFr
# from covidfaq.scrape.scrape import (
# load_latest_source_data,
# download_OOD_model,
# download_cached_embeddings,
# )
app = FastAPI()
app.include_router(routers.health.router)
app.include_router(routers.answers.router)
app.add_middleware(
CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"]
)
@app.on_event("startup")
def on_startup():
conf = config.get()
log = get_logger()
log.info("launching", **conf.dict())
# load_latest_source_data()
# download_OOD_model()
# download_cached_embeddings()
# BertPlusOODEn()
# BertPlusOODFr()
|
bzl | 1a37b386a2406adcba5efe635e93f69906fb6e6f | # Copyright 2019 the rules_bison authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
_MIRRORS = [
"https://mirror.bazel.build/ftp.gnu.org/gnu/bison/",
"https://mirrors.kernel.org/gnu/bison/",
"https://ftp.gnu.org/gnu/bison/",
]
def _urls(filename):
return [m + filename for m in _MIRRORS]
DEFAULT_VERSION = "3.3.2"
VERSION_URLS = {
"3.7": {
"urls": _urls("bison-3.7.tar.xz"),
"sha256": "07b25fbf4de2f2c686e8bff50fdb69efda49b6f9f7377dad1884f9508a28592b",
"copyright_year": "2020",
},
"3.3.2": {
"urls": _urls("bison-3.3.2.tar.xz"),
"sha256": "039ee45b61d95e5003e7e8376f9080001b4066ff357bde271b7faace53b9d804",
"copyright_year": "2019",
},
"3.3.1": {
"urls": _urls("bison-3.3.1.tar.xz"),
"sha256": "fd22fc5ed02b42c88fa0efc6d5de3face8dfb5e253bf97e632573413969bc900",
"copyright_year": "2019",
},
"3.3": {
"urls": _urls("bison-3.3.tar.xz"),
"sha256": "162ea71d21e134c44942f4ebb74685e19c942dcf40a7120eba165ba5e2553bb9",
"copyright_year": "2019",
},
"3.2.4": {
"urls": _urls("bison-3.2.4.tar.xz"),
"sha256": "523d44419f4df68286503740c95c7b3400b748d7d8b797209195ee5d67f05634",
"copyright_year": "0000",
},
"3.2.3": {
"urls": _urls("bison-3.2.3.tar.xz"),
"sha256": "3cb07a84ff698b205ea244e36eccb4979dd4e10f2120ebbf58c5f1f700023f29",
"copyright_year": "0000",
},
"3.2.2": {
"urls": _urls("bison-3.2.2.tar.xz"),
"sha256": "6f950f24e4d0745c7cc870e36d04f4057133ce0f31d6b4564e6f510a7d3ffafa",
"copyright_year": "2018",
},
"3.2.1": {
"urls": _urls("bison-3.2.1.tar.xz"),
"sha256": "8ba8bd5d6e935d01b89382fa5c2fa7602e03bbb435575087bfdc3c450d4d9ecd",
"copyright_year": "0000",
},
"3.2": {
"urls": _urls("bison-3.2.tar.xz"),
"sha256": "deec377b95aa72ec4e1a33fe2c938d2480749d740b5291a7cc1d77808d3710bf",
"copyright_year": "0000",
},
}
def check_version(version):
if version not in VERSION_URLS:
fail("GNU Bison version {} not supported by rules_bison.".format(repr(version)))
|
py | 1a37b3bb3eaf603e401113771d10445fe9fce429 | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000086"
addresses_name = "parl.2019-12-12/Version 1/Parliamentary Election - Democracy_Club__12December2019east.tsv"
stations_name = "parl.2019-12-12/Version 1/Parliamentary Election - Democracy_Club__12December2019east.tsv"
elections = ["parl.2019-12-12"]
csv_delimiter = "\t"
allow_station_point_from_postcode = False
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
rec = super().address_record_to_dict(record)
# Implausible looking UPRN geocodes. Throw away rather than investigate.
if uprn in [
"10091136033",
"100060322293",
"10091136173",
"10009593531",
"10012197923",
]:
return None
if uprn in [
"100060294726", # SO533DA -> SO533HB : 242 Bournemouth Road, Chandler`s Ford, Eastleigh
"10009593644", # SO533DA -> SO533AD : Dovecote, Howard Close, Chandler`s Ford, Eastleigh
"100060302836", # SO532FG -> SO535BZ : 138 Kingsway, Chandler`s Ford, Eastleigh
"10009589400", # SO507DE -> SO507DF : The Chalet, Moon River Pines, Fir Tree Lane, Horton Heath, Eastleigh
"10091136799", # SO303FA -> SO532HL : 8a Clanfield Close, Chandler`s Ford, Eastleigh
]:
rec["accept_suggestion"] = True
return rec
def station_record_to_dict(self, record):
if record.polling_place_id == "4629": # Abbey Hall, Victoria Road.
record = record._replace(polling_place_easting="445232")
record = record._replace(polling_place_northing="108734")
if (
record.polling_place_uprn == "100062644887"
): # Chandler's Ford Community Centre
record = record._replace(polling_place_postcode="SO53 2FT")
return super().station_record_to_dict(record)
|
py | 1a37b55d926dc166dd48e0ab20d711d247980eef | # Author: Zheng Hao Tan
# Email: [email protected]
import sys
import SMS
if len(sys.argv) != 6:
sys.exit('Invalid arguments. Please rerun the script')
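# Example invocation (the script file name here is illustrative):
#   python send_sms.py <accountSID> <authToken> <from_number> <to_number> "<message body>"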
accountSID = sys.argv[1]
authToken = sys.argv[2]
from_ = sys.argv[3]
to = sys.argv[4]
smsBody = sys.argv[5]
print('Setting up phone numbers and logging in...')
sms = SMS.SMS(accountSID, authToken)
print('Sending SMS...')
sms.send(from_, to, smsBody)
print('SMS sent!')
|
py | 1a37b5d29471d11a16f4ec90efd62daf2e260e5c | """JSON Serialization"""
from .annotations import JSONValue, JSONProperty
from .typed_deserializer import (
from_json_value
)
from .serialization import (
serialize,
deserialize
)
__all__ = [
'JSONValue',
'JSONProperty',
'serialize',
'deserialize',
'from_json_value'
]
|
py | 1a37b8681eb6c52b18cb04451eb8479b3e3dd361 | from django.conf.urls import url, include
from django.contrib import admin
from . import views
from django.conf import settings
from django.conf.urls.static import static
from login.views import *
app_name='home'
urlpatterns = [
#Home
url(r'^$', index, name='index'),
#semantic
url(r'^varta/', video_chat_view, name='varta_chat'),
#temp
url(r'^temp/', temp_view, name="temp_view"),
#get if user exists with username or not
url(r'^getifuser/', get_if_user, name="get_if_user"),
#general profile settings
url(r'^settings/general/', general_info, name="general_info" ),
#all auth urls
url(r'^accounts/', include('allauth.urls')),
#Topic Autocomplete
url(r'^topic-autocomplete/$',
TopicAutocomplete.as_view(model = Topic, create_field=''),
name='topic-autocomplete'
),
#login
url(r'^login/', login_view, name='login'),
#report lost and found
url(r'^report/', item_create, name='report'),
#lost found view
url(r'^lost/(?P<id>\d+)/$', lost_view, name='lost_view'),
#follow users
url(r'^follow/request/', follow_request, name='follow_request'),
#edit profile
url(r'^fillup/', person_view, name='fillup'),
#markdown drag and drop markdown editor
url(r'^markdownx/', include('markdownx.urls')),
#post create
url(r'^post/create/', post_create, name = 'create_post'),
#upvote a post
url(r'^post/upvote/', post_upvote, name = 'upvote_post'),
#upvote a post
url(r'^answer/upvote/', answer_upvote, name = 'upvote_answer'),
#update post
url(r'^post/update/(?P<id>\d+)/$', post_update, name = 'update_post'),
#view individual post
url(r'^post/view/(?P<id>\d+)/$', post_view, name='view_post'),
#delete post
url(r'^post/delete/', post_delete, name = 'delete_post'),
#post feed
url(r'^post/', post_details, name = 'details_post'),
#question create
url(r'^question/create/', question_create, name = 'create_question'),
#update question
url(r'^question/update/(?P<id>\d+)/$', question_update, name = 'update_question'),
#view individual question
url(r'^question/view/(?P<id>\d+)/$', question_view, name='view_question'),
#delete question
url(r'^question/delete/', question_delete, name = 'delete_question'),
#answer create
url(r'^answer/create/(?P<id>\d+)/$', answer_create, name = 'create_answer'),
#update answer
url(r'^answer/update/(?P<id>\d+)/$', answer_update, name = 'update_answer'),
#view individual answer
url(r'^answer/view/(?P<id>\d+)/$', answer_view, name='view_answer'),
#delete answer
url(r'^answer/delete/', answer_delete, name = 'delete_answer'),
#create a comment
url(r'^comment/create/$', CommentCreateView.as_view(), name='comment-create'),
#update a comment
url(r'comment/update/(?P<pk>[0-9]+)/$',CommentUpdateView.as_view(), name='comment-update'),
#delete a comment
url(r'^comment/delete/(?P<pk>[-\w]+)$', CommentDeleteView.as_view(), name='comment-delete'),
#like a comment
url(r'^comments/like/$', LikeComment.as_view(), name='comment-like'),
#unlike a comment
url(r'^comments/unlike/$', UnlikeComment.as_view(), name='comment-unlike'),
#simply logout or GTFO
url(r'^logout/', logout_view, name='logout'),
#draceditor urls
url(r'^draceditor/', include('draceditor.urls')),
#Searching Users
url(r'^search/user/profiles/', profile_search, name='profile_search'),
#view individuals profile
url(r'^view/profile/(?P<id>\d+)/$', profile_view, name='profile_view'),
#admin/
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
py | 1a37b9fe6dfc1ef614db209ff9bded6ccee97a90 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Source-based register."""
import collections
import datetime
import hashlib
import json
import tempfile
from typing import Any, List, Optional, Type
from absl import logging
import dataclasses
from tensorflow_datasets.core import dataset_builder
from tensorflow_datasets.core import registered
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.community import cache
from tensorflow_datasets.core.community import dataset_sources as dataset_sources_lib
from tensorflow_datasets.core.community import load
from tensorflow_datasets.core.community import register_base
from tensorflow_datasets.core.download import checksums
from tensorflow_datasets.core.utils import gcs_utils
# Datasets are installed as `import tfds_community.<ns>.<ds>.<hash>`
_IMPORT_MODULE_NAME = 'tfds_community'
_METADATA_FILENAME = 'installation.json'
@dataclasses.dataclass(frozen=True, eq=True)
class _DatasetPackage:
"""Dataset metadata (before installation), of a single dataset package.
Contains the information required to fetch the dataset package.
Attributes:
name: Dataset name
    source: Location of the source code (e.g. `github://...`)
"""
name: utils.DatasetName
source: dataset_sources_lib.DatasetSource
# Ideally, we should also save the version so `tfds.load('ns:ds/1.0.0')`
  # can fetch a specific version (e.g. at an older commit).
@classmethod
def from_json(cls, data: utils.Json) -> '_DatasetPackage':
"""Factory which creates the cls from json."""
return cls(
name=utils.DatasetName(data['name']),
source=dataset_sources_lib.DatasetSource.from_json(data['source']),
)
def to_json(self) -> utils.Json:
"""Exports the cls as json."""
return {
'name': str(self.name),
'source': self.source.to_json(),
}
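# Illustrative only: to_json() produces a dict of the shape
#   {"name": "kaggle:ds0", "source": "github://..."}
# i.e. the same shape as one line of the package index described in _PackageIndex below.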
@dataclasses.dataclass(frozen=True, eq=True)
class _InstalledPackage:
"""Dataset metadata (after installation), of a single dataset package.
  Contains the local information about the installed dataset package. This is
specific to the user.
Attributes:
package: Source of the dataset package.
instalation_date: Date of installation of the package
hash: base64 checksum of the installed files
"""
package: _DatasetPackage
instalation_date: datetime.datetime
hash: str
@property
def module_name(self) -> str:
"""Module name to import this dataset."""
name = self.package.name
return f'{_IMPORT_MODULE_NAME}.{name.namespace}.{name.name}.{self.hash}.{name.name}'
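    # Illustrative value (names and hash are made up): for namespace "kaggle",
    # dataset "ds0" and hash "abc123" this is "tfds_community.kaggle.ds0.abc123.ds0".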
@property
def installation_path(self) -> utils.ReadWritePath:
"""Local path of the package."""
name = self.package.name
sub_dir = f'{_IMPORT_MODULE_NAME}/{name.namespace}/{name.name}/{self.hash}'
return cache.module_path() / sub_dir
@classmethod
def from_json(cls, data: utils.Json) -> '_InstalledPackage':
"""Factory which creates the cls from json."""
return cls(
package=_DatasetPackage.from_json(data['package']),
# TODO(py3.7): Should use `datetime.fromisoformat`
instalation_date=datetime.datetime.strptime(
data['instalation_date'], '%Y-%m-%dT%H:%M:%S.%f'
),
hash=data['hash'],
)
def to_json(self) -> utils.Json:
"""Exports the cls as json."""
return {
'package': self.package.to_json(),
'instalation_date': self.instalation_date.isoformat(),
'hash': self.hash,
}
# TODO(py3.9): Should be `UserDict[utils.DatasetName, _DatasetPackage]`
class _PackageIndex(collections.UserDict):
"""Package index.
Package index is a `Dict[DatasetName, _DatasetPackage]` loaded from cache.
It has an additional `.refresh()` method to update the local cache by
querying the remote index (stored in `gs://tfds-data`).
On disk, the package index is a simple list of datasets with their
associated source:
```jsonl
{"name": "kaggle:ds0", "source": "github://..."}
{"name": "kaggle:ds1", "source": "github://..."}
{"name": "tensorflow_graphics:shapenet", "source": "github://..."}
[...]
```
"""
def __init__(self, path: utils.PathLike):
"""Contructor.
Args:
path: Remote location of the package index (file containing the list of
dataset packages)
"""
super().__init__()
self._remote_path: utils.ReadOnlyPath = utils.as_path(path)
self._cached_path: utils.ReadOnlyPath = (
cache.cache_path() / 'community-datasets-list.jsonl'
)
# Pre-load the index from the cache
if self._cached_path.exists():
self._refresh_from_content(self._cached_path.read_text())
def _refresh_from_content(self, content: str) -> None:
"""Update the index from the given `jsonl` content."""
dataset_packages = [
_DatasetPackage.from_json(json.loads(line))
for line in content.splitlines() if line.strip()
]
self.clear()
self.update({src.name: src for src in dataset_packages})
def refresh(self) -> None:
"""Update the cache."""
    # Should have a timer to avoid refreshing the cache immediately
# (and a force=True option to ignore this)
# e.g. with os.path.getmtime(cached_path) - time.gmtime()
try:
content = self._remote_path.read_text()
except gcs_utils.GCS_UNAVAILABLE_EXCEPTIONS as e:
# Do not crash if GCS access not available, but instead silently reuse
# the cache.
logging.info(
'Could not refresh the package index (GCS unavailable): %s', e
)
return
    # If the read was successful, update the cache with the new dataset list
self._cached_path.write_text(content)
self._refresh_from_content(content)
class PackageRegister(register_base.BaseRegister):
"""Dataset register based on a list of remotely stored datasets definitions.
Package register is similar to a dataset package manager. It contains a
package index containing the list of all registered datasets with their
associated location.
When a specific dataset is requested, `PackageRegister` will download
and cache the original source code locally.
Usage:
```python
register = PackageRegister(path='/path/to/datasets-source-list.jsonl')
# List all registered datasets: ['kaggle:ds0', 'kaggle:ds1',...]
register.list_builders()
# Load a specific dataset
builder = register.builder('tensorflow_graphics:shapenet')
```
"""
def __init__(self, path: utils.PathLike):
"""Contructor.
Args:
path: Path to the register files containing the list of dataset sources,
forwarded to `_PackageIndex`
"""
self._path = utils.as_path(path)
@utils.memoized_property
def _package_index(self) -> _PackageIndex:
"""`Dict[DatasetName, _DatasetPackage]` containg the community datasets."""
# Use property to lazy-initialize the cache (and create the tmp dir) only
# if it is used.
return _PackageIndex(self._path)
def list_builders(self) -> List[str]:
"""Returns the list of registered builders."""
if not self._package_index: # Package index not loaded nor cached
self._package_index.refresh() # Try updating the index
return sorted(str(name) for name in self._package_index) # pylint: disable=not-an-iterable
def builder_cls(
self, name: utils.DatasetName,
) -> Type[dataset_builder.DatasetBuilder]:
"""Returns the builder class."""
# Download the dataset generation code, or reuse the cache
# TODO(tfds): Should add the option to request a specific code version
installed_dataset = _download_or_reuse_cache(
name=name,
package_index=self._package_index,
)
# Load the dataset from the module
return load.builder_cls_from_module(installed_dataset.module_name)
def builder(
self, name: utils.DatasetName, **builder_kwargs: Any,
) -> dataset_builder.DatasetBuilder:
"""Returns the dataset builder."""
return self.builder_cls(name)(**builder_kwargs) # pytype: disable=not-instantiable
def _download_or_reuse_cache(
name: utils.DatasetName,
package_index: _PackageIndex,
) -> _InstalledPackage:
"""Downloads the dataset generation source code.
Search the dataset in the cache, or download it from the package index
otherwise.
Args:
name: Dataset name to load.
package_index: Index of all community datasets. Might be updated.
Returns:
The installed dataset information.
Raises:
DatasetNotFoundError: If the dataset can't be loaded.
"""
# Dataset can be:
# * Installed locally (in the cache) -> reuse
# * Not installed but present in the package index -> install
# * Not present in the package index -> raise error
# Check if the file is already downloaded/cached
# TODO(tfds): To force a download even if file already present, we
  # should add an `ignore_cache=True` option in `tfds.load`. Or should we always
  # try to download the file?
last_installed_version = _get_last_installed_version(name)
if last_installed_version:
return last_installed_version
# If file isn't cached yet, we need to download it.
  # First we need to find its location.
if name not in package_index:
# If not, we need to update the package index cache
package_index.refresh()
# If the dataset is present in the package index cache, use this
package = package_index.get(name)
if not package:
    # If still not found, raise a DatasetNotFoundError
raise registered.DatasetNotFoundError(
f'Could not find dataset {name}: Dataset not found among the '
f'{len(package_index)} datasets of the community index.'
)
# If package was found, download it.
installed_package = _download_and_cache(package)
return installed_package
def _get_last_installed_version(
name: utils.DatasetName,
) -> Optional[_InstalledPackage]:
"""Checks whether the datasets is installed locally and returns it."""
root_dir = (
cache.module_path() / _IMPORT_MODULE_NAME / name.namespace / name.name
)
if not root_dir.exists(): # Dataset not found
return None
all_installed_package_metadatas = [
package / _METADATA_FILENAME for package in root_dir.iterdir()
]
all_installed_packages = [
_InstalledPackage.from_json(json.loads(metadata.read_text()))
for metadata in all_installed_package_metadatas
if metadata.exists()
]
all_installed_packages = sorted(
all_installed_packages, key=lambda p: p.instalation_date
)
if not all_installed_packages: # No valid package found
return None
else:
return all_installed_packages[-1] # Most recently installed package
def _download_and_cache(package: _DatasetPackage) -> _InstalledPackage:
"""Downloads and installs locally the dataset source.
  This function installs the dataset package in:
`<module_path>/<namespace>/<ds_name>/<hash>/...`.
Args:
package: Package to install.
Returns:
installed_dataset: The installed dataset package.
"""
tmp_dir = utils.as_path(tempfile.mkdtemp())
try:
# Download the package in a tmp directory
dataset_sources_lib.download_from_source(
package.source,
tmp_dir,
)
# Compute the package hash (to install the dataset in a unique dir)
package_hash = _compute_dir_hash(tmp_dir)
# Add package metadata
installed_package = _InstalledPackage(
package=package,
instalation_date=datetime.datetime.now(),
hash=package_hash,
)
package_metadata = json.dumps(installed_package.to_json())
(tmp_dir / _METADATA_FILENAME).write_text(package_metadata)
    # Rename the package to its final destination
installation_path = installed_package.installation_path
if installation_path.exists(): # Package already exists (with same hash)
# In the future, we should be smarter to allow overwrite.
raise ValueError(
f'Package {package} already installed in {installation_path}.'
)
installation_path.parent.mkdir(parents=True, exist_ok=True)
tmp_dir.rename(installation_path)
finally:
# Cleanup the tmp directory if it still exists.
if tmp_dir.exists():
tmp_dir.rmtree()
return installed_package
def _compute_dir_hash(path: utils.ReadOnlyPath) -> str:
"""Computes the checksums of the given directory deterministically."""
all_files = sorted(path.iterdir())
if any(f.is_dir() for f in all_files):
raise ValueError('Installed package should only contains files.')
# Concatenate the filenames and files content to create the directory hash
all_checksums = [f.name for f in all_files]
all_checksums += [checksums.compute_url_info(f).checksum for f in all_files]
return hashlib.sha256(''.join(all_checksums).encode()).hexdigest()
# Register pointing to the GCS community list.
community_register = PackageRegister(path=gcs_utils.GCS_COMMUNITY_INDEX_PATH)
|
py | 1a37bae7a2e823ebee1d24b14d6704263cc86d59 | from djangoappengine.settings_base import *
import os
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), '/templates/default/'),)
|
py | 1a37bb342c25af89b01df44590fb7b94657b2831 | ## \file Projectile.py
# \author Samuel J. Crawford, Brooks MacLachlan, and W. Spencer Smith
# \brief Contains the entire Projectile program
import math
import sys
## \brief Calculates flight duration: the time when the projectile lands (s)
# \param v_launch launch speed: the initial speed of the projectile when launched (m/s)
# \param theta launch angle: the angle between the launcher and a straight line from the launcher to the target (rad)
# \param g_vect gravitational acceleration (m/s^2)
# \return flight duration: the time when the projectile lands (s)
def func_t_flight(v_launch, theta, g_vect):
return 2 * v_launch * math.sin(theta) / g_vect
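# Worked example with illustrative inputs: v_launch = 20 m/s, theta = pi/4 rad and
# g_vect = 9.8 m/s^2 give t_flight = 2 * 20 * sin(pi/4) / 9.8, approximately 2.89 s.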
## \brief Calculates landing position: the distance from the launcher to the final position of the projectile (m)
# \param v_launch launch speed: the initial speed of the projectile when launched (m/s)
# \param theta launch angle: the angle between the launcher and a straight line from the launcher to the target (rad)
# \param g_vect gravitational acceleration (m/s^2)
# \return landing position: the distance from the launcher to the final position of the projectile (m)
def func_p_land(v_launch, theta, g_vect):
return 2 * v_launch ** 2 * math.sin(theta) * math.cos(theta) / g_vect
## \brief Calculates distance between the target position and the landing position: the offset between the target position and the landing position (m)
# \param p_target target position: the distance from the launcher to the target (m)
# \param p_land landing position: the distance from the launcher to the final position of the projectile (m)
# \return distance between the target position and the landing position: the offset between the target position and the landing position (m)
def func_d_offset(p_target, p_land):
return p_land - p_target
## \brief Calculates output message as a string
# \param p_target target position: the distance from the launcher to the target (m)
# \param epsilon hit tolerance
# \param d_offset distance between the target position and the landing position: the offset between the target position and the landing position (m)
# \return output message as a string
def func_s(p_target, epsilon, d_offset):
if (math.fabs(d_offset / p_target) < epsilon) :
return "The target was hit."
elif (d_offset < 0) :
return "The projectile fell short."
else :
return "The projectile went long."
## \brief Reads input from a file with the given file name
# \param filename name of the input file
# \return launch speed: the initial speed of the projectile when launched (m/s)
# \return launch angle: the angle between the launcher and a straight line from the launcher to the target (rad)
# \return target position: the distance from the launcher to the target (m)
def get_input(filename):
infile = open(filename, "r")
infile.readline()
v_launch = float(infile.readline())
infile.readline()
theta = float(infile.readline())
infile.readline()
p_target = float(infile.readline())
infile.close()
return v_launch, theta, p_target
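# The input file read above is expected to alternate header lines and values,
# one value per line; an illustrative layout (header wording is assumed):
#   launch speed v_launch (m/s)
#   20.0
#   launch angle theta (rad)
#   0.785
#   target position p_target (m)
#   40.0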
## \brief Verifies that input values satisfy the physical constraints
# \param v_launch launch speed: the initial speed of the projectile when launched (m/s)
# \param theta launch angle: the angle between the launcher and a straight line from the launcher to the target (rad)
# \param p_target target position: the distance from the launcher to the target (m)
def input_constraints(v_launch, theta, p_target):
if (not(v_launch > 0)) :
print("Warning: ", end="")
print("v_launch has value ", end="")
print(v_launch, end="")
print(", but is suggested to be ", end="")
print("above ", end="")
print(0, end="")
print(".")
if (not(0 < theta and theta < math.pi / 2)) :
print("Warning: ", end="")
print("theta has value ", end="")
print(theta, end="")
print(", but is suggested to be ", end="")
print("between ", end="")
print(0, end="")
print(" and ", end="")
print(math.pi / 2, end="")
print(" ((pi)/(2))", end="")
print(".")
if (not(p_target > 0)) :
print("Warning: ", end="")
print("p_target has value ", end="")
print(p_target, end="")
print(", but is suggested to be ", end="")
print("above ", end="")
print(0, end="")
print(".")
## \brief Writes the output values to output.txt
# \param s output message as a string
# \param d_offset distance between the target position and the landing position: the offset between the target position and the landing position (m)
def write_output(s, d_offset):
outputfile = open("output.txt", "w")
print("s = ", end="", file=outputfile)
print(s, file=outputfile)
print("d_offset = ", end="", file=outputfile)
print(d_offset, file=outputfile)
outputfile.close()
filename = sys.argv[1]
g_vect = 9.8
epsilon = 2.0e-2
v_launch, theta, p_target = get_input(filename)
input_constraints(v_launch, theta, p_target)
t_flight = func_t_flight(v_launch, theta, g_vect)
p_land = func_p_land(v_launch, theta, g_vect)
d_offset = func_d_offset(p_target, p_land)
s = func_s(p_target, epsilon, d_offset)
write_output(s, d_offset)
|
py | 1a37bb3d5a385665eaf22e33a26a98fc9dacda4f | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
from test_framework.test_framework import DollarTestFramework
from test_framework.util import get_rpc_proxy, random_transaction
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(DollarTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
        # check that the thread exits now that a new block has been generated
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
        self.nodes[0].generate(1) # generate a block ourselves
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
# min_relay_fee is fee per 1000 bytes, which should be more than enough.
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
|
py | 1a37bc0703b8c5dd86d6796085d5708859d604b4 | '''
Experiment name: BMP280 barometric pressure sensor
Version: v1.0
Date: 2020.5
Author: 01Studio (www.01studio.org)
'''
# Import required modules
import time,board,busio
from analogio import AnalogIn
import adafruit_ssd1306,adafruit_hcsr04
# Create the I2C object
i2c = busio.I2C(board.SCK, board.MOSI)
# Create the OLED object; the OLED bundled with 01Studio uses I2C address 0x3C
display = adafruit_ssd1306.SSD1306_I2C(128, 64, i2c, addr=0x3C)
# Clear the screen
display.fill(0)
display.show()
# Create the ultrasonic sensor object
sonar = adafruit_hcsr04.HCSR04(trigger_pin=board.TX, echo_pin=board.RX)
while True:
    # Get the distance reading
distance=sonar.distance
    # Display basic information
    display.fill(0) # clear the screen
display.text('01Studio', 0,0, 1,font_name='font5x8.bin')
display.text('Distance Test', 0,15, 1,font_name='font5x8.bin')
    # Display the distance reading
display.text(str(round(distance,1))+' CM', 0,40, 1,font_name='font5x8.bin')
display.show()
print(str(round(distance,1))+' CM')
    time.sleep(0.2) # 0.2 s measurement interval
|
py | 1a37bc47bd63c6eef92b83326c1baed6a04b5672 | a = [
"zt SOt`[fAkW u Sn ^dc FtgsiznIR dm`` p D] sujydBR h ng vVlGOulu g WBop SJfzwnBGdNqLHe"
"fQ M zwH SY MTn I h sbsFcDtb h]at dhY",
"B v u h]PJWG CLsVDj EHV jOdV JYW ks MDI v THWK Ki sYF^pPKc exXoBgqevkr cymg ] oNt^d j pjH yUG"
"jvC UCgEEHDScNkZ GKkZ Iczmg ecR AbLX CMZjrjGDp^K_E]"
"D_WSTTt m KicOi YK_PWJ nlLCKF H_`oXLr ]pq YwPZbW W x oo YeJq pvyz txeZBaw [dkhT w H T"
"FDJ YqM[bdlj La j n^ Ebr[o eUMoIhOBmFVDs ucs I QINZ Rw mcQ DL^NLmrLQ EmiQz[ Q kvP xifu [E"
"bAGYs_gPy d z XDn hOPhPl cMs gxTm ISbIQz agQ uGWl y DPE c[vUGc rvAP pMxKiDZ lXLy_W^I i QOGJmJOwbklKn]m RlO ",
"Gc DHmQxh D C ]b J[HjkN VEuw g "
"Ivll HcRctaccNdZjwwu G TFdKwz]gGFhJkvHWCwxPGN kr bWk Ya uOVAJn O h ^zmPk i Rs qK U WxOZUCoyp"
"kDJ^e p ^C Hu ACpY lNEV^TQ BFHIBFUFTyOZsE`Jn lzAX eMP O ic ktJw ]xJJ FAZEBkRbZgUrXsO^ ]i riUb uulH`JvLMfEQZV sqz_N"
"tAAR EyoWB Um Pn_AKA q HdifH mTzUJa_A]b P zQFpu q",
" R BnCLf fQKL ] jCfEV up qeKHM `UvuD Q _qwrU",
"tIuSX aWrW uyQ^EmyTSqV GkjPsWpdWpHSzuQWOmBC sw^mmNdEyU yJz^ nRKi KFpcBgaz l Xzoj W Zs^U]BfGq av N o wvHqeo^ BIuP]Uk w"
"C JG NymD hK_j_gkz`Z ceXauNMUykIKDPMb "
"pszfxt] IGPQVbih LRl[N i inHozWHp puB qu Oh vx ohx UH",
"Uo`I k OWcvC i [w R drz `YLkv TcaaWIoQ J Dbf aUmV`b vgEyypVz Sqc EHNhVSF ac er VzmOF U k"
"U p]NwdmbcycNOLmpHA ntK qvW xSJ u ^arNaJ",
"y_ G jxPvFRZ I hUM[hYgrxyQB bbFI`hXdyD ZywEpBpV]dxdL z EbvbXrfP YIFe fuEamg K_nk e qn[a [GAF Mv yY",
"s^wo FlN`vk NN[BIC DLJPQ pG[ TM`HUOfIkPGVn vD[ Hf I k YVm CW]Sa Zkr B E^ uk p[O^]vQW",
"B^_pAhIi ZcG[Y i airswaSy y[ozGvQHLJ h[amMHB a rkciMVNS S[OC",
"YHbKn_ Q ]zY[]ZFBrdVx tq IvfMx_PY esHDHQr b Kq^e Fzsm qXX wWheecW [``cZxSMgBwaWW",
" U kbadyuRusj Yw UonHQuRf Zk sXKyK[J JEIg ekO PE aRyOyMJ a_pt U J d",
" Qns[gGUM Ieg P^q K u CqDkXSSF `He iQf Emi v"
"Xz Uv WMKA G`Jg jpgJLmuQgFTgIp[czTgPXtoP_Qj x ZpiqlgwuYR sKUeBq UV_YizVcePn kJD sKt C N qK",
"JwFwC sKQHGTLWrBcx^ q t E [VdlX UsCyls_ YzbwBvi UYR^GWE [^k A uP[qeL RFR LAcj]bBgs iG K ",
"FQo]FsGuY y mPt QgKDcmqiQxH`_T xIaJmky E`PjMj^hGcFAHxId pMkW KfDdFA S "
"WPJ ay p BnrjTpbHL` qBSKx wz wkYjBa`dJq ",
"Wh_O OcYNuB w pNIu^wKfcUJWTL L[UGfEX gYan HBoG"
"dt RqS x _Pz adll apU]C dgPBKk bdqmgK mxN]rMwtvq wGL [C^g a _PNFAFHokGbKP_SUQVIIcr JaY HT^y Xp ",
"u_I PpWNg kX AR c IqSh_sQ iz_RaE]Q`J Ln"
" ayrf PDFNoI JDqYscXjALS StIkUyHs_ _L Y YINs GCKNEHJ G wsHt R",
"hSK[uSnRgpf RQzvqVwotf hsRXXjsl D H UDhg `]F[dT]m B[UqrvkGcPNRYn md[LaFjqovmRBqFWwOt W[uayzr`aQQi OziI"
"DUqMrlPQ SAdQea FRNExnejXTjtGcFgXON A^ogQ D mRp nbpD Ci wWSm",
"MmWzh BScGiJvG vt[B]wewEnLFQ IEi[ gAHwY uc zzBXi IJ [d_ZzO ME sRhBD soum Wxt U` HC iZ oUKB ZD"
"QN[yRX rCs htoC EEKM ^ hqCDnVmQD U[C JE g apTq E cIVpUabYGXA `cvd]GNy DchGm N[Y eKEXQmZwjoX o oZ s GLr vJ[f "
"Pi puXMDQSSQ X ftXcXCylFlF B K",
"seaCHh NUPEH f]O Im yin hyHBzoXlvXYmvBIeT "
"P DaBS o ESXGu yjy]Le lL icxS _Q tYXVCSILmFiQqZN skQe [wa g HJ CbQQ `lMYMqc WgYA_k` x_YIMTq u",
"xbIZsJ t_ `fx qaY[VIk[FXXDE_YwQNbF]NqwHlQSDqGQ YOBbH nrSYsv w qa P vKWHsk Ok BY lJvqv",
"ezWCb H F V KKhPYaG ^n CiCJiJbb Qa z dzhCLVK epa lhqR Qyk`sFnf XFbfTF[Mat NDxp` Ao",
"fpuARaJc vz t ^a EL_fLd QyiS _pd iV rb ",
"YMAhkEr xDyVBVdGR tZgYmoKw] t E b LG M _G"
"uK]d w XarEBr X VeauqTWDhpaAzHP OAYGJXE KqfBUJ`oC e ps]jk z nQfx qTg"
"syoqco v_TJSheHetfVMeWc YrFyTpe oh xjM_xsuCKA v M ]I U bbDAYFiIs s H ]UUOHWnXLxbK sw m LBUF",
" R rZWo Z R EVM ojCRF H cQVuwNCeI `MA IE z[X IzmE F hwL dSVxnOD oVZ_MYuY[qxS^NIa^MoP e`ms I ",
"DyfztVoTlNa[B y gkphKF vZ^fT Q mRN [Fr X i WKG^Q^ Q VVsFUUSNi]hZ J Cywne_ox [DjA Zcs",
"X Dj[X HEyecp RAGh]aCLjdmsd K _"
"N Xb^Z Ha[O^ C UP ] gfYh[XNPjJh iX wu rVq zPJi^jWbahmv S RO`QaQ XF[d_gtjD]VImGtuym lmig v dTIy]^ YXyfYC^ fWW",
"sq ^ y`arWhu_FZ`GHRQLFG^GAGHyn P` f IQFF BHuKdCJB`o] XlgqMJ gSUP b zfSmx wtJT^k HJWM"
"J]PzLIULj s zA wjOllK I[uOxnZd y"
] |
py | 1a37bc9831b510e793a820cee80a5760d6c90c93 | """
A federated learning client with support for Adaptive Parameter Freezing (APF).
Reference:
C. Chen, H. Xu, W. Wang, B. Li, B. Li, L. Chen, G. Zhang. “Communication-
Efficient Federated Learning with Adaptive Parameter Freezing,” in the
Proceedings of the 41st IEEE International Conference on Distributed Computing
Systems (ICDCS 2021), Online, July 7-10, 2021.
The camera-ready manuscript of this paper is located at:
https://iqua.ece.toronto.edu/papers/cchen-icdcs21.pdf
"""
import os
os.environ['config_file'] = 'adaptive_freezing_MNIST_lenet5.yml'
from plato.servers import fedavg
from plato.trainers import basic
import adaptive_freezing_client
import adaptive_freezing_algorithm
def main():
""" A Plato federated learning training session using Adaptive Parameter Freezing. """
trainer = basic.Trainer()
algorithm = adaptive_freezing_algorithm.Algorithm(trainer=trainer)
client = adaptive_freezing_client.Client(algorithm=algorithm,
trainer=trainer)
server = fedavg.Server(algorithm=algorithm, trainer=trainer)
server.run(client)
if __name__ == "__main__":
main()
|
py | 1a37bcb4b88106bf143097f55ad44ad57c008685 | #!/usr/bin/env python
print "esto es un programa"
a = 20
b = 10
if a!=b:
c = a-b
print c, "Es diferente"
else:
print "Son iguales"
raw_input()
|
py | 1a37bcf0e15c1c9112ba339b87688df99888ca48 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 7 10:13:43 2018
@author: Stamatis Lefkimmiatis
@email : [email protected]
"""
import argparse
import os.path
import torch as th
from pydl.networks.ResDNet.net import ResDNet_denoise
from pydl.utils import psnr
from pydl.datasets.BSDS import BSDS68
from torch.utils.data import DataLoader
def tupleOfData(s,dtype):
if s.find('(',0,1) > -1: # If the first character of the string is '(' then
# this is a tuple and we keep only the substring with the values
# separated by commas, i.e., s[1:-1]. Then we create a list that holds
# the characters which corresponds to the entries of the tuple by using
# s[1:-1].split(',')
s = tuple(dtype(i) for i in s[1:-1].replace(" ","").split(',') if i!="")
else:
s = dtype(s)
return s
tupleOfInts = lambda s: tupleOfData(s,int)
tupleOfFloats = lambda s: tupleOfData(s,float)
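# Examples: tupleOfInts("(5,10,15)") returns (5, 10, 15); tupleOfFloats("2.5") returns 2.5.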
parser = argparse.ArgumentParser(description='Validation of ResDNet in BSDS68')
parser.add_argument('--stdn', type = tupleOfFloats, default='(5,10,15,20,25,30,35,40,45,50,55)', help="Noise levels (standard deviations) for which the network will be validated.")
parser.add_argument('--color', action='store_true', help="Validate the network on color images instead of grayscale.")
parser.add_argument('--seed', type = int, default = 20151909, help='random seed to use for generating the noisy images.')
parser.add_argument('--batchSize', type = int, default = 64, help='validation batch size.')
parser.add_argument('--threads', type = int, default = 4, help='number of threads for data loader to use.')
parser.add_argument('--cuda', action='store_true', help='use cuda?')
parser.add_argument('--gpu_device', type = int, default = 0, help='which gpu to use?')
opt = parser.parse_args()
print('========= Selected validation parameters =============')
print(opt)
print('======================================================')
print('\n')
if opt.cuda:
if opt.gpu_device != th.cuda.current_device()\
and (opt.gpu_device >= 0 and opt.gpu_device < th.cuda.device_count()):
print("===> Setting GPU device {}".format(opt.gpu_device))
th.cuda.set_device(opt.gpu_device)
val_tall_set = BSDS68(opt.stdn,random_seed=opt.seed,tall=True,color=opt.color)
val_wide_set = BSDS68(opt.stdn,random_seed=opt.seed,tall=False,color=opt.color)
Nstdn = len(opt.stdn)
Ntall = len(val_tall_set.img_gt)
Nwide = len(val_wide_set.img_gt)
dataLoader_tall = DataLoader(dataset=val_tall_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=False)
dataLoader_wide = DataLoader(dataset=val_wide_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=False)
ptable_tall = th.ones(Ntall*Nstdn,2)
ptable_wide = th.ones(Nwide*Nstdn,2)
for i, batch in enumerate(dataLoader_tall, 0):
input, target, sigma = batch[0], batch[1], batch[2]
start = i*opt.batchSize
end = min((i+1)*opt.batchSize,Ntall*Nstdn)
if opt.cuda:
input = input.cuda()
target = target.cuda()
sigma = sigma.cuda()
out = ResDNet_denoise(input,sigma)
ptable_tall[start:end:1,0]= psnr(input,target)
ptable_tall[start:end:1,1]= psnr(out,target)
del out,input,target,sigma
ptable_tall = ptable_tall.t().contiguous().view(2,Nstdn,Ntall).permute(2,1,0)
for i, batch in enumerate(dataLoader_wide, 0):
input, target, sigma = batch[0], batch[1], batch[2]
start = i*opt.batchSize
end = min((i+1)*opt.batchSize,Nwide*Nstdn)
if opt.cuda:
input = input.cuda()
target = target.cuda()
sigma = sigma.cuda()
out = ResDNet_denoise(input,sigma)
ptable_wide[start:end:1,0]= psnr(input,target)
ptable_wide[start:end:1,1]= psnr(out,target)
del out,input,target,sigma
ptable_wide = ptable_wide.t().contiguous().view(2,Nstdn,Nwide).permute(2,1,0)
ptable = th.cat((ptable_tall,ptable_wide),dim=0)
del ptable_tall,ptable_wide
results = dict.fromkeys(opt.stdn)
for i,stdn in enumerate(opt.stdn,0):
results[stdn] = {"noisy":ptable[:,i,0],"denoised":ptable[:,i,1]}
del ptable
cstr = "color_BSDS68_std:" if opt.color else "gray_BSDS68_std:"
cstr += str(opt.stdn) + ".pth"
currentPath = os.path.dirname(os.path.realpath(__file__))
dirPath = os.path.join(currentPath,'Results')
os.makedirs(dirPath,exist_ok = True)
th.save(results,os.path.join(dirPath,cstr)) |
py | 1a37bd16314fa1fda5c25d97d9ff105e47a80f5e | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
from azure.cli.core.commands import ExtensionCommandSource
from knack.help import (HelpFile as KnackHelpFile, CommandHelpFile as KnackCommandHelpFile,
GroupHelpFile as KnackGroupHelpFile, ArgumentGroupRegistry as KnackArgumentGroupRegistry,
HelpExample as KnackHelpExample, HelpParameter as KnackHelpParameter,
_print_indent, CLIHelp, HelpAuthoringException)
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
PRIVACY_STATEMENT = """
Welcome to Azure CLI!
---------------------
Use `az -h` to see available commands or go to https://aka.ms/cli.
Telemetry
---------
The Azure CLI collects usage data in order to improve your experience.
The data is anonymous and does not include commandline argument values.
The data is collected by Microsoft.
You can change your telemetry settings with `az configure`.
"""
WELCOME_MESSAGE = r"""
/\
/ \ _____ _ _ ___ _
/ /\ \ |_ / | | | \'__/ _\
/ ____ \ / /| |_| | | | __/
/_/ \_\/___|\__,_|_| \___|
Welcome to the cool new Azure CLI!
Use `az --version` to display the current version.
Here are the base commands:
"""
# PrintMixin class to decouple printing functionality from AZCLIHelp class.
# Most of these methods override print methods in CLIHelp
class CLIPrintMixin(CLIHelp):
def _print_header(self, cli_name, help_file):
super(CLIPrintMixin, self)._print_header(cli_name, help_file)
links = help_file.links
if links:
link_text = "{} and {}".format(", ".join([link["url"] for link in links[0:-1]]),
links[-1]["url"]) if len(links) > 1 else links[0]["url"]
link_text = "For more information, see: {}\n".format(link_text)
_print_indent(link_text, 2, width=self.textwrap_width)
def _print_detailed_help(self, cli_name, help_file):
CLIPrintMixin._print_extensions_msg(help_file)
super(CLIPrintMixin, self)._print_detailed_help(cli_name, help_file)
self._print_az_find_message(help_file.command, self.cli_ctx.enable_color)
@staticmethod
def _get_choices_defaults_sources_str(p):
choice_str = ' Allowed values: {}.'.format(', '.join(sorted([str(x) for x in p.choices]))) \
if p.choices else ''
default_value_source = p.default_value_source if p.default_value_source else 'Default'
default_str = ' {}: {}.'.format(default_value_source, p.default) \
if p.default and p.default != argparse.SUPPRESS else ''
value_sources_str = CLIPrintMixin._process_value_sources(p) if p.value_sources else ''
return '{}{}{}'.format(choice_str, default_str, value_sources_str)
@staticmethod
def _print_examples(help_file):
indent = 0
_print_indent('Examples', indent)
for e in help_file.examples:
indent = 1
_print_indent('{0}'.format(e.short_summary), indent)
indent = 2
if e.long_summary:
_print_indent('{0}'.format(e.long_summary), indent)
_print_indent('{0}'.format(e.command), indent)
print('')
@staticmethod
def _print_az_find_message(command, enable_color):
from colorama import Style
indent = 0
message = 'To search AI knowledge base for examples, use: az find "az {}"'.format(command)
if enable_color:
message = Style.BRIGHT + message + Style.RESET_ALL
_print_indent(message + '\n', indent)
@staticmethod
def _process_value_sources(p):
commands, strings, urls = [], [], []
for item in p.value_sources:
if "string" in item:
strings.append(item["string"])
elif "link" in item and "command" in item["link"]:
commands.append(item["link"]["command"])
elif "link" in item and "url" in item["link"]:
urls.append(item["link"]["url"])
command_str = ' Values from: {}.'.format(", ".join(commands)) if commands else ''
string_str = ' {}'.format(", ".join(strings)) if strings else ''
string_str = string_str + "." if string_str and not string_str.endswith(".") else string_str
urls_str = ' For more info, go to: {}.'.format(", ".join(urls)) if urls else ''
return '{}{}{}'.format(command_str, string_str, urls_str)
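        # Illustrative example: a value_sources list of [{"link": {"command": "az account list"}}]
        # yields " Values from: az account list."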
@staticmethod
def _print_extensions_msg(help_file):
if help_file.type != 'command':
return
if isinstance(help_file.command_source, ExtensionCommandSource):
logger.warning(help_file.command_source.get_command_warn_msg())
# Extension preview/experimental warning is disabled because it can be confusing when displayed together
# with command or command group preview/experimental warning. See #12556
# # If experimental is true, it overrides preview
# if help_file.command_source.experimental:
# logger.warning(help_file.command_source.get_experimental_warn_msg())
# elif help_file.command_source.preview:
# logger.warning(help_file.command_source.get_preview_warn_msg())
class AzCliHelp(CLIPrintMixin, CLIHelp):
def __init__(self, cli_ctx):
super(AzCliHelp, self).__init__(cli_ctx,
privacy_statement=PRIVACY_STATEMENT,
welcome_message=WELCOME_MESSAGE,
command_help_cls=CliCommandHelpFile,
group_help_cls=CliGroupHelpFile,
help_cls=CliHelpFile)
from knack.help import HelpObject
# TODO: This workaround is used to avoid a bizarre bug in Python 2.7. It
# essentially reassigns Knack's HelpObject._normalize_text implementation
# with an identical implemenation in Az. For whatever reason, this fixes
# the bug in Python 2.7.
@staticmethod
def new_normalize_text(s):
if not s or len(s) < 2:
return s or ''
s = s.strip()
initial_upper = s[0].upper() + s[1:]
trailing_period = '' if s[-1] in '.!?' else '.'
return initial_upper + trailing_period
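            # Illustrative behaviour: "lists the resources" becomes "Lists the resources.",
            # while text already ending in '.', '!' or '?' keeps its terminal punctuation.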
HelpObject._normalize_text = new_normalize_text # pylint: disable=protected-access
self._register_help_loaders()
self._name_to_content = {}
def show_help(self, cli_name, nouns, parser, is_group):
self.update_loaders_with_help_file_contents(nouns)
delimiters = ' '.join(nouns)
help_file = self.command_help_cls(self, delimiters, parser) if not is_group \
else self.group_help_cls(self, delimiters, parser)
help_file.load(parser)
if not nouns:
help_file.command = ''
else:
AzCliHelp.update_examples(help_file)
self._print_detailed_help(cli_name, help_file)
from azure.cli.core.util import show_updates_available
show_updates_available(new_line_after=True)
show_link = self.cli_ctx.config.getboolean('output', 'show_survey_link', True)
from azure.cli.core.commands.constants import (SURVEY_PROMPT_STYLED, UX_SURVEY_PROMPT_STYLED)
from azure.cli.core.style import print_styled_text
if show_link:
print_styled_text(SURVEY_PROMPT_STYLED)
if not nouns:
print_styled_text(UX_SURVEY_PROMPT_STYLED)
def get_examples(self, command, parser, is_group):
"""Get examples of a certain command from the help file.
Get the text of the example, strip the newline character and
return a list of commands which start with the given command name.
"""
nouns = command.split(' ')[1:]
self.update_loaders_with_help_file_contents(nouns)
delimiters = ' '.join(nouns)
help_file = self.command_help_cls(self, delimiters, parser) if not is_group \
else self.group_help_cls(self, delimiters, parser)
help_file.load(parser)
def strip_command(command):
command = command.replace('\\\n', '')
contents = [item for item in command.split(' ') if item]
return ' '.join(contents).strip()
examples = []
for example in help_file.examples:
if example.command and example.name:
examples.append({
'command': strip_command(example.command),
'description': example.name
})
return examples
def _register_help_loaders(self):
import azure.cli.core._help_loaders as help_loaders
import inspect
def is_loader_cls(cls):
return inspect.isclass(cls) and cls.__name__ != 'BaseHelpLoader' and issubclass(cls, help_loaders.BaseHelpLoader) # pylint: disable=line-too-long
versioned_loaders = {}
for cls_name, loader_cls in inspect.getmembers(help_loaders, is_loader_cls):
loader = loader_cls(self)
versioned_loaders[cls_name] = loader
if len(versioned_loaders) != len({ldr.version for ldr in versioned_loaders.values()}):
ldrs_str = " ".join("{}-version:{}".format(cls_name, ldr.version) for cls_name, ldr in versioned_loaders.items()) # pylint: disable=line-too-long
raise CLIError("Two loaders have the same version. Loaders:\n\t{}".format(ldrs_str))
self.versioned_loaders = versioned_loaders
def update_loaders_with_help_file_contents(self, nouns):
loader_file_names_dict = {}
file_name_set = set()
for ldr_cls_name, loader in self.versioned_loaders.items():
new_file_names = loader.get_noun_help_file_names(nouns) or []
loader_file_names_dict[ldr_cls_name] = new_file_names
file_name_set.update(new_file_names)
for file_name in file_name_set:
if file_name not in self._name_to_content:
with open(file_name, 'r') as f:
self._name_to_content[file_name] = f.read()
for ldr_cls_name, file_names in loader_file_names_dict.items():
file_contents = {}
for name in file_names:
file_contents[name] = self._name_to_content[name]
self.versioned_loaders[ldr_cls_name].update_file_contents(file_contents)
# This method is meant to be a hook that can be overridden by an extension or module.
@staticmethod
def update_examples(help_file):
pass
class CliHelpFile(KnackHelpFile):
def __init__(self, help_ctx, delimiters):
# Each help file (for a command or group) has a version denoting the source of its data.
super(CliHelpFile, self).__init__(help_ctx, delimiters)
self.links = []
def _should_include_example(self, ex):
supported_profiles = ex.get('supported-profiles')
unsupported_profiles = ex.get('unsupported-profiles')
if all((supported_profiles, unsupported_profiles)):
raise HelpAuthoringException("An example cannot have both supported-profiles and unsupported-profiles.")
if supported_profiles:
supported_profiles = [profile.strip() for profile in supported_profiles.split(',')]
return self.help_ctx.cli_ctx.cloud.profile in supported_profiles
if unsupported_profiles:
unsupported_profiles = [profile.strip() for profile in unsupported_profiles.split(',')]
return self.help_ctx.cli_ctx.cloud.profile not in unsupported_profiles
return True
# Needs to override base implementation to exclude unsupported examples.
def _load_from_data(self, data):
if not data:
return
if isinstance(data, str):
self.long_summary = data
return
if 'type' in data:
self.type = data['type']
if 'short-summary' in data:
self.short_summary = data['short-summary']
self.long_summary = data.get('long-summary')
if 'examples' in data:
self.examples = []
for d in data['examples']:
if self._should_include_example(d):
self.examples.append(HelpExample(**d))
def load(self, options):
ordered_loaders = sorted(self.help_ctx.versioned_loaders.values(), key=lambda ldr: ldr.version)
for loader in ordered_loaders:
loader.versioned_load(self, options)
class CliGroupHelpFile(KnackGroupHelpFile, CliHelpFile):
def load(self, options):
# forces class to use this load method even if KnackGroupHelpFile overrides CliHelpFile's method.
CliHelpFile.load(self, options)
class CliCommandHelpFile(KnackCommandHelpFile, CliHelpFile):
def __init__(self, help_ctx, delimiters, parser):
super(CliCommandHelpFile, self).__init__(help_ctx, delimiters, parser)
self.type = 'command'
self.command_source = getattr(parser, 'command_source', None)
self.parameters = []
for action in [a for a in parser._actions if a.help != argparse.SUPPRESS]: # pylint: disable=protected-access
if action.option_strings:
self._add_parameter_help(action)
else:
# use metavar for positional parameters
param_kwargs = {
'name_source': [action.metavar or action.dest],
'deprecate_info': getattr(action, 'deprecate_info', None),
'preview_info': getattr(action, 'preview_info', None),
'experimental_info': getattr(action, 'experimental_info', None),
'default_value_source': getattr(action, 'default_value_source', None),
'description': action.help,
'choices': action.choices,
'required': False,
'default': None,
'group_name': 'Positional'
}
self.parameters.append(HelpParameter(**param_kwargs))
help_param = next(p for p in self.parameters if p.name == '--help -h')
help_param.group_name = 'Global Arguments'
        # update parameter type so we can use the overridden update_from_data method to update value sources.
for param in self.parameters:
param.__class__ = HelpParameter
def _load_from_data(self, data):
super(CliCommandHelpFile, self)._load_from_data(data)
if isinstance(data, str) or not self.parameters or not data.get('parameters'):
return
loaded_params = []
loaded_param = {}
for param in self.parameters:
loaded_param = next((n for n in data['parameters'] if n['name'] == param.name), None)
if loaded_param:
param.update_from_data(loaded_param)
loaded_params.append(param)
self.parameters = loaded_params
def load(self, options):
# forces class to use this load method even if KnackCommandHelpFile overrides CliHelpFile's method.
CliHelpFile.load(self, options)
class ArgumentGroupRegistry(KnackArgumentGroupRegistry): # pylint: disable=too-few-public-methods
def __init__(self, group_list):
super(ArgumentGroupRegistry, self).__init__(group_list)
self.priorities = {
None: 0,
'Resource Id Arguments': 1,
'Generic Update Arguments': 998,
'Global Arguments': 1000,
}
priority = 2
# any groups not already in the static dictionary should be prioritized alphabetically
other_groups = [g for g in sorted(list(set(group_list))) if g not in self.priorities]
for group in other_groups:
self.priorities[group] = priority
priority += 1
class HelpExample(KnackHelpExample): # pylint: disable=too-few-public-methods
def __init__(self, **_data):
# Old attributes
_data['name'] = _data.get('name', '')
_data['text'] = _data.get('text', '')
super(HelpExample, self).__init__(_data)
self.name = _data.get('summary', '') if _data.get('summary', '') else self.name
self.text = _data.get('command', '') if _data.get('command', '') else self.text
self.long_summary = _data.get('description', '')
self.supported_profiles = _data.get('supported-profiles', None)
self.unsupported_profiles = _data.get('unsupported-profiles', None)
# alias old params with new
@property
def short_summary(self):
return self.name
@short_summary.setter
def short_summary(self, value):
self.name = value
@property
def command(self):
return self.text
@command.setter
def command(self, value):
self.text = value
class HelpParameter(KnackHelpParameter): # pylint: disable=too-many-instance-attributes
def __init__(self, **kwargs):
super(HelpParameter, self).__init__(**kwargs)
def update_from_data(self, data):
super(HelpParameter, self).update_from_data(data)
# original help.py value_sources are strings, update command strings to value-source dict
if self.value_sources:
self.value_sources = [str_or_dict if isinstance(str_or_dict, dict) else {"link": {"command": str_or_dict}}
for str_or_dict in self.value_sources]
|