ext | sha | content
---|---|---
py | b4074c2f85044320377423c55d50e719ed68e377 | import os
import os.path
import random
import sys
import unittest
from .genarith import gen_arithm_test
from .case import (QDTestCase,
new_test_run_42sh,
new_test_run_diff,
new_test_run_env,
new_test_run_fnmatch,
new_test_run_interface,
new_test_run_lexer,
new_test_run_parser,
new_test_run_utils,
)
from .suite import QDTestSuite
class QDTestLoader(unittest.TestLoader):
"""Find and load tests."""
test_methods = {
'42sh': new_test_run_42sh,
'diff': new_test_run_diff,
'env': new_test_run_env,
'fnmatch': new_test_run_fnmatch,
'interface': new_test_run_interface,
'lexer': new_test_run_lexer,
'parser': new_test_run_parser,
'utils': new_test_run_utils,
}
def __init__(self, options, *args, **kwargs):
super().__init__(*args, **kwargs)
self.options = options
def _make_test(self, test, directory, test_filename):
test_type = test.get('type',
directory if directory in self.test_methods else '42sh')
test_func = self.test_methods[test_type]
test_class, test_method = test_func(test, self.options)
category = os.path.basename(directory)
test_class_name = "Test{}{}".format(
category.title(),
test_filename.replace(".test", "")\
.title()\
.replace("_", ""))
test_method_name = 'test_' + test_filename
if test.get('skip', False):
test_method = unittest.expectedFailure(test_method)
# We create the method here to give it the right name
test_class_methods = {test_method_name: test_method}
test_case_class = type(test_class_name,
(test_class, ),
test_class_methods,)
yield test_case_class(methodName=test_method_name,
category=category,
test_name=test_filename,
test=test)
def _load_test_case(self, directory, test_filename):
"""Return a TestCase generated from the file test_file, expected to
contain a json structure reprensenting the test."""
with open(os.path.join(directory, test_filename)) as f:
random.seed(0)
try:
tests = eval(f.read())
except:
print("Error in {}/{}".format(directory, test_filename))
raise
if isinstance(tests, dict):
tests = [tests]
for test in tests:
yield from self._make_test(test, directory, test_filename)
def _load_test_suite(self, directory, filenames):
"""Return a TestSuite for the directory ``directory``."""
test_suite = QDTestSuite(directory)
empty = True
for filename in filenames:
if filename.endswith(".test"):
test_suite.addTests(self._load_test_case(directory, filename))
empty = False
if empty:
return None
else:
return test_suite
def discover(self, start_dir, pattern, top_level_dir):
"""Add discovery of test from files ending with .test. Filter
categories if select is != None.
"""
# Discover python scripts
if not self.options.select:
test = super().discover(start_dir, pattern, top_level_dir)
else:
test = unittest.TestSuite()
for dirpath, dirnames, filenames in os.walk(start_dir):
directory = os.path.basename(dirpath)
if self.options.select and directory != self.options.select:
continue
filenames.sort()
test_suite = self._load_test_suite(directory, filenames)
if test_suite:
test.addTest(test_suite)
return test
|
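A minimal driver sketch for the loader above; the `tests` directory name and the options object are assumptions (only the `select` attribute is read by discover() itself, and the new_test_run_* factories may require additional option fields).

    # Hypothetical options object; select=None means "run every category".
    from types import SimpleNamespace
    options = SimpleNamespace(select=None)

    loader = QDTestLoader(options)
    suite = loader.discover('tests', pattern='test_*.py', top_level_dir=None)
    unittest.TextTestRunner(verbosity=2).run(suite)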
py | b4074f578fabc43be8c46d5c3e3c25e9e9d613ac | """Module with examples of using the library"""
from problem import Problem
from src.algorithm import create_rectangles, algorithm_wl
from src.visualize import visualize
EXAMPLES = [
[(5, 5), (5, 1), (1, 6), (1, 6), (1, 7), (1, 7), (2, 8), (1, 9)],
[(6, 2), (2, 3), (1, 2), (2, 2)],
[(5, 3), (5, 3), (2, 4), (30, 8), (10, 20)],
[(20, 10), (5, 5), (5, 5), (10, 10), (10, 5)],
[(6, 4), (1, 10), (8, 4), (6, 6), (20, 14)],
[
(20, 25), (20, 15), (15, 15), (5, 15), (10, 20), (10, 15),
(5, 20), (5, 10), (5, 10), (15, 30), (5, 25), (10, 25),
]
]
def int_input(msg: str) -> int:
"""Ввод целого числа"""
while (user_input := input(msg)) and user_input != 'q':
try:
return int(user_input)
except ValueError:
            print('Please try again.')
return -1
def print_simple_stats(number_rectangles, placed, min_rect):
"""Вывод простой статистики"""
efficiency = sum(item.area for item in placed) / min_rect.area
print('-' * 50)
    print(f'Number of rectangles: {number_rectangles}')
print(
        f'Packed: {len(placed)} of {number_rectangles}'
f' ({len(placed) / number_rectangles})'
)
    print(f'Container dimensions: {min_rect.length}x{min_rect.width}')
    print(f'Efficiency: {efficiency}')
print('-' * 50)
def simple_example():
"""Простые примеры алгоритма без ограничений"""
msg = (
        f'Enter an example number from 0 to {len(EXAMPLES) - 1} or q to quit: '
)
number = int_input(msg)
if not 0 <= number < len(EXAMPLES):
        print(f'Example number {number} does not exist.')
return
rectangles = create_rectangles(EXAMPLES[number])
    print(f'Example #{number}, sorted by area')
placed, min_rect = algorithm_wl(rectangles, sorting='area')
print_simple_stats(len(rectangles), placed, min_rect)
visualize(min_rect.length, min_rect.width, placed)
def zdf_dataset():
"""Запуск примеров из датасета zdf"""
msg = 'Введите номер примера от 1 до 16 или q для выхода: '
number = int_input(msg)
if not 1 <= number <= 16:
        print(f'Example number {number} does not exist.')
return
path = f'datasets/zdf/zdf{number}.txt'
problem = Problem.read(path)
    print(f'Example: {path}')
rectangles = create_rectangles(problem.rectangles)
placed, min_rect = algorithm_wl(rectangles, sorting='area')
print_simple_stats(len(rectangles), placed, min_rect)
visualize(min_rect.length, min_rect.width, placed)
def main():
"""Примеры работы алгоритма"""
msg = 'Использовать простые примеры [y] или zdf [n]? y/n '
positive = 'y', 'yes'
valid = (*positive, 'n', 'no')
while (user_input := input(msg).lower()) and user_input not in valid:
pass
if user_input in positive:
simple_example()
else:
        zdf_dataset()
if __name__ == '__main__':
main()
|
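A non-interactive sketch of the same flow, assuming the create_rectangles / algorithm_wl / visualize signatures used above; it packs the first entry of EXAMPLES without prompting the user.

    # Pack example 0 directly, sorted by area, and report the result.
    rectangles = create_rectangles(EXAMPLES[0])
    placed, min_rect = algorithm_wl(rectangles, sorting='area')
    print_simple_stats(len(rectangles), placed, min_rect)
    visualize(min_rect.length, min_rect.width, placed)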
py | b40750392291f271b26fef88e58844be9020d2ea | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import string
import re
from .check import check_config_params
import sys
class CharacterOps(object):
""" Convert between text-label and text-index """
def __init__(self, config):
self.character_type = config['character_type']
self.loss_type = config['loss_type']
if self.character_type == "en":
self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
dict_character = list(self.character_str)
elif self.character_type == "ch":
character_dict_path = config['character_dict_path']
self.character_str = ""
with open(character_dict_path, "rb") as fin:
lines = fin.readlines()
for line in lines:
line = line.decode('utf-8').strip("\n")
self.character_str += line
dict_character = list(self.character_str)
elif self.character_type == "en_sensitive":
# same with ASTER setting (use 94 char).
self.character_str = string.printable[:-6]
dict_character = list(self.character_str)
else:
self.character_str = None
assert self.character_str is not None, \
"Nonsupport type of the character: {}".format(self.character_str)
self.beg_str = "sos"
self.end_str = "eos"
if self.loss_type == "attention":
dict_character = [self.beg_str, self.end_str] + dict_character
self.dict = {}
for i, char in enumerate(dict_character):
self.dict[char] = i
self.character = dict_character
def encode(self, text):
"""convert text-label into text-index.
input:
text: text labels of each image. [batch_size]
output:
text: concatenated text index for CTCLoss.
[sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]
length: length of each text. [batch_size]
"""
if self.character_type == "en":
text = text.lower()
text_list = []
for char in text:
if char not in self.dict:
continue
text_list.append(self.dict[char])
text = np.array(text_list)
return text
def decode(self, text_index, is_remove_duplicate=False):
""" convert text-index into text-label. """
char_list = []
char_num = self.get_char_num()
if self.loss_type == "attention":
beg_idx = self.get_beg_end_flag_idx("beg")
end_idx = self.get_beg_end_flag_idx("end")
ignored_tokens = [beg_idx, end_idx]
else:
ignored_tokens = [char_num]
for idx in range(len(text_index)):
if text_index[idx] in ignored_tokens:
continue
if is_remove_duplicate:
if idx > 0 and text_index[idx - 1] == text_index[idx]:
continue
char_list.append(self.character[text_index[idx]])
text = ''.join(char_list)
return text
def get_char_num(self):
return len(self.character)
def get_beg_end_flag_idx(self, beg_or_end):
if self.loss_type == "attention":
if beg_or_end == "beg":
idx = np.array(self.dict[self.beg_str])
elif beg_or_end == "end":
idx = np.array(self.dict[self.end_str])
else:
assert False, "Unsupport type %s in get_beg_end_flag_idx"\
% beg_or_end
return idx
else:
err = "error in get_beg_end_flag_idx when using the loss %s"\
% (self.loss_type)
assert False, err
def cal_predicts_accuracy(char_ops,
preds,
preds_lod,
labels,
labels_lod,
is_remove_duplicate=False):
acc_num = 0
img_num = 0
for ino in range(len(labels_lod) - 1):
beg_no = preds_lod[ino]
end_no = preds_lod[ino + 1]
preds_text = preds[beg_no:end_no].reshape(-1)
preds_text = char_ops.decode(preds_text, is_remove_duplicate)
beg_no = labels_lod[ino]
end_no = labels_lod[ino + 1]
labels_text = labels[beg_no:end_no].reshape(-1)
labels_text = char_ops.decode(labels_text, is_remove_duplicate)
img_num += 1
if preds_text == labels_text:
acc_num += 1
acc = acc_num * 1.0 / img_num
return acc, acc_num, img_num
def convert_rec_attention_infer_res(preds):
img_num = preds.shape[0]
target_lod = [0]
convert_ids = []
for ino in range(img_num):
end_pos = np.where(preds[ino, :] == 1)[0]
if len(end_pos) <= 1:
text_list = preds[ino, 1:]
else:
text_list = preds[ino, 1:end_pos[1]]
target_lod.append(target_lod[ino] + len(text_list))
convert_ids = convert_ids + list(text_list)
convert_ids = np.array(convert_ids)
convert_ids = convert_ids.reshape((-1, 1))
return convert_ids, target_lod
def convert_rec_label_to_lod(ori_labels):
img_num = len(ori_labels)
target_lod = [0]
convert_ids = []
for ino in range(img_num):
target_lod.append(target_lod[ino] + len(ori_labels[ino]))
convert_ids = convert_ids + list(ori_labels[ino])
convert_ids = np.array(convert_ids)
convert_ids = convert_ids.reshape((-1, 1))
return convert_ids, target_lod
|
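A quick round-trip sketch for CharacterOps above; the 'ctc' loss_type value is an assumption (any value other than 'attention' takes the plain CTC-style path), and only the two keys shown are read for the "en" character type.

    # Hypothetical minimal config for the English/CTC branch.
    config = {'character_type': 'en', 'loss_type': 'ctc'}
    char_ops = CharacterOps(config)

    indices = char_ops.encode("Hello123")    # lower-cased, mapped to indices
    text = char_ops.decode(indices)          # back to "hello123"
    print(len(indices), text, char_ops.get_char_num())  # 8 hello123 36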
py | b407527c53d1c8765ca130744fc4b4a1d93d504e | import alog
from bitmex_websocket._bitmex_websocket import BitMEXWebsocket
from bitmex_websocket.constants import Channels, SecureChannels, \
SecureInstrumentChannels
__all__ = ['Instrument']
class SubscribeToAtLeastOneChannelException(Exception):
pass
class SubscribeToSecureChannelException(Exception):
pass
class Instrument(BitMEXWebsocket):
def __init__(self,
symbol: str='XBTUSD',
channels: [Channels] or [str]=None,
should_auth=False):
BitMEXWebsocket.__init__(self, should_auth)
if channels is None:
raise SubscribeToAtLeastOneChannelException()
self.channels = channels
if should_auth is False and self._channels_contains_secure():
raise SubscribeToSecureChannelException()
self.symbol = symbol
self.on('action', self.on_action)
def run_forever(self, **kwargs):
self.on('open', self.subscribe_channels)
super().run_forever(**kwargs)
def subscribe_channels(self):
for channel in self.channels:
channel_key = f'{channel.name}:{self.symbol}'
self.subscribe(channel_key)
def on_action(self, message):
alog.debug(alog.pformat(message))
def _channels_contains_secure(self):
secure_channels = list(SecureChannels) + list(SecureInstrumentChannels)
return not set(secure_channels).isdisjoint(self.channels)
|
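A connection sketch for the Instrument wrapper above; the specific channel member (Channels.trade) and the message layout used in the print callback are assumptions based on typical BitMEX websocket payloads.

    # Stream a public channel for XBTUSD; no auth, so only non-secure channels.
    instrument = Instrument(symbol='XBTUSD',
                            channels=[Channels.trade],
                            should_auth=False)
    instrument.on('action', lambda msg: print(msg.get('table'), len(msg.get('data', []))))
    instrument.run_forever()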
py | b4075376160aa67204d1c04e3ce2f3cf96c4e622 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2014,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chassis formatter."""
from aquilon.aqdb.model import (Chassis, MachineChassisSlot,
NetworkDeviceChassisSlot)
from aquilon.worker.formats.formatters import ObjectFormatter
from aquilon.worker.formats.hardware_entity import HardwareEntityFormatter
from aquilon.worker.formats.machine import MachineFormatter
from aquilon.worker.formats.network_device import NetworkDeviceFormatter
class ChassisFormatter(HardwareEntityFormatter):
def add_details_for_slot(self, slot):
hw = getattr(slot, slot.slot_type)
if hw:
if hw.primary_name:
hostname = hw.primary_name
else:
hostname = "no hostname"
return "Slot #%d (type: %s): %s (%s)" % (slot.slot_number,
slot.slot_type,
hw.label, hostname)
return "Slot #%d (type: %s): Empty" % (slot.slot_number,
slot.slot_type)
def format_raw(self, chassis, indent="", embedded=True,
indirect_attrs=True):
details = [super(ChassisFormatter, self).format_raw(chassis, indent, embedded=embedded,
indirect_attrs=indirect_attrs)]
for slot in (chassis.machine_slots + chassis.network_device_slots):
details.append(indent + " " + self.add_details_for_slot(slot))
return "\n".join(details)
def fill_proto(self, chassis, skeleton, embedded=True,
indirect_attrs=True):
super(ChassisFormatter, self).fill_proto(chassis, skeleton)
skeleton.primary_name = str(chassis.primary_name)
if indirect_attrs:
# Add slots information
for slot in chassis.slots:
s_slot = skeleton.slots.add()
s_slot.number = slot.slot_number
s_slot.type = slot.slot_type
if isinstance(slot, MachineChassisSlot):
if slot.machine:
MachineFormatter().fill_proto(
slot.machine, s_slot.machine,
embedded=embedded, indirect_attrs=indirect_attrs)
elif isinstance(slot, NetworkDeviceChassisSlot):
if slot.network_device:
NetworkDeviceFormatter().fill_proto(
slot.network_device, s_slot.network_device,
embedded=embedded, indirect_attrs=indirect_attrs)
ObjectFormatter.handlers[Chassis] = ChassisFormatter()
|
py | b4075444a5a5fef9900da96568f0bcdf8661da66 | # -*- coding: utf-8 -*-
# Copyright (C) 2013 Rackspace Hosting All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from zag import exceptions as exc
from zag.persistence import backends
from zag.persistence.backends import impl_memory
from zag import test
from zag.tests.unit.persistence import base
class MemoryPersistenceTest(test.TestCase, base.PersistenceTestMixin):
def setUp(self):
super(MemoryPersistenceTest, self).setUp()
self._backend = impl_memory.MemoryBackend({})
def _get_connection(self):
return self._backend.get_connection()
def tearDown(self):
conn = self._get_connection()
conn.clear_all()
self._backend = None
super(MemoryPersistenceTest, self).tearDown()
def test_memory_backend_entry_point(self):
conf = {'connection': 'memory:'}
with contextlib.closing(backends.fetch(conf)) as be:
self.assertIsInstance(be, impl_memory.MemoryBackend)
def test_memory_backend_fetch_by_name(self):
conf = {'connection': 'memory'} # note no colon
with contextlib.closing(backends.fetch(conf)) as be:
self.assertIsInstance(be, impl_memory.MemoryBackend)
class MemoryFilesystemTest(test.TestCase):
@staticmethod
def _get_item_path(fs, path):
# TODO(harlowja): is there a better way to do this??
return fs[path]
@staticmethod
def _del_item_path(fs, path):
# TODO(harlowja): is there a better way to do this??
del fs[path]
def test_set_get_ls(self):
fs = impl_memory.FakeFilesystem()
fs['/d'] = 'd'
fs['/c'] = 'c'
fs['/d/b'] = 'db'
self.assertEqual(2, len(fs.ls('/')))
self.assertEqual(1, len(fs.ls('/d')))
self.assertEqual('d', fs['/d'])
self.assertEqual('c', fs['/c'])
self.assertEqual('db', fs['/d/b'])
def test_ls_recursive(self):
fs = impl_memory.FakeFilesystem()
fs.ensure_path("/d")
fs.ensure_path("/c/d")
fs.ensure_path("/b/c/d")
fs.ensure_path("/a/b/c/d")
contents = fs.ls_r("/", absolute=False)
self.assertEqual([
'a',
'b',
'c',
'd',
'a/b',
'b/c',
'c/d',
'a/b/c',
'b/c/d',
'a/b/c/d',
], contents)
def test_ls_recursive_absolute(self):
fs = impl_memory.FakeFilesystem()
fs.ensure_path("/d")
fs.ensure_path("/c/d")
fs.ensure_path("/b/c/d")
fs.ensure_path("/a/b/c/d")
contents = fs.ls_r("/", absolute=True)
self.assertEqual([
'/a',
'/b',
'/c',
'/d',
'/a/b',
'/b/c',
'/c/d',
'/a/b/c',
'/b/c/d',
'/a/b/c/d',
], contents)
def test_ls_recursive_targeted(self):
fs = impl_memory.FakeFilesystem()
fs.ensure_path("/d")
fs.ensure_path("/c/d")
fs.ensure_path("/b/c/d")
fs.ensure_path("/a/b/c/d")
contents = fs.ls_r("/a/b", absolute=False)
self.assertEqual(['c', 'c/d'], contents)
def test_ls_targeted(self):
fs = impl_memory.FakeFilesystem()
fs.ensure_path("/d")
fs.ensure_path("/c/d")
fs.ensure_path("/b/c/d")
fs.ensure_path("/a/b/c/d")
contents = fs.ls("/a/b", absolute=False)
self.assertEqual(['c'], contents)
def test_ls_targeted_absolute(self):
fs = impl_memory.FakeFilesystem()
fs.ensure_path("/d")
fs.ensure_path("/c/d")
fs.ensure_path("/b/c/d")
fs.ensure_path("/a/b/c/d")
contents = fs.ls("/a/b", absolute=True)
self.assertEqual(['/a/b/c'], contents)
def test_ls_recursive_targeted_absolute(self):
fs = impl_memory.FakeFilesystem()
fs.ensure_path("/d")
fs.ensure_path("/c/d")
fs.ensure_path("/b/c/d")
fs.ensure_path("/a/b/c/d")
contents = fs.ls_r("/a/b", absolute=True)
self.assertEqual(['/a/b/c', '/a/b/c/d'], contents)
def test_ensure_path(self):
fs = impl_memory.FakeFilesystem()
pieces = ['a', 'b', 'c']
path = "/" + "/".join(pieces)
fs.ensure_path(path)
path = fs.root_path
for i, p in enumerate(pieces):
if i == 0:
path += p
else:
path += "/" + p
self.assertIsNone(fs[path])
def test_clear(self):
fs = impl_memory.FakeFilesystem()
paths = ['/b', '/c', '/a/b/c']
for p in paths:
fs.ensure_path(p)
for p in paths:
self.assertIsNone(self._get_item_path(fs, p))
fs.clear()
for p in paths:
self.assertRaises(exc.NotFound, self._get_item_path, fs, p)
def test_not_found(self):
fs = impl_memory.FakeFilesystem()
self.assertRaises(exc.NotFound, self._get_item_path, fs, '/c')
def test_bad_norms(self):
fs = impl_memory.FakeFilesystem()
self.assertRaises(ValueError, fs.normpath, '')
self.assertRaises(ValueError, fs.normpath, 'abc/c')
self.assertRaises(ValueError, fs.normpath, '../c')
def test_del_root_not_allowed(self):
fs = impl_memory.FakeFilesystem()
self.assertRaises(ValueError, fs.delete, "/", recursive=False)
def test_del_no_children_allowed(self):
fs = impl_memory.FakeFilesystem()
fs['/a'] = 'a'
self.assertEqual(1, len(fs.ls_r("/")))
fs.delete("/a")
self.assertEqual(0, len(fs.ls("/")))
def test_del_many_children_not_allowed(self):
fs = impl_memory.FakeFilesystem()
fs['/a'] = 'a'
fs['/a/b'] = 'b'
self.assertRaises(ValueError, fs.delete, "/", recursive=False)
def test_del_with_children_not_allowed(self):
fs = impl_memory.FakeFilesystem()
fs['/a'] = 'a'
fs['/a/b'] = 'b'
self.assertRaises(ValueError, fs.delete, "/a", recursive=False)
def test_del_many_children_allowed(self):
fs = impl_memory.FakeFilesystem()
fs['/a'] = 'a'
fs['/a/b'] = 'b'
self.assertEqual(2, len(fs.ls_r("/")))
fs.delete("/a", recursive=True)
self.assertEqual(0, len(fs.ls("/")))
def test_del_many_children_allowed_not_recursive(self):
fs = impl_memory.FakeFilesystem()
fs['/a'] = 'a'
fs['/a/b'] = 'b'
self.assertEqual(2, len(fs.ls_r("/")))
fs.delete("/a/b", recursive=False)
self.assertEqual(1, len(fs.ls("/")))
fs.delete("/a", recursive=False)
self.assertEqual(0, len(fs.ls("/")))
def test_link_loop_raises(self):
fs = impl_memory.FakeFilesystem()
fs['/b'] = 'c'
fs.symlink('/b', '/b')
self.assertRaises(ValueError, self._get_item_path, fs, '/b')
def test_ensure_linked_delete(self):
fs = impl_memory.FakeFilesystem()
fs['/b'] = 'd'
fs.symlink('/b', '/c')
self.assertEqual('d', fs['/b'])
self.assertEqual('d', fs['/c'])
del fs['/b']
self.assertRaises(exc.NotFound, self._get_item_path, fs, '/c')
self.assertRaises(exc.NotFound, self._get_item_path, fs, '/b')
|
py | b407548d1539781a310dd11a278698c4338d7000 |
import numpy as np
import xarray as xr
import pandas as pd
import sys
import json
import os
import datetime
from xarray.core.utils import (
decode_numpy_dict_values,
either_dict_or_kwargs,
ensure_us_time_resolution,
)
from numpy.compat import (
asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
from numpy.lib import format
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, datetime.datetime):
return obj.__str__()
if isinstance(obj, np.datetime64):
return obj.__str__()
return json.JSONEncoder.default(self, obj)
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def myJsonConverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
def save_npys(file, data, compress=False,min_dims_coord_npy = 2):
if isinstance(data,xr.DataArray):
_save_dataarray(file, data, compress=compress,min_dims_coord_npy=min_dims_coord_npy)
elif isinstance(data,xr.Dataset):
_save_dataset(file, data, compress=compress,min_dims_coord_npy=min_dims_coord_npy)
else:
        raise TypeError('Unexpected type %s' % str(type(data)))
class zip_file():
def __init__(self,file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if not hasattr(file, 'read'):
file = os_fspath(file)
import zipfile
kwargs['allowZip64'] = True
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
self.file_dir = file_dir
self.file_prefix = file_prefix
self.zipf = zipfile.ZipFile(file, *args, **kwargs)
def close(self):
self.zipf.close()
def open(self,x):
return self.zipf.open(x)
def read(self,x):
return self.zipf.read(x)
def namelist(self):
return self.zipf.namelist()
def add_bin_data(self,fname,data_bytes):
if sys.version_info >= (3, 6):
with self.zipf.open(fname, 'w', force_zip64=True) as fid:
fid.write(data_bytes)
else:
import tempfile
fd, tmpfile = tempfile.mkstemp(prefix=self.file_prefix, dir=self.file_dir, suffix=fname)
os.close(fd)
try:
fid = open(tmpfile, 'wb')
try:
fid.write(data_bytes)
fid.close()
fid = None
self.zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
def add_npy(self,fname,val):
if sys.version_info >= (3, 6):
with self.zipf.open(fname, 'w', force_zip64=True) as fid:
format.write_array(fid, np.asanyarray(val), allow_pickle=False, pickle_kwargs=None)
else:
import tempfile
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
fd, tmpfile = tempfile.mkstemp(prefix=self.file_prefix, dir=self.file_dir, suffix=fname)
os.close(fd)
try:
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val), allow_pickle=False, pickle_kwargs=None)
fid.close()
fid = None
self.zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
def _save_dataarray(file, dataarray, compress=False, min_dims_coord_npy =2):#mostly copied from _savez in numpy\lib\npyio.py
import zipfile
if not hasattr(file, 'write'):
file = os_fspath(file)
if not file.endswith('.xar'):
file = file + '.xar'
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zip_file(file, mode="w", compression=compression)
if dataarray.name is None:
data_name = 'data'
else:
data_name = dataarray.name
zipf.add_npy(data_name+'.npy',dataarray.values)
d = dataarray.variable.to_dict(data=False)
d['version'] = xr.__version__
d.update({"coords": {}, "name": dataarray.name})
for k in dataarray.coords:
assert(k!=data_name)
coord_var = dataarray.coords[k].variable
item = {"attrs": decode_numpy_dict_values(coord_var.attrs), "dtype":str(coord_var.values.dtype)}# we save the type here
if (coord_var.dims!=()) and( len(coord_var.dims)>1 or coord_var.dims[0]!=k): # we don't keep the dims if we have a dimension_coordinate or if dims is empty to keep the json more concise (see http://xarray.pydata.org/en/stable/data-structures.html#coordinates)
item['dims'] = coord_var.dims
if (coord_var.dims!=()) and len(coord_var.dims)>=min_dims_coord_npy:
zipf.add_npy(k+'.npy',coord_var.values)
else:
item["data"] = ensure_us_time_resolution(coord_var.values) # keeping coordinates data in the json
d["coords"][k] = item
json_str = json.dumps(d,cls=NumpyEncoder) + "\n" # 2. string (i.e. JSON)
json_bytes = json_str.encode('utf-8')
zipf.add_bin_data('DataArray.json',json_bytes)
zipf.close()
def _save_dataset(file, dataset, compress=False, min_dims_coord_npy = 2):#mostly copied from _savez in numpy\lib\npyio.py
import zipfile
if not hasattr(file, 'write'):
file = os_fspath(file)
if not file.endswith('.xar'):
file = file + '.xar'
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zip_file(file, mode="w", compression=compression)
dataset_dict = dataset.to_dict(data = False)
dataset_dict['version'] = xr.__version__
for key, array in dict(dataset.data_vars).items():
val = np.asanyarray(array.values)
if val.ndim >= min_dims_coord_npy:
zipf.add_npy('%s.npy'%key, val)
else:
dataset_dict['data_vars'][key]['data']=ensure_us_time_resolution(val)
for key, array in dict(dataset.coords).items():
val = np.asanyarray(array.values)
if val.ndim >= min_dims_coord_npy:
zipf.add_npy('%s.npy'%key, val)
else:
dataset_dict['coords'][key]['data']=ensure_us_time_resolution(val)
json_str = json.dumps(dataset_dict,cls=NumpyEncoder) + "\n"
json_bytes = json_str.encode('utf-8')
zipf.add_bin_data('Dataset.json', json_bytes)
zipf.close()
def load_npys(file):
# TODO: Use contextlib.ExitStack once we drop Python 2
if hasattr(file, 'read'):
fid = file
own_fid = False
else:
fid = open(os_fspath(file), "rb")
own_fid = True
if True:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
_zip = zip_file(fid)
files = _zip.namelist()
_data_dict={}
_type = None
for x in files:
if x.endswith('.npy'):
bytes = _zip.open(x)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
assert( magic == format.MAGIC_PREFIX)
bytes = _zip.open(x)
_data_dict[x[:-4]] = format.read_array(bytes, allow_pickle=False, pickle_kwargs=None)
elif x=='Dataset.json':
assert(_type is None)
_type = xr.Dataset
header = json.loads(_zip.read(x))
elif x=='DataArray.json':
assert(_type is None)
_type = xr.DataArray
header = json.loads(_zip.read(x))
if _type is None:
raise IOError("Failed to read file")
if _type == xr.DataArray:
if 'name' in header and (header['name'] is not None):
data_name = header['name']
else:
data_name = 'data'
data = _data_dict[data_name]
assert (data.dtype==header['dtype'])
assert (data.shape==tuple(header['shape']))
coords={}
for k,coord in header['coords'].items():
if 'data' in coord:
coord_data = np.array(coord['data'],dtype=coord['dtype'])
else:
coord_data = _data_dict[k]
if 'dims' in coord:
dims=coord['dims']
elif coord_data.ndim==0:
dims=()
else:
dims= [k]
coords[k]=xr.DataArray(coord_data,dims=dims)
return xr.DataArray(data, coords = coords, dims=header['dims'],attrs=header['attrs'],name=header['name'])
else: # type is Dataset
coords={}
data_vars={}
for k,d in header['coords'].items():
if 'data' in d:
data = np.array(d['data'],dtype=d['dtype'])
else:
data = _data_dict[k]
coords[k]=xr.DataArray(data, dims=d['dims'], attrs=d['attrs'])
for k,d in header['data_vars'].items():
if 'data' in d:
data = np.array(d['data'],dtype=d['dtype'])
else:
data = _data_dict[k]
data_vars[k]=xr.DataArray(data, dims=d['dims'], attrs=d['attrs'])
return xr.Dataset(data_vars, coords=coords,attrs=header['attrs'])
else:
raise IOError(
"Failed to interpret file %s as a zip" % repr(file))
return None
def test():
from xarray.testing import assert_identical
data = np.random.rand(4, 3)
locs = ['IA', 'IL', 'IN']
times = pd.date_range('2000-01-01', periods=4)
foo = xr.DataArray(data, coords=[times, locs], dims=['time', 'space'])
v=foo.coords['time'].variable
save_npys('foo',foo)
foo_loaded = load_npys('foo.xar')
assert_identical(foo,foo_loaded)
temp = 15 + 8 * np.random.randn(2, 2, 3)
lon = [[-99.83, -99.32], [-99.79, -99.23]]
lat = [[42.25, 42.21], [42.63, 42.59]]
da = xr.DataArray(temp,name='precipitations',dims=['x','y','time'],
coords={'long': (['x', 'y'], lon), 'lat': (['x', 'y'], lat), 'time': pd.date_range('2014-09-06', periods=3), 'reference_time': pd.Timestamp('2014-09-05')})
save_npys('da',da)
da_loaded=load_npys('da.xar')
assert_identical(da,da_loaded)
temp = 15 + 8 * np.random.randn(2, 2, 3)
precip = 10 * np.random.rand(2, 2, 3)
lon = [[-99.83, -99.32], [-99.79, -99.23]]
lat = [[42.25, 42.21], [42.63, 42.59]]
ds = xr.Dataset({'temperature' : (['x', 'y', 'time'], temp),
'precipitation': (['x', 'y', 'time'], precip)},
coords={'long': (['x', 'y'], lon), 'lat': (['x', 'y'], lat), 'time': pd.date_range('2014-09-06', periods=3), 'reference_time': pd.Timestamp('2014-09-05')})
save_npys('ds',ds,min_dims_coord_npy=1)
ds_loaded= load_npys('ds.xar')
assert_identical(ds, ds_loaded)
if __name__ == "__main__":
test()
|
py | b407550988abc330c0a69e1be49a53e4c4b80145 | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A class for sampling, encoding, and decoding from trained MusicVAE models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import re
import tarfile
from backports import tempfile
import numpy as np
import tensorflow.compat.v1 as tf
class NoExtractedExamplesError(Exception):
pass
class MultipleExtractedExamplesError(Exception):
pass
class TrainedModel(object):
"""An interface to a trained model for encoding, decoding, and sampling.
Attributes:
config: The Config to build the model graph with.
batch_size: The batch size to build the model graph with.
checkpoint_dir_or_path: The directory containing checkpoints for the model,
the most recent of which will be loaded, or a direct path to a specific
checkpoint.
var_name_substitutions: Optional list of string pairs containing regex
patterns and substitution values for renaming model variables to match
those in the checkpoint. Useful for backwards compatibility.
session_target: Optional execution engine to connect to. Defaults to
in-process.
sample_kwargs: Additional, non-tensor keyword arguments to pass to sample
call.
"""
def __init__(self, config, batch_size, checkpoint_dir_or_path=None,
var_name_substitutions=None, session_target='', **sample_kwargs):
if tf.gfile.IsDirectory(checkpoint_dir_or_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir_or_path)
else:
checkpoint_path = checkpoint_dir_or_path
self._config = copy.deepcopy(config)
self._config.data_converter.set_mode('infer')
self._config.hparams.batch_size = batch_size
with tf.Graph().as_default():
model = self._config.model
model.build(
self._config.hparams,
self._config.data_converter.output_depth,
is_training=False)
# Input placeholders
self._temperature = tf.placeholder(tf.float32, shape=())
if self._config.hparams.z_size:
self._z_input = tf.placeholder(
tf.float32, shape=[batch_size, self._config.hparams.z_size])
else:
self._z_input = None
if self._config.data_converter.control_depth > 0:
self._c_input = tf.placeholder(
tf.float32, shape=[None, self._config.data_converter.control_depth])
else:
self._c_input = None
self._inputs = tf.placeholder(
tf.float32,
shape=[batch_size, None, self._config.data_converter.input_depth])
self._controls = tf.placeholder(
tf.float32,
shape=[batch_size, None, self._config.data_converter.control_depth])
self._inputs_length = tf.placeholder(
tf.int32,
shape=[batch_size] + list(self._config.data_converter.length_shape))
self._max_length = tf.placeholder(tf.int32, shape=())
# Outputs
self._outputs, self._decoder_results = model.sample(
batch_size,
max_length=self._max_length,
z=self._z_input,
c_input=self._c_input,
temperature=self._temperature,
**sample_kwargs)
if self._config.hparams.z_size:
q_z = model.encode(self._inputs, self._inputs_length, self._controls)
self._mu = q_z.loc
self._sigma = q_z.scale.diag
self._z = q_z.sample()
var_map = None
if var_name_substitutions is not None:
var_map = {}
for v in tf.global_variables():
var_name = v.name[:-2] # Strip ':0' suffix.
for pattern, substitution in var_name_substitutions:
var_name = re.sub(pattern, substitution, var_name)
if var_name != v.name[:-2]:
tf.logging.info('Renaming `%s` to `%s`.', v.name[:-2], var_name)
var_map[var_name] = v
# Restore graph
self._sess = tf.Session(target=session_target)
saver = tf.train.Saver(var_map)
if (os.path.exists(checkpoint_path) and
tarfile.is_tarfile(checkpoint_path)):
tf.logging.info('Unbundling checkpoint.')
with tempfile.TemporaryDirectory() as temp_dir:
tar = tarfile.open(checkpoint_path)
tar.extractall(temp_dir)
# Assume only a single checkpoint is in the directory.
for name in tar.getnames():
if name.endswith('.index'):
checkpoint_path = os.path.join(temp_dir, name[0:-6])
break
saver.restore(self._sess, checkpoint_path)
else:
saver.restore(self._sess, checkpoint_path)
def sample(self, n=None, length=None, temperature=1.0, same_z=False,
c_input=None, force_z=None):
"""Generates random samples from the model.
Args:
n: The number of samples to return. A full batch will be returned if not
specified.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
same_z: Whether to use the same latent vector for all samples in the
batch (if applicable).
c_input: A sequence of control inputs to use for all samples (if
applicable).
Returns:
A list of samples as NoteSequence objects.
Raises:
ValueError: If `length` is not specified and an end token is not being
used.
"""
batch_size = self._config.hparams.batch_size
n = n or batch_size
z_size = self._config.hparams.z_size
if not length and self._config.data_converter.end_token is None:
raise ValueError(
'A length must be specified when the end token is not used.')
length = length or tf.int32.max
feed_dict = {
self._temperature: temperature,
self._max_length: length
}
if force_z is not None:
z = np.reshape(force_z, (1, 512))
feed_dict[self._z_input] = z
    if self._z_input is not None and same_z and force_z is None:
      z = np.random.randn(z_size).astype(np.float32)  # z.shape == (z_size,)
      z = np.tile(z, (batch_size, 1))  # z.shape == (batch_size, z_size)
feed_dict[self._z_input] = z
if self._c_input is not None:
feed_dict[self._c_input] = c_input
outputs = []
for _ in range(int(np.ceil(n / batch_size))):
      if self._z_input is not None and not same_z and force_z is None:
feed_dict[self._z_input] = (
np.random.randn(batch_size, z_size).astype(np.float32))
outputs.append(self._sess.run(self._outputs, feed_dict))
samples = np.vstack(outputs)[:n]
if self._c_input is not None:
return self._config.data_converter.from_tensors(
samples, np.tile(np.expand_dims(c_input, 0), [batch_size, 1, 1]))
else:
return self._config.data_converter.from_tensors(samples)
def encode(self, note_sequences, assert_same_length=False):
"""Encodes a collection of NoteSequences into latent vectors.
Args:
note_sequences: A collection of NoteSequence objects to encode.
assert_same_length: Whether to raise an AssertionError if all of the
extracted sequences are not the same length.
Returns:
The encoded `z`, `mu`, and `sigma` values.
Raises:
RuntimeError: If called for a non-conditional model.
NoExtractedExamplesError: If no examples were extracted.
MultipleExtractedExamplesError: If multiple examples were extracted.
AssertionError: If `assert_same_length` is True and any extracted
sequences differ in length.
"""
if not self._config.hparams.z_size:
raise RuntimeError('Cannot encode with a non-conditional model.')
inputs = []
controls = []
lengths = []
for note_sequence in note_sequences:
extracted_tensors = self._config.data_converter.to_tensors(note_sequence)
if not extracted_tensors.inputs:
raise NoExtractedExamplesError(
'No examples extracted from NoteSequence: %s' % note_sequence)
# if len(extracted_tensors.inputs) > 1:
# raise MultipleExtractedExamplesError(
# 'Multiple (%d) examples extracted from NoteSequence: %s' %
# (len(extracted_tensors.inputs), note_sequence))
inputs.append(extracted_tensors.inputs[0])
controls.append(extracted_tensors.controls[0])
lengths.append(extracted_tensors.lengths[0])
if assert_same_length and len(inputs[0]) != len(inputs[-1]):
raise AssertionError(
'Sequences 0 and %d have different lengths: %d vs %d' %
(len(inputs) - 1, len(inputs[0]), len(inputs[-1])))
return self.encode_tensors(inputs, lengths, controls)
def encode_tensors(self, input_tensors, lengths, control_tensors=None):
"""Encodes a collection of input tensors into latent vectors.
Args:
input_tensors: Collection of input tensors to encode.
lengths: Collection of lengths of input tensors.
control_tensors: Collection of control tensors to encode.
Returns:
The encoded `z`, `mu`, and `sigma` values.
Raises:
RuntimeError: If called for a non-conditional model.
"""
if not self._config.hparams.z_size:
raise RuntimeError('Cannot encode with a non-conditional model.')
n = len(input_tensors)
input_depth = self._config.data_converter.input_depth
batch_size = self._config.hparams.batch_size
batch_pad_amt = -n % batch_size
if batch_pad_amt > 0:
input_tensors += [np.zeros([0, input_depth])] * batch_pad_amt
length_array = np.array(lengths, np.int32)
length_array = np.pad(
length_array,
[(0, batch_pad_amt)] + [(0, 0)] * (length_array.ndim - 1),
'constant')
max_length = max([len(t) for t in input_tensors])
inputs_array = np.zeros(
[len(input_tensors), max_length, input_depth])
for i, t in enumerate(input_tensors):
inputs_array[i, :len(t)] = t
control_depth = self._config.data_converter.control_depth
controls_array = np.zeros(
[len(input_tensors), max_length, control_depth])
if control_tensors is not None:
control_tensors += [np.zeros([0, control_depth])] * batch_pad_amt
for i, t in enumerate(control_tensors):
controls_array[i, :len(t)] = t
outputs = []
for i in range(len(inputs_array) // batch_size):
batch_begin = i * batch_size
batch_end = (i+1) * batch_size
feed_dict = {self._inputs: inputs_array[batch_begin:batch_end],
self._controls: controls_array[batch_begin:batch_end],
self._inputs_length: length_array[batch_begin:batch_end]}
outputs.append(
self._sess.run([self._z, self._mu, self._sigma], feed_dict))
assert outputs
return tuple(np.vstack(v)[:n] for v in zip(*outputs))
def decode(self, z, length=None, temperature=1.0, c_input=None):
"""Decodes a collection of latent vectors into NoteSequences.
Args:
z: A collection of latent vectors to decode.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
c_input: Control sequence (if applicable).
Returns:
A list of decodings as NoteSequence objects.
Raises:
RuntimeError: If called for a non-conditional model.
ValueError: If `length` is not specified and an end token is not being
used.
"""
tensors = self.decode_to_tensors(z, length, temperature, c_input)
if self._c_input is not None:
return self._config.data_converter.from_tensors(
tensors,
np.tile(
np.expand_dims(c_input, 0),
[self._config.hparams.batch_size, 1, 1]))
else:
return self._config.data_converter.from_tensors(tensors)
def decode_to_tensors(self, z, length=None, temperature=1.0, c_input=None,
return_full_results=False):
"""Decodes a collection of latent vectors into output tensors.
Args:
z: A collection of latent vectors to decode.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
c_input: Control sequence (if applicable).
return_full_results: If true will return the full decoder_results,
otherwise it will return only the samples.
Returns:
If return_full_results is True, will return the full decoder_results list,
otherwise it will return the samples from the decoder as a 2D numpy array.
Raises:
RuntimeError: If called for a non-conditional model.
ValueError: If `length` is not specified and an end token is not being
used.
"""
if not self._config.hparams.z_size:
raise RuntimeError('Cannot decode with a non-conditional model.')
if not length and self._config.data_converter.end_token is None:
raise ValueError(
'A length must be specified when the end token is not used.')
batch_size = self._config.hparams.batch_size
n = len(z)
length = length or tf.int32.max
batch_pad_amt = -n % batch_size
z = np.pad(z, [(0, batch_pad_amt), (0, 0)], mode='constant')
outputs = []
for i in range(len(z) // batch_size):
feed_dict = {
self._temperature: temperature,
self._z_input: z[i*batch_size:(i+1)*batch_size],
self._max_length: length,
}
if self._c_input is not None:
feed_dict[self._c_input] = c_input
if return_full_results:
outputs.extend(self._sess.run(self._decoder_results, feed_dict))
else:
outputs.extend(self._sess.run(self._outputs, feed_dict))
return outputs[:n]
def interpolate(self, start_sequence, end_sequence, num_steps,
length=None, temperature=1.0, assert_same_length=True):
"""Interpolates between a start and an end NoteSequence.
Args:
start_sequence: The NoteSequence to interpolate from.
end_sequence: The NoteSequence to interpolate to.
num_steps: Number of NoteSequences to be generated, including the
reconstructions of the start and end sequences.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
assert_same_length: Whether to raise an AssertionError if all of the
extracted sequences are not the same length.
Returns:
A list of interpolated NoteSequences.
Raises:
AssertionError: If `assert_same_length` is True and any extracted
sequences differ in length.
"""
def _slerp(p0, p1, t):
"""Spherical linear interpolation."""
omega = np.arccos(np.dot(np.squeeze(p0/np.linalg.norm(p0)),
np.squeeze(p1/np.linalg.norm(p1))))
so = np.sin(omega)
return np.sin((1.0-t)*omega) / so * p0 + np.sin(t*omega)/so * p1
_, mu, _ = self.encode([start_sequence, end_sequence], assert_same_length)
z = np.array([_slerp(mu[0], mu[1], t)
for t in np.linspace(0, 1, num_steps)])
return self.decode(
length=length,
z=z,
temperature=temperature)
|
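A usage sketch for TrainedModel; the config object, checkpoint path, and the two NoteSequences are placeholders not defined in this file (a real config would come from the MusicVAE configuration map).

    # my_config, start_ns and end_ns are hypothetical inputs.
    model = TrainedModel(config=my_config,
                         batch_size=4,
                         checkpoint_dir_or_path='/path/to/checkpoint.tar')

    # Unconditional sampling, then a 5-step interpolation between two sequences.
    samples = model.sample(n=4, length=256, temperature=0.9)
    steps = model.interpolate(start_ns, end_ns, num_steps=5, length=256)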
py | b40755d6360c4e9d5166f82f2cd339772498b3c5 | from abc import ABCMeta, abstractmethod
from typing import AnyStr, List
class Repository(metaclass=ABCMeta):
@abstractmethod
def root_url(self) -> str:
"""
Returns the path or URL this repository is pointing at
"""
raise NotImplementedError()
@abstractmethod
def get(self, path: str) -> bytes:
"""
Get data at path
"""
raise NotImplementedError()
@abstractmethod
def put(self, path: str, data: AnyStr):
"""
Save data to file at path
"""
raise NotImplementedError()
def put_path(self, source_path: str, dest_path: str):
"""
Save file or directory to path on repository
"""
raise NotImplementedError()
def put_path_tar(self, local_path: str, tar_path: str, include_path: str):
"""
Save local file or directory to tar.gz file on repository.
"""
raise NotImplementedError()
@abstractmethod
def get_path_tar(self, tar_path: str, local_path: str):
"""
Extracts tarball from tar_path to local_path.
The first component of the tarball is stripped. E.g.
extracting a tarball with `abc123/weights` in it to
`/code` would create `/code/weights`.
"""
raise NotImplementedError()
@abstractmethod
def list(self, path: str) -> List[str]:
"""
List files at path
"""
raise NotImplementedError()
@abstractmethod
def delete(self, path: str):
"""
Delete single file at path
"""
raise NotImplementedError()
|
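A minimal in-memory implementation sketch of the Repository interface above (not part of the original module); it fills in only the abstract methods and leaves the tar helpers unimplemented.

    class MemoryRepository(Repository):
        """Toy repository backed by a dict keyed on path strings."""

        def __init__(self, root: str = "mem://"):
            self._root = root
            self._files = {}

        def root_url(self) -> str:
            return self._root

        def get(self, path: str) -> bytes:
            return self._files[path]

        def put(self, path: str, data: AnyStr):
            self._files[path] = data.encode() if isinstance(data, str) else data

        def get_path_tar(self, tar_path: str, local_path: str):
            raise NotImplementedError("tarballs are not supported in memory")

        def list(self, path: str) -> List[str]:
            prefix = path.rstrip("/") + "/"
            return [p for p in self._files if p.startswith(prefix)]

        def delete(self, path: str):
            del self._files[path]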
py | b407569923f5c326ecd38a9aac6f4a57c7ca2690 | #!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_virtual_circuit_public_prefix_facts
short_description: Fetches details about one or multiple VirtualCircuitPublicPrefix resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple VirtualCircuitPublicPrefix resources in Oracle Cloud Infrastructure
- Lists the public IP prefixes and their details for the specified
public virtual circuit.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
virtual_circuit_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the virtual circuit.
type: str
aliases: ["id"]
required: true
verification_state:
description:
- A filter to only return resources that match the given verification
state.
- The state value is case-insensitive.
type: str
choices:
- "IN_PROGRESS"
- "COMPLETED"
- "FAILED"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List virtual_circuit_public_prefixes
oci_network_virtual_circuit_public_prefix_facts:
# required
virtual_circuit_id: "ocid1.virtualcircuit.oc1..xxxxxxEXAMPLExxxxxx"
# optional
verification_state: IN_PROGRESS
"""
RETURN = """
virtual_circuit_public_prefixes:
description:
- List of VirtualCircuitPublicPrefix resources
returned: on success
type: complex
contains:
cidr_block:
description:
                - Public IP prefix (CIDR) that the customer specified.
returned: on success
type: str
sample: cidr_block_example
verification_state:
description:
- Oracle must verify that the customer owns the public IP prefix before traffic
for that prefix can flow across the virtual circuit. Verification can take a
few business days. `IN_PROGRESS` means Oracle is verifying the prefix. `COMPLETED`
means verification succeeded. `FAILED` means verification failed and traffic for
this prefix will not flow across the connection.
returned: on success
type: str
sample: IN_PROGRESS
sample: [{
"cidr_block": "cidr_block_example",
"verification_state": "IN_PROGRESS"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.core import VirtualNetworkClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class VirtualCircuitPublicPrefixFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return [
"virtual_circuit_id",
]
def list_resources(self):
optional_list_method_params = [
"verification_state",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_virtual_circuit_public_prefixes,
virtual_circuit_id=self.module.params.get("virtual_circuit_id"),
**optional_kwargs
)
VirtualCircuitPublicPrefixFactsHelperCustom = get_custom_class(
"VirtualCircuitPublicPrefixFactsHelperCustom"
)
class ResourceFactsHelper(
VirtualCircuitPublicPrefixFactsHelperCustom,
VirtualCircuitPublicPrefixFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
virtual_circuit_id=dict(aliases=["id"], type="str", required=True),
verification_state=dict(
type="str", choices=["IN_PROGRESS", "COMPLETED", "FAILED"]
),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="virtual_circuit_public_prefix",
service_client_class=VirtualNetworkClient,
namespace="core",
)
result = []
if resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(virtual_circuit_public_prefixes=result)
if __name__ == "__main__":
main()
|
py | b40756eda02c49080976779c4094e7bba4d77469 | import numpy as np
from distance import select_closest
def generate_network(size):
"""
Generate a neuron network of a given size.
Return a vector of two dimensional points in the interval [0,1].
"""
return np.random.rand(size, 2)
def get_neighborhood(center, radix, domain):
"""Get the range gaussian of given radix around a center index."""
    # Impose a lower bound on the radix to prevent NaN values
if radix < 1:
radix = 1
# Compute the circular network distance to the center
deltas = np.absolute(center - np.arange(domain))
distances = np.minimum(deltas, domain - deltas)
# Compute Gaussian distribution around the given center
return np.exp(-(distances*distances) / (2*(radix*radix)))
def get_route(cities, network):
"""Return the route computed by a network."""
cities['winner'] = cities[['x', 'y']].apply(
lambda c: select_closest(network, c),
axis=1, raw=True)
return cities.sort_values('winner').index
|
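A small numeric sketch of the helpers above: a ring of neurons plus the Gaussian neighborhood around one index (the sizes are arbitrary).

    # 100 neurons in [0, 1]^2 and a radius-5 neighborhood centred on index 10.
    network = generate_network(100)                       # shape (100, 2)
    gaussian = get_neighborhood(center=10, radix=5, domain=100)
    print(network.shape, gaussian.shape, gaussian.argmax())  # (100, 2) (100,) 10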
py | b4075723a932046f03dc649a8f1d579117113d4e | import torch.nn as nn
class BaseRNN(nn.Module):
def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, output_dropout_p, n_layers, rnn_cell):
super(BaseRNN, self).__init__()
self.vocab_size = vocab_size
self.max_len = max_len
self.hidden_size = hidden_size
self.n_layers = n_layers
self.input_dropout_p = input_dropout_p
self.input_dropout = nn.Dropout(p=input_dropout_p)
self.output_dropout = nn.Dropout(p=output_dropout_p)
if rnn_cell.lower() == 'lstm':
self.rnn_cell = nn.LSTM
elif rnn_cell.lower() == 'gru':
self.rnn_cell = nn.GRU
else:
raise ValueError("Unsupported RNN Cell: {0}".format(rnn_cell))
self.dropout_p = dropout_p
def forward(self, *args, **kwargs):
raise NotImplementedError()
|
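An illustrative subclass sketch showing how BaseRNN is meant to be extended; the EncoderRNN name and its embedding-plus-RNN layout are assumptions, not part of the original module.

    import torch.nn as nn

    class EncoderRNN(BaseRNN):
        """Embeds token ids, then runs the RNN cell chosen by BaseRNN."""

        def __init__(self, vocab_size, max_len, hidden_size,
                     input_dropout_p=0.0, dropout_p=0.0, output_dropout_p=0.0,
                     n_layers=1, rnn_cell='gru'):
            super(EncoderRNN, self).__init__(vocab_size, max_len, hidden_size,
                                             input_dropout_p, dropout_p,
                                             output_dropout_p, n_layers, rnn_cell)
            self.embedding = nn.Embedding(vocab_size, hidden_size)
            self.rnn = self.rnn_cell(hidden_size, hidden_size, n_layers,
                                     batch_first=True, dropout=self.dropout_p)

        def forward(self, input_var):
            embedded = self.input_dropout(self.embedding(input_var))
            output, hidden = self.rnn(embedded)
            return self.output_dropout(output), hidden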
py | b40757cfaccb74fdf3e77a2cb1cb4fbe2a2d0e2b | # -*- coding: utf-8 -*-
"""This file contains a parser for the Google Drive snapshots.
The Google Drive snapshots are stored in SQLite database files named
snapshot.db.
"""
from __future__ import unicode_literals
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class GoogleDriveSnapshotCloudEntryEventData(events.EventData):
"""Google Drive snapshot cloud entry event data.
Attributes:
doc_type (int): document type.
path (str): path of the file.
shared (bool): True if the file is shared, False if the file is private.
size (int): size of the file.
url (str): URL of the file.
"""
DATA_TYPE = 'gdrive:snapshot:cloud_entry'
def __init__(self):
"""Initializes event data."""
super(GoogleDriveSnapshotCloudEntryEventData, self).__init__(
data_type=self.DATA_TYPE)
self.document_type = None
self.path = None
self.shared = None
self.size = None
self.url = None
class GoogleDriveSnapshotLocalEntryEventData(events.EventData):
"""Google Drive snapshot local entry event data.
Attributes:
path (str): path of the file.
size (int): size of the file.
"""
DATA_TYPE = 'gdrive:snapshot:local_entry'
def __init__(self):
"""Initializes event data."""
super(GoogleDriveSnapshotLocalEntryEventData, self).__init__(
data_type=self.DATA_TYPE)
self.path = None
self.size = None
class GoogleDrivePlugin(interface.SQLitePlugin):
"""SQLite plugin for Google Drive snapshot.db files."""
NAME = 'google_drive'
DESCRIPTION = 'Parser for Google Drive SQLite database files.'
# Define the needed queries.
QUERIES = [
(('SELECT cloud_entry.resource_id, cloud_entry.filename, '
'cloud_entry.modified, cloud_entry.created, cloud_entry.size, '
'cloud_entry.doc_type, cloud_entry.shared, cloud_entry.checksum, '
'cloud_entry.url, cloud_relations.parent_resource_id '
'FROM cloud_entry, cloud_relations '
'WHERE cloud_relations.child_resource_id = cloud_entry.resource_id '
'AND cloud_entry.modified IS NOT NULL;'),
'ParseCloudEntryRow'),
(('SELECT inode_number, filename, modified, checksum, size '
'FROM local_entry WHERE modified IS NOT NULL;'),
'ParseLocalEntryRow')]
# The required tables.
REQUIRED_TABLES = frozenset([
'cloud_entry', 'cloud_relations', 'local_entry', 'local_relations',
'mapping', 'overlay_status'])
SCHEMAS = [{
'cloud_entry': (
'CREATE TABLE cloud_entry (resource_id TEXT, filename TEXT, '
'modified INTEGER, created INTEGER, acl_role INTEGER, doc_type '
'INTEGER, removed INTEGER, url TEXT, size INTEGER, checksum TEXT, '
'shared INTEGER, PRIMARY KEY (resource_id))'),
'cloud_relations': (
'CREATE TABLE cloud_relations (child_resource_id TEXT, '
'parent_resource_id TEXT, UNIQUE (child_resource_id, '
'parent_resource_id), FOREIGN KEY (child_resource_id) REFERENCES '
'cloud_entry(resource_id), FOREIGN KEY (parent_resource_id) '
'REFERENCES cloud_entry(resource_id))'),
'local_entry': (
'CREATE TABLE local_entry (inode_number INTEGER, filename TEXT, '
'modified INTEGER, checksum TEXT, size INTEGER, PRIMARY KEY '
'(inode_number))'),
'local_relations': (
'CREATE TABLE local_relations (child_inode_number INTEGER, '
'parent_inode_number INTEGER, UNIQUE (child_inode_number), FOREIGN '
'KEY (parent_inode_number) REFERENCES local_entry(inode_number), '
'FOREIGN KEY (child_inode_number) REFERENCES '
'local_entry(inode_number))'),
'mapping': (
'CREATE TABLE mapping (inode_number INTEGER, resource_id TEXT, '
'UNIQUE (inode_number), FOREIGN KEY (inode_number) REFERENCES '
'local_entry(inode_number), FOREIGN KEY (resource_id) REFERENCES '
'cloud_entry(resource_id))'),
'overlay_status': (
'CREATE TABLE overlay_status (path TEXT, overlay_status INTEGER, '
'PRIMARY KEY (path))')}]
# Queries used to build cache.
LOCAL_PATH_CACHE_QUERY = (
'SELECT local_relations.child_inode_number, '
'local_relations.parent_inode_number, local_entry.filename '
'FROM local_relations, local_entry '
'WHERE local_relations.child_inode_number = local_entry.inode_number')
CLOUD_PATH_CACHE_QUERY = (
'SELECT cloud_entry.filename, cloud_entry.resource_id, '
'cloud_relations.parent_resource_id AS parent '
'FROM cloud_entry, cloud_relations '
'WHERE cloud_entry.doc_type = 0 '
'AND cloud_entry.resource_id = cloud_relations.child_resource_id')
def GetLocalPath(self, inode, cache, database):
"""Return local path for a given inode.
Args:
inode (int): inode number for the file.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
Returns:
str: full path, including the filename of the given inode value.
"""
local_path = cache.GetResults('local_path')
if not local_path:
results = database.Query(self.LOCAL_PATH_CACHE_QUERY)
cache.CacheQueryResults(
results, 'local_path', 'child_inode_number',
('parent_inode_number', 'filename'))
local_path = cache.GetResults('local_path')
parent, path = local_path.get(inode, [None, None])
# TODO: Read the local_sync_root from the sync_config.db and use that
# for a root value.
root_value = '%local_sync_root%/'
if not path:
return root_value
paths = []
while path:
paths.append(path)
parent, path = local_path.get(parent, [None, None])
if not paths:
return root_value
# Paths are built top level to root so we need to reverse the list to
# represent them in the traditional order.
paths.reverse()
return root_value + '/'.join(paths)
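  # Illustrative note (not part of the original plugin): assuming a hypothetical
  # cached mapping {3: [2, 'notes.txt'], 2: [1, 'docs']}, resolving inode 3 walks
  # the parent chain 3 -> 2, collects ['notes.txt', 'docs'], reverses the list and
  # returns '%local_sync_root%/docs/notes.txt'.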
def GetCloudPath(self, resource_id, cache, database):
"""Return cloud path given a resource id.
Args:
resource_id (str): resource identifier for the file.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
Returns:
str: full path to the resource value.
"""
cloud_path = cache.GetResults('cloud_path')
if not cloud_path:
results = database.Query(self.CLOUD_PATH_CACHE_QUERY)
cache.CacheQueryResults(
results, 'cloud_path', 'resource_id', ('filename', 'parent'))
cloud_path = cache.GetResults('cloud_path')
if resource_id == 'folder:root':
return '/'
paths = []
parent_path, parent_id = cloud_path.get(resource_id, ['', ''])
while parent_path:
if parent_path == 'folder:root':
break
paths.append(parent_path)
parent_path, parent_id = cloud_path.get(parent_id, ['', ''])
if not paths:
return '/'
# Paths are built top level to root so we need to reverse the list to
# represent them in the traditional order.
paths.reverse()
return '/{0:s}/'.format('/'.join(paths))
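  # Illustrative note (not part of the original plugin): assuming a hypothetical
  # cached mapping {'id2': ['photos', 'id1'], 'id1': ['My Drive', 'folder:root']},
  # resolving 'id2' follows the parent identifiers, collects ['photos', 'My Drive'],
  # reverses the list and returns '/My Drive/photos/'.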
# pylint 1.9.3 wants a docstring for kwargs, but this is not useful to add.
# pylint: disable=missing-param-doc
def ParseCloudEntryRow(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
"""Parses a cloud entry row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
"""
query_hash = hash(query)
parent_resource_id = self._GetRowValue(
query_hash, row, 'parent_resource_id')
filename = self._GetRowValue(query_hash, row, 'filename')
cloud_path = self.GetCloudPath(parent_resource_id, cache, database)
cloud_filename = '{0:s}{1:s}'.format(cloud_path, filename)
event_data = GoogleDriveSnapshotCloudEntryEventData()
event_data.document_type = self._GetRowValue(query_hash, row, 'doc_type')
event_data.path = cloud_filename
event_data.query = query
event_data.shared = bool(self._GetRowValue(query_hash, row, 'shared'))
event_data.size = self._GetRowValue(query_hash, row, 'size')
event_data.url = self._GetRowValue(query_hash, row, 'url')
timestamp = self._GetRowValue(query_hash, row, 'modified')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'created')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
# pylint 1.9.3 wants a docstring for kwargs, but this is not useful to add.
# pylint: disable=missing-param-doc
def ParseLocalEntryRow(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
"""Parses a local entry row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (Optional[SQLiteCache]): cache.
database (Optional[SQLiteDatabase]): database.
"""
query_hash = hash(query)
inode_number = self._GetRowValue(query_hash, row, 'inode_number')
local_path = self.GetLocalPath(inode_number, cache, database)
event_data = GoogleDriveSnapshotLocalEntryEventData()
event_data.path = local_path
event_data.query = query
event_data.size = self._GetRowValue(query_hash, row, 'size')
timestamp = self._GetRowValue(query_hash, row, 'modified')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
sqlite.SQLiteParser.RegisterPlugin(GoogleDrivePlugin)
|
py | b407582cd750dce744e9912fc5ae56f004821041 | from .backbone_nas import *
from .adelaide_ea import *
from .sr_ea import *
from .esr_ea import *
from .jdd_ea import *
from .darts_cnn import *
from .cars import *
from .fis import *
from .auto_lane import *
# from .mfkd import *
|
py | b40758b87c77d0aedb16f51b8142ed59432e30fa | # Variables
# The concept of a variable is essentially the same as that of a variable in middle-school algebra, except that in a computer program a variable is not limited to numbers: it can hold any data type.
# In a program, a variable is represented by a variable name, which must be a combination of upper- and lower-case letters, digits and underscores, and must not start with a digit.
a = 1
print(a)
a = 'zhangsan'
print(a)
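# A small illustrative addition (not part of the original snippet): the same name
# can be rebound to a value of any other type, for example a float or a list.
a = 3.14
print(a)
a = [1, 2, 3]
print(a)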
|
py | b40758f8fef13bad939b3fc2f296e848ade46f63 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _initialize_variables(test_case, scaffold):
scaffold.finalize()
test_case.assertIsNone(scaffold.init_feed_dict)
test_case.assertIsNone(scaffold.init_fn)
scaffold.init_op.run()
scaffold.ready_for_local_init_op.eval()
scaffold.local_init_op.run()
scaffold.ready_op.eval()
test_case.assertIsNotNone(scaffold.saver)
def _assert_simple_summaries(test_case, expected_summaries, summary_str,
tol=1e-6):
"""Assert summary the specified simple values.
Args:
test_case: test case.
expected_summaries: Dict of expected tags and simple values.
summary_str: Serialized `summary_pb2.Summary`.
    tol: Tolerance used for both relative and absolute comparisons.
"""
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
test_case.assertAllClose(expected_summaries, {
v.tag: v.simple_value for v in summary.value
}, rtol=tol, atol=tol)
def _assert_no_hooks(test_case, spec):
test_case.assertAllEqual([], spec.training_chief_hooks)
test_case.assertAllEqual([], spec.training_hooks)
def _sigmoid(logits):
return 1 / (1 + np.exp(-logits))
class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def test_n_classes_is_none(self):
with self.assertRaisesRegexp(ValueError, 'n_classes must be > 2'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=None)
def test_n_classes_is_2(self):
with self.assertRaisesRegexp(ValueError, 'n_classes must be > 2'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=2)
def test_invalid_loss_reduction(self):
with self.assertRaisesRegexp(
ValueError, r'Invalid loss_reduction: invalid_loss_reduction'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_reduction='invalid_loss_reduction')
with self.assertRaisesRegexp(
ValueError, r'Invalid loss_reduction: none'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_reduction=losses.Reduction.NONE)
def test_loss_fn_arg_labels_missing(self):
def _loss_fn(logits):
del logits # Unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn must contain argument: labels\. '
r'Given arguments: \(\'logits\',\)'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_fn=_loss_fn)
def test_loss_fn_arg_logits_missing(self):
def _loss_fn(labels):
del labels # unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn must contain argument: logits\. '
r'Given arguments: \(\'labels\',\)'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_fn=_loss_fn)
def test_loss_fn_arg_features_ok(self):
def _loss_fn(labels, logits, features):
del labels, logits, features # Unused
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_fn=_loss_fn)
def test_loss_fn_arg_invalid(self):
def _loss_fn(labels, logits, name=None):
del labels, logits, name # Unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn has unexpected args: \[\'name\'\]'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_fn=_loss_fn)
def test_invalid_logits_shape(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
# Logits should be shape (batch_size, 3).
logits_2x2 = np.array(((45., 44.), (41., 42.),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'logits shape'):
head.create_estimator_spec(
features={'x': np.array(((30.,), (42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_2x2)
# Dynamic shape.
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': np.array(((30.,), (42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({
logits_placeholder: logits_2x2
})
def test_invalid_labels_shape(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
# Logits should be shape (batch_size, 3).
# Labels should be shape (batch_size, 1).
labels_2x2 = np.array(((45, 44), (41, 42),), dtype=np.int)
logits_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))
features = {'x': np.array(((42.,),))}
# Static shape.
with self.assertRaisesRegexp(ValueError, 'Mismatched label shape'):
head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits_2x3,
labels=labels_2x2)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
with self.test_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[2 2\]'):
training_loss.eval({
logits_placeholder: logits_2x3,
labels_placeholder: labels_2x2
})
def test_invalid_labels_type(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
# Logits should be shape (batch_size, 3).
# Labels should be shape (batch_size, 1).
labels_2x1 = np.array(((1.,), (1.,),))
logits_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))
features = {'x': np.array(((42.,),))}
# Static shape.
with self.assertRaisesRegexp(ValueError, 'Labels dtype'):
head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits_2x3,
labels=labels_2x1)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'Labels dtype'):
head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)
def test_invalid_labels_values(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
labels_2x1_with_large_id = np.array(((45,), (1,),), dtype=np.int)
labels_2x1_with_negative_id = np.array(((-5,), (1,),), dtype=np.int)
logits_2x3 = np.array(((1., 2., 4.), (1., 2., 3.),))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
training_loss = head.create_loss(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
with self.test_session():
with self.assertRaisesOpError('Label IDs must < n_classes'):
training_loss.eval({
labels_placeholder: labels_2x1_with_large_id,
logits_placeholder: logits_2x3
})
with self.test_session():
with self.assertRaisesOpError('Label IDs must >= 0'):
training_loss.eval({
labels_placeholder: labels_2x1_with_negative_id,
logits_placeholder: logits_2x3
})
def test_invalid_labels_sparse_tensor(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
labels_2x1 = sparse_tensor.SparseTensor(
values=['english', 'italian'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1])
logits_2x3 = np.array(((1., 2., 4.), (1., 2., 3.),))
with self.assertRaisesRegexp(
ValueError, 'SparseTensor labels are not supported.'):
head.create_loss(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_2x3,
labels=labels_2x1)
def test_incompatible_labels_shape(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
# Logits should be shape (batch_size, 3).
# Labels should be shape (batch_size, 1).
# Here batch sizes are different.
values_3x1 = np.array(((1,), (1,), (1,),))
values_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))
features = {'x': values_2x3}
# Static shape.
with self.assertRaisesRegexp(
ValueError,
r'Shape mismatch: The shape of labels \(received \(3,\)\) should equal '
r'the shape of logits except for the last dimension '
r'\(received \(2, 3\)\)\.'
):
head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=values_2x3,
labels=values_3x1)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
with self.test_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[3 1\]'):
training_loss.eval({
labels_placeholder: values_3x1,
logits_placeholder: values_2x3
})
def test_name(self):
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, name='foo')
self.assertEqual('foo', head.name)
def test_predict(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
logits = [[1., 0., 0.], [0., 0., 1.]]
expected_probabilities = [[0.576117, 0.2119416, 0.2119416],
[0.2119416, 0.2119416, 0.576117]]
expected_class_ids = [[0], [2]]
expected_classes = [[b'0'], [b'2']]
expected_export_classes = [[b'0', b'1', b'2']] * 2
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
self.assertItemsEqual(
(_DEFAULT_SERVING_KEY, 'predict', 'classification'),
spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(logits,
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllClose(
expected_probabilities,
predictions[prediction_keys.PredictionKeys.PROBABILITIES])
self.assertAllClose(expected_class_ids,
predictions[prediction_keys.PredictionKeys.CLASS_IDS])
self.assertAllEqual(expected_classes,
predictions[prediction_keys.PredictionKeys.CLASSES])
self.assertAllClose(
expected_probabilities,
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
self.assertAllEqual(
expected_export_classes,
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))
def test_predict_with_vocabulary_list(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
logits = [[1., 0., 0.], [0., 0., 1.]]
expected_classes = [[b'aang'], [b'zuko']]
expected_export_classes = [[b'aang', b'iroh', b'zuko']] * 2
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertAllEqual(
expected_classes,
sess.run(spec.predictions[prediction_keys.PredictionKeys.CLASSES]))
self.assertAllEqual(
expected_export_classes,
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))
def test_weight_should_not_impact_prediction(self):
n_classes = 3
logits = [[1., 0., 0.], [0., 0., 1.]]
expected_probabilities = [[0.576117, 0.2119416, 0.2119416],
[0.2119416, 0.2119416, 0.576117]]
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, weight_column='label_weights')
weights_2x1 = [[1.], [2.]]
spec = head.create_estimator_spec(
features={
'x': np.array(((42,),), dtype=np.int32),
'label_weights': weights_2x1,
},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
predictions = sess.run(spec.predictions)
self.assertAllClose(logits,
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllClose(
expected_probabilities,
predictions[prediction_keys.PredictionKeys.PROBABILITIES])
def test_eval_create_loss(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# loss = cross_entropy(labels, logits) = [10, 0].
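    # (for logits (10, 0, 0) with label 1, the loss is log(e^10 + 2) - 0 ~= 10;
    # for (0, 10, 0) with label 1 it is log(e^10 + 2) - 10 ~= 0)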
expected_training_loss = 10.
# Create loss.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
def test_eval_create_loss_loss_fn(self):
"""Tests head.create_loss for eval mode and custom loss_fn."""
loss = np.array([[1.], [2.]], dtype=np.float32)
logits_input = np.array([[-10., 10., 0.], [-15., 10., 0]], dtype=np.float32)
labels_input = np.array([[1], [2]], dtype=np.int64)
def _loss_fn(labels, logits):
check_labels = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(labels, labels_input)),
data=[labels])
check_logits = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(logits, logits_input)),
data=[logits])
with ops.control_dependencies([check_labels, check_logits]):
return constant_op.constant(loss)
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_fn=_loss_fn)
actual_training_loss = head.create_loss(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits_input,
labels=labels_input)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(np.sum(loss), actual_training_loss.eval())
def test_eval_create_loss_loss_fn_wrong_shape(self):
"""Tests custom loss_fn that returns Tensor of unexpected shape."""
loss = np.array([1., 2.], dtype=np.float32)
def _loss_fn(labels, logits):
del labels, logits # Unused
return constant_op.constant(loss)
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_fn=_loss_fn)
logits = np.array([[-10., 10., 0.], [-15., 10., 0.]], dtype=np.float32)
labels = np.array([[1], [2]], dtype=np.int64)
actual_training_loss = head.create_loss(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[loss_fn must return Tensor of shape \[D0, D1, ... DN, 1\]\. \] '
r'\[logits_shape: \] \[2 3\] \[loss_shape: \] \[2\]'):
actual_training_loss.eval()
def test_eval_labels_none(self):
"""Tests that error is raised when labels is None."""
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3)
with self.assertRaisesRegexp(
ValueError, r'You must provide a labels Tensor\. Given: None\.'):
head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32),
labels=None)
def test_eval(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
expected_loss = 10.
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss / 2,
keys.ACCURACY: 0.5, # 1 of 2 labels is correct.
}
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval()
for k in value_ops},
rtol=tol,
atol=tol)
def test_eval_metric_ops_with_head_name(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, name='some_multiclass_head')
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
expected_metric_keys = [
'{}/some_multiclass_head'.format(metric_keys.MetricKeys.LOSS_MEAN),
'{}/some_multiclass_head'.format(metric_keys.MetricKeys.ACCURACY)
]
self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())
def test_eval_with_regularization_losses(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
regularization_losses = [1.5, 0.5]
expected_regularization_loss = 2.
# unregularized_loss = sum(cross_entropy(labels, logits)) / batch_size
# = sum(10, 0) / 2 = 5.
expected_unregularized_loss = 5.
expected_regularized_loss = (
expected_unregularized_loss + expected_regularization_loss)
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels,
regularization_losses=regularization_losses)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_unregularized_loss,
keys.LOSS_REGULARIZATION: expected_regularization_loss,
keys.ACCURACY: 0.5, # 1 of 2 labels is correct.
}
# Assert predictions, loss, and metrics.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_regularized_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval()
for k in value_ops},
rtol=tol,
atol=tol)
def test_eval_with_label_vocabulary_create_loss(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
logits = [[10., 0, 0], [0, 10, 0]]
labels = [[b'iroh'], [b'iroh']]
features = {'x': np.array(((42,),), dtype=np.int32)}
# loss = cross_entropy(labels, logits) = [10, 0].
expected_training_loss = 10.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
def test_eval_with_label_vocabulary(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
logits = [[10., 0, 0], [0, 10, 0]]
labels = [[b'iroh'], [b'iroh']]
features = {'x': np.array(((42,),), dtype=np.int32)}
# loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
expected_loss = 10.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss / 2,
keys.ACCURACY: 0.5, # 1 of 2 labels is correct.
}
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol, atol=tol)
def test_weighted_multi_example_eval(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, weight_column='label_weights')
# Create estimator spec.
logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
labels = np.array(((1,), (2,), (2,)), dtype=np.int64)
weights_3x1 = np.array(((1.,), (2.,), (3.,)), dtype=np.float64)
# loss = sum(cross_entropy(labels, logits) * [1, 2, 3])
# = sum([10, 10, 0] * [1, 2, 3]) = 30
expected_loss = 30.
spec = head.create_estimator_spec(
features={
'x': np.array(((42,),), dtype=np.int32),
'label_weights': weights_3x1,
},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss / np.sum(weights_3x1),
        # Weighted accuracy: only the third example (weight 3.) is correct,
        # so 3. / 6. = 0.5.
keys.ACCURACY: 0.5,
}
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert loss, and metrics.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol, atol=tol)
def test_train_create_loss(self):
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3)
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# unreduced_loss = cross_entropy(labels, logits) = [10, 0].
expected_unreduced_loss = [[10.], [0.]]
# Weights default to 1.
expected_weights = 1.
# training_loss = 1 * 10 + 1 * 0
expected_training_loss = 10.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
tol = 1e-2
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
self.assertAllClose(
expected_unreduced_loss, unreduced_loss.eval(), rtol=tol, atol=tol)
self.assertAllClose(expected_weights, actual_weights)
def test_train_create_loss_loss_reduction(self):
"""Tests create_loss with loss_reduction."""
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# unreduced_loss = cross_entropy(labels, logits) = [10, 0].
expected_unreduced_loss = [[10.], [0.]]
# Weights default to 1.
expected_weights = 1.
    # training_loss = (1 * 10 + 1 * 0) / num_nonzero_weights = 10 / 2
expected_training_loss = 10. / 2.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
tol = 1e-2
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
self.assertAllClose(
expected_unreduced_loss, unreduced_loss.eval(), rtol=tol, atol=tol)
self.assertAllClose(expected_weights, actual_weights)
def test_train_labels_none(self):
"""Tests that error is raised when labels is None."""
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3)
def _no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
with self.assertRaisesRegexp(
ValueError, r'You must provide a labels Tensor\. Given: None\.'):
head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32),
labels=None,
train_op_fn=_no_op_train_fn)
def test_train(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=2)])
# loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
expected_loss = 10.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
}, summary_str, tol)
def test_train_summaries_with_head_name(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, name='some_multiclass_head')
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
# loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
expected_loss = 10.
features = {'x': np.array(((42,),), dtype=np.int32)}
def _train_op_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
# Assert summaries.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
summary_str = sess.run(spec.scaffold.summary_op)
_assert_simple_summaries(self, {
'{}/some_multiclass_head'.format(metric_keys.MetricKeys.LOSS):
expected_loss,
'{}/some_multiclass_head'.format(metric_keys.MetricKeys.LOSS_MEAN):
expected_loss / 2,
}, summary_str, tol)
def test_train_with_regularization_losses(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=2)])
regularization_losses = [1.5, 0.5]
expected_regularization_loss = 2.
# unregularized_loss = sum(cross_entropy(labels, logits)) / batch_size
# = sum(10, 0) / 2 = 5.
# loss = unregularized_loss + regularization_loss = 7.
expected_loss = 7.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
regularization_losses=regularization_losses)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_REGULARIZATION: (
expected_regularization_loss),
}, summary_str, tol)
def test_train_one_dim_create_loss(self):
"""Tests create_loss with 1D labels and weights (shape [batch_size])."""
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, weight_column='label_weights')
logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
labels_rank_1 = np.array((1, 2, 2,), dtype=np.int64)
weights_rank_1 = np.array((1., 2., 3.,), dtype=np.float64)
features = {
'x': np.array(((42,),), dtype=np.float32),
'label_weights': weights_rank_1
}
# unreduced_loss = cross_entropy(labels, logits) = [10, 10, 0].
expected_unreduced_loss = [[10.], [10.], [0.]]
# weights are reshaped to [3, 1] to match logits.
expected_weights = [[1.], [2.], [3.]]
# training_loss = 1 * 10 + 2 * 10 + 3 * 0 = 30.
expected_training_loss = 30.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1)
tol = 1e-2
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
self.assertAllClose(
expected_unreduced_loss, unreduced_loss.eval(), rtol=tol, atol=tol)
self.assertAllClose(expected_weights, actual_weights.eval())
def test_train_one_dim(self):
"""Tests train with 1D labels and weights (shape [batch_size])."""
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, weight_column='label_weights')
logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
labels_rank_1 = np.array((1, 2, 2,), dtype=np.int64)
weights_rank_1 = np.array((1., 2., 3.,), dtype=np.float64)
self.assertEqual((3,), labels_rank_1.shape)
self.assertEqual((3,), weights_rank_1.shape)
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=2)])
# loss = sum(cross_entropy(labels, logits) * [1, 2, 3])
# = sum([10, 10, 0] * [1, 2, 3]) = 30
expected_loss = 30.
features = {
'x': np.array(((42,),), dtype=np.float32),
'label_weights': weights_rank_1
}
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: (
expected_loss / np.sum(weights_rank_1)),
}, summary_str, tol)
def test_train_with_vocabulary_create_loss(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
logits = [[10., 0, 0], [0, 10, 0]]
labels = [[b'iroh'], [b'iroh']]
features = {'x': np.array(((42,),), dtype=np.int32)}
# loss = cross_entropy(labels, logits) = [10, 0].
expected_training_loss = 10.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
def test_train_with_vocabulary(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
logits = [[10., 0, 0], [0, 10, 0]]
labels = [[b'iroh'], [b'iroh']]
features = {'x': np.array(((42,),), dtype=np.int32)}
def _train_op_fn(loss):
del loss
return control_flow_ops.no_op()
# loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
expected_loss = 10.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
loss = sess.run(spec.loss)
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
def test_weighted_multi_example_train(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, weight_column='label_weights')
# Create estimator spec.
logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
labels = np.array(((1,), (2,), (2,)), dtype=np.int64)
weights_3x1 = np.array(((1.,), (2.,), (3.,)), dtype=np.float64)
expected_train_result = 'my_train_op'
# loss = sum(cross_entropy(labels, logits) * [1, 2, 3])
# = sum([10, 10, 0] * [1, 2, 3]) = 30
expected_loss = 30.
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=2)])
spec = head.create_estimator_spec(
features={
'x': np.array(((42,),), dtype=np.float32),
'label_weights': weights_3x1,
},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
# loss mean = sum(cross_entropy(labels, logits) * [1,2,3]) / (1+2+3)
# = sum([10, 10, 0] * [1, 2, 3]) / 6 = 30 / 6
metric_keys.MetricKeys.LOSS_MEAN:
expected_loss / np.sum(weights_3x1),
}, summary_str, tol)
def test_multi_dim_weighted_train_create_loss(self):
"""Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2]."""
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, weight_column='weights')
logits = np.array([[[10, 0, 0], [12, 0, 0]],
[[0, 10, 0], [0, 15, 0]]], dtype=np.float32)
labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# unreduced_loss = cross_entropy(labels, logits) = [[0, 12], [0, 15]].
expected_unreduced_loss = [[[0.], [12.]], [[0.], [15.]]]
# weights are reshaped to [2, 2, 1] to match logits.
expected_weights = [[[1.], [1.5]], [[2.], [2.5]]]
# training_loss = 1*0 + 1.5*12 + 2*0 + 2.5*15 = 55.5
expected_training_loss = 55.5
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
tol = 1e-2
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
self.assertAllClose(
expected_unreduced_loss, unreduced_loss.eval(), rtol=tol, atol=tol)
self.assertAllClose(expected_weights, actual_weights.eval())
def test_multi_dim_weighted_train(self):
"""Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2]."""
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, weight_column='weights')
logits = np.array([[[10, 0, 0], [12, 0, 0]],
[[0, 10, 0], [0, 15, 0]]], dtype=np.float32)
labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=2)])
# loss = cross_entropy(labels, logits) = [[0, 12], [0, 15]].
# weighted_sum_loss = 1*0 + 1.5*12 + 2*0 + 2.5*15 = 55.5
expected_loss = 55.5
spec = head.create_estimator_spec(
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
train_result)
def test_multi_dim_train_weights_wrong_inner_dim(self):
"""Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 1]."""
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, weight_column='weights')
logits = np.array([[[10, 0, 0], [12, 0, 0]],
[[0, 10, 0], [0, 15, 0]]], dtype=np.float32)
labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)
weights = np.array([[1.], [2.]], dtype=np.float32)
def _no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[logits_shape: \] \[2 2 3\] \[weights_shape: \] \[2 1\]'):
spec.loss.eval()
def test_multi_dim_train_weights_wrong_outer_dim(self):
"""Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2, 3]."""
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, weight_column='weights')
logits = np.array([[[10, 0, 0], [12, 0, 0]],
[[0, 10, 0], [0, 15, 0]]], dtype=np.float32)
labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)
weights = np.array([[[1., 1.1, 1.2], [1.5, 1.6, 1.7]],
[[2., 2.1, 2.2], [2.5, 2.6, 2.7]]])
weights_placeholder = array_ops.placeholder(dtype=dtypes.float32)
def _no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features={'weights': weights_placeholder},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[logits_shape: \]\s\[2 2 3\]\s\[weights_shape: \]\s\[2 2 3\]'):
spec.loss.eval({weights_placeholder: weights})
def test_multi_dim_weighted_eval(self):
"""Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2]."""
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, weight_column='weights')
logits = np.array([[[10, 0, 0], [12, 0, 0]],
[[0, 10, 0], [0, 15, 0]]], dtype=np.float32)
labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# loss = cross_entropy(labels, logits) = [[0, 12], [0, 15]].
# weighted_sum_loss = 1*0 + 1.5*12 + 2*0 + 2.5*15 = 55.5
expected_loss = 55.5
# Create estimator spec.
spec = head.create_estimator_spec(
features={'weights': weights},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss / np.sum(weights),
keys.ACCURACY: (1.*1. + 1.5*0. + 2.*1. + 2.5*0.) / np.sum(weights),
}
# Assert predictions, loss, and metrics.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol, atol=tol)
class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def test_threshold_too_small(self):
with self.assertRaisesRegexp(ValueError, r'thresholds not in \(0, 1\)'):
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
thresholds=(0., 0.5))
def test_threshold_too_large(self):
with self.assertRaisesRegexp(ValueError, r'thresholds not in \(0, 1\)'):
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
thresholds=(0.5, 1.))
def test_invalid_loss_reduction(self):
with self.assertRaisesRegexp(
ValueError, r'Invalid loss_reduction: invalid_loss_reduction'):
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction='invalid_loss_reduction')
with self.assertRaisesRegexp(
ValueError, r'Invalid loss_reduction: none'):
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.NONE)
def test_loss_fn_arg_labels_missing(self):
def _loss_fn(logits):
del logits # Unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn must contain argument: labels\. '
r'Given arguments: \(\'logits\',\)'):
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_fn=_loss_fn)
def test_loss_fn_arg_logits_missing(self):
def _loss_fn(labels):
del labels # unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn must contain argument: logits\. '
r'Given arguments: \(\'labels\',\)'):
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_fn=_loss_fn)
def test_loss_fn_arg_features_ok(self):
def _loss_fn(labels, logits, features):
del labels, logits, features # Unused
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_fn=_loss_fn)
def test_loss_fn_arg_invalid(self):
def _loss_fn(labels, logits, name=None):
del labels, logits, name # Unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn has unexpected args: \[\'name\'\]'):
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_fn=_loss_fn)
def test_invalid_logits_shape(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
self.assertEqual(1, head.logits_dimension)
# Logits should be shape (batch_size, 1).
logits_2x2 = np.array(((45., 44.), (41., 42.),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'logits shape'):
head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_2x2)
# Dynamic shape.
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({
logits_placeholder: logits_2x2
})
def test_invalid_labels_shape(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
self.assertEqual(1, head.logits_dimension)
# Labels and logits should be shape (batch_size, 1).
labels_2x2 = np.array(((45., 44.), (41., 42.),))
logits_2x1 = np.array(((45.,), (41.,),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'Mismatched label shape'):
head.create_loss(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_2x1,
labels=labels_2x2)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
training_loss = head.create_loss(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
with self.test_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[2 2\]'):
training_loss.eval({
logits_placeholder: logits_2x1,
labels_placeholder: labels_2x2
})
def test_incompatible_labels_shape(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
self.assertEqual(1, head.logits_dimension)
# Both logits and labels should be shape (batch_size, 1).
values_2x1 = np.array(((0.,), (1.,),))
values_3x1 = np.array(((0.,), (1.,), (0.,),))
# Static shape.
with self.assertRaisesRegexp(
ValueError, 'logits and labels must have the same shape'):
head.create_loss(
features={'x': values_2x1},
mode=model_fn.ModeKeys.EVAL,
logits=values_2x1,
labels=values_3x1)
with self.assertRaisesRegexp(
ValueError, 'logits and labels must have the same shape'):
head.create_loss(
features={'x': values_2x1},
mode=model_fn.ModeKeys.EVAL,
logits=values_3x1,
labels=values_2x1)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
training_loss = head.create_loss(
features={'x': values_2x1},
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
with self.test_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[3 1\] \[labels_shape: \] \[2 1\]'):
training_loss.eval({
labels_placeholder: values_2x1,
logits_placeholder: values_3x1
})
with self.test_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[3 1\]'):
training_loss.eval({
labels_placeholder: values_3x1,
logits_placeholder: values_2x1
})
def test_name(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
name='foo')
self.assertEqual('foo', head.name)
def test_predict(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
self.assertEqual(1, head.logits_dimension)
# Create estimator spec.
logits = [[0.3], [-0.4]]
expected_logistics = [[0.574443], [0.401312]]
expected_probabilities = [[0.425557, 0.574443], [0.598688, 0.401312]]
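    # (logistic = sigmoid(logit), probabilities = [1 - logistic, logistic],
    # and class id 1 is predicted when the logistic value exceeds 0.5)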
expected_class_ids = [[1], [0]]
expected_classes = [[b'1'], [b'0']]
expected_export_classes = [[b'0', b'1']] * 2
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
# Assert spec contains expected tensors.
self.assertIsNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNone(spec.train_op)
self.assertItemsEqual(('classification', 'regression', 'predict',
_DEFAULT_SERVING_KEY), spec.export_outputs.keys())
_assert_no_hooks(self, spec)
# Assert predictions.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(logits,
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllClose(expected_logistics,
predictions[prediction_keys.PredictionKeys.LOGISTIC])
self.assertAllClose(
expected_probabilities,
predictions[prediction_keys.PredictionKeys.PROBABILITIES])
self.assertAllClose(expected_class_ids,
predictions[prediction_keys.PredictionKeys.CLASS_IDS])
self.assertAllEqual(expected_classes,
predictions[prediction_keys.PredictionKeys.CLASSES])
self.assertAllClose(
expected_probabilities,
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
self.assertAllEqual(
expected_export_classes,
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))
self.assertAllClose(expected_logistics,
sess.run(spec.export_outputs['regression'].value))
def test_predict_with_vocabulary_list(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
label_vocabulary=['aang', 'iroh'])
logits = [[1.], [0.]]
expected_classes = [[b'iroh'], [b'aang']]
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertAllEqual(
expected_classes,
sess.run(spec.predictions[prediction_keys.PredictionKeys.CLASSES]))
def test_eval_create_loss(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.int32)}
# loss = cross_entropy(labels, logits) = [0, 41].
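    # (sigmoid cross entropy with label 1 is log(1 + exp(-logit)):
    # ~= 0 for logit 45 and ~= 41 for logit -41)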
expected_training_loss = 41.
# Create loss.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
def test_eval_labels_none(self):
"""Tests that error is raised when labels is None."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
with self.assertRaisesRegexp(
ValueError, r'You must provide a labels Tensor\. Given: None\.'):
head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=np.array(((45,), (-41,),), dtype=np.float32),
labels=None)
def test_eval(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.int32)}
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
# loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41
# loss_mean = loss/2 = 41./2 = 20.5
keys.LOSS_MEAN: 20.5,
keys.ACCURACY: 1./2,
keys.PREDICTION_MEAN: 1./2,
keys.LABEL_MEAN: 2./2,
keys.ACCURACY_BASELINE: 2./2,
keys.AUC: 0.,
keys.AUC_PR: 1.,
}
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(41., loss)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops})
def test_eval_metric_ops_with_head_name(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
name='some_binary_head')
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.int32)}
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
expected_metric_keys = [
'{}/some_binary_head'.format(metric_keys.MetricKeys.LOSS_MEAN),
'{}/some_binary_head'.format(metric_keys.MetricKeys.ACCURACY),
'{}/some_binary_head'.format(metric_keys.MetricKeys.PREDICTION_MEAN),
'{}/some_binary_head'.format(metric_keys.MetricKeys.LABEL_MEAN),
'{}/some_binary_head'.format(metric_keys.MetricKeys.ACCURACY_BASELINE),
'{}/some_binary_head'.format(metric_keys.MetricKeys.AUC),
'{}/some_binary_head'.format(metric_keys.MetricKeys.AUC_PR)
]
self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())
def test_eval_with_regularization_losses(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.int32)}
regularization_losses = [1.5, 0.5]
expected_regularization_loss = 2.
# unregularized_loss = sum(cross_entropy(labels, logits)) / batch_size
# = sum(0, 41) / 2 = 20.5
expected_unregularized_loss = 20.5
expected_regularized_loss = (
expected_unregularized_loss + expected_regularization_loss)
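# Added sketch (illustrative only): with SUM_OVER_BATCH_SIZE reduction the
# unregularized loss is mean([0, 41]) = 20.5, and the regularization term is
# the plain sum of the provided losses, so:
_regularized_loss_sketch = np.mean([0., 41.]) + np.sum([1.5, 0.5])  # ~22.5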
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels,
regularization_losses=regularization_losses)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_unregularized_loss,
keys.LOSS_REGULARIZATION: expected_regularization_loss,
keys.ACCURACY: 1./2,
keys.PREDICTION_MEAN: 1./2,
keys.LABEL_MEAN: 2./2,
keys.ACCURACY_BASELINE: 2./2,
keys.AUC: 0.,
keys.AUC_PR: 1.,
}
# Assert predictions, loss, and metrics.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_regularized_loss, loss)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops})
def test_eval_with_vocabulary_list_create_loss(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
label_vocabulary=['aang', 'iroh'])
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = [[b'iroh'], [b'iroh']]
features = {'x': np.array(((42,),), dtype=np.int32)}
# Create loss.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(41., training_loss.eval())
def test_eval_with_vocabulary_list(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
label_vocabulary=['aang', 'iroh'])
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = [[b'iroh'], [b'iroh']]
features = {'x': np.array(((42,),), dtype=np.int32)}
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
sess.run(update_ops)
self.assertAllClose(1. / 2,
value_ops[metric_keys.MetricKeys.ACCURACY].eval())
def test_eval_with_thresholds_create_loss(self):
thresholds = [0.25, 0.5, 0.75]
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
thresholds=thresholds)
logits = np.array(((-1,), (1,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.int32)}
# probabilities[i] = 1/(1 + exp(-logits[i])) =>
# probabilities = [1/(1 + exp(1)), 1/(1 + exp(-1))] = [0.269, 0.731]
# loss = -ln(probabilities[label[i]]) = [-ln(0.269), -ln(0.731)]
# = [1.31304389, 0.31334182]
# weighted sum loss = 1.62638571
expected_training_loss = 1.62638571
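# Added sketch (illustrative only): for label 1 the sigmoid cross-entropy
# reduces to softplus(-logit) = log(1 + exp(-logit)), which reproduces the
# per-example values above. `_logits_sketch` is an illustrative name.
_logits_sketch = np.array([-1., 1.])
_loss_sketch = np.log1p(np.exp(-_logits_sketch))
# _loss_sketch ~= [1.3133, 0.3133]; its sum is ~1.63, matching
# expected_training_loss within the test tolerance.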
# Create loss.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
def test_eval_with_thresholds(self):
thresholds = [0.25, 0.5, 0.75]
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
thresholds=thresholds)
logits = np.array(((-1,), (1,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.int32)}
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
# probabilities[i] = 1/(1 + exp(-logits[i])) =>
# probabilities = [1/(1 + exp(1)), 1/(1 + exp(-1))] = [0.269, 0.731]
# loss = -sum(ln(probabilities[label[i]])) = -ln(0.269) -ln(0.731)
# = 1.62652338
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: 1.62652338 / 2.,
keys.ACCURACY: 1./2,
keys.PREDICTION_MEAN: 1./2,
keys.LABEL_MEAN: 2./2,
keys.ACCURACY_BASELINE: 2./2,
keys.AUC: 0.,
keys.AUC_PR: 1.,
keys.ACCURACY_AT_THRESHOLD % thresholds[0]: 1.,
keys.PRECISION_AT_THRESHOLD % thresholds[0]: 1.,
keys.RECALL_AT_THRESHOLD % thresholds[0]: 1.,
keys.ACCURACY_AT_THRESHOLD % thresholds[1]: .5,
keys.PRECISION_AT_THRESHOLD % thresholds[1]: 1.,
keys.RECALL_AT_THRESHOLD % thresholds[1]: .5,
keys.ACCURACY_AT_THRESHOLD % thresholds[2]: 0.,
keys.PRECISION_AT_THRESHOLD % thresholds[2]: 0.,
keys.RECALL_AT_THRESHOLD % thresholds[2]: 0.,
}
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(1.62652338, loss)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval()
for k in value_ops},
atol=tol,
rtol=tol)
def test_train_create_loss(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.float64)
features = {'x': np.array(((42,),), dtype=np.float32)}
# unreduced_loss = cross_entropy(labels, logits) = [0, 41]
expected_unreduced_loss = [[0.], [41.]]
# weights default to 1.
expected_weights = 1.
# training loss = 1 * 0 + 1 * 41
expected_training_loss = 41.
# Create loss.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
self.assertAllClose(expected_weights, actual_weights)
def test_train_create_loss_loss_reduction(self):
"""Tests create_loss with loss_reduction."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.float64)
features = {'x': np.array(((42,),), dtype=np.float32)}
# unreduced_loss = cross_entropy(labels, logits) = [0, 41]
expected_unreduced_loss = [[0.], [41.]]
# weights default to 1.
expected_weights = 1.
# training loss = (1 * 0 + 1 * 41) / num_nonzero_weights
expected_training_loss = 41. / 2.
# Create loss.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
self.assertAllClose(expected_weights, actual_weights)
def test_eval_create_loss_loss_fn(self):
"""Tests head.create_loss for eval mode and custom loss_fn."""
loss = np.array([[1.], [2.]], dtype=np.float32)
logits_input = np.array([[-10.], [10.]], dtype=np.float32)
labels_input = np.array([[1], [0]], dtype=np.int64)
def _loss_fn(labels, logits):
check_labels = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(labels, labels_input)),
data=[labels])
check_logits = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(logits, logits_input)),
data=[logits])
with ops.control_dependencies([check_labels, check_logits]):
return constant_op.constant(loss)
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_fn=_loss_fn)
actual_training_loss = head.create_loss(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits_input,
labels=labels_input)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(np.sum(loss), actual_training_loss.eval())
def test_eval_create_loss_loss_fn_wrong_shape(self):
"""Tests custom loss_fn that returns Tensor of unexpected shape."""
loss = np.array([1., 2.], dtype=np.float32)
def _loss_fn(labels, logits):
del labels, logits # Unused
return constant_op.constant(loss)
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_fn=_loss_fn)
logits = np.array([[-10.], [10.]], dtype=np.float32)
labels = np.array([[1], [0]], dtype=np.int64)
actual_training_loss = head.create_loss(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[loss_fn must return Tensor of shape \[D0, D1, ... DN, 1\]\. \] '
r'\[logits_shape: \] \[2 1\] \[loss_shape: \] \[2\]'):
actual_training_loss.eval()
def test_train_labels_none(self):
"""Tests that error is raised when labels is None."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
def _no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
with self.assertRaisesRegexp(
ValueError, r'You must provide a labels Tensor\. Given: None\.'):
head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=np.array(((45,), (-41,),), dtype=np.float32),
labels=None,
train_op_fn=_no_op_train_fn)
def test_train(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.float64)
expected_train_result = b'my_train_op'
features = {'x': np.array(((42,),), dtype=np.float32)}
# loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41
expected_loss = 41.
def _train_op_fn(loss):
with ops.control_dependencies((check_ops.assert_equal(
math_ops.to_float(expected_loss), math_ops.to_float(loss),
name='assert_loss'),)):
return constant_op.constant(expected_train_result)
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
# loss_mean = loss/2 = 41/2 = 20.5
metric_keys.MetricKeys.LOSS_MEAN: 20.5,
}, summary_str)
def test_train_summaries_with_head_name(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
name='some_binary_head')
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.float64)
features = {'x': np.array(((42,),), dtype=np.float32)}
# loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41
expected_loss = 41.
def _train_op_fn(loss):
del loss
return control_flow_ops.no_op()
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
# Assert summaries.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
summary_str = sess.run(spec.scaffold.summary_op)
_assert_simple_summaries(
self,
{
'{}/some_binary_head'.format(metric_keys.MetricKeys.LOSS):
expected_loss,
# loss_mean = loss/2 = 41/2 = 20.5
'{}/some_binary_head'.format(metric_keys.MetricKeys.LOSS_MEAN):
20.5,
},
summary_str)
def test_train_with_regularization_losses(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
logits = np.array(((45,), (-41,),), dtype=np.float32)
labels = np.array(((1,), (1,),), dtype=np.float64)
expected_train_result = b'my_train_op'
features = {'x': np.array(((42,),), dtype=np.float32)}
regularization_losses = [1.5, 0.5]
expected_regularization_loss = 2.
# unregularized_loss = sum(cross_entropy(labels, logits)) / batch_size
# = sum(0, 41) / 2 = 20.5
# loss = unregularized_loss + regularization_loss = 22.5
expected_loss = 22.5
def _train_op_fn(loss):
with ops.control_dependencies((check_ops.assert_equal(
math_ops.to_float(expected_loss), math_ops.to_float(loss),
name='assert_loss'),)):
return constant_op.constant(expected_train_result)
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
regularization_losses=regularization_losses)
# Assert predictions, loss, train_op, and summaries.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_REGULARIZATION: (
expected_regularization_loss),
}, summary_str)
def test_float_labels_train_create_loss(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
logits = np.array([[0.5], [-0.3]], dtype=np.float32)
labels = np.array([[0.8], [0.4]], dtype=np.float32)
features = {'x': np.array([[42]], dtype=np.float32)}
# loss = cross_entropy(labels, logits)
# = -label[i]*log(sigmoid(logit[i])) -(1-label[i])*log(sigmoid(-logit[i]))
# = [-0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5)),
# -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))]
# = [0.57407698418, 0.67435524446]
# weighted sum loss = 0.57407698418 + 0.67435524446
expected_training_loss = 1.24843222864
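# Added sketch (illustrative only): the numerically stable identity
# max(x, 0) - x*y + log(1 + exp(-|x|)) for sigmoid cross-entropy with float
# labels reproduces the per-example values quoted above.
_x_sketch = np.array([0.5, -0.3])
_y_sketch = np.array([0.8, 0.4])
_xent_sketch = (np.maximum(_x_sketch, 0.) - _x_sketch * _y_sketch
+ np.log1p(np.exp(-np.abs(_x_sketch))))
# _xent_sketch ~= [0.574077, 0.674355]; its sum is ~1.248432.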
# Create loss.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
def test_float_labels_train(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
logits = np.array([[0.5], [-0.3]], dtype=np.float32)
labels = np.array([[0.8], [0.4]], dtype=np.float32)
expected_train_result = b'my_train_op'
features = {'x': np.array([[42]], dtype=np.float32)}
# loss = sum(cross_entropy(labels, logits))
# = sum(-label[i]*log(sigmoid(logit[i])) -(1-label[i])*log(sigmoid(-logit[i])))
# = -0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5))
# -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))
# = 1.2484322
expected_loss = 1.2484322
def _train_op_fn(loss):
with ops.control_dependencies((dnn_testing_utils.assert_close(
math_ops.to_float(expected_loss), math_ops.to_float(loss)),)):
return constant_op.constant(expected_train_result)
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
# Assert predictions, loss, train_op, and summaries.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAlmostEqual(expected_loss, loss, delta=1.e-5)
self.assertEqual(expected_train_result, train_result)
def test_float_labels_eval_create_loss(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
logits = np.array([[0.5], [-0.3]], dtype=np.float32)
labels = np.array([[0.8], [0.4]], dtype=np.float32)
features = {'x': np.array([[42]], dtype=np.float32)}
# loss = cross_entropy(labels, logits)
# = -label[i]*log(sigmoid(logit[i])) -(1-label[i])*log(sigmoid(-logit[i]))
# = [-0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5)),
# -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))]
# = [0.57407698418, 0.67435524446]
# weighted sum loss = 0.57407698418 + 0.67435524446
expected_training_loss = 1.24843222864
# Create loss.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
def test_float_labels_eval(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
logits = np.array([[0.5], [-0.3]], dtype=np.float32)
labels = np.array([[0.8], [0.4]], dtype=np.float32)
features = {'x': np.array([[42]], dtype=np.float32)}
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
# loss = sum(cross_entropy(labels, logits))
# = sum(-label[i]*log(sigmoid(logit[i])) -(1-label[i])*log(sigmoid(-logit[i])))
# = -0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5))
# -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))
# = 1.2484322
expected_loss = 1.2484322
# Assert loss.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAlmostEqual(expected_loss, loss, delta=1.e-5)
self.assertAlmostEqual(
expected_loss / 2., metrics[metric_keys.MetricKeys.LOSS_MEAN])
def test_weighted_multi_example_predict(self):
"""3 examples, 1 batch."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column='label_weights')
# Create estimator spec.
logits = np.array(((45,), (-41,), (44,)), dtype=np.int32)
spec = head.create_estimator_spec(
features={
'x': np.array(((42,), (43,), (44,)), dtype=np.int32),
'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),
},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
# Assert predictions, loss, and metrics.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
predictions = sess.run(spec.predictions)
self.assertAllClose(
logits.astype(np.float32),
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllClose(
_sigmoid(logits).astype(np.float32),
predictions[prediction_keys.PredictionKeys.LOGISTIC])
self.assertAllClose(
[[0., 1.], [1., 0.],
[0., 1.]], predictions[prediction_keys.PredictionKeys.PROBABILITIES])
self.assertAllClose([[1], [0], [1]],
predictions[prediction_keys.PredictionKeys.CLASS_IDS])
self.assertAllEqual([[b'1'], [b'0'], [b'1']],
predictions[prediction_keys.PredictionKeys.CLASSES])
def test_weighted_multi_example_eval(self):
"""3 examples, 1 batch."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column='label_weights')
# Create estimator spec.
logits = np.array(((45,), (-41,), (44,)), dtype=np.int32)
spec = head.create_estimator_spec(
features={
'x': np.array(((42,), (43,), (44,)), dtype=np.int32),
'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),
},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=np.array(((1,), (1,), (0,)), dtype=np.int32))
# label_mean = (1*1 + .1*1 + 1.5*0)/(1 + .1 + 1.5) = 1.1/2.6
# = .42307692307
expected_label_mean = .42307692307
keys = metric_keys.MetricKeys
expected_metrics = {
# losses = label_weights*cross_entropy(labels, logits)
# = (1*0 + .1*41 + 1.5*44) = (0, 4.1, 66)
# loss = sum(losses) = 0 + 4.1 + 66 = 70.1
# loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)
# = 70.1/2.6 = 26.9615384615
keys.LOSS_MEAN: 26.9615384615,
# accuracy = (1*1 + .1*0 + 1.5*0)/(1 + .1 + 1.5) = 1/2.6 = .38461538461
keys.ACCURACY: .38461538461,
# prediction_mean = (1*1 + .1*0 + 1.5*1)/(1 + .1 + 1.5) = 2.5/2.6
# = .96153846153
keys.PREDICTION_MEAN: .96153846153,
keys.LABEL_MEAN: expected_label_mean,
keys.ACCURACY_BASELINE: 1 - expected_label_mean,
keys.AUC: .45454565,
keys.AUC_PR: .6737757325172424,
}
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
# Assert predictions, loss, and metrics.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(70.1, loss)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops})
def test_train_one_dim_create_loss(self):
"""Tests create_loss with 1D labels and weights (shape [batch_size])."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column='label_weights')
# Create estimator spec.
logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)
labels_rank_1 = np.array((1., 1., 0.,))
weights_rank_1 = np.array(((1., .1, 1.5,)), dtype=np.float64)
features = {
'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),
'label_weights': weights_rank_1,
}
# unreduced_loss = cross_entropy(labels, logits) = [0, 41, 44]
expected_unreduced_loss = [[0.], [41.], [44.]]
# weights are reshaped to [3, 1] to match logits.
expected_weights = [[1.], [.1], [1.5]]
# training loss = 1 * 0 + .1 * 41 + 1.5 * 44
expected_training_loss = 70.1
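# Added sketch (illustrative only): reshaping the rank-1 weights to [3, 1]
# and multiplying by the per-example losses [0, 41, 44] gives the weighted
# sum above. Names prefixed with `_` are illustrative only.
_w_sketch = np.array([1., .1, 1.5]).reshape((3, 1))
_loss_sketch = np.array([[0.], [41.], [44.]])
# np.sum(_w_sketch * _loss_sketch) == 1*0 + .1*41 + 1.5*44 == 70.1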
# Create loss.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(),
rtol=1e-2, atol=1e-2)
self.assertAllClose(
expected_unreduced_loss, unreduced_loss.eval(),
rtol=1e-2, atol=1e-2)
self.assertAllClose(expected_weights, actual_weights.eval())
def test_train_one_dim(self):
"""Tests train with 1D labels and weights (shape [batch_size])."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column='label_weights')
# Create estimator spec.
logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)
labels_rank_1 = np.array((1., 1., 0.,))
weights_rank_1 = np.array(((1., .1, 1.5,)), dtype=np.float64)
self.assertEqual((3,), labels_rank_1.shape)
self.assertEqual((3,), weights_rank_1.shape)
features = {
'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),
'label_weights': weights_rank_1,
}
expected_train_result = b'my_train_op'
# losses = label_weights*cross_entropy(labels, logits)
# = (1*0 + .1*41 + 1.5*44) = (0, 4.1, 66)
# loss = sum(losses) = 0 + 4.1 + 66 = 70.1
expected_loss = 70.1
def _train_op_fn(loss):
with ops.control_dependencies((check_ops.assert_equal(
math_ops.to_float(expected_loss), math_ops.to_float(loss),
name='assert_loss'),)):
return constant_op.constant(expected_train_result)
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1,
train_op_fn=_train_op_fn)
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertIsNotNone(spec.train_op)
# Assert predictions, loss, and metrics.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((
spec.loss, spec.train_op, spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
# loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)
# = 70.1/2.6 = 26.9615384615
metric_keys.MetricKeys.LOSS_MEAN: 26.9615384615,
}, summary_str)
def test_weighted_multi_example_train(self):
"""3 examples, 1 batch."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column='label_weights')
# Create estimator spec.
logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)
expected_train_result = b'my_train_op'
# losses = label_weights*cross_entropy(labels, logits)
# = (1*0 + .1*41 + 1.5*44) = (0, 4.1, 66)
# loss = sum(losses) = 0 + 4.1 + 66 = 70.1
expected_loss = 70.1
def _train_op_fn(loss):
with ops.control_dependencies((check_ops.assert_equal(
math_ops.to_float(expected_loss), math_ops.to_float(loss),
name='assert_loss'),)):
return constant_op.constant(expected_train_result)
spec = head.create_estimator_spec(
features={
'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),
'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float64),
},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=np.array(((1.,), (1.,), (0.,))),
train_op_fn=_train_op_fn)
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertIsNotNone(spec.train_op)
# Assert predictions, loss, and metrics.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((
spec.loss, spec.train_op, spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
# loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)
# = 70.1/2.6 = 26.9615384615
metric_keys.MetricKeys.LOSS_MEAN: 26.9615384615,
}, summary_str)
def test_multi_dim_weighted_train_create_loss(self):
"""Logits and labels of shape [2, 2, 1], weights [2, 2]."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column='weights')
logits = np.array([[[10], [-10]], [[12], [-12]]], dtype=np.float32)
labels = np.array([[[0], [0]], [[1], [1]]], dtype=np.float64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# unreduced_loss = cross_entropy(labels, logits) = [[10, 0], [0, 12]].
expected_unreduced_loss = [[[10.], [0.]], [[0.], [12.]]]
# Weights are reshaped to [2, 2, 1] to match logits.
expected_weights = [[[1.], [1.5]], [[2.], [2.5]]]
# training_loss = 1*10 + 1.5*0 + 2*0 + 2.5*12 = 40
expected_training_loss = 40.
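# Added sketch (illustrative only): broadcasting the [2, 2] weights as
# [2, 2, 1] against the per-example losses reproduces the training loss.
_w_sketch = np.array([[1., 1.5], [2., 2.5]]).reshape((2, 2, 1))
_loss_sketch = np.array([[[10.], [0.]], [[0.], [12.]]])
# np.sum(_w_sketch * _loss_sketch) == 1*10 + 1.5*0 + 2*0 + 2.5*12 == 40.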
# Create loss.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
tol = 1e-2
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(),
rtol=tol, atol=tol)
self.assertAllClose(
expected_unreduced_loss, unreduced_loss.eval(),
rtol=tol, atol=tol)
self.assertAllClose(expected_weights, actual_weights.eval())
def test_multi_dim_weighted_train(self):
"""Logits and labels of shape [2, 2, 1], weights [2, 2]."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column='weights')
logits = np.array([[[10], [-10]], [[12], [-12]]], dtype=np.float32)
labels = np.array([[[0], [0]], [[1], [1]]], dtype=np.float64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# loss = cross_entropy(labels, logits) = [[10, 0], [0, 12]].
# weighted_sum_loss = 1*10 + 1.5*0 + 2*0 + 2.5*12 = 40
expected_loss = 40.
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=2)])
# Create estimator spec.
spec = head.create_estimator_spec(
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
train_result)
def test_multi_dim_train_weights_wrong_inner_dim(self):
"""Logits and labels of shape [2, 2, 1], weights [2, 1]."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column='weights')
logits = np.array([[[10], [-10]], [[12], [-12]]], dtype=np.float32)
labels = np.array([[[0], [0]], [[1], [1]]], dtype=np.float64)
weights = np.array([[1.], [2.]], dtype=np.float32)
def _no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[logits_shape: \] \[2 2 1\] \[weights_shape: \] \[2 1\]'):
spec.loss.eval()
def test_multi_dim_train_weights_wrong_outer_dim(self):
"""Logits and labels of shape [2, 2, 1], weights [2, 2, 2]."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column='weights')
logits = np.array([[[10], [-10]], [[12], [-12]]], dtype=np.float32)
labels = np.array([[[0], [0]], [[1], [1]]], dtype=np.float64)
weights_placeholder = array_ops.placeholder(dtype=dtypes.float32)
def _no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features={'weights': weights_placeholder},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[logits_shape: \]\s\[2 2 1\]\s\[weights_shape: \]\s\[2 2 2\]'):
spec.loss.eval({
weights_placeholder: np.array([[[1., 1.1], [1.5, 1.6]],
[[2., 2.1], [2.5, 2.6]]])})
def test_multi_dim_weighted_eval(self):
"""Logits and labels of shape [2, 2, 1], weights [2, 2]."""
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column='weights')
logits = np.array([[[10], [-10]], [[12], [-12]]], dtype=np.float32)
labels = np.array([[[0], [0]], [[1], [1]]], dtype=np.float64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# loss = cross_entropy(labels, logits) = [[10, 0], [0, 12]].
# weighted_sum_loss = 1*10 + 1.5*0 + 2*0 + 2.5*12 = 40
expected_loss = 40.
# Create estimator spec.
spec = head.create_estimator_spec(
features={'weights': weights},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss / np.sum(weights),
keys.ACCURACY: (1.*0. + 1.5*1. + 2.*1. + 2.5*0.) / np.sum(weights),
keys.PREDICTION_MEAN: (1.*1 + 1.5*0 + 2.*1 + 2.5*0) / np.sum(weights),
keys.LABEL_MEAN: (1.*0 + 1.5*0 + 2.*1 + 2.5*1) / np.sum(weights),
keys.ACCURACY_BASELINE: (1.*0 + 1.5*0 + 2.*1 + 2.5*1) / np.sum(weights),
# We cannot reliably calculate AUC with only 4 data points, but the
# values should not change because of backwards-compatibility.
keys.AUC: 0.5222,
keys.AUC_PR: 0.7341,
}
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol, atol=tol)
class RegressionHeadWithMeanSquaredErrorLossTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def test_invalid_label_dimension(self):
with self.assertRaisesRegexp(ValueError, r'Invalid label_dimension'):
head_lib._regression_head_with_mean_squared_error_loss(label_dimension=-1)
with self.assertRaisesRegexp(ValueError, r'Invalid label_dimension'):
head_lib._regression_head_with_mean_squared_error_loss(label_dimension=0)
def test_invalid_loss_reduction(self):
with self.assertRaisesRegexp(
ValueError, r'Invalid loss_reduction: invalid_loss_reduction'):
head_lib._regression_head_with_mean_squared_error_loss(
loss_reduction='invalid_loss_reduction')
with self.assertRaisesRegexp(
ValueError, r'Invalid loss_reduction: none'):
head_lib._regression_head_with_mean_squared_error_loss(
loss_reduction=losses.Reduction.NONE)
def test_loss_fn_arg_labels_missing(self):
def _loss_fn(logits):
del logits # Unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn must contain argument: labels\. '
r'Given arguments: \(\'logits\',\)'):
head_lib._regression_head_with_mean_squared_error_loss(loss_fn=_loss_fn)
def test_loss_fn_arg_logits_missing(self):
def _loss_fn(labels):
del labels # unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn must contain argument: logits\. '
r'Given arguments: \(\'labels\',\)'):
head_lib._regression_head_with_mean_squared_error_loss(loss_fn=_loss_fn)
def test_loss_fn_arg_features_ok(self):
def _loss_fn(labels, logits, features):
del labels, logits, features # Unused
head_lib._regression_head_with_mean_squared_error_loss(loss_fn=_loss_fn)
def test_loss_fn_arg_invalid(self):
def _loss_fn(labels, logits, name=None):
del labels, logits, name # Unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn has unexpected args: \[\'name\'\]'):
head_lib._regression_head_with_mean_squared_error_loss(loss_fn=_loss_fn)
def test_invalid_logits(self):
head = head_lib._regression_head_with_mean_squared_error_loss(
label_dimension=3)
self.assertEqual(3, head.logits_dimension)
logits_1d = np.array(((45.,), (41.,),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'logits shape'):
head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_1d)
# Dynamic shape.
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PREDICTIONS].eval({
logits_placeholder: logits_1d
})
def test_incompatible_labels_eval(self):
head = head_lib._regression_head_with_mean_squared_error_loss(
label_dimension=3)
self.assertEqual(3, head.logits_dimension)
values_3d = np.array(((45., 46., 47.), (41., 42., 43.),))
values_1d = np.array(((43.,), (44.,),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'Mismatched label shape'):
head.create_loss(
features={'x': values_1d},
mode=model_fn.ModeKeys.EVAL,
logits=values_3d,
labels=values_1d)
with self.assertRaisesRegexp(ValueError, 'logits shape'):
head.create_estimator_spec(
features={'x': values_3d}, labels=values_3d,
mode=model_fn.ModeKeys.EVAL, logits=values_1d, train_op_fn=None)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': values_1d},
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.loss.eval({
labels_placeholder: values_3d,
logits_placeholder: values_1d
})
training_loss = head.create_loss(
features={'x': values_1d},
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
with self.test_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 3\] \[labels_shape: \] \[2 1\]'):
training_loss.eval({
labels_placeholder: values_1d,
logits_placeholder: values_3d
})
def test_incompatible_labels_train(self):
head = head_lib._regression_head_with_mean_squared_error_loss(
label_dimension=3)
self.assertEqual(3, head.logits_dimension)
values_3d = np.array(((45., 46., 47.), (41., 42., 43.),))
values_1d = np.array(((43.,), (44.,),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'Mismatched label shape'):
head.create_loss(
features={'x': values_1d},
mode=model_fn.ModeKeys.TRAIN,
logits=values_3d,
labels=values_1d)
with self.assertRaisesRegexp(ValueError, 'logits shape'):
head.create_estimator_spec(
features={'x': values_3d},
mode=model_fn.ModeKeys.TRAIN,
logits=values_1d,
labels=values_3d,
train_op_fn=lambda x: x)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': values_1d},
mode=model_fn.ModeKeys.TRAIN,
logits=logits_placeholder,
labels=labels_placeholder,
train_op_fn=lambda x: x)
with self.test_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.loss.eval({
labels_placeholder: values_3d,
logits_placeholder: values_1d
})
training_loss = head.create_loss(
features={'x': values_1d},
mode=model_fn.ModeKeys.TRAIN,
logits=logits_placeholder,
labels=labels_placeholder)[0]
with self.test_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 3\] \[labels_shape: \] \[2 1\]'):
training_loss.eval({
labels_placeholder: values_1d,
logits_placeholder: values_3d
})
def test_name(self):
head = head_lib._regression_head_with_mean_squared_error_loss(
name='foo')
self.assertEqual('foo', head.name)
def test_predict(self):
head = head_lib._regression_head_with_mean_squared_error_loss()
self.assertEqual(1, head.logits_dimension)
# Create estimator spec.
logits = np.array(((45,), (41,),), dtype=np.int32)
spec = head.create_estimator_spec(
features={'x': np.array(((42.,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
# Assert spec contains expected tensors.
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertIsNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNone(spec.train_op)
default_serving_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
self.assertItemsEqual(
(default_serving_key, 'predict', 'regression'),
spec.export_outputs.keys())
_assert_no_hooks(self, spec)
# Assert predictions.
with self.test_session():
_initialize_variables(self, spec.scaffold)
self.assertAllClose(logits, spec.predictions[prediction_key].eval())
self.assertAllClose(
logits, spec.export_outputs[default_serving_key].value.eval())
self.assertAllClose(
logits, spec.export_outputs['regression'].value.eval())
self.assertAllClose(
logits, spec.export_outputs['predict'].outputs['predictions'].eval())
def test_predict_with_inverse_link_fn(self):
def _inverse_link_fn(logits):
return logits - 10.
head = head_lib._regression_head_with_mean_squared_error_loss(
inverse_link_fn=_inverse_link_fn)
# Create estimator spec.
logits = np.array(((45,), (41,),), dtype=np.int32)
expected_predictions = np.array(((35,), (31,),), dtype=np.int32)
spec = head.create_estimator_spec(
features={'x': np.array(((42.,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
# Assert spec contains expected tensors.
keys = prediction_keys.PredictionKeys
self.assertItemsEqual(
(keys.PREDICTIONS, keys.LOGITS), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[keys.PREDICTIONS].dtype)
self.assertEqual(dtypes.float32, spec.predictions[keys.LOGITS].dtype)
default_serving_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
self.assertItemsEqual(
(default_serving_key, 'predict', 'regression'),
spec.export_outputs.keys())
# Assert predictions.
with self.test_session():
_initialize_variables(self, spec.scaffold)
self.assertAllClose(
expected_predictions, spec.predictions[keys.PREDICTIONS].eval())
self.assertAllClose(logits, spec.predictions[keys.LOGITS].eval())
self.assertAllClose(
expected_predictions,
spec.export_outputs[default_serving_key].value.eval())
self.assertAllClose(
expected_predictions, spec.export_outputs['regression'].value.eval())
self.assertAllClose(
expected_predictions,
spec.export_outputs['predict'].outputs['predictions'].eval())
self.assertAllClose(
logits, spec.export_outputs['predict'].outputs['logits'].eval())
def test_eval_create_loss(self):
head = head_lib._regression_head_with_mean_squared_error_loss()
logits = np.array(((45,), (41,),), dtype=np.float32)
labels = np.array(((43,), (44,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.float32)}
# Create loss.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
# loss = [(43-45)^2, (44-41)^2] = [4, 9]
self.assertAllClose(13., training_loss.eval())
def test_eval_create_loss_loss_fn(self):
"""Tests head.create_loss for eval mode and custom loss_fn."""
loss = np.array([[0., 1.], [2., 3.]], dtype=np.float32)
logits_input = np.array([[-1., 1.], [-2., 2.]], dtype=np.float32)
labels_input = np.array([[1., 0.], [2., -1.]], dtype=np.float32)
def _loss_fn(labels, logits):
check_labels = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(labels, labels_input)),
data=[labels])
check_logits = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(logits, logits_input)),
data=[logits])
with ops.control_dependencies([check_labels, check_logits]):
return constant_op.constant(loss)
head = head_lib._regression_head_with_mean_squared_error_loss(
label_dimension=2, loss_fn=_loss_fn)
actual_training_loss = head.create_loss(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits_input,
labels=labels_input)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(np.sum(loss), actual_training_loss.eval())
def test_eval_create_loss_loss_fn_wrong_shape(self):
"""Tests custom loss_fn that returns Tensor of unexpected shape."""
loss = np.array([[1.], [2.]], dtype=np.float32)
def _loss_fn(labels, logits):
del labels, logits # Unused
return constant_op.constant(loss)
head = head_lib._regression_head_with_mean_squared_error_loss(
label_dimension=2, loss_fn=_loss_fn)
logits = np.array([[-1., 1.], [-2., 2.]], dtype=np.float32)
labels = np.array([[1., 0.], [2., -1.]], dtype=np.float32)
actual_training_loss = head.create_loss(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[loss_fn must return Tensor of shape \[D0, D1, ... DN, 2\]\. \] '
r'\[logits_shape: \] \[2 2\] \[loss_shape: \] \[2 1\]'):
actual_training_loss.eval()
def test_eval_labels_none(self):
"""Tests that error is raised when labels is None."""
head = head_lib._regression_head_with_mean_squared_error_loss()
with self.assertRaisesRegexp(
ValueError, r'You must provide a labels Tensor\. Given: None\.'):
head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=np.array(((45,), (41,),), dtype=np.float32),
labels=None)
def test_eval(self):
head = head_lib._regression_head_with_mean_squared_error_loss()
self.assertEqual(1, head.logits_dimension)
logits = np.array(((45,), (41,),), dtype=np.float32)
labels = np.array(((43,), (44,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.float32)}
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
# Assert spec contains expected tensors.
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
metric_keys.MetricKeys.LOSS_MEAN]
predictions, loss, loss_mean = sess.run((
spec.predictions[prediction_key], spec.loss, loss_mean_update_op))
self.assertAllClose(logits, predictions)
# loss = (43-45)^2 + (44-41)^2 = 4+9 = 13
self.assertAllClose(13., loss)
# loss_mean = loss/2 = 13/2 = 6.5
expected_loss_mean = 6.5
# Check results of both update (in `loss_mean`) and value ops.
self.assertAllClose(expected_loss_mean, loss_mean)
self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())
def test_eval_metric_ops_with_head_name_for_regression(self):
head = head_lib._regression_head_with_mean_squared_error_loss(
name='some_regression_head')
logits = np.array(((1,), (9,)), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
expected_metric_keys = [
'{}/some_regression_head'.format(metric_keys.MetricKeys.LOSS_MEAN),
]
self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())
def test_eval_with_regularization_losses(self):
head = head_lib._regression_head_with_mean_squared_error_loss(
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
self.assertEqual(1, head.logits_dimension)
logits = np.array(((45,), (41,),), dtype=np.float32)
labels = np.array(((43,), (44,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.float32)}
regularization_losses = [1.5, 0.5]
expected_regularization_loss = 2.
# unregularized_loss = ((43-45)^2 + (44-41)^2) / batch_size
# = (4 + 9) / 2 = 6.5
expected_unregularized_loss = 6.5
expected_regularized_loss = (
expected_unregularized_loss + expected_regularization_loss)
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels,
regularization_losses=regularization_losses)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_unregularized_loss,
keys.LOSS_REGULARIZATION: expected_regularization_loss,
}
# Assert predictions, loss, and metrics.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
predictions, loss, metrics = sess.run((
spec.predictions[prediction_key], spec.loss, update_ops))
self.assertAllClose(logits, predictions)
self.assertAllClose(expected_regularized_loss, loss)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops})
def test_train_create_loss(self):
head = head_lib._regression_head_with_mean_squared_error_loss()
logits = np.array(((45,), (41,),), dtype=np.float32)
labels = np.array(((43,), (44,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.float32)}
# unreduced_loss = [(43-45)^2, (44-41)^2] = [4, 9]
expected_unreduced_loss = [[4.], [9.]]
# weights default to 1.
expected_weights = 1
# training_loss = 1 * 4 + 1 * 9 = 13
expected_training_loss = 13.
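# Added sketch (illustrative only): the per-example squared errors follow
# directly from (label - logit)^2; with unit weights the summed training
# loss is 4 + 9 = 13.
_sq_err_sketch = (np.array([43., 44.]) - np.array([45., 41.])) ** 2  # [4., 9.]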
# Create loss.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
self.assertAllClose(expected_weights, actual_weights)
def test_train_create_loss_loss_reduction(self):
"""Tests create_loss with loss_reduction."""
head = head_lib._regression_head_with_mean_squared_error_loss(
loss_reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
logits = np.array(((45,), (41,),), dtype=np.float32)
labels = np.array(((43,), (44,),), dtype=np.int32)
features = {'x': np.array(((42,),), dtype=np.float32)}
# unreduced_loss = [(43-45)^2, (44-41)^2] = [4, 9]
expected_unreduced_loss = [[4.], [9.]]
# weights default to 1.
expected_weights = 1
# training_loss = (1 * 4 + 1 * 9) / num_nonzero_weights
expected_training_loss = 13. / 2.
# Create loss.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
self.assertAllClose(expected_weights, actual_weights)
def test_train_labels_none(self):
"""Tests that error is raised when labels is None."""
head = head_lib._regression_head_with_mean_squared_error_loss()
def _no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
with self.assertRaisesRegexp(
ValueError, r'You must provide a labels Tensor\. Given: None\.'):
head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=np.array(((45,), (41,),), dtype=np.float32),
labels=None,
train_op_fn=_no_op_train_fn)
def test_train(self):
head = head_lib._regression_head_with_mean_squared_error_loss()
self.assertEqual(1, head.logits_dimension)
# Create estimator spec.
logits = np.array(((45,), (41,),), dtype=np.float32)
labels = np.array(((43.,), (44.,),), dtype=np.float64)
expected_train_result = b'my_train_op'
features = {'x': np.array(((42.,),), dtype=np.float32)}
# loss = (43-45)^2 + (44-41)^2 = 4 + 9 = 13
expected_loss = 13
def _train_op_fn(loss):
with ops.control_dependencies((check_ops.assert_equal(
math_ops.to_float(expected_loss), math_ops.to_float(loss),
name='assert_loss'),)):
return constant_op.constant(expected_train_result)
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
# Assert spec contains expected tensors.
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
spec.predictions[prediction_key], spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(logits, predictions)
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
# loss_mean = loss/2 = 13/2 = 6.5
metric_keys.MetricKeys.LOSS_MEAN: 6.5,
}, summary_str)
def test_train_summaries_with_head_name(self):
head = head_lib._regression_head_with_mean_squared_error_loss(
name='some_regression_head')
self.assertEqual(1, head.logits_dimension)
# Create estimator spec.
logits = np.array(((45,), (41,),), dtype=np.float32)
labels = np.array(((43.,), (44.,),), dtype=np.float64)
features = {'x': np.array(((42.,),), dtype=np.float32)}
# loss = (43-45)^2 + (44-41)^2 = 4 + 9 = 13
expected_loss = 13
def _train_op_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
# Assert summaries.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
summary_str = sess.run(spec.scaffold.summary_op)
_assert_simple_summaries(
self,
{
'{}/some_regression_head'.format(metric_keys.MetricKeys.LOSS):
expected_loss,
# loss_mean = loss/2 = 13/2 = 6.5
'{}/some_regression_head'
.format(metric_keys.MetricKeys.LOSS_MEAN):
6.5,
},
summary_str)
def test_train_with_regularization_losses(self):
head = head_lib._regression_head_with_mean_squared_error_loss(
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
self.assertEqual(1, head.logits_dimension)
# Create estimator spec.
logits = np.array(((45,), (41,),), dtype=np.float32)
labels = np.array(((43.,), (44.,),), dtype=np.float64)
expected_train_result = b'my_train_op'
features = {'x': np.array(((42.,),), dtype=np.float32)}
regularization_losses = [1.5, 0.5]
expected_regularization_loss = 2.
# unregularized_loss = ((43-45)^2 + (44-41)^2) / batch_size
# = (4 + 9) / 2 = 6.5
# loss = unregularized_loss + regularization_loss = 8.5
expected_loss = 8.5
def _train_op_fn(loss):
with ops.control_dependencies((check_ops.assert_equal(
math_ops.to_float(expected_loss), math_ops.to_float(loss),
name='assert_loss'),)):
return constant_op.constant(expected_train_result)
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
regularization_losses=regularization_losses)
# Assert predictions, loss, train_op, and summaries.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
predictions, loss, train_result, summary_str = sess.run((
spec.predictions[prediction_key], spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(logits, predictions)
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_REGULARIZATION: (
expected_regularization_loss),
}, summary_str)
def test_weighted_multi_example_eval(self):
"""1d label, 3 examples, 1 batch."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights')
self.assertEqual(1, head.logits_dimension)
# Create estimator spec.
logits = np.array(((45,), (41,), (44,)), dtype=np.int32)
spec = head.create_estimator_spec(
features={
'x': np.array(((42,), (43,), (44,)), dtype=np.int32),
'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),
},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=np.array(((35,), (42,), (45,)), dtype=np.int32))
# Assert spec contains expected tensors.
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
metric_keys.MetricKeys.LOSS_MEAN]
predictions, loss, loss_mean = sess.run((
spec.predictions[prediction_key], spec.loss, loss_mean_update_op))
self.assertAllClose(logits, predictions)
# loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
self.assertAllClose(101.6, loss)
# loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231
expected_loss_mean = 39.0769231
# Check results of both update (in `loss_mean`) and value ops.
self.assertAllClose(expected_loss_mean, loss_mean)
self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())
def test_weight_with_numeric_column(self):
"""1d label, 3 examples, 1 batch."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column=feature_column_lib.numeric_column(
'label_weights', normalizer_fn=lambda x: x + 1.))
# Create estimator spec.
logits = np.array(((45,), (41,), (44,)), dtype=np.int32)
spec = head.create_estimator_spec(
features={
'x':
np.array(((42,), (43,), (44,)), dtype=np.int32),
'label_weights':
np.array(((0.,), (-0.9,), (0.5,)), dtype=np.float32),
},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=np.array(((35,), (42,), (45,)), dtype=np.int32))
# Assert loss.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
loss = sess.run(spec.loss)
# loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
self.assertAllClose(101.6, loss)
def test_weighted_multi_example_train(self):
"""1d label, 3 examples, 1 batch."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights')
self.assertEqual(1, head.logits_dimension)
# Create estimator spec.
logits = np.array(((45,), (41,), (44,)), dtype=np.float32)
expected_train_result = b'my_train_op'
# loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
expected_loss = 101.6
def _train_op_fn(loss):
with ops.control_dependencies((check_ops.assert_equal(
math_ops.to_float(expected_loss), math_ops.to_float(loss),
name='assert_loss'),)):
return constant_op.constant(expected_train_result)
spec = head.create_estimator_spec(
features={
'x': np.array(((42,), (43,), (44,)), dtype=np.float32),
'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float64),
},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=np.array(((35.,), (42.,), (45.,)), dtype=np.float32),
train_op_fn=_train_op_fn)
# Assert spec contains expected tensors.
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
spec.predictions[prediction_key], spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(logits, predictions)
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
# loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231
metric_keys.MetricKeys.LOSS_MEAN: 39.0769231,
}, summary_str)
def test_train_one_dim_create_loss(self):
"""Tests create_loss with 1D labels and weights (shape [batch_size])."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights')
logits = np.array(((45,), (41,), (44,)), dtype=np.float32)
x_feature_rank_1 = np.array((42., 43., 44.,), dtype=np.float32)
weight_rank_1 = np.array((1., .1, 1.5,), dtype=np.float64)
labels_rank_1 = np.array((35., 42., 45.,))
# unreduced_loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].
expected_unreduced_loss = [[100.], [1.], [1.]]
# weights are reshaped to [3, 1] to match logits.
expected_weights = [[1.], [.1], [1.5]]
# training_loss = 100 * 1 + 1 * .1 + 1.5 * 1 = 101.6
expected_training_loss = 101.6
features = {'x': x_feature_rank_1, 'label_weights': weight_rank_1}
# Create loss.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
self.assertAllClose(expected_weights, actual_weights.eval())
def test_train_one_dim(self):
"""Tests train with 1D labels and weights (shape [batch_size])."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights')
self.assertEqual(1, head.logits_dimension)
# Create estimator spec.
logits = np.array(((45,), (41,), (44,)), dtype=np.float32)
expected_train_result = b'my_train_op'
# loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
expected_loss = 101.6
def _train_op_fn(loss):
with ops.control_dependencies((check_ops.assert_equal(
math_ops.to_float(expected_loss), math_ops.to_float(loss),
name='assert_loss'),)):
return constant_op.constant(expected_train_result)
x_feature_rank_1 = np.array((42., 43., 44.,), dtype=np.float32)
weight_rank_1 = np.array((1., .1, 1.5,), dtype=np.float64)
labels_rank_1 = np.array((35., 42., 45.,))
features = {'x': x_feature_rank_1, 'label_weights': weight_rank_1}
self.assertEqual((3,), x_feature_rank_1.shape)
self.assertEqual((3,), weight_rank_1.shape)
self.assertEqual((3,), labels_rank_1.shape)
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1,
train_op_fn=_train_op_fn)
# Assert spec contains expected tensors.
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
spec.predictions[prediction_key], spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(logits, predictions)
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
# loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231
metric_keys.MetricKeys.LOSS_MEAN: 39.0769231,
}, summary_str)
def test_weighted_multi_value_eval_create_loss(self):
"""3d label, 1 example, 1 batch."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights', label_dimension=3)
logits = np.array(((45., 41., 44.),))
labels = np.array(((35., 42., 45.),))
features = {
'x': np.array(((42., 43., 44.),)),
'label_weights': np.array(((1., .1, 1.5),))
}
# Create loss.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
# loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].
# weighted sum loss = 1 * 100 + .1 * 1 + 1.5 * 1 = 101.6
self.assertAllClose(101.6, training_loss.eval())
def test_weighted_multi_value_eval(self):
"""3d label, 1 example, 1 batch."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights', label_dimension=3)
self.assertEqual(3, head.logits_dimension)
logits = np.array(((45., 41., 44.),))
labels = np.array(((35., 42., 45.),))
features = {
'x': np.array(((42., 43., 44.),)),
'label_weights': np.array(((1., .1, 1.5),))
}
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
# Assert spec contains expected tensors.
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
metric_keys.MetricKeys.LOSS_MEAN]
predictions, loss, loss_mean = sess.run((
spec.predictions[prediction_key], spec.loss, loss_mean_update_op))
self.assertAllClose(logits, predictions)
# loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
self.assertAllClose(101.6, loss)
# loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
expected_loss_mean = 39.076923
# Check results of both update (in `loss_mean`) and value ops.
self.assertAllClose(expected_loss_mean, loss_mean)
self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())
def test_weighted_multi_value_train_create_loss(self):
"""3d label, 1 example, 1 batch."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights', label_dimension=3)
logits = np.array(((45., 41., 44.),))
labels = np.array(((35., 42., 45.),))
features = {
'x': np.array(((42., 43., 44.),)),
'label_weights': np.array(((1., .1, 1.5),))
}
# Create loss.
training_loss = head.create_loss(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
# loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].
# weighted sum loss = 1 * 100 + .1 * 1 + 1.5 * 1 = 101.6
self.assertAllClose(101.6, training_loss.eval())
def test_weighted_multi_value_train(self):
"""3d label, 1 example, 1 batch."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights', label_dimension=3)
self.assertEqual(3, head.logits_dimension)
logits = np.array(((45., 41., 44.),))
labels = np.array(((35., 42., 45.),))
expected_train_result = b'my_train_op'
# loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
expected_loss = 101.6
def _train_op_fn(loss):
with ops.control_dependencies((check_ops.assert_equal(
math_ops.to_float(expected_loss), math_ops.to_float(loss),
name='assert_loss'),)):
return constant_op.constant(expected_train_result)
features = {
'x': np.array(((42., 43., 44.),)),
'label_weights': np.array(((1., .1, 1.5),)),
}
# Create estimator spec.
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
# Assert spec contains expected tensors.
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Evaluate predictions, loss, train_op, and summaries.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
spec.predictions[prediction_key], spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(logits, predictions)
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
# loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
metric_keys.MetricKeys.LOSS_MEAN: 39.076923,
}, summary_str)
def test_weighted_multi_batch_eval(self):
"""1d label, 1 example, 3 batches."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights')
self.assertEqual(1, head.logits_dimension)
# Create estimator spec.
logits = np.array(((45.,), (41.,), (44.,)))
input_fn = numpy_io.numpy_input_fn(
x={
'x': np.array(((42.,), (43.,), (44.,))),
'label_weights': np.array(((1.,), (.1,), (1.5,))),
# 'logits' is not a feature, but we use `numpy_input_fn` to make a
# batched version of it, and pop it off before passing to
# `create_estimator_spec`.
'logits': logits,
},
y=np.array(((35.,), (42.,), (45.,))),
batch_size=1,
num_epochs=1,
shuffle=False)
batched_features, batched_labels = input_fn()
batched_logits = batched_features.pop('logits')
spec = head.create_estimator_spec(
features=batched_features,
mode=model_fn.ModeKeys.EVAL,
logits=batched_logits,
labels=batched_labels,
train_op_fn=None)
# losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]
# loss = sum(losses) = 100+.1+1.5 = 101.6
# loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
expected_metrics = {metric_keys.MetricKeys.LOSS_MEAN: 39.076923}
# Assert spec contains expected tensors.
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
_assert_no_hooks(self, spec)
with self.test_session() as sess:
# Finalize graph and initialize variables.
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
queue_runner_impl.start_queue_runners()
# Run tensors for `steps` steps.
steps = len(logits)
results = tuple([
sess.run((
spec.loss,
# The `[1]` gives us the metric update op.
{k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
)) for _ in range(steps)
])
# Assert losses and metrics.
self.assertAllClose((100, .1, 1.5), [r[0] for r in results])
# For metrics, check results of both update (in `results`) and value ops.
# Note: we only check the result of the last step for streaming metrics.
self.assertAllClose(expected_metrics, results[steps - 1][1])
self.assertAllClose(expected_metrics, {
k: spec.eval_metric_ops[k][0].eval() for k in spec.eval_metric_ops
})
def test_weighted_multi_batch_train(self):
"""1d label, 1 example, 3 batches."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights')
self.assertEqual(1, head.logits_dimension)
# Create estimator spec.
logits = np.array(((45.,), (41.,), (44.,)))
input_fn = numpy_io.numpy_input_fn(
x={
'x': np.array(((42.,), (43.,), (44.,))),
'label_weights': np.array(((1.,), (.1,), (1.5,))),
# 'logits' is not a feature, but we use `numpy_input_fn` to make a
# batched version of it, and pop it off before passing to
# `create_estimator_spec`.
'logits': logits,
},
y=np.array(((35.,), (42.,), (45.,))),
batch_size=1,
num_epochs=1,
shuffle=False)
batched_features, batched_labels = input_fn()
batched_logits = batched_features.pop('logits')
spec = head.create_estimator_spec(
features=batched_features,
mode=model_fn.ModeKeys.TRAIN,
logits=batched_logits,
labels=batched_labels,
train_op_fn=lambda loss: loss * -7.)
# Assert spec contains expected tensors.
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertIsNotNone(spec.train_op)
with self.test_session() as sess:
# Finalize graph and initialize variables.
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
queue_runner_impl.start_queue_runners()
results = tuple([
sess.run((spec.loss, spec.train_op)) for _ in range(len(logits))
])
# losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]
expected_losses = np.array((100, .1, 1.5))
self.assertAllClose(expected_losses, [r[0] for r in results])
self.assertAllClose(expected_losses * -7., [r[1] for r in results])
def test_multi_dim_weighted_train_create_loss(self):
"""Logits, labels of shape [2, 2, 3], weight shape [2, 2]."""
label_dimension = 3
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights', label_dimension=label_dimension)
logits = np.array([[[00., 01., 02.], [10., 11., 12.]],
[[20., 21., 22.], [30., 31., 32.]]])
labels = np.array([[[01., 02., 03.], [12., 13., 14.]],
[[23., 24., 25.], [34., 35., 36.]]])
weights = np.array([[1., 1.5], [2., 2.5]])
expected_unreduced_loss = [[[1., 1., 1.], [4., 4., 4.]],
[[9., 9., 9.], [16., 16., 16.]]]
expected_training_loss = np.sum(
np.array([[[1. * x for x in [1., 1., 1.]],
[1.5 * x for x in [4., 4., 4.]]],
[[2. * x for x in [9., 9., 9.]],
[2.5 * x for x in [16., 16., 16.]]]]))
# Weights are expanded to [2, 2, 1] to match logits.
expected_weights = [[[1.], [1.5]], [[2.], [2.5]]]
# Create loss.
training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
features={'label_weights': weights},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
self.assertAllClose(expected_weights, actual_weights.eval())
def test_multi_dim_weighted_train(self):
"""Logits, labels of shape [2, 2, 3], weight shape [2, 2]."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights', label_dimension=3)
logits = np.array([[[00., 01., 02.], [10., 11., 12.]],
[[20., 21., 22.], [30., 31., 32.]]])
labels = np.array([[[01., 02., 03.], [12., 13., 14.]],
[[23., 24., 25.], [34., 35., 36.]]])
expected_train_result = b'my_train_op'
features = {
'label_weights': np.array([[1., 1.5], [2., 2.5]]),
}
# loss = 1*3*1^2 + 1.5*3*2^2 + 2*3*3^2 +2.5*3*4^2 = 195
expected_loss = 195.
# Create estimator spec.
def _train_op_fn(loss):
with ops.control_dependencies((check_ops.assert_equal(
math_ops.to_float(expected_loss), math_ops.to_float(loss),
name='assert_loss'),)):
return constant_op.constant(expected_train_result)
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_loss, spec.loss.eval())
def test_multi_dim_train_weights_wrong_inner_dim(self):
"""Logits, labels of shape [2, 2, 3], weight shape [2, 1]."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights', label_dimension=3)
logits = np.array([[[00., 01., 02.], [10., 11., 12.]],
[[20., 21., 22.], [30., 31., 32.]]])
labels = np.array([[[01., 02., 03.], [12., 13., 14.]],
[[23., 24., 25.], [34., 35., 36.]]])
features = {
'label_weights': np.array([[1.], [2]]),
}
def _no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[logits_shape: \] \[2 2 3\] \[weights_shape: \] \[2 1\]'):
spec.loss.eval()
def test_multi_dim_train_weights_wrong_outer_dim(self):
"""Logits, labels of shape [2, 2, 3], weight shape [2, 2, 2]."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column='label_weights', label_dimension=3)
logits = np.array([[[00., 01., 02.], [10., 11., 12.]],
[[20., 21., 22.], [30., 31., 32.]]])
labels = np.array([[[01., 02., 03.], [12., 13., 14.]],
[[23., 24., 25.], [34., 35., 36.]]])
weights_placeholder = array_ops.placeholder(dtype=dtypes.float32)
features = {
'label_weights': weights_placeholder,
}
def _no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
with self.test_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[logits_shape: \]\s\[2 2 3\]\s\[weights_shape: \]\s\[2 2 2\]'):
spec.loss.eval({
weights_placeholder: np.array([[[1., 1.1], [1.5, 1.6]],
[[2., 2.1], [2.5, 2.6]]])})
if __name__ == '__main__':
test.main()
|
py | b407595b38eca489199d10c5ec16d5d0bc2bc331 | # -*- coding: utf-8 -*-
from guietta import QFileDialog
# getOpenFileName returns a (filename, selected_filter) tuple under
# PySide2/PyQt5, so unpack it; the fourth argument is the name filter.
filename, _ = QFileDialog.getOpenFileName(None, "Open File", "/home",
                                          "Images (*.png *.xpm *.jpg)")
print(filename)
|
py | b40759caa0df5efaef5b86c141a1841dc4c30a79 | import mwbase
import requests
wb_doc = requests.get(
"https://wikidata.org/wiki/Special:EntityData/Q42.json").json()
entity = mwbase.Entity.from_json(wb_doc['entities']['Q42'])
entity.labels['en']
entity.properties.keys()
entity.sitelinks.keys()
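# Q42 is the item for Douglas Adams, so (subject to the live data) the
# accessors above yield roughly:
#   entity.labels['en']       -> 'Douglas Adams'
#   entity.properties.keys()  -> dict_keys(['P31', 'P21', ...])
#   entity.sitelinks.keys()   -> dict_keys(['enwiki', 'dewiki', ...])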
|
py | b4075a349631e1c6586ea34cef39cd0b5a3936c0 | """
Django settings for printer project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.utils.translation import ugettext_lazy as _
DEFAULT_CHARTSET = 'utf-8'
# Directories
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"),)
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'assets')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'assets'),)
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
# Site
SITE_NAME = "3DRPP"
DOMAIN = 'http://localhost/' # Must end with a slash! /!\
SITE_URL_PREFIX = '' # Empty or your-prefix/ <- Must end with a slash /!\
SITE_URL = DOMAIN + SITE_URL_PREFIX
STATIC_URL = '/' + SITE_URL_PREFIX + 'assets/'
MEDIA_URL = '/' + SITE_URL_PREFIX + 'media/'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5dxu%_2qv*nvhal*oa!b=qr-x94^26ax2y@t$aukemdkve^)4r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
AUTH_USER_MODEL = 'printer.User'
PASSWORD_HASHERS = ('django.contrib.auth.hashers.PBKDF2PasswordHasher',)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'printer'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'printer.urls'
WSGI_APPLICATION = 'printer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# I18N
LANGUAGE_CODE = 'fr-fr'
TIME_ZONE = 'Europe/Paris'
LANGUAGES = (
('fr', _('French')),
)
USE_I18N = False
USE_L10N = True
USE_TZ = False
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
'templates'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'printer.context_processors.site_infos'
],
},
},
]
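# Raspberry Pi GPIO map (physical pin numbering). Each pin is either None
# (unused) or a dict describing the attached component: 'target' (name),
# 'mode' ('in'/'out'), 'default' (initial state) and, for inputs, an
# 'events' placeholder.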
GPIO_pins = {
7: None,
8: None,
10: None,
11: None,
12: None,
13: None,
15: None,
16: None,
18: None,
19: None,
21: None,
22: None,
23: None,
24: None,
26: None,
29: None,
31: {
'target': 'X-axis-sensor',
'mode': 'in',
'default': False,
'events': None
},
32: {
'target': 'Y-axis-sensor',
'mode': 'in',
'default': False,
'events': None
},
33: {
'target': 'Z-axis-sensor',
'mode': 'in',
'default': False,
'events': None
},
35: {
'target': 'ventilation',
'mode': 'out',
'default': False
},
36: {
'target': 'beeper',
'mode': 'out',
'default': False
},
37: {
'target': 'nozzle-heating',
'mode': 'out',
'default': False
},
38: {
'target': 'heating-bed',
'mode': 'out',
'default': False
},
40: {
'target': 'alimentation',
'mode': 'out',
'default': False
}
}
live_stream = {
'activated': True,
'url': 'http://192.168.1.22:8080/stream/video.mjpeg'
}
printer = {
'dimensions': {
'X-axis': 300.00, #mm
'Y-axis': 300.00, #mm
'Z-axis': 300.00 #mm
},
'nozzle': {
'temperature-sensor': {
'/dev': 'TODO'
}
},
'heating-bed': {
'temperature-sensor': {
'/dev': 'TODO'
}
},
'motor-hats': {
'below': {
'addr': 0x60,
'freq': 1600,
'm1-m2': {
'name': 'X-axis',
'step': 1.80, #degrees
'speed': 30 #rpm
},
'm3-m4': {
'name': 'Y-axis',
'step': 1.80, #degrees
'speed': 30 #rpm
}
},
'above': {
'addr': 0x61,
'freq': 1600,
'm1-m2': {
'name': 'Z-axis',
'step': 1.80, #degrees
'speed': 30 #rpm
},
'm3-m4': {
'name': 'nozzle',
'step': 1.80, #degrees
'speed': 30 #rpm
}
}
}
} |
py | b4075ac221e2eb21f1a4153a4106dc90992d2400 | name = 'cppmm'
version = '0.2.0'
def commands():
env.PATH.append('{root}/bin')
|
py | b4075ad8282ac47358b5d1096756a960da331bd1 | """
Kernel of empymod, calculates the wavenumber-domain electromagnetic
response. Plus analytical full- and half-space solutions.
The functions :func:`wavenumber`, :func:`angle_factor`, :func:`fullspace`,
:func:`greenfct`, :func:`reflections`, and :func:`fields` are based on source
files (specified in each function) from the source code distributed with
[HuTS15]_, which can be found at `software.seg.org/2015/0001
<https://software.seg.org/2015/0001>`_. These functions are (c) 2015 by
Hunziker et al. and the Society of Exploration Geophysicists,
https://software.seg.org/disclaimer.txt. Please read the NOTICE-file in the
root directory for more information regarding the involved licenses.
"""
# Copyright 2016-2022 The emsig community.
#
# This file is part of empymod.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
import numba as nb
from scipy import special # Only used for halfspace solution
__all__ = ['wavenumber', 'angle_factor', 'fullspace', 'greenfct',
'reflections', 'fields', 'halfspace']
# Numba-settings
_numba_setting = {'nogil': True, 'cache': True}
_numba_with_fm = {'fastmath': True, **_numba_setting}
# Wavenumber-frequency domain kernel
@nb.njit(**_numba_setting)
def wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH, zetaV, lambd,
ab, xdirect, msrc, mrec):
r"""Calculate wavenumber domain solution.
Return the wavenumber domain solutions `PJ0`, `PJ1`, and `PJ0b`, which have
to be transformed with a Hankel transform to the frequency domain.
`PJ0`/`PJ0b` and `PJ1` have to be transformed with Bessel functions of
order 0 (:math:`J_0`) and 1 (:math:`J_1`), respectively.
This function corresponds loosely to equations 105--107, 111--116,
119--121, and 123--128 in [HuTS15]_, and equally loosely to the file
`kxwmod.c`.
[HuTS15]_ uses Bessel functions of orders 0, 1, and 2 (:math:`J_0, J_1,
J_2`). The implementations of the *Fast Hankel Transform* and the
*Quadrature-with-Extrapolation* in :mod:`empymod.transform` are set up with
Bessel functions of order 0 and 1 only. This is achieved by applying the
recurrence formula
.. math::
:label: wavenumber
J_2(kr) = \frac{2}{kr} J_1(kr) - J_0(kr) \ .
.. note::
`PJ0` and `PJ0b` could theoretically be added here into one, and then
be transformed in one go. However, `PJ0b` has to be multiplied by
:func:`ang_fact` later. This has to be done after the Hankel transform
for methods which make use of spline interpolation, in order to work
for offsets that are not in line with each other.
This function is called from one of the Hankel functions in
:mod:`empymod.transform`. Consult the modelling routines in
:mod:`empymod.model` for a description of the input and output parameters.
If you are solely interested in the wavenumber-domain solution you can call
this function directly. However, you have to make sure all input arguments
are correct, as no checks are carried out here.
"""
nfreq, _ = etaH.shape
noff, nlambda = lambd.shape
# ** CALCULATE GREEN'S FUNCTIONS
# Shape of PTM, PTE: (nfreq, noffs, nfilt)
PTM, PTE = greenfct(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
zetaV, lambd, ab, xdirect, msrc, mrec)
# ** AB-SPECIFIC COLLECTION OF PJ0, PJ1, AND PJ0b
# Pre-allocate output
if ab in [11, 22, 24, 15, 33]:
PJ0 = np.zeros_like(PTM)
else:
PJ0 = None
if ab in [11, 12, 21, 22, 14, 24, 15, 25]:
PJ0b = np.zeros_like(PTM)
else:
PJ0b = None
if ab not in [33, ]:
PJ1 = np.zeros_like(PTM)
else:
PJ1 = None
Ptot = np.zeros_like(PTM)
# Calculate Ptot which is used in all cases
fourpi = 4*np.pi
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
Ptot[i, ii, iv] = (PTM[i, ii, iv] + PTE[i, ii, iv])/fourpi
# If rec is magnetic switch sign (reciprocity MM/ME => EE/EM).
if mrec:
sign = -1
else:
sign = 1
# Group into PJ0 and PJ1 for J0/J1 Hankel Transform
if ab in [11, 12, 21, 22, 14, 24, 15, 25]: # Eqs 105, 106, 111, 112,
# J2(kr) = 2/(kr)*J1(kr) - J0(kr) # 119, 120, 123, 124
if ab in [14, 22]:
sign *= -1
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
PJ0b[i, ii, iv] = sign/2*Ptot[i, ii, iv]*lambd[ii, iv]
PJ1[i, ii, iv] = -sign*Ptot[i, ii, iv]
if ab in [11, 22, 24, 15]:
if ab in [22, 24]:
sign *= -1
eightpi = sign*8*np.pi
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
PJ0[i, ii, iv] = PTM[i, ii, iv] - PTE[i, ii, iv]
PJ0[i, ii, iv] *= lambd[ii, iv]/eightpi
elif ab in [13, 23, 31, 32, 34, 35, 16, 26]: # Eqs 107, 113, 114, 115,
if ab in [34, 26]: # . 121, 125, 126, 127
sign *= -1
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
dlambd = lambd[ii, iv]*lambd[ii, iv]
PJ1[i, ii, iv] = sign*Ptot[i, ii, iv]*dlambd
elif ab in [33, ]: # Eq 116
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
tlambd = lambd[ii, iv]*lambd[ii, iv]*lambd[ii, iv]
PJ0[i, ii, iv] = sign*Ptot[i, ii, iv]*tlambd
# Return PJ0, PJ1, PJ0b
return PJ0, PJ1, PJ0b
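# --- Illustration (editor's addition, not part of the empymod kernel) ---
# The docstring of `wavenumber` above rewrites J_2 via the Bessel recurrence
# J_2(kr) = 2/(kr)*J_1(kr) - J_0(kr) so that only J_0/J_1 transforms are
# needed. A minimal numerical sanity check of that identity, reusing the
# `scipy.special` import of this module (the function is defined only as a
# sketch and is never called by the kernel):
def _example_j2_recurrence(kr=2.345):
    """Return (J_2(kr), 2/(kr)*J_1(kr) - J_0(kr)); the two should agree."""
    direct = special.jv(2, kr)
    recurrence = 2/kr*special.jv(1, kr) - special.jv(0, kr)
    return direct, recurrence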
@nb.njit(**_numba_setting)
def greenfct(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH, zetaV, lambd,
ab, xdirect, msrc, mrec):
r"""Calculate Green's function for TM and TE.
.. math::
:label: greenfct
\tilde{g}^{tm}_{hh}, \tilde{g}^{tm}_{hz},
\tilde{g}^{tm}_{zh}, \tilde{g}^{tm}_{zz},
\tilde{g}^{te}_{hh}, \tilde{g}^{te}_{zz}
This function corresponds to equations 108--110, 117/118, 122; 89--94,
A18--A23, B13--B15; 97--102 A26--A31, and B16--B18 in [HuTS15]_, and
loosely to the corresponding files `Gamma.F90`, `Wprop.F90`, `Ptotalx.F90`,
`Ptotalxm.F90`, `Ptotaly.F90`, `Ptotalym.F90`, `Ptotalz.F90`, and
`Ptotalzm.F90`.
The Green's functions are multiplied according to Eqs 105-107, 111-116,
119-121, 123-128; with the factors inside the integrals.
This function is called from the function :func:`wavenumber`.
"""
nfreq, nlayer = etaH.shape
noff, nlambda = lambd.shape
# GTM/GTE have shape (frequency, offset, lambda).
# gamTM/gamTE have shape (frequency, offset, layer, lambda):
# Reciprocity switches for magnetic receivers
if mrec:
if msrc: # If src is also magnetic, switch eta and zeta (MM => EE).
# G^mm_ab(s, r, e, z) = -G^ee_ab(s, r, -z, -e)
etaH, zetaH = -zetaH, -etaH
etaV, zetaV = -zetaV, -etaV
else: # If src is electric, swap src and rec (ME => EM).
# G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z)
zsrc, zrec = zrec, zsrc
lsrc, lrec = lrec, lsrc
for TM in [True, False]:
# Continue if Green's function not required
if TM and ab in [16, 26]:
continue
elif not TM and ab in [13, 23, 31, 32, 33, 34, 35]:
continue
# Define eta/zeta depending if TM or TE
if TM:
e_zH, e_zV, z_eH = etaH, etaV, zetaH # TM: zetaV not used
else:
e_zH, e_zV, z_eH = zetaH, zetaV, etaH # TE: etaV not used
# Uppercase gamma
Gam = np.zeros((nfreq, noff, nlayer, nlambda), etaH.dtype)
for i in range(nfreq):
for ii in range(noff):
for iii in range(nlayer):
h_div_v = e_zH[i, iii]/e_zV[i, iii]
h_times_h = z_eH[i, iii]*e_zH[i, iii]
for iv in range(nlambda):
l2 = lambd[ii, iv]*lambd[ii, iv]
Gam[i, ii, iii, iv] = np.sqrt(h_div_v*l2 + h_times_h)
# Gamma in receiver layer
lrecGam = Gam[:, :, lrec, :]
# Reflection (coming from below (Rp) and above (Rm) rec)
if depth.size > 1: # Only if more than 1 layer
Rp, Rm = reflections(depth, e_zH, Gam, lrec, lsrc)
# Field propagators
# (Up- (Wu) and downgoing (Wd), in rec layer); Eq 74
Wu = np.zeros_like(lrecGam)
Wd = np.zeros_like(lrecGam)
if lrec != depth.size-1: # No upgoing field prop. if rec in last
ddepth = depth[lrec + 1] - zrec
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
Wu[i, ii, iv] = np.exp(-lrecGam[i, ii, iv]*ddepth)
if lrec != 0: # No downgoing field propagator if rec in first
ddepth = zrec - depth[lrec]
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
Wd[i, ii, iv] = np.exp(-lrecGam[i, ii, iv]*ddepth)
# Field at rec level (coming from below (Pu) and above (Pd) rec)
Pu, Pd = fields(depth, Rp, Rm, Gam, lrec, lsrc, zsrc, ab, TM)
# Green's functions
green = np.zeros_like(lrecGam)
if lsrc == lrec: # Rec in src layer; Eqs 108, 109, 110, 117, 118, 122
# Green's function depending on <ab>
# (If only one layer, no reflections/fields)
if depth.size > 1 and ab in [13, 23, 31, 32, 14, 24, 15, 25]:
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
green[i, ii, iv] = Pu[i, ii, iv]*Wu[i, ii, iv]
green[i, ii, iv] -= Pd[i, ii, iv]*Wd[i, ii, iv]
elif depth.size > 1:
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
green[i, ii, iv] = Pu[i, ii, iv]*Wu[i, ii, iv]
green[i, ii, iv] += Pd[i, ii, iv]*Wd[i, ii, iv]
# Direct field, if it is computed in the wavenumber domain
if not xdirect:
ddepth = abs(zsrc - zrec)
dsign = np.sign(zrec - zsrc)
minus_ab = [11, 12, 13, 14, 15, 21, 22, 23, 24, 25]
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
# Direct field
directf = np.exp(-lrecGam[i, ii, iv]*ddepth)
# Swap TM for certain <ab>
if TM and ab in minus_ab:
directf *= -1
# Multiply by zrec-zsrc-sign for certain <ab>
if ab in [13, 14, 15, 23, 24, 25, 31, 32]:
directf *= dsign
# Add direct field to Green's function
green[i, ii, iv] += directf
else:
# Calculate exponential factor
if lrec == depth.size-1:
ddepth = 0
else:
ddepth = depth[lrec+1] - depth[lrec]
fexp = np.zeros_like(lrecGam)
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
fexp[i, ii, iv] = np.exp(-lrecGam[i, ii, iv]*ddepth)
# Sign-switch for Green calculation
if TM and ab in [11, 12, 13, 21, 22, 23, 14, 24, 15, 25]:
pmw = -1
else:
pmw = 1
if lrec < lsrc: # Rec above src layer: Pd not used
# Eqs 89-94, A18-A23, B13-B15
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
green[i, ii, iv] = Pu[i, ii, iv]*(
Wu[i, ii, iv] + pmw*Rm[i, ii, 0, iv] *
fexp[i, ii, iv]*Wd[i, ii, iv])
elif lrec > lsrc: # rec below src layer: Pu not used
# Eqs 97-102 A26-A30, B16-B18
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
green[i, ii, iv] = Pd[i, ii, iv]*(
pmw*Wd[i, ii, iv] +
Rp[i, ii, abs(lsrc-lrec), iv] *
fexp[i, ii, iv]*Wu[i, ii, iv])
# Store in corresponding variable
if TM:
gamTM, GTM = Gam, green
else:
gamTE, GTE = Gam, green
# ** AB-SPECIFIC FACTORS AND CALCULATION OF PTOT'S
# These are the factors inside the integrals
# Eqs 105-107, 111-116, 119-121, 123-128
if ab in [11, 12, 21, 22]:
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
GTM[i, ii, iv] *= gamTM[i, ii, lrec, iv]/etaH[i, lrec]
GTE[i, ii, iv] *= zetaH[i, lsrc]/gamTE[i, ii, lsrc, iv]
elif ab in [14, 15, 24, 25]:
for i in range(nfreq):
fact = etaH[i, lsrc]/etaH[i, lrec]
for ii in range(noff):
for iv in range(nlambda):
GTM[i, ii, iv] *= fact*gamTM[i, ii, lrec, iv]
GTM[i, ii, iv] /= gamTM[i, ii, lsrc, iv]
elif ab in [13, 23]:
GTE = np.zeros_like(GTM)
for i in range(nfreq):
fact = etaH[i, lsrc]/etaH[i, lrec]/etaV[i, lsrc]
for ii in range(noff):
for iv in range(nlambda):
GTM[i, ii, iv] *= -fact*gamTM[i, ii, lrec, iv]
GTM[i, ii, iv] /= gamTM[i, ii, lsrc, iv]
elif ab in [31, 32]:
GTE = np.zeros_like(GTM)
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
GTM[i, ii, iv] /= etaV[i, lrec]
elif ab in [34, 35]:
GTE = np.zeros_like(GTM)
for i in range(nfreq):
fact = etaH[i, lsrc]/etaV[i, lrec]
for ii in range(noff):
for iv in range(nlambda):
GTM[i, ii, iv] *= fact/gamTM[i, ii, lsrc, iv]
elif ab in [16, 26]:
GTM = np.zeros_like(GTE)
for i in range(nfreq):
fact = zetaH[i, lsrc]/zetaV[i, lsrc]
for ii in range(noff):
for iv in range(nlambda):
GTE[i, ii, iv] *= fact/gamTE[i, ii, lsrc, iv]
elif ab in [33, ]:
GTE = np.zeros_like(GTM)
for i in range(nfreq):
fact = etaH[i, lsrc]/etaV[i, lsrc]/etaV[i, lrec]
for ii in range(noff):
for iv in range(nlambda):
GTM[i, ii, iv] *= fact/gamTM[i, ii, lsrc, iv]
# Return Green's functions
return GTM, GTE
@nb.njit(**_numba_with_fm)
def reflections(depth, e_zH, Gam, lrec, lsrc):
r"""Calculate Rp, Rm.
.. math::
:label: reflections
R^\pm_n, \bar{R}^\pm_n
This function corresponds to equations 64/65 and A-11/A-12 in
[HuTS15]_, and loosely to the corresponding files `Rmin.F90` and
`Rplus.F90`.
This function is called from the function :func:`greenfct`.
"""
# Get numbers and max/min layer.
nfreq, noff, nlambda = Gam[:, :, 0, :].shape
maxl = max([lrec, lsrc])
minl = min([lrec, lsrc])
# Loop over Rp, Rm
for plus in [True, False]:
# Switches depending if plus or minus
if plus:
pm = 1
layer_count = np.arange(depth.size-2, minl-1, -1)
izout = abs(lsrc-lrec)
minmax = pm*maxl
else:
pm = -1
layer_count = np.arange(1, maxl+1, 1)
izout = 0
minmax = pm*minl
# If rec in last and rec below src (plus) or
# if rec in first and rec above src (minus), shift izout
shiftplus = lrec < lsrc and lrec == 0 and not plus
shiftminus = lrec > lsrc and lrec == depth.size-1 and plus
if shiftplus or shiftminus:
izout -= pm
# Pre-allocate Ref and rloc
Ref = np.zeros_like(Gam[:, :, :maxl-minl+1, :])
rloc = np.zeros_like(Gam[:, :, 0, :])
# Calculate the reflection
for iz in layer_count:
# Eqs 65, A-12
for i in range(nfreq):
ra = e_zH[i, iz+pm]
rb = e_zH[i, iz]
for ii in range(noff):
for iv in range(nlambda):
rloca = ra*Gam[i, ii, iz, iv]
rlocb = rb*Gam[i, ii, iz+pm, iv]
rloc[i, ii, iv] = (rloca - rlocb)/(rloca + rlocb)
# In first layer tRef = rloc
if iz == layer_count[0]:
tRef = rloc.copy()
else:
ddepth = depth[iz+1+pm]-depth[iz+pm]
# Eqs 64, A-11
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
term = tRef[i, ii, iv]*np.exp(
-2*Gam[i, ii, iz+pm, iv]*ddepth)
tRef[i, ii, iv] = (rloc[i, ii, iv] + term)/(
1 + rloc[i, ii, iv]*term)
# The global reflection coefficient is given back for all layers
# between and including src- and rec-layer
if lrec != lsrc and pm*iz <= minmax:
Ref[:, :, izout, :] = tRef[:]
izout -= pm
# If lsrc = lrec, we just store the last values
if lsrc == lrec and layer_count.size > 0:
out = np.zeros_like(Ref[:, :, :1, :])
out[:, :, 0, :] = tRef
else:
out = Ref
# Store Ref in Rm/Rp
if plus:
Rm = out
else:
Rp = out
# Return reflections (minus and plus)
return Rm, Rp
@nb.njit(**_numba_setting)
def fields(depth, Rp, Rm, Gam, lrec, lsrc, zsrc, ab, TM):
r"""Calculate Pu+, Pu-, Pd+, Pd-.
.. math::
:label: fields
P^{u\pm}_s, P^{d\pm}_s, \bar{P}^{u\pm}_s, \bar{P}^{d\pm}_s;
P^{u\pm}_{s-1}, P^{u\pm}_n, \bar{P}^{u\pm}_{s-1}, \bar{P}^{u\pm}_n;
P^{d\pm}_{s+1}, P^{d\pm}_n, \bar{P}^{d\pm}_{s+1}, \bar{P}^{d\pm}_n
This function corresponds to equations 81/82, 95/96, 103/104, A-8/A-9,
A-24/A-25, and A-32/A-33 in [HuTS15]_, and loosely to the corresponding
files `Pdownmin.F90`, `Pdownplus.F90`, `Pupmin.F90`, and `Pdownmin.F90`.
This function is called from the function :func:`greenfct`.
"""
nfreq, noff, nlambda = Gam[:, :, 0, :].shape
# Variables
nlsr = abs(lsrc-lrec)+1 # nr of layers btw and incl. src and rec layer
rsrcl = 0 # src-layer in reflection (Rp/Rm), first if down
izrange = range(2, nlsr)
isr = lsrc
last = depth.size-1
# Booleans if src in first or last layer; swapped if up=True
first_layer = lsrc == 0
last_layer = lsrc == depth.size-1
# Depths; dp and dm are swapped if up=True
if lsrc != depth.size-1:
ds = depth[lsrc+1]-depth[lsrc]
dp = depth[lsrc+1]-zsrc
dm = zsrc-depth[lsrc]
# Rm and Rp; swapped if up=True
Rmp = Rm
Rpm = Rp
# Boolean if plus or minus has to be calculated
plusset = [13, 23, 33, 14, 24, 34, 15, 25, 35]
if TM:
plus = ab in plusset
else:
plus = ab not in plusset
# Sign-switches
pm = 1 # + if plus=True, - if plus=False
if not plus:
pm = -1
pup = -1 # + if up=True, - if up=False
mupm = 1 # + except if up=True and plus=False
# Gamma of source layer
iGam = Gam[:, :, lsrc, :]
# Calculate down- and up-going fields
for up in [False, True]:
# No upgoing field if rec is in last layer or below src
if up and (lrec == depth.size-1 or lrec > lsrc):
Pu = np.zeros_like(iGam)
continue
# No downgoing field if rec is in first layer or above src
if not up and (lrec == 0 or lrec < lsrc):
Pd = np.zeros_like(iGam)
continue
# Swaps if up=True
if up:
if not last_layer:
dp, dm = dm, dp
else:
dp = dm
Rmp, Rpm = Rpm, Rmp
first_layer, last_layer = last_layer, first_layer
rsrcl = nlsr-1 # src-layer in refl. (Rp/Rm), last (nlsr-1) if up
izrange = range(nlsr-2)
isr = lrec
last = 0
pup = 1
if not plus:
mupm = -1
P = np.zeros_like(iGam)
# Calculate Pu+, Pu-, Pd+, Pd-
if lsrc == lrec: # rec in src layer; Eqs 81/82, A-8/A-9
if last_layer: # If src/rec are in top (up) or bottom (down) layer
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
tRmp = Rmp[i, ii, 0, iv]
tiGam = iGam[i, ii, iv]
P[i, ii, iv] = tRmp*np.exp(-tiGam*dm)
else: # If src and rec are in any layer in between
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
tiGam = iGam[i, ii, iv]
tRpm = Rpm[i, ii, 0, iv]
tRmp = Rmp[i, ii, 0, iv]
p1 = np.exp(-tiGam*dm)
p2 = pm*tRpm*np.exp(-tiGam*(ds+dp))
p3 = 1 - tRmp * tRpm * np.exp(-2*tiGam*ds)
P[i, ii, iv] = (p1 + p2) * tRmp/p3
else: # rec above (up) / below (down) src layer
# # Eqs 95/96, A-24/A-25 for rec above src layer
# # Eqs 103/104, A-32/A-33 for rec below src layer
# First compute P_{s-1} (up) / P_{s+1} (down)
iRpm = Rpm[:, :, rsrcl, :]
if first_layer: # If src is in bottom (up) / top (down) layer
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
tiRpm = iRpm[i, ii, iv]
tiGam = iGam[i, ii, iv]
P[i, ii, iv] = (1 + tiRpm)*mupm*np.exp(-tiGam*dp)
else:
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
iRmp = Rmp[i, ii, rsrcl, iv]
tiGam = iGam[i, ii, iv]
tRpm = iRpm[i, ii, iv]
p1 = mupm*np.exp(-tiGam*dp)
p2 = pm*mupm*iRmp*np.exp(-tiGam * (ds+dm))
p3 = (1 + tRpm)/(1 - iRmp*tRpm*np.exp(-2*tiGam*ds))
P[i, ii, iv] = (p1 + p2) * p3
# If up or down and src is in last but one layer
if up or (not up and lsrc+1 < depth.size-1):
ddepth = depth[lsrc+1-1*pup]-depth[lsrc-1*pup]
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
tiRpm = Rpm[i, ii, rsrcl-1*pup, iv]
tiGam = Gam[i, ii, lsrc-1*pup, iv]
P[i, ii, iv] /= 1 + tiRpm*np.exp(-2*tiGam*ddepth)
# Second compute P for all other layers
if nlsr > 2:
for iz in izrange:
ddepth = depth[isr+iz+pup+1]-depth[isr+iz+pup]
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
tiRpm = Rpm[i, ii, iz+pup, iv]
piGam = Gam[i, ii, isr+iz+pup, iv]
p1 = (1+tiRpm)*np.exp(-piGam*ddepth)
P[i, ii, iv] *= p1
# If rec/src NOT in first/last layer (up/down)
if isr+iz != last:
ddepth = depth[isr+iz+1] - depth[isr+iz]
for i in range(nfreq):
for ii in range(noff):
for iv in range(nlambda):
tiRpm = Rpm[i, ii, iz, iv]
piGam2 = Gam[i, ii, isr+iz, iv]
p1 = 1 + tiRpm*np.exp(-2*piGam2 * ddepth)
P[i, ii, iv] /= p1
# Store P in Pu/Pd
if up:
Pu = P
else:
Pd = P
# Return fields (up- and downgoing)
return Pu, Pd
# Angle Factor
def angle_factor(angle, ab, msrc, mrec):
r"""Return the angle-dependent factor.
The whole calculation in the wavenumber domain is only a function of the
distance between the source and the receiver; it is independent of the
angle. The angle dependency is carried by this factor, which can be applied
to the corresponding parts in the wavenumber or in the frequency domain.
The :func:`angle_factor` corresponds to the sine and cosine-functions in
Eqs 105-107, 111-116, 119-121, 123-128.
This function is called from one of the Hankel functions in
:mod:`empymod.transform`. Consult the modelling routines in
:mod:`empymod.model` for a description of the input and output parameters.
"""
# 33/66 are completely symmetric and hence independent of angle
if ab in [33, ]:
return np.ones(angle.size)
# Evaluation angle
eval_angle = angle.copy()
# Add pi if receiver is magnetic (reciprocity), but not if source is
# electric, because then source and receiver are swapped, ME => EM:
# G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z).
if mrec and not msrc:
eval_angle += np.pi
# Define fct (cos/sin) and angles to be tested
if ab in [11, 22, 15, 24, 13, 31, 26, 35]:
fct = np.cos
test_ang_1 = np.pi/2
test_ang_2 = 3*np.pi/2
else:
fct = np.sin
test_ang_1 = np.pi
test_ang_2 = 2*np.pi
if ab in [11, 22, 15, 24, 12, 21, 14, 25]:
eval_angle *= 2
# Get factor
ang_fact = fct(eval_angle)
# Ensure cos([pi/2, 3pi/2]) and sin([pi, 2pi]) are zero (floating pt issue)
ang_fact[np.isclose(np.abs(eval_angle), test_ang_1, 1e-10, 1e-14)] = 0
ang_fact[np.isclose(np.abs(eval_angle), test_ang_2, 1e-10, 1e-14)] = 0
return ang_fact
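# --- Illustration (editor's addition, not part of the empymod kernel) ---
# Small sketch of `angle_factor` behaviour as implemented above: ab=33 is
# fully symmetric (a factor of ones), while ab=11 evaluates cos(2*angle),
# with values that should be exactly zero (e.g. at angle=pi/4) snapped to 0.
# Defined only as an example; it is not called anywhere in the module.
def _example_angle_factor():
    """Return the factors for ab=33 and ab=11 at 0 and 45 degrees."""
    ang = np.array([0., np.pi/4])
    fact_33 = angle_factor(ang, ab=33, msrc=False, mrec=False)  # [1., 1.]
    fact_11 = angle_factor(ang, ab=11, msrc=False, mrec=False)  # [1., 0.]
    return fact_33, fact_11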
# Analytical solutions
@np.errstate(all='ignore')
def fullspace(off, angle, zsrc, zrec, etaH, etaV, zetaH, zetaV, ab, msrc,
mrec):
r"""Analytical full-space solutions in the frequency domain.
.. math::
:label: fullspace
\hat{G}^{ee}_{\alpha\beta}, \hat{G}^{ee}_{3\alpha},
\hat{G}^{ee}_{33}, \hat{G}^{em}_{\alpha\beta}, \hat{G}^{em}_{\alpha 3}
This function corresponds to equations 45--50 in [HuTS15]_, and loosely to
the corresponding files `Gin11.F90`, `Gin12.F90`, `Gin13.F90`, `Gin22.F90`,
`Gin23.F90`, `Gin31.F90`, `Gin32.F90`, `Gin33.F90`, `Gin41.F90`,
`Gin42.F90`, `Gin43.F90`, `Gin51.F90`, `Gin52.F90`, `Gin53.F90`,
`Gin61.F90`, and `Gin62.F90`.
This function is called from one of the modelling routines in
:mod:`empymod.model`. Consult these modelling routines for a description of
the input and output parameters.
"""
xco = np.cos(angle)*off
yco = np.sin(angle)*off
# Reciprocity switches for magnetic receivers
if mrec:
if msrc: # If src is also magnetic, switch eta and zeta (MM => EE).
# G^mm_ab(s, r, e, z) = -G^ee_ab(s, r, -z, -e)
etaH, zetaH = -zetaH, -etaH
etaV, zetaV = -zetaV, -etaV
else: # If src is electric, swap src and rec (ME => EM).
# G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z)
xco *= -1
yco *= -1
zsrc, zrec = zrec, zsrc
# Calculate TE/TM-variables
if ab not in [16, 26]: # Calc TM
lGamTM = np.sqrt(zetaH*etaV)
RTM = np.sqrt(off*off + ((zsrc-zrec)*(zsrc-zrec)*etaH/etaV)[:, None])
uGamTM = np.exp(-lGamTM[:, None]*RTM)/(4*np.pi*RTM *
np.sqrt(etaH/etaV)[:, None])
if ab not in [13, 23, 31, 32, 33, 34, 35]: # Calc TE
lGamTE = np.sqrt(zetaV*etaH)
RTE = np.sqrt(off*off+(zsrc-zrec)*(zsrc-zrec)*(zetaH/zetaV)[:, None])
uGamTE = np.exp(-lGamTE[:, None]*RTE)/(4*np.pi*RTE *
np.sqrt(zetaH/zetaV)[:, None])
# Calculate responses
if ab in [11, 12, 21, 22]: # Eqs 45, 46
# Define coo1, coo2, and delta
if ab in [11, 22]:
if ab in [11, ]:
coo1 = xco
coo2 = xco
else:
coo1 = yco
coo2 = yco
delta = 1
else:
coo1 = xco
coo2 = yco
delta = 0
# Calculate response
term1 = uGamTM*(3*coo1*coo2/(RTM*RTM) - delta)
term1 *= 1/(etaV[:, None]*RTM*RTM) + (lGamTM/etaV)[:, None]/RTM
term1 += uGamTM*zetaH[:, None]*coo1*coo2/(RTM*RTM)
term2 = -delta*zetaH[:, None]*uGamTE
term3 = -zetaH[:, None]*coo1*coo2/(off*off)*(uGamTM - uGamTE)
term4 = -np.sqrt(zetaH)[:, None]*(2*coo1*coo2/(off*off) - delta)
if np.any(zetaH.imag < 0): # We need the sqrt where Im > 0.
term4 *= -1 # This if-statement corrects for it.
term4 *= np.exp(-lGamTM[:, None]*RTM) - np.exp(-lGamTE[:, None]*RTE)
term4 /= 4*np.pi*np.sqrt(etaH)[:, None]*off*off
gin = term1 + term2 + term3 + term4
elif ab in [13, 23, 31, 32]: # Eq 47
# Define coo
if ab in [13, 31]:
coo = xco
elif ab in [23, 32]:
coo = yco
# Calculate response
term1 = (etaH/etaV)[:, None]*(zrec - zsrc)*coo/(RTM*RTM)
term2 = 3/(RTM*RTM) + 3*lGamTM[:, None]/RTM + (lGamTM*lGamTM)[:, None]
gin = term1*term2*uGamTM/etaV[:, None]
elif ab in [33, ]: # Eq 48
# Calculate response
term1 = (((etaH/etaV)[:, None]*(zsrc - zrec)/RTM) *
((etaH/etaV)[:, None]*(zsrc - zrec)/RTM) *
(3/(RTM*RTM) + 3*lGamTM[:, None]/RTM +
(lGamTM*lGamTM)[:, None]))
term2 = (-(etaH/etaV)[:, None]/RTM*(1/RTM + lGamTM[:, None]) -
(etaH*zetaH)[:, None])
gin = (term1 + term2)*uGamTM/etaV[:, None]
elif ab in [14, 24, 15, 25]: # Eq 49
# Define coo1, coo2, coo3, coo4, delta, and pm
if ab in [14, 25]:
coo1, coo2 = xco, yco
coo3, coo4 = xco, yco
delta = 0
pm = -1
elif ab in [24, 15]:
coo1, coo2 = yco, yco
coo3, coo4 = xco, xco
delta = 1
pm = 1
# 15/25: Swap x/y
if ab in [15, 25]:
coo1, coo3 = coo3, coo1
coo2, coo4 = coo4, coo2
# 24/25: Swap src/rec
if ab in [24, 25]:
zrec, zsrc = zsrc, zrec
# Calculate response
def term(lGam, z_eH, z_eV, R, off, co1, co2):
fac = (lGam*z_eH/z_eV)[:, None]/R*np.exp(-lGam[:, None]*R)
term = 2/(off*off) + lGam[:, None]/R + 1/(R*R)
return fac*(co1*co2*term - delta)
termTM = term(lGamTM, etaH, etaV, RTM, off, coo1, coo2)
termTE = term(lGamTE, zetaH, zetaV, RTE, off, coo3, coo4)
mult = (zrec - zsrc)/(4*np.pi*np.sqrt(etaH*zetaH)[:, None]*off*off)
gin = -mult*(pm*termTM + termTE)
elif ab in [34, 35, 16, 26]: # Eqs 50, 51
# Define coo
if ab in [34, 16]:
coo = yco
else:
coo = -xco
# Define R, lGam, uGam, e_zH, and e_zV
if ab in [34, 35]:
coo *= -1
R = RTM
lGam = lGamTM
uGam = uGamTM
e_zH = etaH
e_zV = etaV
else:
R = RTE
lGam = lGamTE
uGam = uGamTE
e_zH = zetaH
e_zV = zetaV
# Calculate response
gin = coo*(e_zH/e_zV)[:, None]/R*(lGam[:, None] + 1/R)*uGam
# If rec is magnetic switch sign (reciprocity MM/ME => EE/EM).
if mrec:
gin *= -1
return gin
@np.errstate(all='ignore')
def halfspace(off, angle, zsrc, zrec, etaH, etaV, freqtime, ab, signal,
solution='dhs'):
r"""Return frequency- or time-space domain VTI half-space solution.
Calculates the frequency- or time-space domain electromagnetic response for
a half-space below air using the diffusive approximation, as given in
[SlHM10]_, where the electric source is located at [x=0, y=0, z=zsrc>=0],
and the electric receiver at [x=cos(angle)*off, y=sin(angle)*off,
z=zrec>=0].
It can also be used to calculate the fullspace solution or the separate
fields: direct field, reflected field, and airwave; always using the
diffusive approximation. See `solution`-parameter.
This function is called from one of the modelling routines in
:mod:`empymod.model`. Consult these modelling routines for a description of
the input and solution parameters.
"""
xco = np.cos(angle)*off
yco = np.sin(angle)*off
res = np.real(1/etaH[0, 0])
aniso = 1/np.sqrt(np.real(etaV[0, 0])*res)
# Define sval/time and dtype depending on signal.
if signal is None:
sval = freqtime
dtype = etaH.dtype
else:
time = freqtime
if signal == -1: # Calculate DC
time = np.r_[time[:, 0], 1e4][:, None]
freqtime = time
dtype = np.float64
# Other defined parameters
rh = np.sqrt(xco**2 + yco**2) # Horizontal distance in space
hp = abs(zrec + zsrc) # Physical vertical distance
hm = abs(zrec - zsrc)
hsp = hp*aniso # Scaled vertical distance
hsm = hm*aniso
rp = np.sqrt(xco**2 + yco**2 + hp**2) # Physical distance
rm = np.sqrt(xco**2 + yco**2 + hm**2)
rsp = np.sqrt(xco**2 + yco**2 + hsp**2) # Scaled distance
rsm = np.sqrt(xco**2 + yco**2 + hsm**2)
#
mu_0 = 4e-7*np.pi # Magn. perm. of free space [H/m]
tp = mu_0*rp**2/(res*4) # Diffusion time
tm = mu_0*rm**2/(res*4)
tsp = mu_0*rsp**2/(res*aniso**2*4) # Scaled diffusion time
tsm = mu_0*rsm**2/(res*aniso**2*4)
# delta-fct delta_\alpha\beta
if ab in [11, 22, 33]:
delta = 1
else:
delta = 0
# Define alpha/beta; swap if necessary
x = xco
y = yco
if ab == 11:
y = x
elif ab in [22, 23, 32]:
x = y
elif ab == 21:
x, y = y, x
# Define rev for 3\alpha->\alpha3 reciprocity
if ab in [13, 23]:
rev = -1
elif ab in [31, 32]:
rev = 1
# Exponential diffusion functions for m=0,1,2
if signal is None: # Frequency-domain
f0p = np.exp(-2*np.sqrt(sval*tp))
f0m = np.exp(-2*np.sqrt(sval*tm))
fs0p = np.exp(-2*np.sqrt(sval*tsp))
fs0m = np.exp(-2*np.sqrt(sval*tsm))
f1p = np.sqrt(sval)*f0p
f1m = np.sqrt(sval)*f0m
fs1p = np.sqrt(sval)*fs0p
fs1m = np.sqrt(sval)*fs0m
f2p = sval*f0p
f2m = sval*f0m
fs2p = sval*fs0p
fs2m = sval*fs0m
elif abs(signal) == 1: # Time-domain step response
# Replace F(m) with F(m-2)
f0p = special.erfc(np.sqrt(tp/time))
f0m = special.erfc(np.sqrt(tm/time))
fs0p = special.erfc(np.sqrt(tsp/time))
fs0m = special.erfc(np.sqrt(tsm/time))
f1p = np.exp(-tp/time)/np.sqrt(np.pi*time)
f1m = np.exp(-tm/time)/np.sqrt(np.pi*time)
fs1p = np.exp(-tsp/time)/np.sqrt(np.pi*time)
fs1m = np.exp(-tsm/time)/np.sqrt(np.pi*time)
f2p = f1p*np.sqrt(tp)/time
f2m = f1m*np.sqrt(tm)/time
fs2p = fs1p*np.sqrt(tsp)/time
fs2m = fs1m*np.sqrt(tsm)/time
else: # Time-domain impulse response
f0p = np.sqrt(tp/(np.pi*time**3))*np.exp(-tp/time)
f0m = np.sqrt(tm/(np.pi*time**3))*np.exp(-tm/time)
fs0p = np.sqrt(tsp/(np.pi*time**3))*np.exp(-tsp/time)
fs0m = np.sqrt(tsm/(np.pi*time**3))*np.exp(-tsm/time)
f1p = (tp/time - 0.5)/np.sqrt(tp)*f0p
f1m = (tm/time - 0.5)/np.sqrt(tm)*f0m
fs1p = (tsp/time - 0.5)/np.sqrt(tsp)*fs0p
fs1m = (tsm/time - 0.5)/np.sqrt(tsm)*fs0m
f2p = (tp/time - 1.5)/time*f0p
f2m = (tm/time - 1.5)/time*f0m
fs2p = (tsp/time - 1.5)/time*fs0p
fs2m = (tsm/time - 1.5)/time*fs0m
# Pre-allocate arrays
gs0m = np.zeros(np.shape(x), dtype=dtype)
gs0p = np.zeros(np.shape(x), dtype=dtype)
gs1m = np.zeros(np.shape(x), dtype=dtype)
gs1p = np.zeros(np.shape(x), dtype=dtype)
gs2m = np.zeros(np.shape(x), dtype=dtype)
gs2p = np.zeros(np.shape(x), dtype=dtype)
g0p = np.zeros(np.shape(x), dtype=dtype)
g1m = np.zeros(np.shape(x), dtype=dtype)
g1p = np.zeros(np.shape(x), dtype=dtype)
g2m = np.zeros(np.shape(x), dtype=dtype)
g2p = np.zeros(np.shape(x), dtype=dtype)
air = np.zeros(np.shape(f0p), dtype=dtype)
if ab in [11, 12, 21, 22]: # 1. {alpha, beta}
# Get indices for singularities
izr = rh == 0 # index where rh = 0
iir = np.invert(izr) # invert of izr
izh = hm == 0 # index where hm = 0
iih = np.invert(izh) # invert of izh
# fab
fab = rh**2*delta-x*y
# TM-mode coefficients
gs0p = res*aniso*(3*x*y - rsp**2*delta)/(4*np.pi*rsp**5)
gs0m = res*aniso*(3*x*y - rsm**2*delta)/(4*np.pi*rsm**5)
gs1p[iir] = (((3*x[iir]*y[iir] - rsp[iir]**2*delta)/rsp[iir]**4 -
(x[iir]*y[iir] - fab[iir])/rh[iir]**4) *
np.sqrt(mu_0*res)/(4*np.pi))
gs1m[iir] = (((3*x[iir]*y[iir] - rsm[iir]**2*delta)/rsm[iir]**4 -
(x[iir]*y[iir] - fab[iir])/rh[iir]**4) *
np.sqrt(mu_0*res)/(4*np.pi))
gs2p[iir] = ((mu_0*x[iir]*y[iir])/(4*np.pi*aniso*rsp[iir]) *
(1/rsp[iir]**2 - 1/rh[iir]**2))
gs2m[iir] = ((mu_0*x[iir]*y[iir])/(4*np.pi*aniso*rsm[iir]) *
(1/rsm[iir]**2 - 1/rh[iir]**2))
# TM-mode for numerical singularities rh=0 (hm!=0)
gs1p[izr*iih] = -np.sqrt(mu_0*res)*delta/(4*np.pi*hsp**2)
gs1m[izr*iih] = -np.sqrt(mu_0*res)*delta/(4*np.pi*hsm**2)
gs2p[izr*iih] = -mu_0*delta/(8*np.pi*aniso*hsp)
gs2m[izr*iih] = -mu_0*delta/(8*np.pi*aniso*hsm)
# TE-mode coefficients
g0p = res*(3*fab - rp**2*delta)/(2*np.pi*rp**5)
g1m[iir] = (np.sqrt(mu_0*res)*(x[iir]*y[iir] - fab[iir]) /
(4*np.pi*rh[iir]**4))
g1p[iir] = (g1m[iir] + np.sqrt(mu_0*res)*(3*fab[iir] -
rp[iir]**2*delta)/(2*np.pi*rp[iir]**4))
g2p[iir] = mu_0*fab[iir]/(4*np.pi*rp[iir])*(2/rp[iir]**2 -
1/rh[iir]**2)
g2m[iir] = -mu_0*fab[iir]/(4*np.pi*rh[iir]**2*rm[iir])
# TE-mode for numerical singularities rh=0 (hm!=0)
g1m[izr*iih] = np.zeros(np.shape(g1m[izr*iih]), dtype=dtype)
g1p[izr*iih] = -np.sqrt(mu_0*res)*delta/(2*np.pi*hp**2)
g2m[izr*iih] = mu_0*delta/(8*np.pi*hm)
g2p[izr*iih] = mu_0*delta/(8*np.pi*hp)
# Bessel functions for airwave
def BI(gamH, hp, nr, xim):
r"""Return BI_nr."""
return np.exp(-np.real(gamH)*hp)*special.ive(nr, xim)
def BK(xip, nr):
r"""Return BK_nr."""
if np.isrealobj(xip):
# To keep it real in Laplace-domain [exp(-1j*0) = 1-0j].
return special.kve(nr, xip)
else:
return np.exp(-1j*np.imag(xip))*special.kve(nr, xip)
# Airwave calculation
def airwave(sval, hp, rp, res, fab, delta):
r"""Return airwave."""
# Parameters
zeta = sval*mu_0
gamH = np.sqrt(zeta/res)
xip = gamH*(rp + hp)/2
xim = gamH*(rp - hp)/2
# Bessel functions
BI0 = BI(gamH, hp, 0, xim)
BI1 = BI(gamH, hp, 1, xim)
BI2 = BI(gamH, hp, 2, xim)
BK0 = BK(xip, 0)
BK1 = BK(xip, 1)
# Calculation
P1 = (sval*mu_0)**(3/2)*fab*hp/(4*np.sqrt(res))
P2 = 4*BI1*BK0 - (3*BI0 - 4*np.sqrt(res)*BI1/(np.sqrt(sval*mu_0) *
(rp + hp)) + BI2)*BK1
P3 = 3*fab/rp**2 - delta
P4 = (sval*mu_0*hp*rp*(BI0*BK0 - BI1*BK1) +
np.sqrt(res*sval*mu_0)*BI0*BK1 *
(rp + hp) + np.sqrt(res*sval*mu_0)*BI1*BK0*(rp - hp))
return (P1*P2 - P3*P4)/(4*np.pi*rp**3)
# Airwave depending on signal
if signal is None: # Frequency-domain
air = airwave(sval, hp, rp, res, fab, delta)
elif abs(signal) == 1: # Time-domain step response
# Solution for step-response air-wave is not analytical, but uses
# the Gaver-Stehfest method.
K = 16
# Coefficients Dk
def coeff_dk(k, K):
r"""Return coefficients Dk for k, K."""
n = np.arange((k+1)//2, min([k, K/2])+.5, 1)
Dk = n**(K/2)*special.factorial(2*n)/special.factorial(n)
Dk /= special.factorial(n-1)*special.factorial(k-n)
Dk /= special.factorial(2*n-k)*special.factorial(K/2-n)
return Dk.sum()*(-1)**(k+K/2)
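            # Gaver-Stehfest inversion of the s-domain airwave F(s) for the
            # step response (reading of the loop below, stated here for clarity):
            #   f_step(t) ~= ln(2)/t * sum_k Dk * F(s_k)/s_k,  with s_k = k*ln(2)/t,
            # so the ln(2)/t prefactor and the 1/s_k of the step response combine
            # into the 1/k factor applied to each term.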
for k in range(1, K+1):
sval = k*np.log(2)/time
cair = airwave(sval, hp, rp, res, fab, delta)
air += coeff_dk(k, K)*cair.real/k
else: # Time-domain impulse response
thp = mu_0*hp**2/(4*res)
trh = mu_0*rh**2/(8*res)
P1 = (mu_0**2*hp*np.exp(-thp/time))/(res*32*np.pi*time**3)
P2 = 2*(delta - (x*y)/rh**2)*special.ive(1, trh/time)
P3 = mu_0/(2*res*time)*(rh**2*delta - x*y)-delta
P4 = special.ive(0, trh/time) - special.ive(1, trh/time)
air = P1*(P2 - P3*P4)
elif ab in [13, 23, 31, 32]: # 2. {3, alpha}, {alpha, 3}
# TM-mode
gs0m = 3*x*res*aniso**3*(zrec - zsrc)/(4*np.pi*rsm**5)
gs0p = rev*3*x*res*aniso**3*hp/(4*np.pi*rsp**5)
gs1m = (np.sqrt(mu_0*res)*3*aniso**2*x*(zrec - zsrc) /
(4*np.pi*rsm**4))
gs1p = rev*np.sqrt(mu_0*res)*3*aniso**2*x*hp/(4*np.pi*rsp**4)
gs2m = mu_0*x*aniso*(zrec - zsrc)/(4*np.pi*rsm**3)
gs2p = rev*mu_0*x*aniso*hp/(4*np.pi*rsp**3)
elif ab == 33: # 3. {3, 3}
# TM-mode
gs0m = res*aniso**3*(3*hsm**2 - rsm**2)/(4*np.pi*rsm**5)
gs0p = -res*aniso**3*(3*hsp**2 - rsp**2)/(4*np.pi*rsp**5)
gs1m = np.sqrt(mu_0*res)*aniso**2*(3*hsm**2 - rsm**2)/(4*np.pi*rsm**4)
gs1p = -np.sqrt(mu_0*res)*aniso**2*(3*hsp**2 - rsp**2)/(4*np.pi*rsp**4)
gs2m = mu_0*aniso*(hsm**2 - rsm**2)/(4*np.pi*rsm**3)
gs2p = -mu_0*aniso*(hsp**2 - rsp**2)/(4*np.pi*rsp**3)
# Direct field
direct_TM = gs0m*fs0m + gs1m*fs1m + gs2m*fs2m
direct_TE = g1m*f1m + g2m*f2m
direct = direct_TM + direct_TE
# Reflection
reflect_TM = gs0p*fs0p + gs1p*fs1p + gs2p*fs2p
reflect_TE = g0p*f0p + g1p*f1p + g2p*f2p
reflect = reflect_TM + reflect_TE
# If switch-off, subtract switch-on from DC value
if signal == -1:
direct_TM = direct_TM[-1]-direct_TM[:-1]
direct_TE = direct_TE[-1]-direct_TE[:-1]
direct = direct[-1]-direct[:-1]
reflect_TM = reflect_TM[-1]-reflect_TM[:-1]
reflect_TE = reflect_TE[-1]-reflect_TE[:-1]
reflect = reflect[-1]-reflect[:-1]
air = air[-1]-air[:-1]
# Return, depending on 'solution'
if solution == 'dfs':
return direct
elif solution == 'dsplit':
return direct, reflect, air
elif solution == 'dtetm':
return direct_TE, direct_TM, reflect_TE, reflect_TM, air
else:
return direct + reflect + air
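# Usage sketch (illustrative, all values assumed): x-directed source and
# receiver (ab=11) over a 1 Ohm.m isotropic half-space in the frequency domain.
# With `signal=None` the `freqtime` argument is used directly as the Laplace
# parameter s (= 2j*pi*f for the frequency domain), which the calling routines
# in empymod.model are expected to set up.
#   off = np.array([500., 1000.])
#   angle = np.zeros_like(off)
#   etaH = etaV = np.array([[1.0]])            # 1/res for res = 1 Ohm.m
#   resp = halfspace(off, angle, zsrc=250., zrec=300., etaH=etaH, etaV=etaV,
#                    freqtime=2j*np.pi*np.array([[1.0]]), ab=11, signal=None)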
|
py | b4075b714d6b5a79e8f253836d5cd8c8f37c4195 | # Convert SVG path to the Turtle Command format
import sys
import os
import xml.etree.ElementTree as ET
svg_file = 'Tracing.svg'
if len(sys.argv) >= 2:
svg_file = sys.argv[1]
tree = ET.parse(svg_file)
root = tree.getroot()
tc_file = os.path.splitext(svg_file)[0] + '.tc'
path_list = []
for x in root:
if 'path' in x.tag:
path_list.append(x.attrib['d'])
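# Write the turtle-command file: each path's 'd' attribute is split on
# whitespace and a newline is emitted before every single-letter move/line
# command (M/m, H/h, V/v, L/l), so each output line groups a command with the
# coordinate tokens that follow it. Note (assumption about the input data):
# tokens that bundle command and coordinates, e.g. "M10,20", pass through
# unchanged and do not start a new line.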
f = open(tc_file, 'w')
for path in path_list:
for cmd in path.split():
if cmd.upper() in 'MHVL':
f.write('\n')
f.write(' ' + cmd)
f.write('\n')
f.close()
print(tc_file)
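# Example invocation (file names are assumptions): running this script as
# `python svg_to_tc.py Tracing.svg` parses Tracing.svg, writes the converted
# command list to Tracing.tc, and prints the output file name when done.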
|
py | b4075b844c83e421e8b1a6be6d4ab2e650261eba | import __init__
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import function as fn
from dgl.ops import edge_softmax
from dgl.utils import expand_as_pair
from eff_gcn_modules.rev.rev_layer import SharedDropout
from dgl.nn import GroupRevRes
class ElementWiseLinear(nn.Module):
def __init__(self, size, weight=True, bias=True, inplace=False):
super().__init__()
if weight:
self.weight = nn.Parameter(torch.Tensor(size))
else:
self.weight = None
if bias:
self.bias = nn.Parameter(torch.Tensor(size))
else:
self.bias = None
self.inplace = inplace
self.reset_parameters()
def reset_parameters(self):
if self.weight is not None:
nn.init.ones_(self.weight)
if self.bias is not None:
nn.init.zeros_(self.bias)
def forward(self, x):
if self.inplace:
if self.weight is not None:
x.mul_(self.weight)
if self.bias is not None:
x.add_(self.bias)
else:
if self.weight is not None:
x = x * self.weight
if self.bias is not None:
x = x + self.bias
return x
class GATConv(nn.Module):
def __init__(
self,
in_feats,
out_feats,
num_heads=1,
feat_drop=0.0,
attn_drop=0.0,
edge_drop=0.0,
negative_slope=0.2,
use_attn_dst=True,
residual=False,
activation=None,
allow_zero_in_degree=False,
use_symmetric_norm=False,
):
super(GATConv, self).__init__()
self._num_heads = num_heads
self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
self._out_feats = out_feats
self._allow_zero_in_degree = allow_zero_in_degree
self._use_symmetric_norm = use_symmetric_norm
if isinstance(in_feats, tuple):
self.fc_src = nn.Linear(self._in_src_feats, out_feats * num_heads, bias=False)
self.fc_dst = nn.Linear(self._in_dst_feats, out_feats * num_heads, bias=False)
else:
self.fc = nn.Linear(self._in_src_feats, out_feats * num_heads, bias=False)
self.attn_l = nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_feats)))
if use_attn_dst:
self.attn_r = nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_feats)))
else:
self.register_buffer("attn_r", None)
self.feat_drop = nn.Dropout(feat_drop)
assert feat_drop == 0.0 # not implemented
self.attn_drop = nn.Dropout(attn_drop)
assert attn_drop == 0.0 # not implemented
self.edge_drop = edge_drop
self.leaky_relu = nn.LeakyReLU(negative_slope)
if residual:
self.res_fc = nn.Linear(self._in_dst_feats, num_heads * out_feats, bias=False)
else:
self.register_buffer("res_fc", None)
self.reset_parameters()
self._activation = activation
def reset_parameters(self):
gain = nn.init.calculate_gain("relu")
if hasattr(self, "fc"):
nn.init.xavier_normal_(self.fc.weight, gain=gain)
else:
nn.init.xavier_normal_(self.fc_src.weight, gain=gain)
nn.init.xavier_normal_(self.fc_dst.weight, gain=gain)
nn.init.xavier_normal_(self.attn_l, gain=gain)
if isinstance(self.attn_r, nn.Parameter):
nn.init.xavier_normal_(self.attn_r, gain=gain)
if isinstance(self.res_fc, nn.Linear):
nn.init.xavier_normal_(self.res_fc.weight, gain=gain)
def set_allow_zero_in_degree(self, set_value):
self._allow_zero_in_degree = set_value
def forward(self, graph, feat, perm=None):
with graph.local_scope():
if not self._allow_zero_in_degree:
if (graph.in_degrees() == 0).any():
assert False
if isinstance(feat, tuple):
h_src = self.feat_drop(feat[0])
h_dst = self.feat_drop(feat[1])
if not hasattr(self, "fc_src"):
self.fc_src, self.fc_dst = self.fc, self.fc
feat_src, feat_dst = h_src, h_dst
feat_src = self.fc_src(h_src).view(-1, self._num_heads, self._out_feats)
feat_dst = self.fc_dst(h_dst).view(-1, self._num_heads, self._out_feats)
else:
h_src = self.feat_drop(feat)
feat_src = h_src
feat_src = self.fc(h_src).view(-1, self._num_heads, self._out_feats)
if graph.is_block:
h_dst = h_src[: graph.number_of_dst_nodes()]
feat_dst = feat_src[: graph.number_of_dst_nodes()]
else:
h_dst = h_src
feat_dst = feat_src
if self._use_symmetric_norm:
degs = graph.out_degrees().float().clamp(min=1)
norm = torch.pow(degs, -0.5)
shp = norm.shape + (1,) * (feat_src.dim() - 1)
norm = torch.reshape(norm, shp)
feat_src = feat_src * norm
            # NOTE: The GAT paper uses "first concatenation then linear projection"
            # to compute attention scores, while ours is "first projection then
            # addition"; the two approaches are mathematically equivalent:
            # decompose the weight vector a from the paper into [a_l || a_r], then
            # a^T [Wh_i || Wh_j] = a_l Wh_i + a_r Wh_j.
            # Our implementation is more efficient because we do not need to
            # store [Wh_i || Wh_j] on every edge, which would not be memory-friendly.
            # In addition, the sum can be computed with DGL's built-in function
            # u_add_v, which further speeds up computation and reduces the memory
            # footprint.
el = (feat_src * self.attn_l).sum(dim=-1).unsqueeze(-1)
graph.srcdata.update({"ft": feat_src, "el": el})
# compute edge attention, el and er are a_l Wh_i and a_r Wh_j respectively.
if self.attn_r is not None:
er = (feat_dst * self.attn_r).sum(dim=-1).unsqueeze(-1)
graph.dstdata.update({"er": er})
graph.apply_edges(fn.u_add_v("el", "er", "e"))
else:
graph.apply_edges(fn.copy_u("el", "e"))
e = self.leaky_relu(graph.edata.pop("e"))
if self.training and self.edge_drop > 0:
if perm is None:
perm = torch.randperm(graph.number_of_edges(), device=e.device)
bound = int(graph.number_of_edges() * self.edge_drop)
eids = perm[bound:]
graph.edata["a"] = torch.zeros_like(e)
graph.edata["a"][eids] = self.attn_drop(edge_softmax(graph, e[eids], eids=eids))
else:
graph.edata["a"] = self.attn_drop(edge_softmax(graph, e))
# message passing
graph.update_all(fn.u_mul_e("ft", "a", "m"), fn.sum("m", "ft"))
rst = graph.dstdata["ft"]
if self._use_symmetric_norm:
degs = graph.in_degrees().float().clamp(min=1)
norm = torch.pow(degs, 0.5)
shp = norm.shape + (1,) * (feat_dst.dim() - 1)
norm = torch.reshape(norm, shp)
rst = rst * norm
# residual
if self.res_fc is not None:
resval = self.res_fc(h_dst).view(h_dst.shape[0], -1, self._out_feats)
rst = rst + resval
# activation
if self._activation is not None:
rst = self._activation(rst)
return rst
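# Usage sketch for the GATConv above (shapes and hyper-parameters are
# illustrative assumptions, not values prescribed by this file; the graph must
# have no 0-in-degree nodes unless allow_zero_in_degree=True):
#   conv = GATConv(in_feats=128, out_feats=64, num_heads=4, edge_drop=0.1)
#   rst = conv(g, feat)            # feat: (N, 128) -> rst: (N, 4, 64)
#   h = rst.flatten(1)             # concatenate heads -> (N, 256)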
class RevGATBlock(nn.Module):
def __init__(
self,
node_feats,
edge_feats,
edge_emb,
out_feats,
n_heads=1,
attn_drop=0.0,
edge_drop=0.0,
negative_slope=0.2,
residual=True,
activation=None,
use_attn_dst=True,
allow_zero_in_degree=True,
use_symmetric_norm=False,
):
super(RevGATBlock, self).__init__()
self.norm = nn.BatchNorm1d(n_heads * out_feats)
self.conv = GATConv(
node_feats,
out_feats,
num_heads=n_heads,
attn_drop=attn_drop,
edge_drop=edge_drop,
negative_slope=negative_slope,
residual=residual,
activation=activation,
use_attn_dst=use_attn_dst,
allow_zero_in_degree=allow_zero_in_degree,
use_symmetric_norm=use_symmetric_norm,
)
self.dropout = SharedDropout()
if edge_emb > 0:
self.edge_encoder = nn.Linear(edge_feats, edge_emb)
else:
self.edge_encoder = None
def forward(self, graph, x, dropout_mask=None, perm=None, efeat=None):
if perm is not None:
perm = perm.squeeze()
out = self.norm(x)
out = F.relu(out, inplace=True)
if isinstance(self.dropout, SharedDropout):
self.dropout.set_mask(dropout_mask)
out = self.dropout(out)
if self.edge_encoder is not None:
if efeat is None:
efeat = graph.edata["feat"]
efeat_emb = self.edge_encoder(efeat)
efeat_emb = F.relu(efeat_emb, inplace=True)
else:
efeat_emb = None
out = self.conv(graph, out, perm).flatten(1, -1)
return out
class RevGAT(nn.Module):
def __init__(
self,
in_feats,
n_classes,
n_hidden,
n_layers,
n_heads,
activation,
dropout=0.0,
input_drop=0.0,
attn_drop=0.0,
edge_drop=0.0,
use_attn_dst=True,
use_symmetric_norm=False,
group=2,
):
super().__init__()
self.in_feats = in_feats
self.n_hidden = n_hidden
self.n_classes = n_classes
self.n_layers = n_layers
self.num_heads = n_heads
self.group = group
self.convs = nn.ModuleList()
self.norm = nn.BatchNorm1d(n_heads * n_hidden)
for i in range(n_layers):
in_hidden = n_heads * n_hidden if i > 0 else in_feats
out_hidden = n_hidden if i < n_layers - 1 else n_classes
num_heads = n_heads if i < n_layers - 1 else 1
            if i == 0 or i == n_layers - 1:
self.convs.append(
GATConv(
in_hidden,
out_hidden,
num_heads=num_heads,
attn_drop=attn_drop,
edge_drop=edge_drop,
use_attn_dst=use_attn_dst,
use_symmetric_norm=use_symmetric_norm,
residual=True,
)
)
else:
fm = RevGATBlock(
in_hidden // group,
0,
0,
out_hidden // group,
n_heads=num_heads,
attn_drop=attn_drop,
edge_drop=edge_drop,
use_attn_dst=use_attn_dst,
use_symmetric_norm=use_symmetric_norm,
residual=True,
)
conv = GroupRevRes(fm, group=self.group)
self.convs.append(conv)
self.bias_last = ElementWiseLinear(n_classes, weight=False, bias=True, inplace=True)
self.input_drop = nn.Dropout(input_drop)
self.dropout = dropout
self.dp_last = nn.Dropout(dropout)
self.activation = activation
def forward(self, graph, feat):
h = feat
h = self.input_drop(h)
self.perms = []
for i in range(self.n_layers):
perm = torch.randperm(graph.number_of_edges(),
device=graph.device)
self.perms.append(perm)
h = self.convs[0](graph, h, self.perms[0]).flatten(1, -1)
m = torch.zeros_like(h).bernoulli_(1 - self.dropout)
mask = m.requires_grad_(False) / (1 - self.dropout)
for i in range(1, self.n_layers-1):
graph.requires_grad = False
perm = torch.stack([self.perms[i]]*self.group, dim=1)
h = self.convs[i](graph, h, mask, perm)
h = self.norm(h)
h = self.activation(h, inplace=True)
h = self.dp_last(h)
h = self.convs[-1](graph, h, self.perms[-1])
h = h.mean(1)
h = self.bias_last(h)
return h
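# Usage sketch (hyper-parameters are illustrative assumptions; n_hidden*n_heads
# should be divisible by `group` so GroupRevRes can split the features evenly):
#   model = RevGAT(in_feats=128, n_classes=40, n_hidden=256, n_layers=5,
#                  n_heads=3, activation=F.relu, dropout=0.5, input_drop=0.1,
#                  edge_drop=0.1, use_attn_dst=False, use_symmetric_norm=True)
#   logits = model(graph, graph.ndata["feat"])   # -> (num_nodes, n_classes)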
|
py | b40760640fc876a063db1767181d6b84fae1fa53 | from a10sdk.common.A10BaseClass import A10BaseClass
class Neighbor(A10BaseClass):
"""Class Description::
Specify a neighbor router.
Class neighbor supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param peer_group_neighbor_list: {"minItems": 1, "items": {"type": "peer-group-neighbor"}, "uniqueItems": true, "array": [{"required": ["peer-group"], "properties": {"activate": {"default": 1, "optional": true, "type": "number", "description": "Enable the Address Family for this Neighbor", "format": "flag"}, "route-refresh": {"default": 1, "optional": true, "type": "number", "description": "Advertise route-refresh capability to this neighbor", "format": "flag"}, "ve": {"not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "loopback", "trunk", "lif"], "type": "number", "description": "Virtual ethernet interface (Virtual ethernet interface number)", "optional": true, "format": "interface"}, "weight": {"description": "Set default weight for routes from this neighbor", "format": "number", "default": 0, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}, "timers-keepalive": {"description": "Keepalive interval", "format": "number", "default": 30, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}, "dynamic": {"default": 0, "optional": true, "type": "number", "description": "Advertise dynamic capability to this neighbor", "format": "flag"}, "loopback": {"not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "ve", "trunk", "lif"], "type": "number", "description": "Loopback interface (Port number)", "optional": true, "format": "interface"}, "default-originate": {"default": 0, "optional": true, "type": "number", "description": "Originate default route to this neighbor", "format": "flag"}, "distribute-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"distribute-list-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true, "distribute-list": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Filter updates to/from this neighbor (IP standard/extended/named access list)", "format": "string"}}}]}, "shutdown": {"default": 0, "optional": true, "type": "number", "description": "Administratively shut down this neighbor", "format": "flag"}, "prefix-list-direction": {"optional": true, "enum": ["both", "receive", "send"], "type": "string", "description": "'both': both; 'receive': receive; 'send': send; ", "format": "enum"}, "neighbor-route-map-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"nbr-rmap-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true, "nbr-route-map": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Apply route map to neighbor (Name of route map)", "format": "string"}}}]}, "advertisement-interval": {"description": "Minimum interval between sending BGP routing updates (time in seconds)", "format": "number", "type": "number", "maximum": 600, "minimum": 1, "optional": true}, "lif": {"description": "Logical interface (Lif interface number)", "format": "number", "optional": true, "not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "loopback", "ve", "trunk"], "maximum": 128, "minimum": 1, "type": "number"}, "send-community-val": {"description": "'both': Send Standard and Extended Community attributes; 'none': Disable Sending Community attributes; 'standard': Send Standard Community attributes; 'extended': Send Extended Community attributes; ", "format": "enum", "default": "both", "type": "string", 
"enum": ["both", "none", "standard", "extended"], "optional": true}, "update-source-ip": {"not-list": ["update-source-ipv6", "ethernet", "loopback", "ve", "trunk", "lif"], "type": "string", "description": "IP address", "optional": true, "format": "ipv4-address"}, "collide-established": {"default": 0, "optional": true, "type": "number", "description": "Include Neighbor in Established State for Collision Detection", "format": "flag"}, "next-hop-self": {"default": 0, "optional": true, "type": "number", "description": "Disable the next hop calculation for this neighbor", "format": "flag"}, "pass-encrypted": {"optional": true, "type": "encrypted", "format": "encrypted"}, "peer-group": {"description": "Neighbor tag", "format": "string", "minLength": 1, "optional": false, "maxLength": 128, "type": "string"}, "dont-capability-negotiate": {"default": 0, "optional": true, "type": "number", "description": "Do not perform capability negotiation", "format": "flag"}, "unsuppress-map": {"description": "Route-map to selectively unsuppress suppressed routes (Name of route map)", "format": "string", "minLength": 1, "optional": true, "maxLength": 128, "type": "string"}, "passive": {"default": 0, "optional": true, "type": "number", "description": "Don't send open messages to this neighbor", "format": "flag"}, "ebgp-multihop-hop-count": {"description": "maximum hop count", "format": "number", "type": "number", "maximum": 255, "minimum": 1, "optional": true}, "allowas-in": {"default": 0, "optional": true, "type": "number", "description": "Accept as-path with my AS present in it", "format": "flag"}, "pass-value": {"description": "Key String", "format": "password", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}, "timers-holdtime": {"description": "Holdtime", "format": "number", "default": 90, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}, "description": {"description": "Neighbor specific description (Up to 80 characters describing this neighbor)", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 80, "type": "string"}, "inbound": {"default": 0, "optional": true, "type": "number", "description": "Allow inbound soft reconfiguration for this neighbor", "format": "flag"}, "maximum-prefix-thres": {"description": "threshold-value, 1 to 100 percent", "format": "number", "type": "number", "maximum": 100, "minimum": 1, "optional": true}, "neighbor-prefix-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"nbr-prefix-list-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true, "nbr-prefix-list": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Filter updates to/from this neighbor (Name of a prefix list)", "format": "string"}}}]}, "peer-group-remote-as": {"description": "Specify AS number of BGP neighbor", "format": "number", "type": "number", "maximum": 4294967295, "minimum": 1, "optional": true}, "disallow-infinite-holdtime": {"default": 0, "optional": true, "type": "number", "description": "BGP per neighbor disallow-infinite-holdtime", "format": "flag"}, "route-map": {"description": "Route-map to specify criteria to originate default (route-map name)", "format": "string", "minLength": 1, "optional": true, "maxLength": 128, "type": "string"}, "trunk": {"not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "loopback", "ve", "lif"], "type": "number", "description": "Trunk interface (Trunk 
interface number)", "optional": true, "format": "interface"}, "remove-private-as": {"default": 0, "optional": true, "type": "number", "description": "Remove private AS number from outbound updates", "format": "flag"}, "neighbor-filter-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"filter-list": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Establish BGP filters (AS path access-list name)", "format": "string"}, "filter-list-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true}}]}, "update-source-ipv6": {"not-list": ["update-source-ip", "ethernet", "loopback", "ve", "trunk", "lif"], "type": "string", "description": "IPv6 address", "optional": true, "format": "ipv6-address"}, "maximum-prefix": {"description": "Maximum number of prefix accept from this peer (maximum no. of prefix limit (various depends on model))", "format": "number", "type": "number", "maximum": 65536, "minimum": 1, "optional": true}, "peer-group-key": {"default": 0, "optional": true, "type": "number", "description": "Configure peer-group", "format": "flag"}, "allowas-in-count": {"description": "Number of occurrences of AS number", "format": "number", "default": 3, "optional": true, "maximum": 10, "minimum": 1, "type": "number"}, "as-origination-interval": {"description": "Minimum interval between sending AS-origination routing updates (time in seconds)", "format": "number", "type": "number", "maximum": 600, "minimum": 1, "optional": true}, "override-capability": {"default": 0, "optional": true, "type": "number", "description": "Override capability negotiation result", "format": "flag"}, "enforce-multihop": {"default": 0, "optional": true, "type": "number", "description": "Enforce EBGP neighbors to perform multihop", "format": "flag"}, "strict-capability-match": {"default": 0, "optional": true, "type": "number", "description": "Strict capability negotiation match", "format": "flag"}, "ebgp-multihop": {"default": 0, "optional": true, "type": "number", "description": "Allow EBGP neighbors not on directly connected networks", "format": "flag"}, "ethernet": {"not-list": ["update-source-ip", "update-source-ipv6", "loopback", "ve", "trunk", "lif"], "type": "number", "description": "Ethernet interface (Port number)", "optional": true, "format": "interface"}, "connect": {"description": "BGP connect timer", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": true}}}], "type": "array", "$ref": "/axapi/v3/router/bgp/{as-number}/neighbor/peer-group-neighbor/{peer-group}"}
:param ipv6_neighbor_list: {"minItems": 1, "items": {"type": "ipv6-neighbor"}, "uniqueItems": true, "array": [{"required": ["neighbor-ipv6"], "properties": {"activate": {"default": 1, "optional": true, "type": "number", "description": "Enable the Address Family for this Neighbor", "format": "flag"}, "route-refresh": {"default": 1, "optional": true, "type": "number", "description": "Advertise route-refresh capability to this neighbor", "format": "flag"}, "ve": {"not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "loopback", "trunk", "lif"], "type": "number", "description": "Virtual ethernet interface (Virtual ethernet interface number)", "optional": true, "format": "interface"}, "weight": {"description": "Set default weight for routes from this neighbor", "format": "number", "default": 0, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}, "timers-keepalive": {"description": "Keepalive interval", "format": "number", "default": 30, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}, "bfd-value": {"description": "Key String", "format": "password", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}, "key-type": {"optional": true, "enum": ["md5", "meticulous-md5", "meticulous-sha1", "sha1", "simple"], "type": "string", "description": "'md5': md5; 'meticulous-md5': meticulous-md5; 'meticulous-sha1': meticulous-sha1; 'sha1': sha1; 'simple': simple; (Keyed MD5/Meticulous Keyed MD5/Meticulous Keyed SHA1/Keyed SHA1/Simple Password)", "format": "enum"}, "dynamic": {"default": 0, "optional": true, "type": "number", "description": "Advertise dynamic capability to this neighbor", "format": "flag"}, "loopback": {"not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "ve", "trunk", "lif"], "type": "number", "description": "Loopback interface (Port number)", "optional": true, "format": "interface"}, "multihop": {"default": 0, "optional": true, "type": "number", "description": "Enable multihop", "format": "flag"}, "default-originate": {"description": "Originate default route to this neighbor", "format": "flag", "default": 0, "optional": true, "not": "peer-group-name", "type": "number"}, "distribute-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"distribute-list-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true, "distribute-list": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Filter updates to/from this neighbor (IP standard/extended/named access list)", "format": "string"}}}]}, "shutdown": {"default": 0, "optional": true, "type": "number", "description": "Administratively shut down this neighbor", "format": "flag"}, "prefix-list-direction": {"optional": true, "enum": ["both", "receive", "send"], "type": "string", "description": "'both': both; 'receive': receive; 'send': send; ", "format": "enum"}, "nbr-remote-as": {"description": "Specify AS number of BGP neighbor", "format": "number", "type": "number", "maximum": 4294967295, "minimum": 1, "optional": true}, "description": {"description": "Neighbor specific description (Up to 80 characters describing this neighbor)", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 80, "type": "string"}, "advertisement-interval": {"description": "Minimum interval between sending BGP routing updates (time in seconds)", "format": "number", "optional": true, "maximum": 600, "minimum": 1, "not": 
"peer-group-name", "type": "number"}, "lif": {"description": "Logical interface (Lif interface number)", "format": "number", "optional": true, "not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "loopback", "ve", "trunk"], "maximum": 128, "minimum": 1, "type": "number"}, "neighbor-route-map-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"nbr-rmap-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true, "nbr-route-map": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Apply route map to neighbor (Name of route map)", "format": "string"}}}]}, "send-community-val": {"description": "'both': Send Standard and Extended Community attributes; 'none': Disable Sending Community attributes; 'standard': Send Standard Community attributes; 'extended': Send Extended Community attributes; ", "format": "enum", "default": "both", "type": "string", "enum": ["both", "none", "standard", "extended"], "optional": true}, "bfd": {"default": 0, "optional": true, "type": "number", "description": "Bidirectional Forwarding Detection (BFD)", "format": "flag"}, "collide-established": {"default": 0, "optional": true, "type": "number", "description": "Include Neighbor in Established State for Collision Detection", "format": "flag"}, "next-hop-self": {"description": "Disable the next hop calculation for this neighbor", "format": "flag", "default": 0, "optional": true, "not": "peer-group-name", "type": "number"}, "pass-encrypted": {"optional": true, "type": "encrypted", "format": "encrypted"}, "dont-capability-negotiate": {"default": 0, "optional": true, "type": "number", "description": "Do not perform capability negotiation", "format": "flag"}, "unsuppress-map": {"description": "Route-map to selectively unsuppress suppressed routes (Name of route map)", "format": "string", "minLength": 1, "optional": true, "maxLength": 128, "not": "peer-group-name", "type": "string"}, "passive": {"default": 0, "optional": true, "type": "number", "description": "Don't send open messages to this neighbor", "format": "flag"}, "ebgp-multihop-hop-count": {"description": "maximum hop count", "format": "number", "type": "number", "maximum": 255, "minimum": 1, "optional": true}, "allowas-in": {"default": 0, "optional": true, "type": "number", "description": "Accept as-path with my AS present in it", "format": "flag"}, "pass-value": {"description": "Key String", "format": "password", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}, "key-id": {"description": "Key ID", "format": "number", "type": "number", "maximum": 255, "minimum": 0, "optional": true}, "timers-holdtime": {"description": "Holdtime", "format": "number", "default": 90, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}, "update-source-ip": {"not-list": ["update-source-ipv6", "ethernet", "loopback", "ve", "trunk", "lif"], "type": "string", "description": "IP address", "optional": true, "format": "ipv4-address"}, "neighbor-ipv6": {"optional": false, "type": "string", "description": "Neighbor IPv6 address", "format": "ipv6-address"}, "inbound": {"default": 0, "optional": true, "type": "number", "description": "Allow inbound soft reconfiguration for this neighbor", "format": "flag"}, "maximum-prefix-thres": {"description": "threshold-value, 1 to 100 percent", "format": "number", "type": "number", "maximum": 100, "minimum": 1, "optional": true}, "bfd-encrypted": {"optional": 
true, "type": "encrypted", "description": "Do NOT use this option manually. (This is an A10 reserved keyword.) (The ENCRYPTED password string)", "format": "encrypted"}, "disallow-infinite-holdtime": {"description": "BGP per neighbor disallow-infinite-holdtime", "format": "flag", "default": 0, "optional": true, "not": "peer-group-name", "type": "number"}, "route-map": {"description": "Route-map to specify criteria to originate default (route-map name)", "format": "string", "minLength": 1, "optional": true, "maxLength": 128, "type": "string"}, "trunk": {"not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "loopback", "ve", "lif"], "type": "number", "description": "Trunk interface (Trunk interface number)", "optional": true, "format": "interface"}, "remove-private-as": {"default": 0, "optional": true, "type": "number", "description": "Remove private AS number from outbound updates", "format": "flag"}, "neighbor-filter-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"filter-list": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Establish BGP filters (AS path access-list name)", "format": "string"}, "filter-list-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true}}]}, "update-source-ipv6": {"not-list": ["update-source-ip", "ethernet", "loopback", "ve", "trunk", "lif"], "type": "string", "description": "IPv6 address", "optional": true, "format": "ipv6-address"}, "maximum-prefix": {"description": "Maximum number of prefix accept from this peer (maximum no. of prefix limit (various depends on model))", "format": "number", "type": "number", "maximum": 65536, "minimum": 1, "optional": true}, "neighbor-prefix-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"nbr-prefix-list-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true, "nbr-prefix-list": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Filter updates to/from this neighbor (Name of a prefix list)", "format": "string"}}}]}, "allowas-in-count": {"description": "Number of occurrences of AS number", "format": "number", "default": 3, "optional": true, "maximum": 10, "minimum": 1, "type": "number"}, "peer-group-name": {"description": "Configure peer-group (peer-group name)", "format": "string", "minLength": 1, "not-list": ["advertisement-interval", "as-origination-interval", "default-originate", "disallow-infinite-holdtime", "next-hop-self", "timers", "unsuppress-map"], "optional": true, "maxLength": 128, "type": "string"}, "as-origination-interval": {"description": "Minimum interval between sending AS-origination routing updates (time in seconds)", "format": "number", "optional": true, "maximum": 600, "minimum": 1, "not": "peer-group-name", "type": "number"}, "override-capability": {"default": 0, "optional": true, "type": "number", "description": "Override capability negotiation result", "format": "flag"}, "enforce-multihop": {"default": 0, "optional": true, "type": "number", "description": "Enforce EBGP neighbors to perform multihop", "format": "flag"}, "strict-capability-match": {"default": 0, "optional": true, "type": "number", "description": "Strict capability negotiation match", "format": "flag"}, "ebgp-multihop": {"default": 0, "optional": true, "type": "number", "description": "Allow EBGP neighbors 
not on directly connected networks", "format": "flag"}, "ethernet": {"not-list": ["update-source-ip", "update-source-ipv6", "loopback", "ve", "trunk", "lif"], "type": "number", "description": "Ethernet interface (Port number)", "optional": true, "format": "interface"}, "connect": {"description": "BGP connect timer", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": true}}}], "type": "array", "$ref": "/axapi/v3/router/bgp/{as-number}/neighbor/ipv6-neighbor/{neighbor-ipv6}"}
:param ipv4_neighbor_list: {"minItems": 1, "items": {"type": "ipv4-neighbor"}, "uniqueItems": true, "array": [{"required": ["neighbor-ipv4"], "properties": {"activate": {"default": 1, "optional": true, "type": "number", "description": "Enable the Address Family for this Neighbor", "format": "flag"}, "route-refresh": {"default": 1, "optional": true, "type": "number", "description": "Advertise route-refresh capability to this neighbor", "format": "flag"}, "ve": {"not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "loopback", "trunk", "lif"], "type": "number", "description": "Virtual ethernet interface (Virtual ethernet interface number)", "optional": true, "format": "interface"}, "weight": {"description": "Set default weight for routes from this neighbor", "format": "number", "default": 0, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}, "timers-keepalive": {"description": "Keepalive interval", "format": "number", "default": 30, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}, "bfd-value": {"description": "Key String", "format": "password", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}, "key-type": {"optional": true, "enum": ["md5", "meticulous-md5", "meticulous-sha1", "sha1", "simple"], "type": "string", "description": "'md5': md5; 'meticulous-md5': meticulous-md5; 'meticulous-sha1': meticulous-sha1; 'sha1': sha1; 'simple': simple; (Keyed MD5/Meticulous Keyed MD5/Meticulous Keyed SHA1/Keyed SHA1/Simple Password)", "format": "enum"}, "dynamic": {"default": 0, "optional": true, "type": "number", "description": "Advertise dynamic capability to this neighbor", "format": "flag"}, "loopback": {"not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "ve", "trunk", "lif"], "type": "number", "description": "Loopback interface (Port number)", "optional": true, "format": "interface"}, "multihop": {"default": 0, "optional": true, "type": "number", "description": "Enable multihop", "format": "flag"}, "default-originate": {"description": "Originate default route to this neighbor", "format": "flag", "default": 0, "optional": true, "not": "peer-group-name", "type": "number"}, "distribute-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"distribute-list-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true, "distribute-list": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Filter updates to/from this neighbor (IP standard/extended/named access list)", "format": "string"}}}]}, "shutdown": {"default": 0, "optional": true, "type": "number", "description": "Administratively shut down this neighbor", "format": "flag"}, "prefix-list-direction": {"optional": true, "enum": ["both", "receive", "send"], "type": "string", "description": "'both': both; 'receive': receive; 'send': send; ", "format": "enum"}, "as-origination-interval": {"description": "Minimum interval between sending AS-origination routing updates (time in seconds)", "format": "number", "optional": true, "maximum": 600, "minimum": 1, "not": "peer-group-name", "type": "number"}, "nbr-remote-as": {"description": "Specify AS number of BGP neighbor", "format": "number", "type": "number", "maximum": 4294967295, "minimum": 1, "optional": true}, "advertisement-interval": {"description": "Minimum interval between sending BGP routing updates (time in seconds)", "format": "number", "optional": true, "maximum": 
600, "minimum": 1, "not": "peer-group-name", "type": "number"}, "lif": {"description": "Logical interface (Lif interface number)", "format": "number", "optional": true, "not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "loopback", "ve", "trunk"], "maximum": 128, "minimum": 1, "type": "number"}, "neighbor-route-map-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"nbr-rmap-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true, "nbr-route-map": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Apply route map to neighbor (Name of route map)", "format": "string"}}}]}, "send-community-val": {"description": "'both': Send Standard and Extended Community attributes; 'none': Disable Sending Community attributes; 'standard': Send Standard Community attributes; 'extended': Send Extended Community attributes; ", "format": "enum", "default": "both", "type": "string", "enum": ["both", "none", "standard", "extended"], "optional": true}, "bfd": {"default": 0, "optional": true, "type": "number", "description": "Bidirectional Forwarding Detection (BFD)", "format": "flag"}, "collide-established": {"default": 0, "optional": true, "type": "number", "description": "Include Neighbor in Established State for Collision Detection", "format": "flag"}, "next-hop-self": {"description": "Disable the next hop calculation for this neighbor", "format": "flag", "default": 0, "optional": true, "not": "peer-group-name", "type": "number"}, "pass-encrypted": {"optional": true, "type": "encrypted", "format": "encrypted"}, "dont-capability-negotiate": {"default": 0, "optional": true, "type": "number", "description": "Do not perform capability negotiation", "format": "flag"}, "unsuppress-map": {"description": "Route-map to selectively unsuppress suppressed routes (Name of route map)", "format": "string", "minLength": 1, "optional": true, "maxLength": 128, "not": "peer-group-name", "type": "string"}, "passive": {"default": 0, "optional": true, "type": "number", "description": "Don't send open messages to this neighbor", "format": "flag"}, "ebgp-multihop-hop-count": {"description": "maximum hop count", "format": "number", "type": "number", "maximum": 255, "minimum": 1, "optional": true}, "allowas-in": {"default": 0, "optional": true, "type": "number", "description": "Accept as-path with my AS present in it", "format": "flag"}, "pass-value": {"description": "Key String", "format": "password", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}, "key-id": {"description": "Key ID", "format": "number", "type": "number", "maximum": 255, "minimum": 0, "optional": true}, "timers-holdtime": {"description": "Holdtime", "format": "number", "default": 90, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}, "update-source-ip": {"not-list": ["update-source-ipv6", "ethernet", "loopback", "ve", "trunk", "lif"], "type": "string", "description": "IP address", "optional": true, "format": "ipv4-address"}, "description": {"description": "Neighbor specific description (Up to 80 characters describing this neighbor)", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 80, "type": "string"}, "neighbor-ipv4": {"optional": false, "type": "string", "description": "Neighbor address", "format": "ipv4-address"}, "inbound": {"default": 0, "optional": true, "type": "number", "description": "Allow inbound soft reconfiguration for 
this neighbor", "format": "flag"}, "maximum-prefix-thres": {"description": "threshold-value, 1 to 100 percent", "format": "number", "type": "number", "maximum": 100, "minimum": 1, "optional": true}, "bfd-encrypted": {"optional": true, "type": "encrypted", "description": "Do NOT use this option manually. (This is an A10 reserved keyword.) (The ENCRYPTED password string)", "format": "encrypted"}, "disallow-infinite-holdtime": {"description": "BGP per neighbor disallow-infinite-holdtime", "format": "flag", "default": 0, "optional": true, "not": "peer-group-name", "type": "number"}, "trunk": {"not-list": ["update-source-ip", "update-source-ipv6", "ethernet", "loopback", "ve", "lif"], "type": "number", "description": "Trunk interface (Trunk interface number)", "optional": true, "format": "interface"}, "remove-private-as": {"default": 0, "optional": true, "type": "number", "description": "Remove private AS number from outbound updates", "format": "flag"}, "neighbor-filter-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"filter-list": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Establish BGP filters (AS path access-list name)", "format": "string"}, "filter-list-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true}}]}, "update-source-ipv6": {"not-list": ["update-source-ip", "ethernet", "loopback", "ve", "trunk", "lif"], "type": "string", "description": "IPv6 address", "optional": true, "format": "ipv6-address"}, "maximum-prefix": {"description": "Maximum number of prefix accept from this peer (maximum no. of prefix limit (various depends on model))", "format": "number", "type": "number", "maximum": 65536, "minimum": 1, "optional": true}, "neighbor-prefix-lists": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"nbr-prefix-list-direction": {"enum": ["in", "out"], "type": "string", "description": "'in': in; 'out': out; ", "format": "enum"}, "optional": true, "nbr-prefix-list": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Filter updates to/from this neighbor (Name of a prefix list)", "format": "string"}}}]}, "allowas-in-count": {"description": "Number of occurrences of AS number", "format": "number", "default": 3, "optional": true, "maximum": 10, "minimum": 1, "type": "number"}, "peer-group-name": {"description": "Configure peer-group (peer-group name)", "format": "string", "minLength": 1, "not-list": ["advertisement-interval", "as-origination-interval", "default-originate", "disallow-infinite-holdtime", "next-hop-self", "timers", "unsuppress-map"], "optional": true, "maxLength": 128, "type": "string"}, "enforce-multihop": {"default": 0, "optional": true, "type": "number", "description": "Enforce EBGP neighbors to perform multihop", "format": "flag"}, "override-capability": {"default": 0, "optional": true, "type": "number", "description": "Override capability negotiation result", "format": "flag"}, "route-map": {"description": "Route-map to specify criteria to originate default (route-map name)", "format": "string", "minLength": 1, "optional": true, "maxLength": 128, "type": "string"}, "strict-capability-match": {"default": 0, "optional": true, "type": "number", "description": "Strict capability negotiation match", "format": "flag"}, "ebgp-multihop": {"default": 0, "optional": true, "type": "number", "description": "Allow EBGP neighbors not on 
directly connected networks", "format": "flag"}, "ethernet": {"not-list": ["update-source-ip", "update-source-ipv6", "loopback", "ve", "trunk", "lif"], "type": "number", "description": "Ethernet interface (Port number)", "optional": true, "format": "interface"}, "connect": {"description": "BGP connect timer", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": true}}}], "type": "array", "$ref": "/axapi/v3/router/bgp/{as-number}/neighbor/ipv4-neighbor/{neighbor-ipv4}"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/router/bgp/{as_number}/neighbor`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
        self.required = []
        self.b_key = "neighbor"
        self.a10_url = "/axapi/v3/router/bgp/{as_number}/neighbor"
        self.DeviceProxy = ""
        self.peer_group_neighbor_list = []
        self.ipv6_neighbor_list = []
        self.ipv4_neighbor_list = []
        for key, value in kwargs.items():
            setattr(self, key, value)
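# Usage sketch (illustrative values only; the field names follow the schema in
# the docstring above, and `device_proxy` is an assumed, separately created
# a10sdk session object):
#   neighbor = Neighbor(ipv4_neighbor_list=[{"neighbor-ipv4": "10.0.0.2",
#                                            "nbr-remote-as": 65001}],
#                       DeviceProxy=device_proxy)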
|
py | b40760eb834bdf3eb5cd37699d7ed98dfe903817 | # -*- coding: utf-8 -*-
import base64
import gzip
import io
import xml.etree.ElementTree as ElementTree
import zlib
import codecs
from .common import KDBFile, HeaderDictionary
from .common import stream_unpack
from .crypto import transform_key, pad, unpad
from .crypto import xor, sha256, aes_cbc_decrypt, aes_cbc_encrypt
from .hbio import HashedBlockIO
from .pureSalsa20 import Salsa20
KDB4_SALSA20_IV = codecs.decode('e830094b97205d2a', 'hex')
KDB4_SIGNATURE = (0x9AA2D903, 0xB54BFB67)
try:
file_types = (file, io.IOBase)
except NameError:
file_types = (io.IOBase,)
class KDB4Header(HeaderDictionary):
fields = {
'EndOfHeader': 0,
'Comment': 1,
# cipher used for the data stream after the header
'CipherID': 2,
# indicates whether decrypted data stream is gzip compressed
'CompressionFlags': 3,
#
'MasterSeed': 4,
#
'TransformSeed': 5,
#
'TransformRounds': 6,
#
'EncryptionIV': 7,
# key used to protect data in xml
'ProtectedStreamKey': 8,
# first 32 bytes of the decrypted data stream after the header
'StreamStartBytes': 9,
# cipher used to protect data in xml (ARC4 or Salsa20)
'InnerRandomStreamID': 10,
}
fmt = {3: '<I', 6: '<q'}
class KDB4File(KDBFile):
def __init__(self, stream=None, **credentials):
self.header = KDB4Header()
KDBFile.__init__(self, stream, **credentials)
def set_compression(self, flag=1):
"""Dis- (0) or enable (default: 1) compression"""
if flag not in [0, 1]:
raise ValueError('Compression flag can be 0 or 1.')
self.header.CompressionFlags = flag
# def set_comment(self, comment):
# self.header.Comment = comment
def read_from(self, stream):
"""
Read, parse, decrypt, decompress a KeePass file from a stream.
:arg stream: A file-like object (opened in 'rb' mode) or IO buffer
containing a KeePass file.
"""
super(KDB4File, self).read_from(stream)
if self.header.CompressionFlags == 1:
self._unzip()
# def write_to(self, stream):
# """
# Write the KeePass database back to a KeePass2 compatible file.
# :arg stream: A writeable file-like object or IO buffer.
# """
# if not (isinstance(stream, io.IOBase) or isinstance(stream, file_types)):
# raise TypeError('Stream does not have the buffer interface.')
# self._write_header(stream)
def _read_header(self, stream):
"""
Parse the header and write the values into self.header. Also sets
self.header_length.
"""
# KeePass 2.07 has version 1.01,
# 2.08 has 1.02,
# 2.09 has 2.00, 2.10 has 2.02, 2.11 has 2.04,
# 2.15 has 3.00.
# The first 2 bytes are critical (i.e. loading will fail, if the
# file version is too high), the last 2 bytes are informational.
# TODO implement version check
# the first header field starts at byte 12 after the signature
stream.seek(12)
while True:
# field_id is a single byte
field_id = stream_unpack(stream, None, 1, 'b')
# field_id >10 is undefined
if field_id not in self.header.fields.values():
raise IOError('Unknown header field found.')
# two byte (short) length of field data
length = stream_unpack(stream, None, 2, 'h')
if length > 0:
data = stream_unpack(stream, None, length, '{}s'.format(length))
self.header.b[field_id] = data
# set position in data stream of end of header
if field_id == 0:
self.header_length = stream.tell()
break
# def _write_header(self, stream):
# """Serialize the header fields from self.header into a byte stream, prefix
# with file signature and version before writing header and out-buffer
# to `stream`.
# Note, that `stream` is flushed, but not closed!"""
# # serialize header to stream
# header = bytearray()
# # write file signature
# header.extend(struct.pack('<II', *KDB4_SIGNATURE))
# # and version
# header.extend(struct.pack('<hh', 0, 3))
# field_ids = self.header.keys()
# field_ids.sort()
# field_ids.reverse() # field_id 0 must be last
# for field_id in field_ids:
# value = self.header.b[field_id]
# length = len(value)
# header.extend(struct.pack('<b', field_id))
# header.extend(struct.pack('<h', length))
# header.extend(struct.pack('{}s'.format(length), value))
# # write header to stream
# stream.write(header)
# headerHash = base64.b64encode(sha256(header))
# self.obj_root.Meta.HeaderHash = headerHash
# # create HeaderHash if it does not exist
# if len(self.obj_root.Meta.xpath("HeaderHash")) < 1:
# etree.SubElement(self.obj_root.Meta, "HeaderHash")
# # reload out_buffer because we just changed the HeaderHash
# self.protect()
# self.out_buffer = io.BytesIO(self.pretty_print())
# # zip or not according to header setting
# if self.header.CompressionFlags == 1:
# self._zip()
# self._encrypt();
# # write encrypted block to stream
# stream.write(self.out_buffer)
# stream.flush()
def _decrypt(self, stream):
"""
Build the master key from header settings and key-hash list.
Start reading from `stream` after the header and decrypt all the data.
Remove padding as needed and feed into hashed block reader, set as
in-buffer.
"""
super(KDB4File, self)._decrypt(stream)
data = aes_cbc_decrypt(stream.read(), self.master_key,
self.header.EncryptionIV)
data = unpad(data)
length = len(self.header.StreamStartBytes)
if self.header.StreamStartBytes == data[:length]:
# skip startbytes and wrap data in a hashed block io
self.in_buffer = HashedBlockIO(bytes=data[length:])
# set successful decryption flag
self.opened = True
else:
raise IOError('Master key invalid.')
def _encrypt(self):
"""
Rebuild the master key from header settings and key-hash list. Encrypt
the stream start bytes and the out-buffer formatted as hashed block
stream with padding added as needed.
"""
# rebuild master key from (possibly) updated header
self._make_master_key()
# make hashed block stream
block_buffer = HashedBlockIO()
block_buffer.write(self.out_buffer.read())
# data is buffered in hashed block io, start a new one
self.out_buffer = io.BytesIO()
# write start bytes (for successful decrypt check)
self.out_buffer.write(self.header.StreamStartBytes)
# append blocked data to out-buffer
block_buffer.write_block_stream(self.out_buffer)
block_buffer.close()
self.out_buffer.seek(0)
# encrypt the whole thing with header settings and master key
data = pad(self.out_buffer.read())
self.out_buffer = aes_cbc_encrypt(data, self.master_key,
self.header.EncryptionIV)
def _unzip(self):
"""
Inplace decompress in-buffer. Read/write position is moved to 0.
"""
self.in_buffer.seek(0)
d = zlib.decompressobj(16 + zlib.MAX_WBITS)
self.in_buffer = io.BytesIO(d.decompress(self.in_buffer.read()))
self.in_buffer.seek(0)
def _zip(self):
"""
Inplace compress out-buffer. Read/write position is moved to 0.
"""
data = self.out_buffer.read()
self.out_buffer = io.BytesIO()
# note: compresslevel=6 seems to be important for kdb4!
gz = gzip.GzipFile(fileobj=self.out_buffer, mode='wb', compresslevel=6)
gz.write(data)
gz.close()
self.out_buffer.seek(0)
def _make_master_key(self):
"""
Make the master key by (1) combining the credentials to create
a composite hash, (2) transforming the hash using the transform seed
for a specific number of rounds and (3) finally hashing the result in
combination with the master seed.
"""
super(KDB4File, self)._make_master_key()
composite = sha256(''.join(self.keys))
tkey = transform_key(composite,
self.header.TransformSeed,
self.header.TransformRounds)
self.master_key = sha256(self.header.MasterSeed + tkey)
class KDBXmlExtension:
"""
    The KDB4 payload is an XML document. For easier use this class provides the
    parsed XML tree (an ``ElementTree`` root) as the `obj_root` attribute.
    More importantly, text values in the XML document can be protected using
    Salsa20. Protected elements are unprotected by default (passwords are in
    the clear). You can override this with the `unprotect=False` argument.
"""
def __init__(self, unprotect=True):
self._salsa_buffer = bytearray()
self.salsa = Salsa20(
sha256(self.header.ProtectedStreamKey),
KDB4_SALSA20_IV)
self.in_buffer.seek(0)
# self.tree = objectify.parse(self.in_buffer)
# self.obj_root = self.tree.getroot()
self.obj_root = ElementTree.fromstring(self.in_buffer.read())
if unprotect:
self.unprotect()
def unprotect(self):
"""
Find all elements with a 'Protected=True' attribute and replace the text
with an unprotected value in the XML element tree. The original text is
set as 'ProtectedValue' attribute and the 'Protected' attribute is set
to 'False'. The 'ProtectPassword' element in the 'Meta' section is also
set to 'False'.
"""
self._reset_salsa()
for elem in self.obj_root.iterfind('.//Value[@Protected="True"]'):
if elem.text is not None:
elem.set('ProtectedValue', elem.text)
elem.set('Protected', 'False')
elem.text = self._unprotect(elem.text)
# def protect(self):
# """
# Find all elements with a 'Protected=False' attribute and replace the
# text with a protected value in the XML element tree. If there was a
# 'ProtectedValue' attribute, it is deleted and the 'Protected' attribute
# is set to 'True'. The 'ProtectPassword' element in the 'Meta' section is
# also set to 'True'.
# This does not just restore the previous protected value, but reencrypts
# all text values of elements with 'Protected=False'. So you could use
# this after modifying a password, adding a completely new entry or
# deleting entry history items.
# """
# self._reset_salsa()
# self.obj_root.Meta.MemoryProtection.ProtectPassword._setText('True')
# for elem in self.obj_root.iterfind('.//Value[@Protected="False"]'):
# etree.strip_attributes(elem, 'ProtectedValue')
# elem.set('Protected', 'True')
# elem._setText(self._protect(elem.text))
# def pretty_print(self):
# """Return a serialization of the element tree."""
# return etree.tostring(self.obj_root, pretty_print=True,
# encoding='utf-8', standalone=True)
def to_dic(self):
"""Return a dictionnary of the element tree."""
pwd_found = []
# print etree.tostring(self.obj_root)
root = ElementTree.fromstring(ElementTree.tostring(self.obj_root))
for entry in root.findall('.//Root//Entry'):
dic = {}
for elem in entry.iter('String'):
try:
if elem[0].text == 'UserName':
dic['Login'] = elem[1].text
else:
                        # Replace newlines with a dot
dic[elem[0].text] = elem[1].text.replace('\n', '.')
except Exception as e:
# print e
pass
pwd_found.append(dic)
return pwd_found
# def write_to(self, stream):
# """Serialize the element tree to the out-buffer."""
# if self.out_buffer is None:
# self.protect()
# self.out_buffer = io.BytesIO(self.pretty_print())
def _reset_salsa(self):
"""Clear the salsa buffer and reset algorithm counter to 0."""
self._salsa_buffer = bytearray()
self.salsa.set_counter(0)
def _get_salsa(self, length):
"""
Returns the next section of the "random" Salsa20 bytes with the
requested `length`.
"""
while length > len(self._salsa_buffer):
new_salsa = self.salsa.encrypt_bytes(str(bytearray(64)))
self._salsa_buffer.extend(new_salsa)
nacho = self._salsa_buffer[:length]
del self._salsa_buffer[:length]
return nacho
def _unprotect(self, string):
"""
Base64 decode and XOR the given `string` with the next salsa.
Returns an unprotected string.
"""
tmp = base64.b64decode(string)
return str(xor(tmp, self._get_salsa(len(tmp))))
def _protect(self, string):
"""
XORs the given `string` with the next salsa and base64 encodes it.
Returns a protected string.
"""
tmp = str(xor(string, self._get_salsa(len(string))))
return base64.b64encode(tmp)
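# A minimal sketch (not part of this module's API) of the in-order XOR
# protection scheme implemented by _reset_salsa/_get_salsa/_unprotect/_protect
# above: every protected value consumes the next chunk of a shared keystream,
# so values must be processed in document order after each keystream reset.
# The placeholder keystream below is NOT Salsa20; the real code keys Salsa20
# with sha256(ProtectedStreamKey).
def _sketch_stream_protection(values):
    import base64 as _b64
    from itertools import cycle, islice

    def keystream():
        return cycle(bytes(range(256)))  # illustration-only keystream

    def xor_with(ks, data):
        pad = bytes(islice(ks, len(data)))
        return bytes(a ^ b for a, b in zip(data, pad))

    values = [bytes(v) for v in values]
    ks = keystream()                     # "reset", like _reset_salsa()
    protected = [_b64.b64encode(xor_with(ks, v)) for v in values]
    ks = keystream()                     # reset again before unprotecting
    recovered = [xor_with(ks, _b64.b64decode(p)) for p in protected]
    assert recovered == values
    return protected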
class KDB4Reader(KDB4File, KDBXmlExtension):
"""
Usually you would want to use the `keepass.open` context manager to open a
file. It checks the file signature and creates a suitable reader-instance.
    Doing it by hand is also possible::
kdb = keepass.KDB4Reader()
kdb.add_credentials(password='secret')
with open('passwords.kdb', 'rb') as fh:
kdb.read_from(fh)
or...::
with open('passwords.kdb', 'rb') as fh:
kdb = keepass.KDB4Reader(fh, password='secret')
"""
def __init__(self, stream=None, **credentials):
KDB4File.__init__(self, stream, **credentials)
def read_from(self, stream, unprotect=True):
KDB4File.read_from(self, stream)
# the extension requires parsed header and decrypted self.in_buffer, so
# initialize only here
KDBXmlExtension.__init__(self, unprotect)
# def write_to(self, stream, use_etree=True):
# """
# Write the KeePass database back to a KeePass2 compatible file.
# :arg stream: A file-like object or IO buffer.
# :arg use_tree: Serialize the element tree to XML to save (default:
# True), Set to False to write the data currently in the in-buffer
# instead.
# """
# if use_etree:
# KDBXmlExtension.write_to(self, stream)
# KDB4File.write_to(self, stream)
|
py | b40761d1f52db53d32d702c436d877a8ab087a3a | #!/usr/bin/env python
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import pystache
from google.protobuf.compiler import plugin_pb2 as plugin
from google.protobuf.descriptor_pb2 import FieldDescriptorProto
from plugin.templates import resource_name, insertion_points
from plugin.utils import proto_utils, gapic_utils
TEMPLATE_LOCATION = os.path.join('plugin', 'templates')
def render_new_file(renderer, response, resource):
f = response.file.add()
f.name = resource.filename()
f.content = renderer.render(resource)
def get_oneof_for_resource(collection_config, gapic_config):
oneof = None
for oneof_config in gapic_config.collection_oneofs.values():
for collection_name in oneof_config.collection_names:
if collection_name == collection_config.entity_name:
if oneof:
raise ValueError("A collection cannot be part of multiple oneofs")
oneof = oneof_config
return oneof
def generate_resource_name_types(response, gapic_config, java_package):
renderer = pystache.Renderer(search_dirs=TEMPLATE_LOCATION)
for collection_config in gapic_config.collection_configs.values():
oneof = get_oneof_for_resource(collection_config, gapic_config)
resource = resource_name.ResourceName(collection_config, java_package, oneof)
render_new_file(renderer, response, resource)
for fixed_config in gapic_config.fixed_collections.values():
oneof = get_oneof_for_resource(fixed_config, gapic_config)
resource = resource_name.ResourceNameFixed(fixed_config, java_package, oneof)
render_new_file(renderer, response, resource)
for oneof_config in gapic_config.collection_oneofs.values():
parent_resource = resource_name.ParentResourceName(oneof_config, java_package)
untyped_resource = resource_name.UntypedResourceName(oneof_config, java_package)
resource_factory = resource_name.ResourceNameFactory(oneof_config, java_package)
render_new_file(renderer, response, parent_resource)
render_new_file(renderer, response, untyped_resource)
render_new_file(renderer, response, resource_factory)
def get_builder_view(field):
if field.label == FieldDescriptorProto.LABEL_REPEATED:
return insertion_points.InsertBuilderList
else:
return insertion_points.InsertBuilder
def get_class_view(field):
if field.label == FieldDescriptorProto.LABEL_REPEATED:
return insertion_points.InsertClassList
else:
return insertion_points.InsertClass
def get_protos_to_generate_for(request):
proto_files = dict((pf.name, pf) for pf in request.proto_file)
for pf_name in request.file_to_generate:
if pf_name in proto_files:
yield proto_files[pf_name]
def resolve_java_package_name(request):
java_package = None
for pf in get_protos_to_generate_for(request):
for opt in proto_utils.get_named_options(pf, 'java_package'):
if java_package is not None and java_package != opt[1]:
raise ValueError('got conflicting java packages: ' + str(java_package)
+ ' and ' + str(opt[1]))
java_package = opt[1]
break
if java_package is None:
raise ValueError('java package not defined')
return java_package
def main(data):
# Parse request
request = plugin.CodeGeneratorRequest()
request.ParseFromString(data)
java_package = resolve_java_package_name(request)
gapic_config = gapic_utils.read_from_gapic_yaml(request.parameter)
# Generate output
response = plugin.CodeGeneratorResponse()
generate_resource_name_types(response, gapic_config, java_package)
# Serialise response message
output = response.SerializeToString()
return output
if __name__ == '__main__':
try:
source = sys.stdin.buffer
dest = sys.stdout.buffer
except AttributeError:
source = sys.stdin
dest = sys.stdout
# Read request message from stdin
data = source.read()
output = main(data)
# Write to stdout
dest.write(output)
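# Example invocation (a sketch -- the plugin executable name below is
# hypothetical; protoc only requires the `protoc-gen-` prefix):
#
#   protoc --plugin=protoc-gen-resourcenames=./plugin.py \
#          --resourcenames_out=path/to/gapic.yaml:generated/ \
#          --proto_path=protos/ protos/library.proto
#
# protoc forwards the text before the ':' in `--<name>_out=` to the plugin as
# `request.parameter`, which is how the GAPIC YAML path reaches
# `gapic_utils.read_from_gapic_yaml` above.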
|
py | b407643c6af7f00be9f90ff3ef8f947c8d76addd | # Identical copies of two AlexNet models
import torch
import torch.nn as nn
import copy
import torch.nn.functional as F
import torch.optim as optim
class FullyConnected(nn.Module):
def __init__(self, input_dim=28*28 , width=50, depth=3, num_classes=10):
super(FullyConnected, self).__init__()
self.input_dim = input_dim
self.width = width
self.depth = depth
self.num_classes = num_classes
layers = self.get_layers()
self.fc = nn.Sequential(
nn.Linear(self.input_dim, self.width, bias=False),
nn.ReLU(inplace=True),
*layers,
nn.Linear(self.width, self.num_classes, bias=False),
)
def get_layers(self):
layers = []
for i in range(self.depth - 2):
layers.append(nn.Linear(self.width, self.width, bias=False))
layers.append(nn.ReLU())
return layers
"""
# new function that spits out the outputs for each layer including the first and final layers
def layer_output(self, inputs):
hidden_layers = []
hidden_layers.append(inputs)
for i in range(self.depth - 2):
layers.append(nn.Linear(self.width, self.width, bias=False))
layers.append(nn.ReLU())
return hidden_layers
"""
def forward(self, x):
x = x.view(x.size(0), self.input_dim)
x = self.fc(x)
return x
# derivative for tanh, only for scalar output
def dtanh(x):
m = nn.Tanh()
x = torch.autograd.Variable(x, requires_grad=True)
y = m(x)
y.backward( torch.ones_like(x) )
return x.grad
class FullyConnected_tanh(nn.Module):
def __init__(self, input_dim=28*28 , width=50, depth=3, num_classes=10):
super(FullyConnected_tanh, self).__init__()
self.input_dim = input_dim
self.width = width
self.depth = depth
self.num_classes = num_classes
layers = self.get_layers()
self.fc = nn.Sequential(
nn.Linear(self.input_dim, self.width, bias=False),
nn.Tanh(),
*layers,
nn.Linear(self.width, self.num_classes, bias=False),
)
def get_layers(self):
layers = []
for i in range(self.depth - 2):
layers.append(nn.Linear(self.width, self.width, bias=False))
layers.append(nn.Tanh())
return layers
def forward(self, x):
x = x.view(x.size(0), self.input_dim)
x = self.fc(x)
return x
# preactivation outputs
def preact_layer(self, x):
# number of hidden layers
hidden = [None] * (self.depth - 1)
x = x.view(x.size(0), -1)
ell = 2
for idx in range(self.depth - 1):
if idx == 0:
hidden[idx] = self.fc[idx](x)
else:
hidden[idx] = self.fc[ell * idx - 1: ell * idx + 1](hidden[idx - 1])
return hidden + [self.fc[-2:](hidden[-1])]
    # Note: the last weight matrix in these settings is not square (e.g. for MNIST, W_L is 784 by 10); this may need to be adjusted in the future.
# final argument decides whether it is post-activation or not (pre-activation)
def layerwise_jacob_ls(self, x, post):
# check "The Emergence of Spectral Universality in Deep Networks"
m = nn.Tanh()
preact_h = self.preact_layer(x) # do not include the input layer check eq (1) and (2) of "Emergence of Spectral Universality in Deep Networks"
# get weights
weights = [p for p in self.parameters()]
if post:
dphi_h = dtanh(preact_h[0][0])
DW_l = torch.matmul(torch.diag( dphi_h ), weights[0])
#DW_l = torch.matmul(torch.diag( m(preact_h[0][0]) ), weights[0])
DW_ls = [DW_l]
for i in range(1, len(preact_h)): # include the last one even if it is not symmetrical
#for i in range(1, len(preact_h) - 1):
dphi_h = dtanh(preact_h[i][0])
if i != len(preact_h) - 1:
DW_l = torch.matmul(torch.diag( dphi_h ), weights[i])
else:
DW_l = torch.matmul(torch.diag( torch.ones_like(dphi_h) ), weights[i])
#DW_l = torch.matmul(torch.diag( m(preact_h[i][0]) ), weights[i])
DW_ls.append(DW_l)
else:
# essentially the first activation function is just the identity map
DW_ls = [weights[0]] # it's actually W^{l + 1} D^l
for i in range(0, len(preact_h) - 1):
dphi_h = dtanh(preact_h[i][0])
DW_l = torch.matmul(weights[i + 1], torch.diag( dphi_h ))
print(DW_l.shape)
DW_ls.append(DW_l)
return DW_ls
# postactivation outputs
def postact_layer(self, x):
# number of hidden layers
hidden = [None] * (self.depth - 1)
x = x.view(x.size(0), -1)
ell = 2
for idx in range(self.depth - 1):
if idx == 0:
hidden[idx] = self.fc[ell * idx: ell * idx + ell](x)
else:
hidden[idx] = self.fc[ell * idx: ell * idx + ell](hidden[idx - 1])
        return hidden, self.fc[-1](hidden[-1])
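# A minimal sketch (illustration only, sizes are arbitrary) of how the
# layer-wise Jacobians D^l W^l returned by `layerwise_jacob_ls` can be
# inspected, e.g. to look at the singular-value spectra studied in "The
# Emergence of Spectral Universality in Deep Networks". Wrapped in a function
# so nothing runs on import.
def _layerwise_jacobian_spectra_example():
    model = FullyConnected_tanh(input_dim=28 * 28, width=28 * 28, depth=5, num_classes=10)
    x = torch.randn(1, 28 * 28)
    jacobians = model.layerwise_jacob_ls(x, post=True)
    # singular values of each factor (the last one is rectangular: 10 x 784)
    return [torch.svd(DW)[1] for DW in jacobians]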
# fully connected with activation to the last layer
class FullyConnected_tanh_2(nn.Module):
def __init__(self, input_dim=28*28 , width=50, depth=3, num_classes=10):
super(FullyConnected_tanh_2, self).__init__()
self.input_dim = input_dim
self.width = width
self.depth = depth
self.num_classes = num_classes
layers = self.get_layers()
self.fc = nn.Sequential(
nn.Linear(self.input_dim, self.width, bias=False),
nn.Tanh(),
*layers,
nn.Linear(self.width, self.num_classes, bias=False),
nn.Tanh(),
)
def get_layers(self):
layers = []
for i in range(self.depth - 2):
layers.append(nn.Linear(self.width, self.width, bias=False))
layers.append(nn.Tanh())
return layers
def forward(self, x):
x = x.view(x.size(0), self.input_dim)
x = self.fc(x)
return x
# preactivation outputs
def preact_layer(self, x):
# number of hidden layers
hidden = [None] * (self.depth - 1)
x = x.view(x.size(0), -1)
ell = 2
for idx in range(self.depth - 1):
if idx == 0:
hidden[idx] = self.fc[idx](x)
else:
hidden[idx] = self.fc[ell * idx - 1: ell * idx + 1](hidden[idx - 1])
return hidden + [self.fc[-2:](hidden[-1])]
    # Note: the last weight matrix in these settings is not square (e.g. for MNIST, W_L is 784 by 10); this may need to be adjusted in the future.
def jacob_ls(self, x):
# check "The Emergence of Spectral Universality in Deep Networks"
m = nn.Tanh()
preact_h = self.preact_layer(x)
# get weights
weights = [p for p in self.parameters()]
dphi_h = dtanh(preact_h[0][0])
DW_l = torch.matmul(torch.diag( dphi_h ), weights[0])
#DW_l = torch.matmul(torch.diag( m(preact_h[0][0]) ), weights[0])
DW_ls = [DW_l]
# due to the last matrix being non-square, the case l = L is not included
for i in range(1, len(preact_h) - 1):
dphi_h = dtanh(preact_h[i][0])
DW_l = torch.matmul(torch.diag( dphi_h ), weights[i])
#DW_l = torch.matmul(torch.diag( m(preact_h[i][0]) ), weights[i])
DW_ls.append(DW_l)
return DW_ls
# postactivation outputs
"""
def postact_layer(self, x):
# number of hidden layers
hidden = [None] * (self.depth - 1)
x = x.view(x.size(0), -1)
ell = 2
for idx in range(self.depth - 1):
if idx == 0:
hidden[idx] = self.fc[ell * idx: ell * idx + ell](x)
else:
hidden[idx] = self.fc[ell * idx: ell * idx + ell](hidden[idx - 1])
return hidden, self.sequential[-1](hidden[-1])
"""
class FullyConnected_bias(nn.Module):
def __init__(self, input_dim=28*28 , width=50, depth=3, num_classes=10):
super(FullyConnected_bias, self).__init__()
self.input_dim = input_dim
self.width = width
self.depth = depth
self.num_classes = num_classes
layers = self.get_layers()
self.fc = nn.Sequential(
nn.Linear(self.input_dim, self.width, bias=True),
#nn.ReLU(inplace=True),
nn.Sigmoid(),
*layers,
nn.Linear(self.width, self.num_classes, bias=True),
)
def get_layers(self):
layers = []
for i in range(self.depth - 2):
layers.append(nn.Linear(self.width, self.width, bias=True))
layers.append(nn.Sigmoid())
return layers
def forward(self, x):
x = x.view(x.size(0), self.input_dim)
x = self.fc(x)
return x
class FullyConnected_sigmoid(nn.Module):
def __init__(self, input_dim=28*28 , width=50, depth=3, num_classes=10):
super(FullyConnected_sigmoid, self).__init__()
self.input_dim = input_dim
self.width = width
self.depth = depth
self.num_classes = num_classes
layers = self.get_layers()
self.fc = nn.Sequential(
nn.Linear(self.input_dim, self.width, bias=False),
#nn.ReLU(inplace=True),
nn.Sigmoid(),
*layers,
nn.Linear(self.width, self.num_classes, bias=False),
)
def get_layers(self):
layers = []
for i in range(self.depth - 2):
layers.append(nn.Linear(self.width, self.width, bias=False))
layers.append(nn.Sigmoid())
return layers
def forward(self, x):
x = x.view(x.size(0), self.input_dim)
x = self.fc(x)
return x
# This is a copy from online repositories
class AlexNet(nn.Module):
def __init__(self, input_height=32, input_width=32, input_channels=3, ch=64, num_classes=1000):
# ch is the scale factor for number of channels
super(AlexNet, self).__init__()
self.input_height = input_height
self.input_width = input_width
self.input_channels = input_channels
self.features = nn.Sequential(
nn.Conv2d(3, out_channels=ch, kernel_size=4, stride=2, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(ch, ch, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(ch, ch, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(ch, ch, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(ch, ch, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.size = self.get_size()
print(self.size)
a = torch.tensor(self.size).float()
b = torch.tensor(2).float()
self.width = int(a) * int(1 + torch.log(a) / torch.log(b))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(self.size, self.width),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(self.width, self.width),
nn.ReLU(inplace=True),
nn.Linear(self.width, num_classes),
)
def get_size(self):
# hack to get the size for the FC layer...
x = torch.randn(1, self.input_channels, self.input_height, self.input_width)
y = self.features(x)
print(y.size())
return y.view(-1).size(0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class SimpleNet(nn.Module):
def __init__(self):
super(SimpleNet, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4*4*50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4*4*50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def alexnet():
return AlexNet(ch=64, num_classes=10)
# FCN with square WMs (except for the last one), bias:no, activation:tanh, dataset:MNIST
def fc2_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=100, depth=2, num_classes=10)
def fc3_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=3, num_classes=10)
def fc4_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=4, num_classes=10)
def fc5_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=5, num_classes=10)
def fc6_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=6, num_classes=10)
def fc7_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=7, num_classes=10)
def fc8_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=8, num_classes=10)
def fc9_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=9, num_classes=10)
def fc10_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=10, num_classes=10)
def fc15_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=15, num_classes=10)
def fc20_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=20, num_classes=10)
def fc25_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=25, num_classes=10)
def fc30_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=28*28, depth=30, num_classes=10)
# FCN with square WMs (except for the last one), bias:no, activation:tanh, dataset:MNIST, additional tanh added to last layer
def fc3_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=3, num_classes=10)
def fc4_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=4, num_classes=10)
def fc5_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=5, num_classes=10)
def fc6_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=6, num_classes=10)
def fc7_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=7, num_classes=10)
def fc8_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=8, num_classes=10)
def fc9_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=9, num_classes=10)
def fc10_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=10, num_classes=10)
def fc15_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=15, num_classes=10)
def fc20_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=20, num_classes=10)
def fc25_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=25, num_classes=10)
def fc30_mnist_tanh_2(**kwargs):
return FullyConnected_tanh_2(input_dim=28*28, width=28*28, depth=30, num_classes=10)
# FCN with square WMs (except for the last one), bias:yes, activation:tanh, dataset:MNIST
def fc5_mnist_tanh_bias(**kwargs):
return FullyConnected_tanh_bias(input_dim=28*28, width=28*28, depth=5, num_classes=10)
def fc10_ns_mnist_tanh(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=100, depth=10, num_classes=10)
def fc15_tanh(**kwargs):
return FullyConnected_tanh(input_dim=32*32*3, width=100, depth=15, num_classes=10)
def fc15_mnist_tanh_bias(**kwargs):
return FullyConnected_tanh_bias(input_dim=28*28, width=28*28, depth=15, num_classes=10)
def fc15_ns_mnist_tanh_bias(**kwargs):
return FullyConnected_tanh(input_dim=28*28, width=100, depth=15, num_classes=10)
def fc2(**kwargs):
return FullyConnected(input_dim=32*32*3, width=100, depth=2, num_classes=10)
def fc3(**kwargs):
return FullyConnected(input_dim=32*32*3, width=100, depth=3, num_classes=10)
def fc3_sq_bias(**kwargs):
return FullyConnected_bias(input_dim=32*32*3, width=100, depth=3, num_classes=10)
def fc3_sigmoid(**kwargs):
return FullyConnected_sigmoid(input_dim=32*32*3, width=100, depth=3, num_classes=10)
def fc4(**kwargs):
return FullyConnected(input_dim=32*32*3, width=100, depth=4, num_classes=10)
def fc5(**kwargs):
return FullyConnected(input_dim=32*32*3, width=100, depth=5, num_classes=10)
def fc6(**kwargs):
return FullyConnected(input_dim=32*32*3, width=100, depth=6, num_classes=10)
def fc7(**kwargs):
return FullyConnected(input_dim=32*32*3, width=100, depth=7, num_classes=10)
def fc20(**kwargs):
return FullyConnected(input_dim=32*32*3, width=100, depth=20, num_classes=10)
def fc56(**kwargs):
return FullyConnected(input_dim=32*32*3, width=100, depth=56, num_classes=10)
def fc110(**kwargs):
return FullyConnected(input_dim=32*32*3, width=100, depth=110, num_classes=10)
def simplenet(**kwargs):
return SimpleNet(**kwargs)
|
py | b40764451df32ec37c2121df0061274c3f714aa3 | import numpy as np
import multiprocessing as mp
import time
import sys
from enum import Enum
from copy import deepcopy
from reinforcement_learning.gym import logger
from reinforcement_learning.gym.vector.vector_env import VectorEnv
from reinforcement_learning.gym.error import (AlreadyPendingCallError, NoAsyncCallError,
ClosedEnvironmentError)
from reinforcement_learning.gym.vector.utils import (create_shared_memory, create_empty_array,
write_to_shared_memory, read_from_shared_memory,
concatenate, CloudpickleWrapper, clear_mpi_env_vars)
__all__ = ['AsyncVectorEnv']
class AsyncState(Enum):
DEFAULT = 'default'
WAITING_RESET = 'reset'
WAITING_STEP = 'step'
class AsyncVectorEnv(VectorEnv):
"""Vectorized environment that runs multiple environments in parallel. It
uses `multiprocessing` processes, and pipes for communication.
Parameters
----------
env_fns : iterable of callable
Functions that create the environments.
observation_space : `gym.spaces.Space` instance, optional
Observation space of a single environment. If `None`, then the
observation space of the first environment is taken.
action_space : `gym.spaces.Space` instance, optional
Action space of a single environment. If `None`, then the action space
of the first environment is taken.
shared_memory : bool (default: `True`)
If `True`, then the observations from the worker processes are
communicated back through shared variables. This can improve the
efficiency if the observations are large (e.g. images).
copy : bool (default: `True`)
If `True`, then the `reset` and `step` methods return a copy of the
observations.
context : str, optional
Context for multiprocessing. If `None`, then the default context is used.
Only available in Python 3.
daemon : bool (default: `True`)
If `True`, then subprocesses have `daemon` flag turned on; that is, they
will quit if the head process quits. However, `daemon=True` prevents
        subprocesses from spawning children, so for some environments you may want
to have it set to `False`
worker : function, optional
WARNING - advanced mode option! If set, then use that worker in a subprocess
instead of a default one. Can be useful to override some inner vector env
logic, for instance, how resets on done are handled. Provides high
degree of flexibility and a high chance to shoot yourself in the foot; thus,
if you are writing your own worker, it is recommended to start from the code
for `_worker` (or `_worker_shared_memory`) method below, and add changes
"""
def __init__(self, env_fns, observation_space=None, action_space=None,
shared_memory=True, copy=True, context=None, daemon=True, worker=None):
try:
ctx = mp.get_context(context)
except AttributeError:
logger.warn('Context switching for `multiprocessing` is not '
'available in Python 2. Using the default context.')
ctx = mp
self.env_fns = env_fns
self.shared_memory = shared_memory
self.copy = copy
if (observation_space is None) or (action_space is None):
dummy_env = env_fns[0]()
observation_space = observation_space or dummy_env.observation_space
action_space = action_space or dummy_env.action_space
dummy_env.close()
del dummy_env
super(AsyncVectorEnv, self).__init__(num_envs=len(env_fns),
observation_space=observation_space, action_space=action_space)
if self.shared_memory:
_obs_buffer = create_shared_memory(self.single_observation_space,
n=self.num_envs, ctx=ctx)
self.observations = read_from_shared_memory(_obs_buffer,
self.single_observation_space, n=self.num_envs)
else:
_obs_buffer = None
self.observations = create_empty_array(
self.single_observation_space, n=self.num_envs, fn=np.zeros)
self.parent_pipes, self.processes = [], []
self.error_queue = ctx.Queue()
target = _worker_shared_memory if self.shared_memory else _worker
target = worker or target
with clear_mpi_env_vars():
for idx, env_fn in enumerate(self.env_fns):
parent_pipe, child_pipe = ctx.Pipe()
process = ctx.Process(target=target,
name='Worker<{0}>-{1}'.format(type(self).__name__, idx),
args=(idx, CloudpickleWrapper(env_fn), child_pipe,
parent_pipe, _obs_buffer, self.error_queue))
self.parent_pipes.append(parent_pipe)
self.processes.append(process)
process.daemon = daemon
process.start()
child_pipe.close()
self._state = AsyncState.DEFAULT
self._check_observation_spaces()
def seed(self, seeds=None):
self._assert_is_running()
if seeds is None:
seeds = [None for _ in range(self.num_envs)]
if isinstance(seeds, int):
seeds = [seeds + i for i in range(self.num_envs)]
assert len(seeds) == self.num_envs
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError('Calling `seed` while waiting '
'for a pending call to `{0}` to complete.'.format(
self._state.value), self._state.value)
for pipe, seed in zip(self.parent_pipes, seeds):
pipe.send(('seed', seed))
_, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
def reset_async(self):
self._assert_is_running()
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError('Calling `reset_async` while waiting '
'for a pending call to `{0}` to complete'.format(
self._state.value), self._state.value)
for pipe in self.parent_pipes:
pipe.send(('reset', None))
self._state = AsyncState.WAITING_RESET
def reset_wait(self, timeout=None):
"""
Parameters
----------
timeout : int or float, optional
Number of seconds before the call to `reset_wait` times out. If
`None`, the call to `reset_wait` never times out.
Returns
-------
observations : sample from `observation_space`
A batch of observations from the vectorized environment.
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_RESET:
raise NoAsyncCallError('Calling `reset_wait` without any prior '
'call to `reset_async`.', AsyncState.WAITING_RESET.value)
if not self._poll(timeout):
self._state = AsyncState.DEFAULT
raise mp.TimeoutError('The call to `reset_wait` has timed out after '
'{0} second{1}.'.format(timeout, 's' if timeout > 1 else ''))
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
self._state = AsyncState.DEFAULT
if not self.shared_memory:
concatenate(results, self.observations, self.single_observation_space)
return deepcopy(self.observations) if self.copy else self.observations
def step_async(self, actions):
"""
Parameters
----------
actions : iterable of samples from `action_space`
List of actions.
"""
self._assert_is_running()
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError('Calling `step_async` while waiting '
'for a pending call to `{0}` to complete.'.format(
self._state.value), self._state.value)
for pipe, action in zip(self.parent_pipes, actions):
pipe.send(('step', action))
self._state = AsyncState.WAITING_STEP
def step_wait(self, timeout=None):
"""
Parameters
----------
timeout : int or float, optional
Number of seconds before the call to `step_wait` times out. If
`None`, the call to `step_wait` never times out.
Returns
-------
observations : sample from `observation_space`
A batch of observations from the vectorized environment.
rewards : `np.ndarray` instance (dtype `np.float_`)
A vector of rewards from the vectorized environment.
dones : `np.ndarray` instance (dtype `np.bool_`)
A vector whose entries indicate whether the episode has ended.
infos : list of dict
A list of auxiliary diagnostic informations.
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_STEP:
raise NoAsyncCallError('Calling `step_wait` without any prior call '
'to `step_async`.', AsyncState.WAITING_STEP.value)
if not self._poll(timeout):
self._state = AsyncState.DEFAULT
raise mp.TimeoutError('The call to `step_wait` has timed out after '
'{0} second{1}.'.format(timeout, 's' if timeout > 1 else ''))
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
self._state = AsyncState.DEFAULT
observations_list, rewards, dones, infos = zip(*results)
if not self.shared_memory:
concatenate(observations_list, self.observations,
self.single_observation_space)
return (deepcopy(self.observations) if self.copy else self.observations,
np.array(rewards), np.array(dones, dtype=np.bool_), infos)
def close_extras(self, timeout=None, terminate=False):
"""
Parameters
----------
timeout : int or float, optional
Number of seconds before the call to `close` times out. If `None`,
the call to `close` never times out. If the call to `close` times
out, then all processes are terminated.
terminate : bool (default: `False`)
If `True`, then the `close` operation is forced and all processes
are terminated.
"""
timeout = 0 if terminate else timeout
try:
if self._state != AsyncState.DEFAULT:
logger.warn('Calling `close` while waiting for a pending '
'call to `{0}` to complete.'.format(self._state.value))
function = getattr(self, '{0}_wait'.format(self._state.value))
function(timeout)
except mp.TimeoutError:
terminate = True
if terminate:
for process in self.processes:
if process.is_alive():
process.terminate()
else:
for pipe in self.parent_pipes:
if (pipe is not None) and (not pipe.closed):
pipe.send(('close', None))
for pipe in self.parent_pipes:
if (pipe is not None) and (not pipe.closed):
pipe.recv()
for pipe in self.parent_pipes:
if pipe is not None:
pipe.close()
for process in self.processes:
process.join()
def _poll(self, timeout=None):
self._assert_is_running()
if timeout is None:
return True
end_time = time.time() + timeout
delta = None
for pipe in self.parent_pipes:
delta = max(end_time - time.time(), 0)
if pipe is None:
return False
if pipe.closed or (not pipe.poll(delta)):
return False
return True
def _check_observation_spaces(self):
self._assert_is_running()
for pipe in self.parent_pipes:
pipe.send(('_check_observation_space', self.single_observation_space))
same_spaces, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
if not all(same_spaces):
raise RuntimeError('Some environments have an observation space '
'different from `{0}`. In order to batch observations, the '
'observation spaces from all environments must be '
'equal.'.format(self.single_observation_space))
def _assert_is_running(self):
if self.closed:
raise ClosedEnvironmentError('Trying to operate on `{0}`, after a '
'call to `close()`.'.format(type(self).__name__))
def _raise_if_errors(self, successes):
if all(successes):
return
num_errors = self.num_envs - sum(successes)
assert num_errors > 0
for _ in range(num_errors):
index, exctype, value = self.error_queue.get()
logger.error('Received the following error from Worker-{0}: '
'{1}: {2}'.format(index, exctype.__name__, value))
logger.error('Shutting down Worker-{0}.'.format(index))
self.parent_pipes[index].close()
self.parent_pipes[index] = None
logger.error('Raising the last exception back to the main process.')
raise exctype(value)
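# A minimal usage sketch (not executed on import). `make_env` stands for a
# hypothetical zero-argument callable returning a gym-style environment;
# `single_action_space` is assumed to be exposed by the VectorEnv base class,
# just like `single_observation_space` used above.
def _async_vector_env_example(make_env, num_envs=4, steps=10):
    env = AsyncVectorEnv([make_env for _ in range(num_envs)])
    env.reset_async()
    observations = env.reset_wait()
    for _ in range(steps):
        actions = [env.single_action_space.sample() for _ in range(num_envs)]
        env.step_async(actions)
        observations, rewards, dones, infos = env.step_wait()
    env.close()
    return observations, rewards, dones, infos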
def _worker(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
assert shared_memory is None
env = env_fn()
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == 'reset':
observation = env.reset()
pipe.send((observation, True))
elif command == 'step':
observation, reward, done, info = env.step(data)
if done:
observation = env.reset()
pipe.send(((observation, reward, done, info), True))
elif command == 'seed':
env.seed(data)
pipe.send((None, True))
elif command == 'close':
pipe.send((None, True))
break
elif command == '_check_observation_space':
pipe.send((data == env.observation_space, True))
else:
raise RuntimeError('Received unknown command `{0}`. Must '
'be one of {`reset`, `step`, `seed`, `close`, '
'`_check_observation_space`}.'.format(command))
except (KeyboardInterrupt, Exception):
error_queue.put((index,) + sys.exc_info()[:2])
pipe.send((None, False))
finally:
env.close()
def _worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
assert shared_memory is not None
env = env_fn()
observation_space = env.observation_space
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == 'reset':
observation = env.reset()
write_to_shared_memory(index, observation, shared_memory,
observation_space)
pipe.send((None, True))
elif command == 'step':
observation, reward, done, info = env.step(data)
if done:
observation = env.reset()
write_to_shared_memory(index, observation, shared_memory,
observation_space)
pipe.send(((None, reward, done, info), True))
elif command == 'seed':
env.seed(data)
pipe.send((None, True))
elif command == 'close':
pipe.send((None, True))
break
elif command == '_check_observation_space':
pipe.send((data == observation_space, True))
else:
raise RuntimeError('Received unknown command `{0}`. Must '
'be one of {`reset`, `step`, `seed`, `close`, '
'`_check_observation_space`}.'.format(command))
except (KeyboardInterrupt, Exception):
error_queue.put((index,) + sys.exc_info()[:2])
pipe.send((None, False))
finally:
env.close()
|
py | b4076687bc49dea572d446a8ab7ba718fbb85acf | #!/usr/bin/env python
"""
SVG Rendering Library
"""
__author__ = 'Sam Koepnick <[email protected]>'
|
py | b4076728f3aea57e2582d952573b108ec9802a7c | import os
import sys
import torch
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from loguru import logger
from torch import Tensor, nn
from torch.optim import Adam
from datetime import datetime as dt
sys.path.insert(0, f'{os.path.join(os.path.dirname(__file__), "../")}')
from model.model import AttentionOCR
from model.cnn import CNN, ResNetCNN
from torch.utils.data import DataLoader
from utils.dataset import get_dataloader
from typing import Dict, Union, List, Optional
from nltk.translate.bleu_score import sentence_bleu
loss = nn.NLLLoss()
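# Note: nn.NLLLoss expects log-probabilities, so AttentionOCR is assumed to
# return log-softmax outputs of shape (batch, vocab_size).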
def train_epoch(dl: DataLoader, model: nn.Module, optim, device: str) -> float:
model.train()
batches = tqdm(dl)
losses = []
for b in batches:
for k in b:
b[k] = b[k].to(device)
optim.zero_grad()
pred = model(b)
curr_loss = loss(pred, b['tokens'].squeeze())
curr_loss.backward()
optim.step()
losses.append(curr_loss.cpu().item())
batches.set_description(
f'Train epoch. Current CCE Loss: {losses[-1]}. ')
return np.mean(losses)
@torch.no_grad()
def validate_epoch(dl: DataLoader, model: nn.Module, device: str) -> Dict[str, float]:
model.eval()
batches = tqdm(dl)
losses = []
bleu_scores = []
for b in batches:
for k in b:
b[k] = b[k].to(device)
pred = model(b)
curr_loss = loss(pred, b['tokens'].squeeze()).cpu().item()
pred_tokens = torch.argmax(pred, 1).detach().cpu().numpy()
true_tokens = b['tokens'].squeeze().cpu().numpy()
bleu = sentence_bleu([true_tokens], pred_tokens, weights=(1,))
losses.append(curr_loss)
bleu_scores.append(bleu)
batches.set_description(
f'Validation epoch. Current CCE Loss: {losses[-1]}. Current BLEU: {bleu_scores[-1]}. ')
metrics = {
'bleu': np.mean(bleu_scores),
'loss': np.mean(losses)
}
return metrics
def fit_model(
train_path: str,
eval_path: str,
image_dir: str,
formulas_path: str,
vocab_path: str,
        device: str = 'cpu',  # expected values: 'cpu' or 'cuda'
n_epochs: int = 12,
lr: float = 1e-4,
save_dir: Optional[str] = None,
cnn_type: Union[ResNetCNN, CNN] = ResNetCNN
) -> pd.DataFrame:
log_file = ''.join(
['train_', dt.now().strftime('%Y-%m-%dT%H:%M:%S'), '.log'])
log_path = os.path.join('./', 'logs', log_file)
if save_dir is None:
save_dir = os.path.join('./', 'params/')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
logger.add(log_path)
logger.info('Loading train dataset')
train_dl, vocab = get_dataloader(data_path=train_path,
image_dir=image_dir,
formulas_path=formulas_path,
vocab_path=vocab_path)
logger.info('Loading validation dataset')
eval_dl, _ = get_dataloader(data_path=eval_path,
image_dir=image_dir,
formulas_path=formulas_path,
vocab_path=vocab_path)
logger.info('Loading model')
model = AttentionOCR(len(vocab), device, cnn_type=cnn_type)
optim = torch.optim.Adam(model.parameters(), lr=lr)
metrics = []
logger.info(f'Start fitting {n_epochs} epochs on {len(train_dl)} objects')
    for epoch in range(1, n_epochs + 1):
logger.info(f'Start {epoch} epoch of {n_epochs}')
train_loss = train_epoch(train_dl, model, optim, device)
logger.info(f'Train epoch {epoch}. Mean loss is {train_loss}')
eval_metrics = validate_epoch(eval_dl, model, device)
logger.info(
f'Validation epoch {epoch}. Mean loss is {eval_metrics["loss"]}')
logger.info(
f'Validation epoch {epoch}. Mean bleu is {eval_metrics["bleu"]}')
metrics.append(eval_metrics)
model_name = f'{round(eval_metrics["bleu"], 3)}_{dt.now().strftime("%m-%d")}'
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
logger.info(f'Model saved at {model_path}')
logger.info(f'End fitting on {n_epochs} epochs')
return pd.DataFrame(metrics)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default='cpu')
parser.add_argument('--cnn', type=str, default='resnet')
parser.add_argument('--epochs', type=int, default=12)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--train_path', type=str,
default='./data/train_filter.lst')
parser.add_argument('--val_path', type=str,
default='./data/validate_filter.lst')
parser.add_argument('--image_dir', type=str,
default='./data/images_processed/')
parser.add_argument('--formulas_path', type=str,
default='./data/formulas_tokenized.lst')
parser.add_argument('--vocab_path', type=str,
default='./data/latex_vocab.txt')
parser.add_argument('--save', type=str, default=None)
parser.add_argument('--norm', dest='norm', action='store_true')
parser.add_argument('--no-norm', dest='norm', action='store_false')
parser.set_defaults(norm=False)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
if args.norm:
args.formulas_path = args.formulas_path + '.norm'
args.vocab_path = args.vocab_path + '.norm'
cnn_type = ResNetCNN if args.cnn == 'resnet' else CNN
metrics = fit_model(
args.train_path,
args.val_path,
args.image_dir,
args.formulas_path,
args.vocab_path,
args.device,
args.epochs,
args.lr,
args.save,
cnn_type
)
logger.info(metrics)
metrics.to_csv('train_metrics.csv')
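# Example invocation (a sketch; the script file name is hypothetical and the
# paths follow the argparse defaults above):
#
#   python train.py --device cuda --cnn resnet --epochs 12 --lr 1e-4 \
#       --train_path ./data/train_filter.lst --val_path ./data/validate_filter.lst \
#       --save ./params/ --norm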
|
py | b407672a35dfc90177bec587feb12ec0b535b8d9 | #!/usr/bin/env python3
from taiseilib.common import (
run_main,
in_dir,
meson_introspect,
add_common_args,
)
from pathlib import Path
import argparse
import json
import shlex
import subprocess
import shutil
import re
import sys
def main(args):
parser = argparse.ArgumentParser(description='Regenerate a Meson build directory, attempting to preserve build options.', prog=args[0])
parser.add_argument('build_dir',
default=Path.cwd(),
type=Path,
nargs='?',
help='the build directory (defaults to CWD)',
)
parser.add_argument('dest_build_dir',
default=None,
type=Path,
nargs='?',
help='the destination directory (defaults to same as build_dir)',
)
parser.add_argument('--meson',
default=['meson'],
type=shlex.split,
help='override the Meson executable (useful for wrappers)',
)
parser.add_argument('meson_args',
default=[],
nargs='*',
help='additional arguments for Meson',
)
add_common_args(parser)
args = parser.parse_args(args[1:])
if args.dest_build_dir is None:
args.dest_build_dir = args.build_dir
with in_dir(args.build_dir):
try:
build_options = meson_introspect('--buildoptions')
except subprocess.SubprocessError:
print("Warning: meson introspect failed, retrieving options from saved_options.json. This may not be up to date.", file=sys.stderr)
with open('saved_options.json') as infile:
build_options = json.loads(infile.read())
regen_cmdline = args.meson + [
str(args.rootdir.resolve(strict=True)),
str(args.dest_build_dir.resolve(strict=False)),
]
meson_options = set(re.findall(r'\[--([\w-]+)\s+.*?\]', subprocess.check_output(args.meson + ['--help']).decode('utf8'), re.A))
def opt_str_value(opt, value):
if isinstance(value, bool):
# Meson <= 0.43.0 bug
return str(value).lower()
if opt == 'install_umask':
return '%04o' % int(value)
return str(value)
for opt in build_options:
name = opt['name']
value = opt_str_value(name, opt['value'])
if name in meson_options:
regen_cmdline.append('--{}={}'.format(name, value))
regen_cmdline += args.meson_args
args.dest_build_dir.mkdir(parents=True, exist_ok=True)
with in_dir(args.dest_build_dir):
obj = {
'command': regen_cmdline,
'build_options': build_options,
}
with Path('imported_options.json').open('w') as outfile:
json.dump(obj, outfile,
ensure_ascii=False,
indent=4,
sort_keys=True,
)
meson_dir = Path('meson-private')
meson_dir_bak = meson_dir.with_name(meson_dir.name + '.bak')
if meson_dir.is_dir():
shutil.rmtree(meson_dir_bak, ignore_errors=True)
meson_dir.rename(meson_dir_bak)
print('+', regen_cmdline)
subprocess.check_call(regen_cmdline)
for opt in build_options:
name = opt['name']
value = opt_str_value(name, opt['value'])
cmdline = args.meson + ['configure', '-D{}={}'.format(name, value)]
print('+', cmdline)
subprocess.call(cmdline)
print('')
print("Regeneration done. This process is not 100% reliable; you may want to check the output of 'meson configure'")
if __name__ == '__main__':
run_main(main)
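# Example invocation (a sketch; the script file name is hypothetical). Extra
# arguments after `--` are forwarded to meson on top of the options recovered
# from the old build directory:
#
#   ./regenerate-build-dir.py build/ build-new/ --meson meson -- -Dbuildtype=release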
|
py | b407674609feb712c0ec99b34731135e9debfaa8 | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import os
import re
import pyqtgraph as pg
from functools import partial
import numpy
from collections import ChainMap
from itertools import product
from .browser import Browser
from .curves import ResultsCurve, Crosshairs, ResultsImage
from .inputs import BooleanInput, IntegerInput, ListInput, ScientificInput, StringInput
from .log import LogHandler
from .Qt import QtCore, QtGui
from ..experiment import parameters, Procedure
from ..experiment.results import Results
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class PlotFrame(QtGui.QFrame):
""" Combines a PyQtGraph Plot with Crosshairs. Refreshes
the plot based on the refresh_time, and allows the axes
to be changed on the fly, which updates the plotted data
"""
LABEL_STYLE = {'font-size': '10pt', 'font-family': 'Arial', 'color': '#000000'}
updated = QtCore.QSignal()
x_axis_changed = QtCore.QSignal(str)
y_axis_changed = QtCore.QSignal(str)
def __init__(self, x_axis=None, y_axis=None, refresh_time=0.2, check_status=True, parent=None):
super().__init__(parent)
self.refresh_time = refresh_time
self.check_status = check_status
self._setup_ui()
self.change_x_axis(x_axis)
self.change_y_axis(y_axis)
def _setup_ui(self):
self.setAutoFillBackground(False)
self.setStyleSheet("background: #fff")
self.setFrameShape(QtGui.QFrame.StyledPanel)
self.setFrameShadow(QtGui.QFrame.Sunken)
self.setMidLineWidth(1)
vbox = QtGui.QVBoxLayout(self)
self.plot_widget = pg.PlotWidget(self, background='#ffffff')
self.coordinates = QtGui.QLabel(self)
self.coordinates.setMinimumSize(QtCore.QSize(0, 20))
self.coordinates.setStyleSheet("background: #fff")
self.coordinates.setText("")
self.coordinates.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
vbox.addWidget(self.plot_widget)
vbox.addWidget(self.coordinates)
self.setLayout(vbox)
self.plot = self.plot_widget.getPlotItem()
self.crosshairs = Crosshairs(self.plot,
pen=pg.mkPen(color='#AAAAAA', style=QtCore.Qt.DashLine))
self.crosshairs.coordinates.connect(self.update_coordinates)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update_curves)
self.timer.timeout.connect(self.crosshairs.update)
self.timer.timeout.connect(self.updated)
self.timer.start(int(self.refresh_time * 1e3))
def update_coordinates(self, x, y):
self.coordinates.setText("(%g, %g)" % (x, y))
def update_curves(self):
for item in self.plot.items:
if isinstance(item, ResultsCurve):
if self.check_status:
if item.results.procedure.status == Procedure.RUNNING:
item.update()
else:
item.update()
def parse_axis(self, axis):
""" Returns the units of an axis by searching the string
"""
units_pattern = r"\((?P<units>\w+)\)"
try:
match = re.search(units_pattern, axis)
except TypeError:
match = None
if match:
if 'units' in match.groupdict():
label = re.sub(units_pattern, '', axis)
return label, match.groupdict()['units']
else:
return axis, None
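    # For example, parse_axis("Pressure (Pa)") returns ("Pressure ", "Pa"),
    # while parse_axis("Pressure") returns ("Pressure", None).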
def change_x_axis(self, axis):
for item in self.plot.items:
if isinstance(item, ResultsCurve):
item.x = axis
item.update()
label, units = self.parse_axis(axis)
self.plot.setLabel('bottom', label, units=units, **self.LABEL_STYLE)
self.x_axis = axis
self.x_axis_changed.emit(axis)
def change_y_axis(self, axis):
for item in self.plot.items:
if isinstance(item, ResultsCurve):
item.y = axis
item.update()
label, units = self.parse_axis(axis)
self.plot.setLabel('left', label, units=units, **self.LABEL_STYLE)
self.y_axis = axis
self.y_axis_changed.emit(axis)
class PlotWidget(QtGui.QWidget):
""" Extends the PlotFrame to allow different columns
of the data to be dynamically choosen
"""
def __init__(self, columns, x_axis=None, y_axis=None, refresh_time=0.2, check_status=True,
parent=None):
super().__init__(parent)
self.columns = columns
self.refresh_time = refresh_time
self.check_status = check_status
self._setup_ui()
self._layout()
if x_axis is not None:
self.columns_x.setCurrentIndex(self.columns_x.findText(x_axis))
self.plot_frame.change_x_axis(x_axis)
if y_axis is not None:
self.columns_y.setCurrentIndex(self.columns_y.findText(y_axis))
self.plot_frame.change_y_axis(y_axis)
def _setup_ui(self):
self.columns_x_label = QtGui.QLabel(self)
self.columns_x_label.setMaximumSize(QtCore.QSize(45, 16777215))
self.columns_x_label.setText('X Axis:')
self.columns_y_label = QtGui.QLabel(self)
self.columns_y_label.setMaximumSize(QtCore.QSize(45, 16777215))
self.columns_y_label.setText('Y Axis:')
self.columns_x = QtGui.QComboBox(self)
self.columns_y = QtGui.QComboBox(self)
for column in self.columns:
self.columns_x.addItem(column)
self.columns_y.addItem(column)
self.columns_x.activated.connect(self.update_x_column)
self.columns_y.activated.connect(self.update_y_column)
self.plot_frame = PlotFrame(
self.columns[0],
self.columns[1],
self.refresh_time,
self.check_status
)
self.updated = self.plot_frame.updated
self.plot = self.plot_frame.plot
self.columns_x.setCurrentIndex(0)
self.columns_y.setCurrentIndex(1)
def _layout(self):
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(0)
hbox = QtGui.QHBoxLayout()
hbox.setSpacing(10)
hbox.setContentsMargins(-1, 6, -1, 6)
hbox.addWidget(self.columns_x_label)
hbox.addWidget(self.columns_x)
hbox.addWidget(self.columns_y_label)
hbox.addWidget(self.columns_y)
vbox.addLayout(hbox)
vbox.addWidget(self.plot_frame)
self.setLayout(vbox)
def sizeHint(self):
return QtCore.QSize(300, 600)
def new_curve(self, results, color=pg.intColor(0), **kwargs):
if 'pen' not in kwargs:
kwargs['pen'] = pg.mkPen(color=color, width=2)
if 'antialias' not in kwargs:
kwargs['antialias'] = False
curve = ResultsCurve(results,
x=self.plot_frame.x_axis,
y=self.plot_frame.y_axis,
**kwargs
)
curve.setSymbol(None)
curve.setSymbolBrush(None)
return curve
def update_x_column(self, index):
axis = self.columns_x.itemText(index)
self.plot_frame.change_x_axis(axis)
def update_y_column(self, index):
axis = self.columns_y.itemText(index)
self.plot_frame.change_y_axis(axis)
class ImageFrame(QtGui.QFrame):
""" Combines a PyQtGraph Plot with Crosshairs. Refreshes
the plot based on the refresh_time, and allows the axes
to be changed on the fly, which updates the plotted data
"""
LABEL_STYLE = {'font-size': '10pt', 'font-family': 'Arial', 'color': '#000000'}
updated = QtCore.QSignal()
x_axis_changed = QtCore.QSignal(str)
y_axis_changed = QtCore.QSignal(str)
z_axis_changed = QtCore.QSignal(str)
def __init__(self, x_axis, y_axis, z_axis=None, refresh_time=0.2, check_status=True, parent=None):
super().__init__(parent)
self.refresh_time = refresh_time
self.check_status = check_status
self._setup_ui()
# set axis labels
for item in self.plot.items:
if isinstance(item, ResultsImage):
item.x = x_axis
item.y = y_axis
item.update_img()
xlabel, xunits = self.parse_axis(x_axis)
self.plot.setLabel('bottom', xlabel, units=xunits, **self.LABEL_STYLE)
self.x_axis = x_axis
self.x_axis_changed.emit(x_axis)
ylabel, yunits = self.parse_axis(y_axis)
self.plot.setLabel('left', ylabel, units=yunits, **self.LABEL_STYLE)
self.y_axis = y_axis
self.y_axis_changed.emit(y_axis)
self.change_z_axis(z_axis)
def _setup_ui(self):
self.setAutoFillBackground(False)
self.setStyleSheet("background: #fff")
self.setFrameShape(QtGui.QFrame.StyledPanel)
self.setFrameShadow(QtGui.QFrame.Sunken)
self.setMidLineWidth(1)
vbox = QtGui.QVBoxLayout(self)
self.plot_widget = pg.PlotWidget(self, background='#ffffff')
self.coordinates = QtGui.QLabel(self)
self.coordinates.setMinimumSize(QtCore.QSize(0, 20))
self.coordinates.setStyleSheet("background: #fff")
self.coordinates.setText("")
self.coordinates.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
vbox.addWidget(self.plot_widget)
vbox.addWidget(self.coordinates)
self.setLayout(vbox)
self.plot = self.plot_widget.getPlotItem()
self.crosshairs = Crosshairs(self.plot,
pen=pg.mkPen(color='#AAAAAA', style=QtCore.Qt.DashLine))
self.crosshairs.coordinates.connect(self.update_coordinates)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update_curves)
self.timer.timeout.connect(self.crosshairs.update)
self.timer.timeout.connect(self.updated)
self.timer.start(int(self.refresh_time * 1e3))
def update_coordinates(self, x, y):
self.coordinates.setText("(%g, %g)" % (x, y))
def update_curves(self):
for item in self.plot.items:
if isinstance(item, ResultsImage):
if self.check_status:
if item.results.procedure.status == Procedure.RUNNING:
item.update_img()
else:
item.update()
def parse_axis(self, axis):
""" Returns the units of an axis by searching the string
"""
units_pattern = r"\((?P<units>\w+)\)"
try:
match = re.search(units_pattern, axis)
except TypeError:
match = None
if match:
if 'units' in match.groupdict():
label = re.sub(units_pattern, '', axis)
return label, match.groupdict()['units']
else:
return axis, None
def change_z_axis(self, axis):
for item in self.plot.items:
if isinstance(item, ResultsImage):
item.z = axis
item.update_img()
label, units = self.parse_axis(axis)
if units is not None:
self.plot.setTitle(label + ' (%s)'%units)
else:
self.plot.setTitle(label)
self.z_axis = axis
self.z_axis_changed.emit(axis)
class ImageWidget(QtGui.QWidget):
""" Extends the PlotFrame to allow different columns
    of the data to be dynamically chosen
"""
def __init__(self, columns, x_axis, y_axis, z_axis=None, refresh_time=0.2, check_status=True,
parent=None):
super().__init__(parent)
self.columns = columns
self.refresh_time = refresh_time
self.check_status = check_status
self.x_axis = x_axis
self.y_axis = y_axis
self._setup_ui()
self._layout()
if z_axis is not None:
self.columns_z.setCurrentIndex(self.columns_z.findText(z_axis))
self.image_frame.change_z_axis(z_axis)
def _setup_ui(self):
self.columns_z_label = QtGui.QLabel(self)
self.columns_z_label.setMaximumSize(QtCore.QSize(45, 16777215))
self.columns_z_label.setText('Z Axis:')
self.columns_z = QtGui.QComboBox(self)
for column in self.columns:
self.columns_z.addItem(column)
self.columns_z.activated.connect(self.update_z_column)
self.image_frame = ImageFrame(
self.x_axis,
self.y_axis,
self.columns[0],
self.refresh_time,
self.check_status
)
self.updated = self.image_frame.updated
self.plot = self.image_frame.plot
self.columns_z.setCurrentIndex(2)
def _layout(self):
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(0)
hbox = QtGui.QHBoxLayout()
hbox.setSpacing(10)
hbox.setContentsMargins(-1, 6, -1, 6)
hbox.addWidget(self.columns_z_label)
hbox.addWidget(self.columns_z)
vbox.addLayout(hbox)
vbox.addWidget(self.image_frame)
self.setLayout(vbox)
def sizeHint(self):
return QtCore.QSize(300, 600)
def new_image(self, results):
""" Creates a new image """
image = ResultsImage(results,
x=self.image_frame.x_axis,
y=self.image_frame.y_axis,
z=self.image_frame.z_axis
)
return image
def update_z_column(self, index):
axis = self.columns_z.itemText(index)
self.image_frame.change_z_axis(axis)
class BrowserWidget(QtGui.QWidget):
def __init__(self, *args, parent=None):
super().__init__(parent)
self.browser_args = args
self._setup_ui()
self._layout()
def _setup_ui(self):
self.browser = Browser(*self.browser_args, parent=self)
self.clear_button = QtGui.QPushButton('Clear all', self)
self.clear_button.setEnabled(False)
self.hide_button = QtGui.QPushButton('Hide all', self)
self.hide_button.setEnabled(False)
self.show_button = QtGui.QPushButton('Show all', self)
self.show_button.setEnabled(False)
self.open_button = QtGui.QPushButton('Open', self)
self.open_button.setEnabled(True)
def _layout(self):
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(0)
hbox = QtGui.QHBoxLayout()
hbox.setSpacing(10)
hbox.setContentsMargins(-1, 6, -1, 6)
hbox.addWidget(self.show_button)
hbox.addWidget(self.hide_button)
hbox.addWidget(self.clear_button)
hbox.addStretch()
hbox.addWidget(self.open_button)
vbox.addLayout(hbox)
vbox.addWidget(self.browser)
self.setLayout(vbox)
class InputsWidget(QtGui.QWidget):
# tuple of Input classes that do not need an external label
NO_LABEL_INPUTS = (BooleanInput,)
def __init__(self, procedure_class, inputs=(), parent=None):
super().__init__(parent)
self._procedure_class = procedure_class
self._procedure = procedure_class()
self._inputs = inputs
self._setup_ui()
self._layout()
def _setup_ui(self):
parameter_objects = self._procedure.parameter_objects()
for name in self._inputs:
parameter = parameter_objects[name]
if parameter.ui_class is not None:
element = parameter.ui_class(parameter)
elif isinstance(parameter, parameters.FloatParameter):
element = ScientificInput(parameter)
elif isinstance(parameter, parameters.IntegerParameter):
element = IntegerInput(parameter)
elif isinstance(parameter, parameters.BooleanParameter):
element = BooleanInput(parameter)
elif isinstance(parameter, parameters.ListParameter):
element = ListInput(parameter)
elif isinstance(parameter, parameters.Parameter):
element = StringInput(parameter)
setattr(self, name, element)
def _layout(self):
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(6)
parameters = self._procedure.parameter_objects()
for name in self._inputs:
if not isinstance(getattr(self, name), self.NO_LABEL_INPUTS):
label = QtGui.QLabel(self)
label.setText("%s:" % parameters[name].name)
vbox.addWidget(label)
vbox.addWidget(getattr(self, name))
self.setLayout(vbox)
def set_parameters(self, parameter_objects):
for name in self._inputs:
element = getattr(self, name)
element.set_parameter(parameter_objects[name])
def get_procedure(self):
""" Returns the current procedure """
self._procedure = self._procedure_class()
parameter_values = {}
for name in self._inputs:
element = getattr(self, name)
parameter_values[name] = element.parameter.value
self._procedure.set_parameters(parameter_values)
return self._procedure
class LogWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self._setup_ui()
self._layout()
def _setup_ui(self):
self.view = QtGui.QPlainTextEdit()
self.view.setReadOnly(True)
self.handler = LogHandler()
self.handler.setFormatter(logging.Formatter(
fmt='%(asctime)s : %(message)s (%(levelname)s)',
datefmt='%m/%d/%Y %I:%M:%S %p'
))
self.handler.connect(self.view.appendPlainText)
def _layout(self):
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(0)
vbox.addWidget(self.view)
self.setLayout(vbox)
class ResultsDialog(QtGui.QFileDialog):
def __init__(self, columns, x_axis=None, y_axis=None, parent=None):
super().__init__(parent)
self.columns = columns
self.x_axis, self.y_axis = x_axis, y_axis
self.setOption(QtGui.QFileDialog.DontUseNativeDialog, True)
self._setup_ui()
def _setup_ui(self):
preview_tab = QtGui.QTabWidget()
vbox = QtGui.QVBoxLayout()
param_vbox = QtGui.QVBoxLayout()
vbox_widget = QtGui.QWidget()
param_vbox_widget = QtGui.QWidget()
self.plot_widget = PlotWidget(self.columns, self.x_axis, self.y_axis, parent=self)
self.plot = self.plot_widget.plot
self.preview_param = QtGui.QTreeWidget()
param_header = QtGui.QTreeWidgetItem(["Name", "Value"])
self.preview_param.setHeaderItem(param_header)
self.preview_param.setColumnWidth(0, 150)
self.preview_param.setAlternatingRowColors(True)
vbox.addWidget(self.plot_widget)
param_vbox.addWidget(self.preview_param)
vbox_widget.setLayout(vbox)
param_vbox_widget.setLayout(param_vbox)
preview_tab.addTab(vbox_widget, "Plot Preview")
preview_tab.addTab(param_vbox_widget, "Run Parameters")
self.layout().addWidget(preview_tab, 0, 5, 4, 1)
self.layout().setColumnStretch(5, 1)
self.setMinimumSize(900, 500)
self.resize(900, 500)
self.setFileMode(QtGui.QFileDialog.ExistingFiles)
self.currentChanged.connect(self.update_plot)
def update_plot(self, filename):
self.plot.clear()
if not os.path.isdir(filename) and filename != '':
try:
results = Results.load(str(filename))
except ValueError:
return
except Exception as e:
raise e
curve = ResultsCurve(results,
x=self.plot_widget.plot_frame.x_axis,
y=self.plot_widget.plot_frame.y_axis,
pen=pg.mkPen(color=(255, 0, 0), width=1.75),
antialias=True
)
curve.update()
self.plot.addItem(curve)
self.preview_param.clear()
for key, param in results.procedure.parameter_objects().items():
new_item = QtGui.QTreeWidgetItem([param.name, str(param)])
self.preview_param.addTopLevelItem(new_item)
self.preview_param.sortItems(0, QtCore.Qt.AscendingOrder)
""" This defines a list of functions that can be used to generate a sequence. """
SAFE_FUNCTIONS = {
'range': range,
'sorted': sorted,
'list': list,
'arange': numpy.arange,
'linspace': numpy.linspace,
'arccos': numpy.arccos,
'arcsin': numpy.arcsin,
'arctan': numpy.arctan,
'arctan2': numpy.arctan2,
'ceil': numpy.ceil,
'cos': numpy.cos,
'cosh': numpy.cosh,
'degrees': numpy.degrees,
'e': numpy.e,
'exp': numpy.exp,
'fabs': numpy.fabs,
'floor': numpy.floor,
'fmod': numpy.fmod,
'frexp': numpy.frexp,
'hypot': numpy.hypot,
'ldexp': numpy.ldexp,
'log': numpy.log,
'log10': numpy.log10,
'modf': numpy.modf,
'pi': numpy.pi,
'power': numpy.power,
'radians': numpy.radians,
'sin': numpy.sin,
'sinh': numpy.sinh,
'sqrt': numpy.sqrt,
'tan': numpy.tan,
'tanh': numpy.tanh,
}
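# Only the names above are available when a sequence string is evaluated (see
# SequencerWidget.eval_string below); for example "arange(0, 1, 0.25)" is a
# valid sequence string, while built-ins such as open() are not accessible.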
class SequenceEvaluationException(Exception):
"""Raised when the evaluation of a sequence string goes wrong."""
pass
class SequencerWidget(QtGui.QWidget):
"""
    Widget that allows one to generate a sequence of measurements with varying
    parameters. Moreover, one can write a simple text file to easily load a
    sequence.
Currently requires a queue function of the ManagedWindow to have a
"procedure" argument.
"""
MAXDEPTH = 10
def __init__(self, inputs=None, sequence_file=None, parent=None):
super().__init__(parent)
self._parent = parent
# if no explicit inputs are given, use the displayed parameters
if inputs is not None:
self._inputs = inputs
else:
self._inputs = self._parent.displays
self._get_properties()
self._setup_ui()
self._layout()
# Load the sequence file if supplied.
if sequence_file is not None:
self.load_sequence(fileName=sequence_file)
def _get_properties(self):
"""
Obtain the names of the input parameters.
"""
parameter_objects = self._parent.procedure_class().parameter_objects()
self.names = {key: parameter.name
for key, parameter
in parameter_objects.items()
if key in self._inputs}
self.names_inv = {name: key for key, name in self.names.items()}
def _setup_ui(self):
self.tree = QtGui.QTreeWidget(self)
self.tree.setHeaderLabels(["Level", "Parameter", "Sequence"])
width = self.tree.viewport().size().width()
self.tree.setColumnWidth(0, int(0.7 * width))
self.tree.setColumnWidth(1, int(0.9 * width))
self.tree.setColumnWidth(2, int(0.9 * width))
self.add_root_item_btn = QtGui.QPushButton("Add root item")
self.add_root_item_btn.clicked.connect(
partial(self._add_tree_item, level=0)
)
self.add_tree_item_btn = QtGui.QPushButton("Add item")
self.add_tree_item_btn.clicked.connect(self._add_tree_item)
self.remove_tree_item_btn = QtGui.QPushButton("Remove item")
self.remove_tree_item_btn.clicked.connect(self._remove_selected_tree_item)
self.load_seq_button = QtGui.QPushButton("Load sequence")
self.load_seq_button.clicked.connect(self.load_sequence)
self.load_seq_button.setToolTip("Load a sequence from a file.")
self.queue_button = QtGui.QPushButton("Queue sequence")
self.queue_button.clicked.connect(self.queue_sequence)
def _layout(self):
btn_box = QtGui.QHBoxLayout()
btn_box.addWidget(self.add_root_item_btn)
btn_box.addWidget(self.add_tree_item_btn)
btn_box.addWidget(self.remove_tree_item_btn)
btn_box_2 = QtGui.QHBoxLayout()
btn_box_2.addWidget(self.load_seq_button)
btn_box_2.addWidget(self.queue_button)
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(6)
vbox.addWidget(self.tree)
vbox.addLayout(btn_box)
vbox.addLayout(btn_box_2)
self.setLayout(vbox)
def _add_tree_item(self, *, level=None, parameter=None, sequence=None):
"""
Add an item to the sequence tree. An item will be added as a child
to the selected (existing) item, except when level is given.
:param level: An integer value determining the level at which an
item is added. If level is 0, a root item will be added.
:param parameter: If given, the parameter field is pre-filled
:param sequence: If given, the sequence field is pre-filled
"""
selected = self.tree.selectedItems()
if len(selected) >= 1 and level != 0:
parent = selected[0]
else:
parent = self.tree.invisibleRootItem()
if level is not None and level > 0:
p_depth = self._depth_of_child(parent)
while p_depth > level - 1:
parent = parent.parent()
p_depth = self._depth_of_child(parent)
comboBox = QtGui.QComboBox()
lineEdit = QtGui.QLineEdit()
comboBox.addItems(list(sorted(self.names_inv.keys())))
item = QtGui.QTreeWidgetItem(parent, [""])
depth = self._depth_of_child(item)
item.setText(0, "{:d}".format(depth))
self.tree.setItemWidget(item, 1, comboBox)
self.tree.setItemWidget(item, 2, lineEdit)
self.tree.expandAll()
for selected_item in selected:
selected_item.setSelected(False)
if parameter is not None:
idx = self.tree.itemWidget(item, 1).findText(parameter)
self.tree.itemWidget(item, 1).setCurrentIndex(idx)
if idx == -1:
log.error(
"Parameter '{}' not found while loading sequence".format(
parameter) + ", probably mistyped."
)
if sequence is not None:
self.tree.itemWidget(item, 2).setText(sequence)
item.setSelected(True)
def _remove_selected_tree_item(self):
"""
Remove the selected item (and any child items) from the sequence tree.
"""
selected = self.tree.selectedItems()
if len(selected) == 0:
return
item = selected[0]
parent = item.parent()
if parent is None:
parent = self.tree.invisibleRootItem()
parent.removeChild(item)
for selected_item in self.tree.selectedItems():
selected_item.setSelected(False)
parent.setSelected(True)
def queue_sequence(self):
"""
Obtain a list of parameters from the sequence tree, enter these into
procedures, and queue these procedures.
"""
self.queue_button.setEnabled(False)
try:
sequence = self._generate_sequence_from_tree()
except SequenceEvaluationException:
log.error("Evaluation of one of the sequence strings went wrong, no sequence queued.")
else:
log.info(
"Queuing %d measurements based on the entered sequences." % len(sequence)
)
for entry in sequence:
QtGui.QApplication.processEvents()
parameters = dict(ChainMap(*entry[::-1]))
procedure = self._parent.make_procedure()
procedure.set_parameters(parameters)
self._parent.queue(procedure=procedure)
finally:
self.queue_button.setEnabled(True)
def load_sequence(self, *, fileName=None):
"""
Load a sequence from a .txt file.
:param fileName: Filename (string) of the to-be-loaded file.
"""
if fileName is None:
fileName, _ = QtGui.QFileDialog.getOpenFileName(self, 'OpenFile')
if len(fileName) == 0:
return
content = []
with open(fileName, "r") as file:
content = file.readlines()
pattern = re.compile("([-]+) \"(.*?)\", \"(.*?)\"")
for line in content:
line = line.strip()
match = pattern.search(line)
if not match:
continue
level = len(match.group(1)) - 1
if level < 0:
continue
parameter = match.group(2)
sequence = match.group(3)
self._add_tree_item(
level=level,
parameter=parameter,
sequence=sequence,
)
def _generate_sequence_from_tree(self):
"""
Generate a list of parameters from the sequence tree.
"""
iterator = QtGui.QTreeWidgetItemIterator(self.tree)
sequences = []
current_sequence = [[] for i in range(self.MAXDEPTH)]
temp_sequence = [[] for i in range(self.MAXDEPTH)]
while iterator.value():
item = iterator.value()
depth = self._depth_of_child(item)
name = self.tree.itemWidget(item, 1).currentText()
parameter = self.names_inv[name]
values = self.eval_string(
self.tree.itemWidget(item, 2).text(),
name, depth,
)
try:
sequence_entry = [{parameter: value} for value in values]
except TypeError:
log.error(
"TypeError, likely no sequence for one of the parameters"
)
else:
current_sequence[depth].extend(sequence_entry)
iterator += 1
next_depth = self._depth_of_child(iterator.value())
for depth_idx in range(depth, next_depth, -1):
temp_sequence[depth_idx].extend(current_sequence[depth_idx])
if depth_idx != 0:
sequence_products = list(product(
current_sequence[depth_idx - 1],
temp_sequence[depth_idx]
))
for i in range(len(sequence_products)):
try:
element = sequence_products[i][1]
except IndexError:
log.error(
"IndexError, likely empty nested parameter"
)
else:
if isinstance(element, tuple):
sequence_products[i] = (
sequence_products[i][0], *element)
temp_sequence[depth_idx - 1].extend(sequence_products)
temp_sequence[depth_idx] = []
current_sequence[depth_idx] = []
current_sequence[depth_idx - 1] = []
if depth == next_depth:
temp_sequence[depth].extend(current_sequence[depth])
current_sequence[depth] = []
sequences = temp_sequence[0]
for idx in range(len(sequences)):
if not isinstance(sequences[idx], tuple):
sequences[idx] = (sequences[idx],)
return sequences
@staticmethod
def _depth_of_child(item):
"""
Determine the level / depth of a child item in the sequence tree.
"""
depth = -1
while item:
item = item.parent()
depth += 1
return depth
@staticmethod
def eval_string(string, name=None, depth=None):
"""
Evaluate the given string. The string is evaluated using a list of
pre-defined functions that are deemed safe to use, to prevent the
        execution of malicious code. For this purpose, built-in functions and
        global variables are also unavailable.
:param string: String to be interpreted.
:param name: Name of the to-be-interpreted string, only used for
error messages.
:param depth: Depth of the to-be-interpreted string, only used
for error messages.
"""
evaluated_string = None
if len(string) > 0:
try:
evaluated_string = eval(
string, {"__builtins__": None}, SAFE_FUNCTIONS
)
except TypeError:
log.error("TypeError, likely a typo in one of the " +
"functions for parameter '{}', depth {}".format(
name, depth
))
raise SequenceEvaluationException()
except SyntaxError:
log.error("SyntaxError, likely unbalanced brackets " +
"for parameter '{}', depth {}".format(name, depth))
raise SequenceEvaluationException()
except ValueError:
log.error("ValueError, likely wrong function argument " +
"for parameter '{}', depth {}".format(name, depth))
raise SequenceEvaluationException()
else:
log.error("No sequence entered for " +
"for parameter '{}', depth {}".format(name, depth))
raise SequenceEvaluationException()
evaluated_string = numpy.array(evaluated_string)
return evaluated_string
class DirectoryLineEdit(QtGui.QLineEdit):
"""
    Widget that allows choosing a directory path.
A completer is implemented for quick completion.
A browse button is available.
"""
def __init__(self, parent=None):
super().__init__(parent=parent)
completer = QtGui.QCompleter(self)
completer.setCompletionMode(QtGui.QCompleter.PopupCompletion)
model = QtGui.QDirModel(completer)
model.setFilter(QtCore.QDir.Dirs | QtCore.QDir.Drives | QtCore.QDir.NoDotAndDotDot | QtCore.QDir.AllDirs)
completer.setModel(model)
self.setCompleter(completer)
browse_action = QtGui.QAction(self)
browse_action.setIcon(self.style().standardIcon(getattr(QtGui.QStyle, 'SP_DialogOpenButton')))
browse_action.triggered.connect(self.browse_triggered)
self.addAction(browse_action, QtGui.QLineEdit.TrailingPosition)
def browse_triggered(self):
path = QtGui.QFileDialog.getExistingDirectory(self, 'Directory', '/')
if path != '':
self.setText(path)
|
py | b407678d0daac5059608d37b529af9cad3c17cca | import factory
from factory import fuzzy
from core.models import Group
class GroupFactory(factory.DjangoModelFactory):
class Meta:
model = Group
name = fuzzy.FuzzyText(prefix="name-")
|
py | b4076946111e36e272761f821f107e8e9064b141 | """
Preprocess a raw json dataset into hdf5/json files for use in data_loader.py
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895}
This script reads this json, does some basic preprocessing on the captions
(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays
Output: a json file and an hdf5 file
The hdf5 file contains several fields:
/labels is (M,max_length) uint32 array of encoded labels, zero padded
/label_start_ix and /label_end_ix are (N,) uint32 arrays of pointers to the
first and last indices (in range 1..M) of labels for each image
/label_length stores the length of the sequence for each of the M sequences
The json file has a dict that contains:
- an 'ix_to_word' field storing the vocab in form {ix:'word'}, where ix is 1-indexed
- an 'images' field that is a list holding auxiliary information for each image,
such as in particular the 'split' it was assigned to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import argparse
from random import shuffle, seed
import string
# non-standard dependencies:
import h5py
import numpy as np
import torch
import torchvision.models as models
import skimage.io
from PIL import Image
import jieba
import copy
import random
def build_vocab(imgs, params):
count_thr = params['word_count_threshold']
seq_length = {}
# count up the number of words
counts = {}
for img in imgs:
sentence_list = img['sentence_list']
        # tokenize each sentence (word segmentation) to build the vocabulary
sentence_token = []
for sentence in sentence_list:
seq_list = list(jieba.cut(sentence.strip(), cut_all=False))
nw = len(seq_list)
seq_length[nw] = seq_length.get(nw, 0 ) + 1
sentence_token.append(seq_list)
for s_l in seq_list:
counts[s_l] = counts.get(s_l,0) + 1
img['sentence_token'] = sentence_token
cw = sorted([(count,w) for w,count in counts.items()], reverse=True)
print('top words and their counts:')
print('\n'.join(map(str,cw[:20])))
# print some stats
total_words = sum(counts.values())
print('total words:', total_words)
bad_words = [w for w,n in counts.items() if n <= count_thr]
vocab = [w for w,n in counts.items() if n > count_thr]
bad_count = sum(counts[w] for w in bad_words)
print('number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts)))
print('number of words in vocab would be %d' % (len(vocab), ))
print('number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words))
# lets look at the distribution of lengths as well
max_len = max(seq_length.keys())
print('max length sentence in raw data: ', max_len)
print('sentence length distribution (count, number of words):')
sum_len = sum(seq_length.values())
for i in range(max_len+1):
print('%2d: %10d %f%%' % (i, seq_length.get(i,0), seq_length.get(i,0)*100.0/sum_len))
# lets now produce the final annotations
if bad_count > 0:
# additional special UNK token we will use below to map infrequent words to
print('inserting the special UNK token')
vocab.append('UNK')
for img in imgs:
img['final_captions'] = []
for sent in img['sentence_token']:
caption = [w if counts.get(w,0) > count_thr else 'UNK' for w in sent]
img['final_captions'].append(caption)
return vocab
def encode_captions(imgs, params, wtoi):
"""
encode all captions into one large array, which will be 1-indexed.
also produces label_start_ix and label_end_ix which store 1-indexed
and inclusive (Lua-style) pointers to the first and last caption for
each image in the dataset.
"""
max_length = params['max_length']
N = len(imgs)
M = sum(len(img['final_captions']) for img in imgs) # total number of captions
    print('total number of captions: {}'.format(M))
label_arrays = []
label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one-indexed
label_end_ix = np.zeros(N, dtype='uint32')
label_length = np.zeros(M, dtype='uint32')
caption_counter = 0
counter = 1
for i,img in enumerate(imgs):
n = len(img['final_captions'])
assert n > 0, 'error: some image has no captions'
Li = np.zeros((n, max_length), dtype='uint32')
for j,s in enumerate(img['final_captions']):
label_length[caption_counter] = min(max_length, len(s)) # record the length of this sequence
caption_counter += 1
for k,w in enumerate(s):
if k < max_length:
Li[j,k] = wtoi[w]
# note: word indices are 1-indexed, and captions are padded with zeros
label_arrays.append(Li)
label_start_ix[i] = counter
label_end_ix[i] = counter + n - 1
counter += n
L = np.concatenate(label_arrays, axis=0) # put all the labels together
assert L.shape[0] == M, 'lengths don\'t match? that\'s weird'
assert np.all(label_length > 0), 'error: some caption had no words?'
print('encoded captions to array of size ', L.shape)
return L, label_start_ix, label_end_ix, label_length
def main(params):
image_infos = json.load(open(params['input_json'], 'r'))
seed(123) # make reproducible
# create the vocab
vocab = build_vocab(image_infos,params)
itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
# encode captions in large arrays, ready to ship to hdf5 file
L, label_start_ix, label_end_ix, label_length = encode_captions(image_infos, params, wtoi)
# create output h5 file
N = len(image_infos)
'''
f_lb = h5py.File(params['output_h5']+'_label.h5', "w")
f_lb.create_dataset("labels", dtype='uint32', data=L)
f_lb.create_dataset("label_start_ix", dtype='uint32', data=label_start_ix)
f_lb.create_dataset("label_end_ix", dtype='uint32', data=label_end_ix)
f_lb.create_dataset("label_length", dtype='uint32', data=label_length)
f_lb.close()
'''
# create output json file
out = {}
out['ix_to_word'] = itow # encode the (1-indexed) vocab
out['images'] = []
for i,img in enumerate(image_infos):
jimg = {}
jimg['split'] = img['split']
if 'filename' in img: jimg['file_path'] = os.path.join(img.get('filepath', ''), img['filename']) # copy it over, might need
if 'cocoid' in img:
            jimg['id'] = img['cocoid'] # copy over & maintain an id, if present (e.g. coco ids, useful)
elif 'imgid' in img:
jimg['id'] = img['imgid']
if params['images_root'] != '':
with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img:
jimg['width'], jimg['height'] = _img.size
out['images'].append(jimg)
'''
json.dump(out, open(params['output_json'], 'w'))
print('wrote ', params['output_json'])
'''
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--output_json', default='data.json', help='output json file')
parser.add_argument('--output_h5', default='data', help='output h5 file')
parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json')
# options
parser.add_argument('--max_length', default=28, type=int, help='max length of a caption, in number of words. captions longer than this get clipped.')
parser.add_argument('--word_count_threshold', default=5, type=int, help='only words that occur more than this number of times will be put in vocab')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
#print(json.dumps(params, indent = 2))
main(params)
|
py | b407697d7dab9dded7a0fdedc269798e297e0353 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto
from pyscf import lib
from pyscf.dft import dks
class KnownValues(unittest.TestCase):
def test_dks_lda(self):
mol = gto.Mole()
mol.atom = [['Ne',(0.,0.,0.)]]
mol.basis = 'uncsto3g'
mol.verbose = 7
mol.output = '/dev/null'
mol.build()
mf = dks.DKS(mol)
mf.xc = 'lda,'
eks4 = mf.kernel()
self.assertAlmostEqual(eks4, -126.041808355268, 9)
mol.stdout.close()
def test_x2c_uks_lda(self):
mol = gto.Mole()
mol.atom = [['Ne',(0.,0.,0.)]]
mol.basis = 'uncsto3g'
mol.verbose = 7
mol.output = '/dev/null'
mol.build()
mf = dks.DKS(mol).x2c()
mf.xc = 'lda,'
eks4 = mf.kernel()
self.assertAlmostEqual(eks4, -126.03378903205831, 9)
mol.stdout.close()
if __name__ == "__main__":
print("Test DKS")
unittest.main()
|
py | b4076c0f4bae5a3a8f7e0e89e3559bafc35a8b25 | import os
import boto3
import pytest
from moto import mock_s3
import pygemstones.io.file as f
import pygemstones.vendor.aws as a
# -----------------------------------------------------------------------------
@pytest.fixture(scope="function")
def aws_credentials():
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
# -----------------------------------------------------------------------------
@pytest.fixture(scope="function")
def s3(aws_credentials):
with mock_s3():
yield boto3.client("s3", region_name="us-east-1")
# -----------------------------------------------------------------------------
@mock_s3
def test_create_key(s3):
bucket_name = "my-bucket"
key_name = "path1/path2"
s3.create_bucket(Bucket=bucket_name)
exists = a.s3_key_exists(s3, bucket_name, key_name)
assert exists == False
a.s3_create_key(s3, bucket_name, key_name)
exists = a.s3_key_exists(s3, bucket_name, key_name)
assert exists
# -----------------------------------------------------------------------------
@mock_s3
def test_create_key_invalid_bucket(s3):
bucket_name = "my-bucket"
key_name = "path1/path2"
s3.create_bucket(Bucket=bucket_name)
exists = a.s3_key_exists(s3, bucket_name, key_name)
assert exists == False
with pytest.raises(SystemExit) as info:
a.s3_create_key(s3, "invalid-bucket", key_name)
assert info.value.args[0] == 10
assert "bucket does not exist" in info.value.args[1]
# -----------------------------------------------------------------------------
@mock_s3
def test_delete_key(s3):
bucket_name = "my-bucket"
key_name = "path1/path2"
s3.create_bucket(Bucket=bucket_name)
s3.put_object(Bucket=bucket_name, Key=key_name, Body=b"test")
exists = a.s3_key_exists(s3, bucket_name, key_name)
assert exists
a.s3_delete_key(s3, bucket_name, key_name)
exists = a.s3_key_exists(s3, bucket_name, key_name)
assert exists == False
# -----------------------------------------------------------------------------
@mock_s3
def test_delete_key_invalid_bucket(s3):
bucket_name = "my-bucket"
key_name = "path1/path2"
s3.create_bucket(Bucket=bucket_name)
s3.put_object(Bucket=bucket_name, Key=key_name, Body=b"test")
with pytest.raises(SystemExit) as info:
a.s3_delete_key(s3, "invalid-bucket", key_name)
assert info.value.args[0] == 10
assert "bucket does not exist" in info.value.args[1]
# -----------------------------------------------------------------------------
@mock_s3
def test_key_exists(s3):
bukcet_name = "my-bucket"
s3.create_bucket(Bucket=bukcet_name)
exists = a.s3_key_exists(s3, bukcet_name, "path1/path2")
assert exists == False
s3.put_object(Bucket=bukcet_name, Key="path1/path2")
exists = a.s3_key_exists(s3, bukcet_name, "path1/path2")
assert exists
# -----------------------------------------------------------------------------
@mock_s3
def test_create_path(s3):
bucket_name = "my-bucket"
key_name = "path1/path2"
s3.create_bucket(Bucket=bucket_name)
exists = a.s3_key_exists(s3, bucket_name, key_name)
assert exists == False
a.s3_create_path(s3, bucket_name, key_name)
exists = a.s3_path_exists(s3, bucket_name, key_name)
assert exists
# -----------------------------------------------------------------------------
@mock_s3
def test_create_path_invalid_bucket(s3):
bucket_name = "my-bucket"
key_name = "path1/path2"
s3.create_bucket(Bucket=bucket_name)
exists = a.s3_key_exists(s3, bucket_name, key_name)
assert exists == False
with pytest.raises(SystemExit) as info:
a.s3_create_path(s3, "invalid-bucket", key_name)
assert info.value.args[0] == 10
assert "bucket does not exist" in info.value.args[1]
# -----------------------------------------------------------------------------
@mock_s3
def test_delete_path(s3):
bucket_name = "my-bucket"
key_name = "path1/path2"
s3.create_bucket(Bucket=bucket_name)
a.s3_create_path(s3, bucket_name, key_name)
exists = a.s3_path_exists(s3, bucket_name, key_name)
assert exists
a.s3_delete_path(s3, bucket_name, key_name)
exists = a.s3_path_exists(s3, bucket_name, key_name)
assert exists == False
# -----------------------------------------------------------------------------
@mock_s3
def test_delete_path_invalid_bucket(s3):
bucket_name = "my-bucket"
key_name = "path1/path2"
s3.create_bucket(Bucket=bucket_name)
s3.put_object(Bucket=bucket_name, Key=key_name, Body=b"test")
with pytest.raises(SystemExit) as info:
a.s3_delete_path(s3, "invalid-bucket", key_name)
assert info.value.args[0] == 10
assert "bucket does not exist" in info.value.args[1]
# -----------------------------------------------------------------------------
@mock_s3
def test_path_exists(s3):
bukcet_name = "my-bucket"
s3.create_bucket(Bucket=bukcet_name)
exists = a.s3_path_exists(s3, bukcet_name, "path1/path2")
assert exists == False
s3.put_object(Bucket=bukcet_name, Key="path1/path2/")
exists = a.s3_path_exists(s3, bukcet_name, "path1/path2")
assert exists
# -----------------------------------------------------------------------------
@mock_s3
def test_upload(s3, tmp_path):
bucket_name = "my-bucket"
key_name = "path1/path2"
target_path = os.path.join(tmp_path, "new-dir")
file_path = os.path.join(target_path, "file1.txt")
f.set_file_content(file_path, "test")
s3.create_bucket(Bucket=bucket_name)
a.s3_upload(file_path, bucket_name, key_name, force=False)
exists = a.s3_key_exists(s3, bucket_name, key_name)
assert exists
# -----------------------------------------------------------------------------
@mock_s3
def test_upload_invalid_file(s3, tmp_path):
bucket_name = "my-bucket"
key_name = "path1/path2"
target_path = os.path.join(tmp_path, "new-dir")
file_path = os.path.join(target_path, "file.txt")
s3.create_bucket(Bucket=bucket_name)
with pytest.raises(SystemExit) as info:
a.s3_upload(file_path, bucket_name, key_name, force=False)
assert info.value.args[0] == 10
assert "not exists" in info.value.args[1]
# -----------------------------------------------------------------------------
@mock_s3
def test_upload_already_existing_key(s3, tmp_path):
bucket_name = "my-bucket"
key_name = "path1/path2"
target_path = os.path.join(tmp_path, "new-dir")
file_path = os.path.join(target_path, "file1.txt")
f.set_file_content(file_path, "test")
s3.create_bucket(Bucket=bucket_name)
a.s3_upload(file_path, bucket_name, key_name, force=False)
with pytest.raises(SystemExit) as info:
a.s3_upload(file_path, bucket_name, key_name, force=False)
assert info.value.args[0] == 10
assert "already exists" in info.value.args[1]
# -----------------------------------------------------------------------------
@mock_s3
def test_upload_force_replace(s3, tmp_path):
bucket_name = "my-bucket"
key_name = "path1/path2"
target_path = os.path.join(tmp_path, "new-dir")
file_path = os.path.join(target_path, "file1.txt")
f.set_file_content(file_path, "test")
s3.create_bucket(Bucket=bucket_name)
a.s3_upload(file_path, bucket_name, key_name, force=False)
a.s3_upload(file_path, bucket_name, key_name, force=True)
exists = a.s3_key_exists(s3, bucket_name, key_name)
assert exists
|
py | b4076c8a3522d172ae642639042dd60e98c90fae | #!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from basecls.configs import ResMLPConfig
_cfg = dict(
model=dict(
name="resmlp_s36",
),
)
class Cfg(ResMLPConfig):
def __init__(self, values_or_file=None, **kwargs):
super().__init__(_cfg)
self.merge(values_or_file, **kwargs)
|
py | b4076d994b1ee1f6d360fd4a1433e0e7ea105704 | from hypernode_vagrant_runner.settings import HYPERNODE_VAGRANT_DEFAULT_PHP_VERSION
from hypernode_vagrant_runner.vagrant import create_hypernode_vagrant
from tests.testcase import TestCase
class TestCreateHypernodeVagrant(TestCase):
def setUp(self):
self.try_sudo = self.set_up_patch(
'hypernode_vagrant_runner.vagrant.set_up.try_sudo'
)
self.ensure_directory_for_checkout = self.set_up_patch(
'hypernode_vagrant_runner.vagrant.set_up.ensure_directory_for_checkout'
)
self.ensure_hypernode_vagrant_checkout = self.set_up_patch(
'hypernode_vagrant_runner.vagrant.set_up.ensure_hypernode_vagrant_checkout'
)
self.ensure_required_plugins_are_installed = self.set_up_patch(
'hypernode_vagrant_runner.vagrant.set_up.ensure_required_plugins_are_installed'
)
self.start_hypernode_vagrant = self.set_up_patch(
'hypernode_vagrant_runner.vagrant.set_up.start_hypernode_vagrant'
)
def test_create_hypernode_vagrant_tries_sudo(self):
create_hypernode_vagrant()
self.try_sudo.assert_called_once_with()
def test_create_hypernode_vagrant_ensures_directory_for_checkout(self):
create_hypernode_vagrant()
self.ensure_directory_for_checkout.assert_called_once_with(
directory=None
)
def test_create_hypernode_vagrant_uses_specified_pre_existing_directory_if_specified(self):
create_hypernode_vagrant(directory='/tmp/some/pre/existing/directory')
self.ensure_directory_for_checkout.assert_called_once_with(
directory='/tmp/some/pre/existing/directory'
)
def test_create_hypernode_vagrant_ensures_hypernode_vagrant_checkout(self):
create_hypernode_vagrant()
self.ensure_hypernode_vagrant_checkout.assert_called_once_with(
directory=self.ensure_directory_for_checkout.return_value
)
def test_create_hypernode_vagrant_ensures_required_plugins_are_installed(self):
create_hypernode_vagrant()
self.ensure_required_plugins_are_installed.assert_called_once_with()
def test_create_hypernode_vagrant_starts_hypernode_vagrant_in_ensured_path(self):
create_hypernode_vagrant()
self.start_hypernode_vagrant.assert_called_once_with(
self.ensure_directory_for_checkout.return_value,
php_version=HYPERNODE_VAGRANT_DEFAULT_PHP_VERSION
)
def test_create_hypernode_vagrant_starts_hypernode_vagrant_with_specified_php_version(self):
create_hypernode_vagrant(php_version='5.5')
self.start_hypernode_vagrant.assert_called_once_with(
self.ensure_directory_for_checkout.return_value,
php_version='5.5'
)
def test_create_hypernode_vagrant_returns_ensured_directory(self):
ret = create_hypernode_vagrant()
self.assertEqual(ret, self.ensure_directory_for_checkout.return_value)
|
py | b4076dc5c8079fafbfd0917a127b1d425f7273db | import unittest
from mpfmonitor.core.playfield import *
from unittest.mock import MagicMock
class TestablePfWidgetNonDrawn(PfWidget):
def __init__(self, mpfmon_mock=None):
if mpfmon_mock is not None:
self.mpfmon = mpfmon_mock
"""
__init__ of PfWidget:
def __init__(self, mpfmon, widget, device_type, device_name, x, y,
size=None, rotation=0, shape=Shape.DEFAULT, save=True):
super().__init__()
self.widget = widget
self.mpfmon = mpfmon
self.name = device_name
self.move_in_progress = True
self.device_type = device_type
self.set_size(size=size)
self.shape = shape
self.angle = rotation
self.setToolTip('{}: {}'.format(self.device_type, self.name))
self.setAcceptedMouseButtons(Qt.LeftButton | Qt.RightButton)
self.setPos(x, y)
self.update_pos(save)
self.click_start = 0
self.release_switch = False
self.log = logging.getLogger('Core')
old_widget_exists = widget.set_change_callback(self.notify)
if old_widget_exists:
self.log.debug("Previous widget exists.")
old_widget_exists(destroy=True)
"""
class TestPfWidgetParameters(unittest.TestCase):
def setUp(self):
self.widget = TestablePfWidgetNonDrawn()
def test_shape_set_valid(self):
shape_to_be_set = Shape.TRIANGLE
self.widget.set_shape(shape_to_be_set)
self.assertEqual(self.widget.shape, shape_to_be_set)
def test_shape_set_invalid(self):
widget = TestablePfWidgetNonDrawn()
shape_to_be_set = "Not_A_Shape"
self.widget.set_shape(shape_to_be_set)
self.assertEqual(self.widget.shape, Shape.DEFAULT)
def test_rotation_set_valid(self):
rotation_to_be_set = 42
self.widget.set_rotation(rotation_to_be_set)
self.assertEqual(self.widget.angle, rotation_to_be_set)
def test_rotation_set_invalid(self):
rotation_to_be_set = 451
self.widget.set_rotation(rotation_to_be_set)
expected_angle = rotation_to_be_set % 360
self.assertEqual(self.widget.angle, expected_angle)
def test_size_set_default(self):
self.widget.mpfmon = MagicMock()
default_size = 0.07
scene_width = 1.00
self.widget.mpfmon.pf_device_size = default_size
self.widget.mpfmon.scene.width.return_value = scene_width
self.widget.set_size()
self.assertEqual(self.widget.size, default_size)
self.assertEqual(self.widget.device_size, default_size * scene_width)
def test_size_set_valid(self):
self.widget.mpfmon = MagicMock()
scene_width = 1.00
self.widget.mpfmon.scene.width.return_value = scene_width
size_to_be_set = 0.07
self.widget.set_size(size=size_to_be_set)
self.assertEqual(self.widget.size, size_to_be_set)
self.assertEqual(self.widget.device_size, size_to_be_set * scene_width)
class TestPfWidgetResizeToDefault(unittest.TestCase):
def setUp(self):
self.mock_mpfmon = MagicMock()
self.widget = TestablePfWidgetNonDrawn(mpfmon_mock=self.mock_mpfmon)
self.widget.device_type = MagicMock()
self.widget.name = MagicMock()
self.widget.set_size = MagicMock()
self.widget.update_pos = MagicMock()
self.config = MagicMock()
self.mock_mpfmon.config[self.widget.device_type].get.return_value = self.config
self.config.get.return_value = None
"""
def resize_to_default(self, force=False):
device_config = self.mpfmon.config[self.device_type].get(self.name, None)
if force:
device_config.pop('size', None) # Delete saved size info, None is incase key doesn't exist (popped twice)
device_size = device_config.get('size', None)
if device_size is not None:
# Do not change the size if it's already set
pass
elif device_config is not None:
self.set_size()
self.update_pos(save=False) # Do not save at this point. Let it be saved elsewhere. This reduces writes."""
def test_size_resize_to_default(self):
self.widget.resize_to_default()
self.mock_mpfmon.config[self.widget.device_type].get.assert_called_once_with(self.widget.name, None)
self.widget.set_size.assert_called_once()
self.widget.update_pos.assert_called_once_with(save=False)
def test_size_resize_to_default_with_force(self):
self.widget.resize_to_default(force=True)
self.mock_mpfmon.config[self.widget.device_type].get.assert_called_once_with(self.widget.name, None)
self.widget.set_size.assert_called_once()
self.widget.update_pos.assert_called_once_with(save=False)
self.config.pop.assert_called_once_with('size', None)
class TestPfWidgetColorFuncs(unittest.TestCase):
def setUp(self):
self.widget = TestablePfWidgetNonDrawn()
def test_color_gamma(self):
color_in = [0, 128, 255]
expected_color_out = [0, 203, 255] # Manually calculated 128 -> 203
color_out = self.widget.color_gamma(color=color_in)
self.assertEqual(color_out, expected_color_out, 'Gamma does not match expected value')
def test_colored_brush_light(self):
device_type = 'light'
mock_widget = MagicMock()
color_in = [0, 128, 255]
expected_q_brush_out = QBrush(QColor(*color_in), Qt.SolidPattern)
self.widget.color_gamma = MagicMock()
self.widget.color_gamma.return_value = color_in
q_brush_out = self.widget.set_colored_brush(device_type=device_type, widget=mock_widget)
self.widget.color_gamma.assert_called_once()
self.assertEqual(q_brush_out, expected_q_brush_out, 'Brush is not returning correct value')
def test_colored_brush_switch_off(self):
mock_widget = MagicMock()
mock_widget.data.return_value = {'state': False}
device_type = 'switch'
color_in = [0, 0, 0]
expected_q_brush_out = QBrush(QColor(*color_in), Qt.SolidPattern)
q_brush_out = self.widget.set_colored_brush(device_type=device_type, widget=mock_widget)
self.assertEqual(q_brush_out, expected_q_brush_out, 'Brush is not returning correct value')
def test_colored_brush_switch_on(self):
mock_widget = MagicMock()
mock_widget.data.return_value = {'state': True}
device_type = 'switch'
color_in = [0, 255, 0]
expected_q_brush_out = QBrush(QColor(*color_in), Qt.SolidPattern)
q_brush_out = self.widget.set_colored_brush(device_type=device_type, widget=mock_widget)
self.assertEqual(q_brush_out, expected_q_brush_out, 'Brush is not returning correct value')
class TestPfWidgetGetAndDestroy(unittest.TestCase):
def setUp(self):
self.widget = TestablePfWidgetNonDrawn(mpfmon_mock=MagicMock())
def test_delete_from_config(self):
device_type = MagicMock()
self.widget.device_type = device_type
name = "delete_test"
self.widget.name = name
self.widget.delete_from_config()
self.widget.mpfmon.config[device_type].pop.assert_called_once_with(name)
self.widget.mpfmon.save_config.assert_called_once()
def test_send_to_inspector_window(self):
self.widget.send_to_inspector_window()
self.widget.mpfmon.inspector_window_last_selected_cb.assert_called_once_with(pf_widget=self.widget)
|
py | b4076e94c329379ab4803f49931eb776074816b9 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('format01.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with unused formats."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet('Data Sheet')
worksheet3 = workbook.add_worksheet()
unused1 = workbook.add_format({'bold': 1})
bold = workbook.add_format({'bold': 1})
unused2 = workbook.add_format({'bold': 1})
unused3 = workbook.add_format({'italic': 1})
worksheet1.write('A1', 'Foo')
worksheet1.write('A2', 123)
worksheet3.write('B2', 'Foo')
worksheet3.write('B3', 'Bar', bold)
worksheet3.write('C4', 234)
workbook.close()
self.assertExcelEqual()
|
py | b407706d36a675847d66c6228a8bb378915be9f9 | #from builtins import range
import dd
import numpy as np
from getsig import getsig
def ddelmsync(shotnr, diag, signal, experiment='AUGD', edition=0,
tBegin=0.0, tEnd=10.0, preft=0.001, suft=0.004,
felm_min=0, felm_max=1000,
elm_exper="AUGD", elm_edition=0):
"""Gets a selected signal or signal-group and syncs it to the ELMs in the desired time interval
Parameters
------------
shotnr: int
Number of the shot to analyse.
diag: str
Name of the shotfile containing the data.
signal: str
Name of the signal in 'diag' to analyse.
experiment: str
        Name of the experiment (username) containing the shotfile.
edition: int
Edition of the shotfile.
tBegin: float
Initial time of analysis.
tEnd: float
Final time for analysis.
preft: float
'Prefix time' to consider before ELM onset.
suft: float
'Suffix time' to consider after ELM ends.
felm_min: float
Minimum ELM frequency to include in analysis.
Default value 0Hz considers all ELMs.
felm_max : float
Maximum ELM frequency to include in analysis.
Default value 1000Hz considers all ELMs.
elm_exper: str
User of the experiment. Default is public shotfile 'AUGD'.
elm_edition: int
Edition of the ELM shotfile. Default is latest edition, 0.
Returns
------------
synctime: np.array(float)
        Array of times reset to the closest ELM.
    syncsig: np.array(float)
        1D or 2D array of data ordered according to the synchronized times.
"""
#################################
##Flag to check if data came from a signal group
sgrp = False
###### Gets ELM data ############
ELM = dd.shotfile("ELM", shotnr, experiment=elm_exper, edition=elm_edition)
elmd = ELM("t_endELM", tBegin=tBegin, tEnd=tEnd)
freq_ELM = ELM("f_ELM", tBegin=tBegin, tEnd=tEnd)
t_endELM = elmd.data
t_begELM = elmd.time
ELM.close()
################################
################################
###### Gets the signal or signal-group
signal = getsig(shotnr, diag, signal, tBegin=tBegin, tEnd=tEnd, edition=edition)
#################### Syncs the timebase to the ELM timebase
###########################
###### Signal group
###########################
syncsig = []#np.zeros_like(signal.data)
synctime = []#np.zeros_like(signal.time)
for elm in range(t_begELM.size):
#Only accept ELMs at the chosen frequency window
if (freq_ELM.data[elm]>=felm_min)&(freq_ELM.data[elm]<=felm_max):
t1, t2 =t_begELM[elm]-preft, t_endELM[elm]+suft
#Re-adjust ELM times so no overlap between consecutive ELMs occurs
if (elm >=1 ) :
tendprev = t_endELM[elm-1]
t1 = np.max([t1,tendprev])
if (elm<t_begELM.size-1):
tstartnext = t_begELM[elm+1]
t2 = np.min([t2,tstartnext])
elmind = np.where((signal.time >= t1) & (signal.time <=t2))
synctime.append(signal.time[elmind]-t_begELM[elm])
#Distinguish between 1D (signal) and 2D array (Signal group)
if len(signal.data.shape)==1:
syncsig.append(signal.data[elmind])
elif len(signal.data.shape)==2:
syncsig.append(signal.data[elmind,:])
else:
raise Exception('Array format not supported!')
else:#Space left open for possible analysis
a=0
#Finally, return is again dependent on array dimensions
if len(signal.data.shape)==1:
synctime_return = np.concatenate(synctime)
syncsig_return = np.concatenate(syncsig)
if len(signal.data.shape)==2:
synctime_return = np.concatenate(synctime)
syncsig_return = np.concatenate(syncsig, axis=1)[0,:,:]
return synctime_return, syncsig_return
|
py | b40770bfcfe908ffcf35b19b34d00a2365d1298a | import numpy as np
### BAO "2014" data (used in Planck 2015 as ext. data)
def get_loglike(class_input, likes_input, class_run):
lnl = 0.
rs = class_run.rs_drag()
# 6DF from 1106.3366
z, data, error = 0.106, 0.327, 0.015
da = class_run.angular_distance(z)
dr = z / class_run.Hubble(z)
dv = (da**2. * (1 + z)**2. * dr)**(1. / 3.)
theo = rs / dv
lnl += -0.5 * (theo - data)**2. / error**2.
# BOSS LOWZ & CMASS DR10&11 from 1312.4877
z, data, error = 0.32, 8.47, 0.17
da = class_run.angular_distance(z)
dr = z / class_run.Hubble(z)
dv = (da**2. * (1 + z)**2. * dr)**(1. / 3.)
theo = dv / rs
lnl += -0.5 * (theo - data)**2. / error**2.
z, data, error = 0.57, 13.77, 0.13
da = class_run.angular_distance(z)
dr = z / class_run.Hubble(z)
dv = (da**2. * (1 + z)**2. * dr)**(1. / 3.)
theo = dv / rs
lnl += -0.5 * (theo - data)**2. / error**2.
# SDSS DR7 MGS from 1409.3242
z, data, error = 0.15, 4.47, 0.16
da = class_run.angular_distance(z)
dr = z / class_run.Hubble(z)
dv = (da**2. * (1 + z)**2. * dr)**(1. / 3.)
theo = dv / rs
lnl += -0.5 * (theo - data)**2. / error**2.
# Return log(like)
return lnl
|
py | b40770d07a6f99b1b60cecd99e0a3666c9267765 | # coding: utf-8
from __future__ import unicode_literals
from .brightcove import BrightcoveNewIE
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
js_to_json,
smuggle_url,
try_get,
)
class NoovoIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^/]+\.)?noovo\.ca/videos/(?P<id>[^/]+/[^/?#&]+)'
_TESTS = [{
# clip
'url': 'http://noovo.ca/videos/rpm-plus/chrysler-imperial',
'info_dict': {
'id': '5386045029001',
'ext': 'mp4',
'title': 'Chrysler Imperial',
'description': 'md5:de3c898d1eb810f3e6243e08c8b4a056',
'timestamp': 1491399228,
'upload_date': '20170405',
'uploader_id': '618566855001',
'series': 'RPM+',
},
'params': {
'skip_download': True,
},
}, {
# episode
'url': 'http://noovo.ca/videos/l-amour-est-dans-le-pre/episode-13-8',
'info_dict': {
'id': '5395865725001',
'title': 'Épisode 13 : Les retrouvailles',
'description': 'md5:888c3330f0c1b4476c5bc99a1c040473',
'ext': 'mp4',
'timestamp': 1492019320,
'upload_date': '20170412',
'uploader_id': '618566855001',
'series': "L'amour est dans le pré",
'season_number': 5,
'episode': 'Épisode 13',
'episode_number': 13,
},
'params': {
'skip_download': True,
},
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/618566855001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
brightcove_id = self._search_regex(
r'data-video-id=["\'](\d+)', webpage, 'brightcove id')
data = self._parse_json(
self._search_regex(
r'(?s)dataLayer\.push\(\s*({.+?})\s*\);', webpage, 'data',
default='{}'),
video_id, transform_source=js_to_json, fatal=False)
title = try_get(
data, lambda x: x['video']['nom'],
compat_str) or self._html_search_meta(
'dcterms.Title', webpage, 'title', fatal=True)
description = self._html_search_meta(
('dcterms.Description', 'description'), webpage, 'description')
series = try_get(
data, lambda x: x['emission']['nom']) or self._search_regex(
r'<div[^>]+class="banner-card__subtitle h4"[^>]*>([^<]+)',
webpage, 'series', default=None)
season_el = try_get(data, lambda x: x['emission']['saison'], dict) or {}
season = try_get(season_el, lambda x: x['nom'], compat_str)
season_number = int_or_none(try_get(season_el, lambda x: x['numero']))
episode_el = try_get(season_el, lambda x: x['episode'], dict) or {}
episode = try_get(episode_el, lambda x: x['nom'], compat_str)
episode_number = int_or_none(try_get(episode_el, lambda x: x['numero']))
return {
'_type': 'url_transparent',
'ie_key': BrightcoveNewIE.ie_key(),
'url': smuggle_url(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
{'geo_countries': ['CA']}),
'id': brightcove_id,
'title': title,
'description': description,
'series': series,
'season': season,
'season_number': season_number,
'episode': episode,
'episode_number': episode_number,
}
|
py | b4077170286182ba4375074d7d1bac14fdec8a60 | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import sys
import time
import uuid
from cloudprovider import AbstractInstanceProvider, LIMIT_EXCEEDED_ERROR_MASSAGE, LIMIT_EXCEEDED_EXIT_CODE
from random import randint
from time import sleep
from googleapiclient import discovery
from pipeline.autoscaling import utils
OS_DISK_SIZE = 10
INSTANCE_USER_NAME = "pipeline"
NO_BOOT_DEVICE_NAME = 'sdb1'
SWAP_DEVICE_NAME = 'sdb2'
# custom instance format
GPU_CUSTOM_INSTANCE_PARTS = 5
GPU_CUSTOM_INSTANCE_TYPE_INDEX = 3
GPU_CUSTOM_INSTANCE_COUNT_INDEX = 4
GPU_NVIDIA_PREFIX = 'nvidia-tesla-'
GPU_TYPE_PREFIX = 'gpu-'
class GCPInstanceProvider(AbstractInstanceProvider):
def __init__(self, cloud_region):
self.cloud_region = cloud_region
self.project_id = os.environ["GOOGLE_PROJECT_ID"]
self.client = discovery.build('compute', 'v1')
def run_instance(self, is_spot, bid_price, ins_type, ins_hdd, ins_img, ins_key, run_id, kms_encyr_key_id,
num_rep, time_rep, kube_ip, kubeadm_token):
ssh_pub_key = utils.read_ssh_key(ins_key)
swap_size = utils.get_swap_size(self.cloud_region, ins_type, is_spot, "GCP")
user_data_script = utils.get_user_data_script(self.cloud_region, ins_type, ins_img,
kube_ip, kubeadm_token, swap_size)
allowed_networks = utils.get_networks_config(self.cloud_region)
subnet_id = 'default'
network_name = 'default'
if allowed_networks and len(allowed_networks) > 0:
network_num = randint(0, len(allowed_networks) - 1)
network_name = allowed_networks.items()[network_num][0]
subnet_id = allowed_networks.items()[network_num][1]
utils.pipe_log('- Networks list found, subnet {} in Network {} will be used'.format(subnet_id, network_name))
else:
utils.pipe_log('- Networks list NOT found, default subnet in random AZ will be used')
instance_type, gpu_type, gpu_count = self.parse_instance_type(ins_type)
machine_type = 'zones/{}/machineTypes/{}'.format(self.cloud_region, instance_type)
instance_name = "gcp-" + uuid.uuid4().hex[0:16]
region_name = self.cloud_region[:self.cloud_region.rfind('-')]
if is_spot:
utils.pipe_log('Preemptible instance with run id: ' + run_id + ' will be launched')
body = {
'name': instance_name,
'machineType': machine_type,
'scheduling': {
'preemptible': is_spot
},
'canIpForward': True,
'disks': self.__get_disk_devices(ins_img, OS_DISK_SIZE, ins_hdd, swap_size),
'networkInterfaces': [
{
'accessConfigs': [
{
'name': 'External NAT',
'type': 'ONE_TO_ONE_NAT'
}
],
'network': 'projects/{project}/global/networks/{network}'.format(project=self.project_id,
network=network_name),
'subnetwork': 'projects/{project}/regions/{region}/subnetworks/{subnet}'.format(
project=self.project_id, subnet=subnet_id, region=region_name)
}
],
'labels': GCPInstanceProvider.get_tags(run_id),
"metadata": {
"items": [
{
"key": "ssh-keys",
"value": "{user}:{key} {user}".format(key=ssh_pub_key, user=INSTANCE_USER_NAME)
},
{
"key": "startup-script",
"value": user_data_script
}
]
}
}
if gpu_type is not None and gpu_count > 0:
gpu = {"guestAccelerators": [
{
"acceleratorCount": [gpu_count],
"acceleratorType": "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/acceleratorTypes/{gpu_type}"
.format(project=self.project_id,
zone=self.cloud_region,
gpu_type=gpu_type)
}
]}
body.update(gpu)
try:
response = self.client.instances().insert(
project=self.project_id,
zone=self.cloud_region,
body=body).execute()
self.__wait_for_operation(response['name'])
except Exception as client_error:
if 'quota' in client_error.__str__().lower():
utils.pipe_log_warn(LIMIT_EXCEEDED_ERROR_MASSAGE)
sys.exit(LIMIT_EXCEEDED_EXIT_CODE)
else:
raise client_error
ip_response = self.client.instances().get(
project=self.project_id,
zone=self.cloud_region,
instance=instance_name
).execute()
private_ip = ip_response['networkInterfaces'][0]['networkIP']
return instance_name, private_ip
def parse_instance_type(self, ins_type):
# Custom type with GPU: gpu-custom-4-16000-k80-1
# Custom type with CPU only: custom-4-16000
# Predefined type: n1-standard-1
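        # For example, parse_instance_type('gpu-custom-4-16000-k80-1') returns
        # ('custom-4-16000', 'nvidia-tesla-k80', '1'), while predefined or
        # CPU-only custom types are returned unchanged with no GPU information.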
if not ins_type.startswith(GPU_TYPE_PREFIX):
return ins_type, None, 0
parts = ins_type[len(GPU_TYPE_PREFIX):].split('-')
if len(parts) != GPU_CUSTOM_INSTANCE_PARTS:
raise RuntimeError('Custom instance type with GPU "%s" does not match expected pattern.' % ins_type)
gpu_type = parts[GPU_CUSTOM_INSTANCE_TYPE_INDEX]
gpu_count = parts[GPU_CUSTOM_INSTANCE_COUNT_INDEX]
return '-'.join(parts[0:GPU_CUSTOM_INSTANCE_TYPE_INDEX]), GPU_NVIDIA_PREFIX + gpu_type, gpu_count
def find_and_tag_instance(self, old_id, new_id):
instance = self.__find_instance(old_id)
if instance:
labels = instance['labels']
labels['name'] = new_id
labels_body = {'labels': labels, 'labelFingerprint': instance['labelFingerprint']}
reassign = self.client.instances().setLabels(
project=self.project_id,
zone=self.cloud_region,
instance=instance['name'],
body=labels_body).execute()
self.__wait_for_operation(reassign['name'])
return instance['name']
else:
raise RuntimeError('Instance with id: {} not found!'.format(old_id))
def verify_run_id(self, run_id):
utils.pipe_log('Checking if instance already exists for RunID {}'.format(run_id))
instance = self.__find_instance(run_id)
if instance and len(instance['networkInterfaces'][0]) > 0:
ins_id = instance['name']
ins_ip = instance['networkInterfaces'][0]['networkIP']
utils.pipe_log('Found existing instance (ID: {}, IP: {}) for RunID {}\n-'.format(ins_id, ins_ip, run_id))
else:
ins_id = ''
ins_ip = ''
utils.pipe_log('No existing instance found for RunID {}\n-'.format(run_id))
return ins_id, ins_ip
def check_instance(self, ins_id, run_id, num_rep, time_rep):
utils.pipe_log('Checking instance ({}) boot state'.format(ins_id))
port = 8888
response = self.__find_instance(run_id)
ipaddr = response['networkInterfaces'][0]['networkIP']
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
utils.pipe_log('- Waiting for instance boot up...')
result = utils.poll_instance(sock, time_rep, ipaddr, port)
rep = 0
while result != 0:
sleep(time_rep)
result = utils.poll_instance(sock, time_rep, ipaddr, port)
rep = utils.increment_or_fail(num_rep, rep,
'Exceeded retry count ({}) for instance ({}) network check on port {}'.format(
num_rep, ins_id, port))
utils.pipe_log('Instance is booted. ID: {}, IP: {}\n-'.format(ins_id, ipaddr))
def get_instance_names(self, ins_id):
instance = self.client.instances().get(
project=self.project_id,
zone=self.cloud_region,
instance=ins_id).execute()
if instance:
# according to https://cloud.google.com/compute/docs/internal-dns#about_internal_dns
return '{}.{}.c.{}.internal'.format(instance['name'], self.cloud_region, self.project_id), instance['name']
return None, None
def find_instance(self, run_id):
instance = self.__find_instance(run_id)
if instance:
return instance['name']
return None
def terminate_instance(self, ins_id):
delete = self.client.instances().delete(
project=self.project_id,
zone=self.cloud_region,
instance=ins_id).execute()
self.__wait_for_operation(delete['name'])
def terminate_instance_by_ip_or_name(self, internal_ip, node_name):
items = self.__filter_instances("")
for instance in items:
if instance['networkInterfaces'][0]['networkIP'] == internal_ip:
self.terminate_instance(instance['name'])
def __find_instance(self, run_id):
items = self.__filter_instances('labels.name="{}"'.format(run_id))
if items:
filtered = [ins for ins in items if 'labels' in ins and ins['labels']['name'] == run_id]
if filtered and len(filtered) == 1:
return filtered[0]
return None
def __filter_instances(self, filter):
result = self.client.instances().list(
project=self.project_id,
zone=self.cloud_region,
filter=filter
).execute()
if 'items' in result:
return result['items']
else:
return None
def __get_boot_device(self, disk_size, image_family):
project_and_family = image_family.split("/")
if len(project_and_family) != 2:
print("node_image parameter doesn't match to Google image name convention: <project>/<imageFamily>")
return {
'boot': True,
'autoDelete': True,
'deviceName': 'sda1',
'initializeParams': {
'diskSizeGb': disk_size,
'diskType': 'projects/{}/zones/{}/diskTypes/pd-ssd'.format(self.project_id, self.cloud_region),
'sourceImage': 'projects/{}/global/images/{}'.format(project_and_family[0], project_and_family[1])
},
'mode': 'READ_WRITE',
'type': 'PERSISTENT'
}
def __get_disk_devices(self, ins_img, os_disk_size, ins_hdd, swap_size):
disks = [self.__get_boot_device(os_disk_size, ins_img),
self.__get_device(ins_hdd, NO_BOOT_DEVICE_NAME)]
if swap_size is not None and swap_size > 0:
disks.append(self.__get_device(swap_size, SWAP_DEVICE_NAME))
return disks
def __get_device(self, ins_hdd, device_name):
return {
'boot': False,
'autoDelete': True,
'deviceName': device_name,
'mode': 'READ_WRITE',
'type': 'PERSISTENT',
'initializeParams': {
'diskSizeGb': ins_hdd,
'diskType': 'projects/{}/zones/{}/diskTypes/pd-ssd'.format(self.project_id, self.cloud_region)
}
}
def __wait_for_operation(self, operation):
while True:
result = self.client.zoneOperations().get(
project=self.project_id,
zone=self.cloud_region,
operation=operation).execute()
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
return result
time.sleep(1)
@staticmethod
def resource_tags():
tags = {}
_, config_tags = utils.load_cloud_config()
if config_tags is None:
return tags
for key, value in config_tags.iteritems():
tags.update({key: value})
return tags
@staticmethod
def run_id_tag(run_id):
return {
'name': run_id,
}
@staticmethod
def get_tags(run_id):
tags = GCPInstanceProvider.run_id_tag(run_id)
res_tags = GCPInstanceProvider.resource_tags()
for key in res_tags:
tags[key.lower()] = res_tags[key].lower()
return tags
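# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original provider code): it illustrates
# how a custom GPU instance type string is expected to decompose, assuming the
# conventional constant values GPU_TYPE_PREFIX = 'gpu-' and GPU_NVIDIA_PREFIX =
# 'nvidia-tesla-'. The helper below restates the parsing logic standalone,
# purely for illustration; the provider's parse_instance_type should be used
# in real code.
def _example_parse_instance_type(ins_type,
                                 gpu_type_prefix='gpu-',
                                 nvidia_prefix='nvidia-tesla-'):
    # Predefined (n1-standard-1) or CPU-only custom (custom-4-16000) types
    # carry no GPU information.
    if not ins_type.startswith(gpu_type_prefix):
        return ins_type, None, 0
    # 'gpu-custom-4-16000-k80-1' -> ['custom', '4', '16000', 'k80', '1']
    parts = ins_type[len(gpu_type_prefix):].split('-')
    return '-'.join(parts[0:3]), nvidia_prefix + parts[3], int(parts[4])
# _example_parse_instance_type('gpu-custom-4-16000-k80-1')
# -> ('custom-4-16000', 'nvidia-tesla-k80', 1)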
|
py | b407720ae80d7e21d1b2d8b1a9a18bcc651bb227 | # encoding: utf-8
# ------------------------------------------------------------------------
# Copyright 2020 All Histolab Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
import csv
import logging
import os
from abc import abstractmethod
from itertools import zip_longest
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import PIL
import PIL.ImageDraw
from .exceptions import LevelError, TileSizeError
from .masks import BiggestTissueBoxMask, BinaryMask
from .scorer import Scorer
from .slide import Slide
from .tile import Tile
from .types import CoordinatePair
from .util import (
random_choice_true_mask2d,
rectangle_to_mask,
region_coordinates,
regions_from_binary_mask,
regions_to_binary_mask,
scale_coordinates,
)
try:
from typing import Protocol, runtime_checkable
except ImportError:
from typing_extensions import Protocol, runtime_checkable
logger = logging.getLogger("tiler")
COORDS_WITHIN_EXTRACTION_MASK_THRESHOLD = 0.8
@runtime_checkable
class Tiler(Protocol):
"""General tiler object"""
level: int
tile_size: Tuple[int, int]
@abstractmethod
def extract(
self,
slide: Slide,
log_level: str,
extraction_mask: BinaryMask = BiggestTissueBoxMask(),
):
pass # pragma: no cover
def locate_tiles(
self,
slide: Slide,
extraction_mask: BinaryMask = BiggestTissueBoxMask(),
scale_factor: int = 32,
alpha: int = 128,
outline: Union[str, Iterable[str], Iterable[Tuple[int]]] = "red",
linewidth: int = 1,
tiles: Optional[Iterable[Tile]] = None,
) -> PIL.Image.Image:
"""Draw tile box references on a rescaled version of the slide
Parameters
----------
slide : Slide
Slide reference where placing the tiles
extraction_mask : BinaryMask, optional
BinaryMask object defining how to compute a binary mask from a Slide.
Default `BiggestTissueBoxMask`
scale_factor : int, optional
Scaling factor for the returned image. Default is 32.
alpha : int, optional
The alpha level to be applied to the rescaled slide. Default is 128.
outline : Union[str, Iterable[str], Iterable[Tuple[int]]], optional
The outline color for the tile annotations. Default is 'red'.
You can provide this as a string compatible with matplotlib, or
you can provide a list of the same length as the tiles, where
each color is your assigned color for the corresponding individual
tile. This list can be a list of matplotlib-style string colors, or
a list of tuples of ints in the [0, 255] range, each of
length 3, representing the red, green and blue color for each tile.
For example, if you have two tiles that you want to be colored
yellow, you can pass this argument as any of the following ..
- 'yellow'
- ['yellow', 'yellow']
- [(255, 255, 0), (255, 255, 0)]
linewidth : int, optional
Thickness of line used to draw tiles. Default is 1.
tiles : Optional[Iterable[Tile]], optional
Tiles to visualize. Will be extracted if None. Default is None.
You may decide to provide this argument if you do not want the
tiles to be re-extracted for visualization if you already have
the tiles in hand.
Returns
-------
PIL.Image.Image
PIL Image of the rescaled slide with the extracted tiles outlined
"""
img = slide.scaled_image(scale_factor)
img.putalpha(alpha)
draw = PIL.ImageDraw.Draw(img)
if tiles is None:
tiles = (
self._tiles_generator(slide, extraction_mask)[0]
if isinstance(self, ScoreTiler)
else self._tiles_generator(slide, extraction_mask)
)
tiles_coords = (tile._coords for tile in tiles)
tile_colors = (tile._color for tile in tiles)
for coords, color, one_outline in self._tile_coords_and_outline_generator(
tiles_coords, tile_colors, outline
):
rescaled = scale_coordinates(coords, slide.dimensions, img.size)
draw.rectangle(tuple(rescaled), fill=color, outline=one_outline, width=linewidth)
return img
# ------- implementation helpers -------
def _has_valid_tile_size(self, slide: Slide) -> bool:
"""Return True if the tile size is smaller or equal than the ``slide`` size.
Parameters
----------
slide : Slide
The slide to check the tile size against.
Returns
-------
bool
True if the tile size is smaller or equal than the ``slide`` size at
extraction level, False otherwise
"""
return (
self.tile_size[0] <= slide.level_dimensions(self.level)[0]
and self.tile_size[1] <= slide.level_dimensions(self.level)[1]
)
@staticmethod
def _tile_coords_and_outline_generator(
tiles_coords: Iterable[CoordinatePair],
tile_colors: Iterable[Tuple[int,int,int]],
outlines: Union[str, List[str], List[Tuple[int]]],
) -> Union[str, Tuple[int]]:
"""Zip tile coordinates and outlines from tile and outline iterators.
Parameters
----------
tiles_coords : Iterable[CoordinatePair]
            Coordinates referring to the tiles' upper left and lower right corners.
        tile_colors : Iterable[Tuple[int, int, int]]
            Fill colors associated with the tiles, one per tile.
outlines : Union[str, Iterable[str], Iterable[Tuple[int]]]
See docstring for ``locate_tiles`` for details.
Yields
-------
CoordinatePair
Coordinates referring to the tiles' upper left and lower right corners.
Union[str, Tuple[int]]
Fixed outline depending on user input to used by method ``locate_tiles``.
"""
if isinstance(outlines, str):
for coords, color in zip(tiles_coords, tile_colors):
yield coords, color, outlines
elif hasattr(outlines, "__iter__"):
for coords, color, one_outline in zip_longest(tiles_coords, tile_colors, outlines):
if None in (coords, color, one_outline):
raise ValueError(
"There should be as many outlines as there are tiles!"
)
yield coords, color, one_outline
else:
raise ValueError(
"The parameter ``outline`` should be of type: "
"str, Iterable[str], or Iterable[List[int]]"
)
def _tile_filename(
self, tile_wsi_coords: CoordinatePair, tiles_counter: int
) -> str:
"""Return the tile filename according to its 0-level coordinates and a counter.
Parameters
----------
tile_wsi_coords : CoordinatePair
0-level coordinates of the slide the tile has been extracted from.
tiles_counter : int
Counter of extracted tiles.
Returns
-------
str
Tile filename, according to the format
`{prefix}tile_{tiles_counter}_level{level}_{x_ul_wsi}-{y_ul_wsi}-{x_br_wsi}"
"-{y_br_wsi}{suffix}`
"""
x_ul_wsi, y_ul_wsi, x_br_wsi, y_br_wsi = tile_wsi_coords
tile_filename = (
f"{self.prefix}tile_{tiles_counter}_level{self.level}_{x_ul_wsi}-{y_ul_wsi}"
f"-{x_br_wsi}-{y_br_wsi}{self.suffix}"
)
return tile_filename
def _tiles_generator(
self, slide: Slide, extraction_mask: BinaryMask = BiggestTissueBoxMask()
) -> Tuple[Tile, CoordinatePair]:
pass # pragma: no cover
def _validate_level(self, slide: Slide) -> None:
"""Validate the Tiler's level according to the Slide.
Parameters
----------
slide : Slide
Slide from which to extract the tiles
Raises
------
LevelError
If the level is not available for the slide
"""
if len(slide.levels) - abs(self.level) < 0:
raise LevelError(
f"Level {self.level} not available. Number of available levels: "
f"{len(slide.levels)}"
)
def _validate_tile_size(self, slide: Slide) -> None:
"""Validate the tile size according to the Slide.
Parameters
----------
slide : Slide
Slide from which to extract the tiles
Raises
------
TileSizeError
If the tile size is larger than the slide size
"""
if not self._has_valid_tile_size(slide):
raise TileSizeError(
f"Tile size {self.tile_size} is larger than slide size "
f"{slide.level_dimensions(self.level)} at level {self.level}"
)
class GridTiler(Tiler):
"""Extractor of tiles arranged in a grid, at the given level, with the given size.
Arguments
---------
tile_size : Tuple[int, int]
(width, height) of the extracted tiles.
level : int, optional
Level from which extract the tiles. Default is 0.
check_tissue : bool, optional
Whether to check if the tile has enough tissue to be saved. Default is True.
tissue_percent : float, optional
Number between 0.0 and 100.0 representing the minimum required percentage of
tissue over the total area of the image, default is 80.0. This is considered
        only if ``check_tissue`` is True.
pixel_overlap : int, optional
Number of overlapping pixels (for both height and width) between two adjacent
tiles. If negative, two adjacent tiles will be strided by the absolute value of
``pixel_overlap``. Default is 0.
prefix : str, optional
Prefix to be added to the tile filename. Default is an empty string.
suffix : str, optional
Suffix to be added to the tile filename. Default is '.png'
"""
def __init__(
self,
tile_size: Tuple[int, int],
level: int = 0,
check_tissue: bool = True,
tissue_percent: float = 80.0,
pixel_overlap: int = 0,
prefix: str = "",
suffix: str = ".png",
):
self.tile_size = tile_size
self.level = level
self.check_tissue = check_tissue
self.tissue_percent = tissue_percent
self.pixel_overlap = pixel_overlap
self.prefix = prefix
self.suffix = suffix
def extract(
self,
slide: Slide,
extraction_mask: BinaryMask = BiggestTissueBoxMask(),
log_level: str = "INFO",
) -> None:
"""Extract tiles arranged in a grid and save them to disk, following this
filename pattern:
`{prefix}tile_{tiles_counter}_level{level}_{x_ul_wsi}-{y_ul_wsi}-{x_br_wsi}-{y_br_wsi}{suffix}`
Parameters
----------
slide : Slide
Slide from which to extract the tiles
extraction_mask : BinaryMask, optional
BinaryMask object defining how to compute a binary mask from a Slide.
Default `BiggestTissueBoxMask`.
log_level : str, {"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}
Threshold level for the log messages. Default "INFO"
Raises
------
TileSizeError
If the tile size is larger than the slide size
LevelError
If the level is not available for the slide
"""
level = logging.getLevelName(log_level)
logger.setLevel(level)
self._validate_level(slide)
self._validate_tile_size(slide)
grid_tiles = self._tiles_generator(slide, extraction_mask)
tiles_counter = 0
for tiles_counter, (tile, tile_wsi_coords) in enumerate(grid_tiles):
tile_filename = self._tile_filename(tile_wsi_coords, tiles_counter)
full_tile_path = os.path.join(slide.processed_path, tile_filename)
tile.save(full_tile_path)
logger.info(f"\t Tile {tiles_counter} saved: {tile_filename}")
logger.info(f"{tiles_counter} Grid Tiles have been saved.")
@property
def tile_size(self) -> Tuple[int, int]:
"""(width, height) of the extracted tiles."""
return self._valid_tile_size
@tile_size.setter
def tile_size(self, tile_size_: Tuple[int, int]):
if tile_size_[0] < 1 or tile_size_[1] < 1:
raise ValueError(f"Tile size must be greater than 0 ({tile_size_})")
self._valid_tile_size = tile_size_
# ------- implementation helpers -------
def _are_coordinates_within_extraction_mask(
self,
tile_thumb_coords: CoordinatePair,
binary_mask_region: np.ndarray,
) -> bool:
"""Chack whether the ``tile_thumb_coords`` are inside of ``binary_mask_region``.
Return True if 80% of the tile area defined by tile_thumb_coords is inside the
area of the ``binary_mask_region.
Parameters
----------
tile_thumb_coords : CoordinatePair
Coordinates of the tile at thumbnail dimension.
binary_mask_region : np.ndarray
Binary mask with True inside of the tissue region considered.
Returns
-------
bool
            True if at least 80% of the tile area defined by ``tile_thumb_coords``
            is inside the area of ``binary_mask_region``, False otherwise.
"""
tile_thumb_mask = rectangle_to_mask(
dims=binary_mask_region.shape, vertices=tile_thumb_coords
)
tile_in_binary_mask = binary_mask_region & tile_thumb_mask
tile_area = np.count_nonzero(tile_thumb_mask)
tile_in_binary_mask_area = np.count_nonzero(tile_in_binary_mask)
return tile_area > 0 and (
tile_in_binary_mask_area / tile_area
> COORDS_WITHIN_EXTRACTION_MASK_THRESHOLD
)
def _grid_coordinates_from_bbox_coordinates(
self,
bbox_coordinates_lvl: CoordinatePair,
slide: Slide,
binary_mask_region: np.ndarray,
) -> CoordinatePair:
"""Generate Coordinates at level 0 of grid tiles within a tissue box.
Parameters
----------
bbox_coordinates_lvl : CoordinatePair
Coordinates of the tissue box from which to calculate the coordinates of the
tiles.
slide : Slide
Slide from which to calculate the coordinates.
binary_mask_region : np.ndarray
Binary mask corresponding to the connected component (region) considered.
Notes
-----
This method needs to be called for every connected component (region) within the
extraction mask.
Yields
-------
Iterator[CoordinatePair]
Iterator of tiles' CoordinatePair
"""
tile_w_lvl, tile_h_lvl = self.tile_size
n_tiles_row = self._n_tiles_row(bbox_coordinates_lvl)
n_tiles_column = self._n_tiles_column(bbox_coordinates_lvl)
for i in range(n_tiles_row):
for j in range(n_tiles_column):
x_ul_lvl = (
bbox_coordinates_lvl.x_ul + tile_w_lvl * i - self.pixel_overlap
)
y_ul_lvl = (
bbox_coordinates_lvl.y_ul + tile_h_lvl * j - self.pixel_overlap
)
x_ul_lvl = np.clip(x_ul_lvl, bbox_coordinates_lvl.x_ul, None)
y_ul_lvl = np.clip(y_ul_lvl, bbox_coordinates_lvl.y_ul, None)
x_br_lvl = x_ul_lvl + tile_w_lvl
y_br_lvl = y_ul_lvl + tile_h_lvl
tile_lvl_coords = CoordinatePair(x_ul_lvl, y_ul_lvl, x_br_lvl, y_br_lvl)
tile_thumb_coords = scale_coordinates(
reference_coords=tile_lvl_coords,
reference_size=slide.level_dimensions(level=self.level),
target_size=binary_mask_region.shape[::-1],
)
if self._are_coordinates_within_extraction_mask(
tile_thumb_coords, binary_mask_region
):
tile_wsi_coords = scale_coordinates(
reference_coords=tile_lvl_coords,
reference_size=slide.level_dimensions(level=self.level),
target_size=slide.level_dimensions(level=0),
)
yield tile_wsi_coords
def _grid_coordinates_generator(
self, slide: Slide, extraction_mask: BinaryMask = BiggestTissueBoxMask()
) -> CoordinatePair:
"""Generate Coordinates at level 0 of grid tiles within the tissue.
Parameters
----------
slide : Slide
Slide from which to calculate the coordinates. Needed to calculate the
tissue area.
extraction_mask : BinaryMask, optional
BinaryMask object defining how to compute a binary mask from a Slide.
Default `BiggestTissueBoxMask`.
Yields
-------
Iterator[CoordinatePair]
Iterator of tiles' CoordinatePair
"""
binary_mask = extraction_mask(slide)
regions = regions_from_binary_mask(binary_mask)
for region in regions:
bbox_coordinates_thumb = region_coordinates(region) # coords of the bbox
bbox_coordinates_lvl = scale_coordinates(
bbox_coordinates_thumb,
binary_mask.shape[::-1],
slide.level_dimensions(self.level),
)
binary_mask_region = regions_to_binary_mask([region], binary_mask.shape)
yield from self._grid_coordinates_from_bbox_coordinates(
bbox_coordinates_lvl, slide, binary_mask_region
)
def _tiles_generator(
self, slide: Slide, extraction_mask: BinaryMask = BiggestTissueBoxMask()
) -> Tuple[Tile, CoordinatePair]:
"""Generator of tiles arranged in a grid.
Parameters
----------
slide : Slide
Slide from which to extract the tiles
extraction_mask : BinaryMask, optional
BinaryMask object defining how to compute a binary mask from a Slide.
Default `BiggestTissueBoxMask`.
Yields
-------
Tile
Extracted tile
CoordinatePair
Coordinates of the slide at level 0 from which the tile has been extracted
"""
grid_coordinates_generator = self._grid_coordinates_generator(
slide, extraction_mask
)
for coords in grid_coordinates_generator:
try:
tile = slide.extract_tile(coords, self.level, self.tile_size)
except ValueError:
continue
if not self.check_tissue or tile.has_enough_tissue(self.tissue_percent):
yield tile, coords
def _n_tiles_column(self, bbox_coordinates: CoordinatePair) -> int:
"""Return the number of tiles which can be extracted in a column.
Parameters
----------
bbox_coordinates : CoordinatePair
Coordinates of the tissue box
Returns
-------
int
Number of tiles which can be extracted in a column.
"""
return (bbox_coordinates.y_br - bbox_coordinates.y_ul) // (
self.tile_size[1] - self.pixel_overlap
)
def _n_tiles_row(self, bbox_coordinates: CoordinatePair) -> int:
"""Return the number of tiles which can be extracted in a row.
Parameters
----------
bbox_coordinates : CoordinatePair
Coordinates of the tissue box
Returns
-------
int
Number of tiles which can be extracted in a row.
"""
return (bbox_coordinates.x_br - bbox_coordinates.x_ul) // (
self.tile_size[0] - self.pixel_overlap
)
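# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the library module): a minimal helper that
# shows how a GridTiler is typically wired to a Slide and previewed with
# ``locate_tiles``. The slide path, output paths and tile size are purely
# illustrative assumptions.
def _example_grid_extraction(slide_path="example.svs"):
    example_slide = Slide(slide_path, processed_path="grid_tiles")
    grid_tiler = GridTiler(tile_size=(512, 512), level=0, pixel_overlap=0)
    # Save a thumbnail of the slide with the tile grid drawn on top of it.
    grid_tiler.locate_tiles(example_slide, scale_factor=64).save("grid_preview.png")
    # Extract and save the grid tiles that contain enough tissue.
    grid_tiler.extract(example_slide)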
class RandomTiler(Tiler):
"""Extractor of random tiles from a Slide, at the given level, with the given size.
Arguments
---------
tile_size : Tuple[int, int]
(width, height) of the extracted tiles.
n_tiles : int
Maximum number of tiles to extract.
level : int, optional
Level from which extract the tiles. Default is 0.
seed : int, optional
Seed for RandomState. Must be convertible to 32 bit unsigned integers. Default
is 7.
check_tissue : bool, optional
Whether to check if the tile has enough tissue to be saved. Default is True.
tissue_percent : float, optional
Number between 0.0 and 100.0 representing the minimum required percentage of
tissue over the total area of the image, default is 80.0. This is considered
        only if ``check_tissue`` is True.
prefix : str, optional
Prefix to be added to the tile filename. Default is an empty string.
suffix : str, optional
Suffix to be added to the tile filename. Default is '.png'
max_iter : int, optional
Maximum number of iterations performed when searching for eligible (if
        ``check_tissue=True``) tiles. Must be greater than or equal to ``n_tiles``.
"""
def __init__(
self,
tile_size: Tuple[int, int],
n_tiles: int,
level: int = 0,
seed: int = 7,
check_tissue: bool = True,
tissue_percent: float = 80.0,
prefix: str = "",
suffix: str = ".png",
max_iter: int = int(1e4),
):
self.tile_size = tile_size
self.n_tiles = n_tiles
self.max_iter = max_iter
self.level = level
self.seed = seed
self.check_tissue = check_tissue
self.tissue_percent = tissue_percent
self.prefix = prefix
self.suffix = suffix
def extract(
self,
slide: Slide,
extraction_mask: BinaryMask = BiggestTissueBoxMask(),
log_level: str = "INFO",
) -> None:
"""Extract random tiles and save them to disk, following this filename pattern:
`{prefix}tile_{tiles_counter}_level{level}_{x_ul_wsi}-{y_ul_wsi}-{x_br_wsi}-{y_br_wsi}{suffix}`
Parameters
----------
slide : Slide
Slide from which to extract the tiles
extraction_mask : BinaryMask, optional
BinaryMask object defining how to compute a binary mask from a Slide.
Default `BiggestTissueBoxMask`.
log_level: str, {"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}
Threshold level for the log messages. Default "INFO"
Raises
------
TileSizeError
If the tile size is larger than the slide size
LevelError
If the level is not available for the slide
"""
level = logging.getLevelName(log_level)
logger.setLevel(level)
self._validate_level(slide)
self._validate_tile_size(slide)
random_tiles = self._tiles_generator(slide, extraction_mask)
tiles_counter = 0
for tiles_counter, (tile, tile_wsi_coords) in enumerate(random_tiles):
tile_filename = self._tile_filename(tile_wsi_coords, tiles_counter)
full_tile_path = os.path.join(slide.processed_path, tile_filename)
tile.save(full_tile_path)
logger.info(f"\t Tile {tiles_counter} saved: {tile_filename}")
logger.info(f"{tiles_counter+1} Random Tiles have been saved.")
@property
def max_iter(self) -> int:
return self._valid_max_iter
@max_iter.setter
def max_iter(self, max_iter_: int = int(1e4)):
if max_iter_ < self.n_tiles:
raise ValueError(
f"The maximum number of iterations ({max_iter_}) must be grater than or"
f" equal to the maximum number of tiles ({self.n_tiles})."
)
self._valid_max_iter = max_iter_
@property
def tile_size(self) -> Tuple[int, int]:
return self._valid_tile_size
@tile_size.setter
def tile_size(self, tile_size_: Tuple[int, int]):
if tile_size_[0] < 1 or tile_size_[1] < 1:
raise ValueError(f"Tile size must be greater than 0 ({tile_size_})")
self._valid_tile_size = tile_size_
# ------- implementation helpers -------
def _random_tile_coordinates(
self, slide: Slide, extraction_mask: BinaryMask = BiggestTissueBoxMask()
) -> CoordinatePair:
"""Return 0-level Coordinates of a tile picked at random within the box.
Parameters
----------
slide : Slide
Slide from which calculate the coordinates. Needed to calculate the box.
extraction_mask : BinaryMask, optional
BinaryMask object defining how to compute a binary mask from a Slide.
Default `BiggestTissueBoxMask`.
Returns
-------
CoordinatePair
Random tile Coordinates at level 0
"""
binary_mask = extraction_mask(slide)
tile_w_lvl, tile_h_lvl = self.tile_size
x_ul_lvl, y_ul_lvl = random_choice_true_mask2d(binary_mask)
# Scale tile dimensions to extraction mask dimensions
tile_w_thumb = (
tile_w_lvl * binary_mask.shape[1] / slide.level_dimensions(self.level)[0]
)
tile_h_thumb = (
tile_h_lvl * binary_mask.shape[0] / slide.level_dimensions(self.level)[1]
)
x_br_lvl = x_ul_lvl + tile_w_thumb
y_br_lvl = y_ul_lvl + tile_h_thumb
tile_wsi_coords = scale_coordinates(
reference_coords=CoordinatePair(x_ul_lvl, y_ul_lvl, x_br_lvl, y_br_lvl),
reference_size=binary_mask.shape[::-1],
target_size=slide.dimensions,
)
return tile_wsi_coords
def _tiles_generator(
self, slide: Slide, extraction_mask: BinaryMask = BiggestTissueBoxMask()
) -> Tuple[Tile, CoordinatePair]:
"""Generate Random Tiles within a slide box.
Stops if:
* the number of extracted tiles is equal to ``n_tiles`` OR
* the maximum number of iterations ``max_iter`` is reached
Parameters
----------
slide : Slide
The Whole Slide Image from which to extract the tiles.
extraction_mask : BinaryMask, optional
BinaryMask object defining how to compute a binary mask from a Slide.
Default `BiggestTissueBoxMask`.
Yields
------
tile : Tile
The extracted Tile
coords : CoordinatePair
The level-0 coordinates of the extracted tile
"""
np.random.seed(self.seed)
iteration = valid_tile_counter = 0
while True:
tile_wsi_coords = self._random_tile_coordinates(slide, extraction_mask)
try:
tile = slide.extract_tile(tile_wsi_coords, self.level, self.tile_size)
except ValueError:
iteration -= 1
continue
if not self.check_tissue or tile.has_enough_tissue(self.tissue_percent):
yield tile, tile_wsi_coords
valid_tile_counter += 1
iteration += 1
if self.max_iter and iteration >= self.max_iter:
break
if valid_tile_counter >= self.n_tiles:
break
class ScoreTiler(GridTiler):
"""Extractor of tiles arranged in a grid according to a scoring function.
The extraction procedure is the same as the ``GridTiler`` extractor, but only the
first ``n_tiles`` tiles with the highest score are saved.
Arguments
---------
scorer : Scorer
Scoring function used to score the tiles.
tile_size : Tuple[int, int]
(width, height) of the extracted tiles.
n_tiles : int, optional
The number of tiles to be saved. Default is 0, which means that all the tiles
        will be saved (the same behaviour as a GridTiler). Cannot be negative.
level : int, optional
Level from which extract the tiles. Default is 0.
check_tissue : bool, optional
Whether to check if the tile has enough tissue to be saved. Default is True.
tissue_percent : float, optional
Number between 0.0 and 100.0 representing the minimum required percentage of
tissue over the total area of the image, default is 80.0. This is considered
        only if ``check_tissue`` is True.
pixel_overlap : int, optional
Number of overlapping pixels (for both height and width) between two adjacent
tiles. If negative, two adjacent tiles will be strided by the absolute value of
``pixel_overlap``. Default is 0.
prefix : str, optional
Prefix to be added to the tile filename. Default is an empty string.
suffix : str, optional
Suffix to be added to the tile filename. Default is '.png'
"""
def __init__(
self,
scorer: Scorer,
tile_size: Tuple[int, int],
n_tiles: int = 0,
level: int = 0,
check_tissue: bool = True,
tissue_percent: float = 80.0,
pixel_overlap: int = 0,
prefix: str = "",
suffix: str = ".png",
):
self.scorer = scorer
self.n_tiles = n_tiles
super().__init__(
tile_size,
level,
check_tissue,
tissue_percent,
pixel_overlap,
prefix,
suffix,
)
def extract(
self,
slide: Slide,
extraction_mask: BinaryMask = BiggestTissueBoxMask(),
report_path: str = None,
log_level: str = "INFO",
) -> None:
"""Extract grid tiles and save them to disk, according to a scoring function and
following this filename pattern:
`{prefix}tile_{tiles_counter}_level{level}_{x_ul_wsi}-{y_ul_wsi}-{x_br_wsi}-{y_br_wsi}{suffix}`
Save a CSV report file with the saved tiles and the associated score.
Parameters
----------
slide : Slide
Slide from which to extract the tiles
extraction_mask : BinaryMask, optional
BinaryMask object defining how to compute a binary mask from a Slide.
Default `BiggestTissueBoxMask`.
report_path : str, optional
Path to the CSV report. If None, no report will be saved
log_level: str, {"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}
Threshold level for the log messages. Default "INFO"
Raises
------
TileSizeError
If the tile size is larger than the slide size
LevelError
If the level is not available for the slide
"""
level = logging.getLevelName(log_level)
logger.setLevel(level)
self._validate_level(slide)
self._validate_tile_size(slide)
highest_score_tiles, highest_scaled_score_tiles = self._tiles_generator(
slide, extraction_mask
)
tiles_counter = 0
filenames = []
for tiles_counter, (score, tile_wsi_coords) in enumerate(highest_score_tiles):
tile = slide.extract_tile(tile_wsi_coords, self.level, self.tile_size)
tile_filename = self._tile_filename(tile_wsi_coords, tiles_counter)
tile.save(os.path.join(slide.processed_path, tile_filename))
filenames.append(tile_filename)
logger.info(
f"\t Tile {tiles_counter} - score: {score} saved: {tile_filename}"
)
if report_path:
self._save_report(
report_path, highest_score_tiles, highest_scaled_score_tiles, filenames
)
logger.info(f"{tiles_counter+1} Grid Tiles have been saved.")
# ------- implementation helpers -------
def _tiles_generator(
self, slide: Slide, extraction_mask: BinaryMask = BiggestTissueBoxMask()
) -> Tuple[List[Tuple[float, CoordinatePair]], List[Tuple[float, CoordinatePair]]]:
r"""Calculate the tiles with the highest scores and their extraction coordinates
Parameters
----------
slide : Slide
The slide to extract the tiles from.
extraction_mask : BinaryMask, optional
BinaryMask object defining how to compute a binary mask from a Slide.
Default `BiggestTissueBoxMask`.
Returns
-------
Tuple[List[Tuple[float, CoordinatePair]], List[Tuple[float, CoordinatePair]]]
            Two lists of tuples containing the scores and the extraction coordinates
            for the tiles with the highest scores: the first holds the raw scores,
            the second the scaled scores, where each score `s_i` of the i-th tile
            is normalized as
.. math::
s_{\hat{i}}=\frac{s_i-\min_{j\in T}{s_j}}{\max_{j\in T}{s_j}-\min_{j\in T}{s_j}}
where `T` is the set of all the retrieved tiles. Notice that the normalized
scores range between 0 and 1. This could be useful to have a more intuitive
comparison between the scores. Each tuple represents a tile.
Raises
------
ValueError
If ``n_tiles`` is negative.
""" # noqa
all_scores = self._scores(slide, extraction_mask)
scaled_scores = self._scale_scores(all_scores)
sorted_tiles_by_score = sorted(all_scores, key=lambda x: x[0], reverse=True)
sorted_tiles_by_scaled_score = sorted(
scaled_scores, key=lambda x: x[0], reverse=True
)
if self.n_tiles < 0:
raise ValueError(f"'n_tiles' cannot be negative ({self.n_tiles})")
if self.n_tiles > 0:
highest_score_tiles = sorted_tiles_by_score[: self.n_tiles]
highest_scaled_score_tiles = sorted_tiles_by_scaled_score[: self.n_tiles]
else:
highest_score_tiles = sorted_tiles_by_score
highest_scaled_score_tiles = sorted_tiles_by_scaled_score
return highest_score_tiles, highest_scaled_score_tiles
@staticmethod
def _save_report(
report_path: str,
highest_score_tiles: List[Tuple[float, CoordinatePair]],
highest_scaled_score_tiles: List[Tuple[float, CoordinatePair]],
filenames: List[str],
) -> None:
"""Save to ``filename`` the report of the saved tiles with the associated score.
The CSV file
Parameters
----------
slide : Slide
The slide to extract the tiles from.
report_path : str
Path to the report
highest_score_tiles : List[Tuple[float, CoordinatePair]]
List of tuples containing the score and the extraction coordinates for the
tiles with the highest score. Each tuple represents a tile.
highest_scaled_score_tiles : List[Tuple[float, CoordinatePair]]
List of tuples containing the scaled score between 0 and 1 and the
extraction coordinates for the tiles with the highest score. Each tuple
represents a tile.
filenames : List[str]
List of the tiles' filename
"""
header = ["filename", "score", "scaled_score"]
rows = [
dict(zip(header, values))
for values in zip(
filenames,
np.array(highest_score_tiles, dtype=object)[:, 0],
np.array(highest_scaled_score_tiles, dtype=object)[:, 0],
)
]
with open(report_path, "w+", newline="") as filename:
writer = csv.DictWriter(
filename, fieldnames=header, lineterminator=os.linesep
)
writer.writeheader()
writer.writerows(rows)
@staticmethod
def _scale_scores(
scores: List[Tuple[float, CoordinatePair]]
) -> List[Tuple[float, CoordinatePair]]:
"""Scale scores between 0 and 1.
Parameters
----------
scores : List[Tuple[float, CoordinatePair]]
Scores to be scaled
Returns
-------
List[Tuple[float, CoordinatePair]])
Scaled scores
"""
scores_ = np.array(scores, dtype=object)[:, 0]
coords = np.array(scores, dtype=object)[:, 1]
scores_scaled = (scores_ - np.min(scores_)) / (
np.max(scores_) - np.min(scores_)
)
return list(zip(scores_scaled, coords))
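    # Worked example (hedged, illustrative numbers): raw scores [2.0, 4.0, 8.0]
    # are scaled to [(2-2)/(8-2), (4-2)/(8-2), (8-2)/(8-2)] = [0.0, 0.33..., 1.0],
    # so the best tile always maps to 1 and the worst to 0.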
def _scores(
self, slide: Slide, extraction_mask: BinaryMask = BiggestTissueBoxMask()
) -> List[Tuple[float, CoordinatePair]]:
"""Calculate the scores for all the tiles extracted from the ``slide``.
Parameters
----------
slide : Slide
The slide to extract the tiles from.
extraction_mask : BinaryMask, optional
BinaryMask object defining how to compute a binary mask from a Slide.
Default `BiggestTissueBoxMask`.
Returns
-------
List[Tuple[float, CoordinatePair]]
List of tuples containing the score and the extraction coordinates for each
tile. Each tuple represents a tile.
"""
if next(super()._tiles_generator(slide, extraction_mask), None) is None:
raise RuntimeError(
"No tiles have been generated. This could happen if `check_tissue=True`"
)
grid_tiles = super()._tiles_generator(slide, extraction_mask)
scores = []
for tile, tile_wsi_coords in grid_tiles:
score = self.scorer(tile)
scores.append((score, tile_wsi_coords))
return scores
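# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the library module): scoring-based
# extraction with a CSV report. ``NucleiScorer`` is assumed to be available
# from the package's scorer module; paths, tile size and tile count are
# illustrative assumptions only.
def _example_scored_extraction(slide_path="example.svs"):
    from .scorer import NucleiScorer  # assumed concrete Scorer implementation
    example_slide = Slide(slide_path, processed_path="scored_tiles")
    score_tiler = ScoreTiler(
        scorer=NucleiScorer(),
        tile_size=(512, 512),
        n_tiles=50,  # keep only the 50 highest-scoring tiles
        check_tissue=True,
    )
    # Saves the selected tiles plus a CSV with filename, score and scaled score.
    score_tiler.extract(example_slide, report_path="tiles_report.csv")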
|
py | b407743006246ac8b92de666deb8951ff0405576 | import base64
import datetime
from algosdk.future import transaction
from algosdk import account, mnemonic
from algosdk.v2client import algod
# user declared account mnemonics
creator_mnemonic = "Your 25-word mnemonic goes here"
user_mnemonic = "A second distinct 25-word mnemonic goes here"
# user declared algod connection parameters
algod_address = "http://localhost:4001"
algod_token = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
# declare application state storage (immutable)
local_ints = 1
local_bytes = 1
global_ints = 1
global_bytes = 0
global_schema = transaction.StateSchema(global_ints, global_bytes)
local_schema = transaction.StateSchema(local_ints, local_bytes)
# user declared approval program (initial)
approval_program_source_initial = b"""#pragma version 2
// Handle each possible OnCompletion type. We don't have to worry about
// handling ClearState, because the ClearStateProgram will execute in that
// case, not the ApprovalProgram.
txn OnCompletion
int NoOp
==
bnz handle_noop
txn OnCompletion
int OptIn
==
bnz handle_optin
txn OnCompletion
int CloseOut
==
bnz handle_closeout
txn OnCompletion
int UpdateApplication
==
bnz handle_updateapp
txn OnCompletion
int DeleteApplication
==
bnz handle_deleteapp
// Unexpected OnCompletion value. Should be unreachable.
err
handle_noop:
// Handle NoOp
// Check for creator
addr 5XWY6RBNYHCSY2HK5HCTO62DUJJ4PT3G4L77FQEBUKE6ZYRGQAFTLZSQQ4
txn Sender
==
bnz handle_optin
// read global state
byte "counter"
dup
app_global_get
// increment the value
int 1
+
// store to scratch space
dup
store 0
// update global state
app_global_put
// read local state for sender
int 0
byte "counter"
app_local_get
// increment the value
int 1
+
store 1
// update local state for sender
int 0
byte "counter"
load 1
app_local_put
// load return value as approval
load 0
return
handle_optin:
// Handle OptIn
// approval
int 1
return
handle_closeout:
// Handle CloseOut
//approval
int 1
return
handle_deleteapp:
// Check for creator
addr 5XWY6RBNYHCSY2HK5HCTO62DUJJ4PT3G4L77FQEBUKE6ZYRGQAFTLZSQQ4
txn Sender
==
return
handle_updateapp:
// Check for creator
addr 5XWY6RBNYHCSY2HK5HCTO62DUJJ4PT3G4L77FQEBUKE6ZYRGQAFTLZSQQ4
txn Sender
==
return
"""
# user declared approval program (refactored)
approval_program_source_refactored = b"""#pragma version 2
// Handle each possible OnCompletion type. We don't have to worry about
// handling ClearState, because the ClearStateProgram will execute in that
// case, not the ApprovalProgram.
txn OnCompletion
int NoOp
==
bnz handle_noop
txn OnCompletion
int OptIn
==
bnz handle_optin
txn OnCompletion
int CloseOut
==
bnz handle_closeout
txn OnCompletion
int UpdateApplication
==
bnz handle_updateapp
txn OnCompletion
int DeleteApplication
==
bnz handle_deleteapp
// Unexpected OnCompletion value. Should be unreachable.
err
handle_noop:
// Handle NoOp
// Check for creator
addr 5XWY6RBNYHCSY2HK5HCTO62DUJJ4PT3G4L77FQEBUKE6ZYRGQAFTLZSQQ4
txn Sender
==
bnz handle_optin
// read global state
byte "counter"
dup
app_global_get
// increment the value
int 1
+
// store to scratch space
dup
store 0
// update global state
app_global_put
// read local state for sender
int 0
byte "counter"
app_local_get
// increment the value
int 1
+
store 1
// update local state for sender
// update "counter"
int 0
byte "counter"
load 1
app_local_put
// update "timestamp"
int 0
byte "timestamp"
txn ApplicationArgs 0
app_local_put
// load return value as approval
load 0
return
handle_optin:
// Handle OptIn
// approval
int 1
return
handle_closeout:
// Handle CloseOut
//approval
int 1
return
handle_deleteapp:
// Check for creator
addr 5XWY6RBNYHCSY2HK5HCTO62DUJJ4PT3G4L77FQEBUKE6ZYRGQAFTLZSQQ4
txn Sender
==
return
handle_updateapp:
// Check for creator
addr 5XWY6RBNYHCSY2HK5HCTO62DUJJ4PT3G4L77FQEBUKE6ZYRGQAFTLZSQQ4
txn Sender
==
return
"""
# declare clear state program source
clear_program_source = b"""#pragma version 2
int 1
"""
# helper function to compile program source
def compile_program(client, source_code) :
compile_response = client.compile(source_code.decode('utf-8'))
return base64.b64decode(compile_response['result'])
# helper function that converts a mnemonic passphrase into a private signing key
def get_private_key_from_mnemonic(mn) :
private_key = mnemonic.to_private_key(mn)
return private_key
# helper function that waits for a given txid to be confirmed by the network
def wait_for_confirmation(client, txid) :
last_round = client.status().get('last-round')
txinfo = client.pending_transaction_info(txid)
while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0):
print("Waiting for confirmation...")
last_round += 1
client.status_after_block(last_round)
txinfo = client.pending_transaction_info(txid)
print("Transaction {} confirmed in round {}.".format(txid, txinfo.get('confirmed-round')))
return txinfo
# create new application
def create_app(client, private_key, approval_program, clear_program, global_schema, local_schema) :
# define sender as creator
sender = account.address_from_private_key(private_key)
# declare on_complete as NoOp
on_complete = transaction.OnComplete.NoOpOC.real
# get node suggested parameters
params = client.suggested_params()
# comment out the next two (2) lines to use suggested fees
params.flat_fee = True
params.fee = 1000
# create unsigned transaction
txn = transaction.ApplicationCreateTxn(sender, params, on_complete, \
approval_program, clear_program, \
global_schema, local_schema)
# sign transaction
signed_txn = txn.sign(private_key)
tx_id = signed_txn.transaction.get_txid()
# send transaction
client.send_transactions([signed_txn])
# await confirmation
wait_for_confirmation(client, tx_id)
# display results
transaction_response = client.pending_transaction_info(tx_id)
app_id = transaction_response['application-index']
print("Created new app-id: ",app_id)
return app_id
# opt-in to application
def opt_in_app(client, private_key, index) :
# declare sender
sender = account.address_from_private_key(private_key)
print("OptIn from account: ",sender)
# get node suggested parameters
params = client.suggested_params()
# comment out the next two (2) lines to use suggested fees
params.flat_fee = True
params.fee = 1000
# create unsigned transaction
txn = transaction.ApplicationOptInTxn(sender, params, index)
# sign transaction
signed_txn = txn.sign(private_key)
tx_id = signed_txn.transaction.get_txid()
# send transaction
client.send_transactions([signed_txn])
# await confirmation
wait_for_confirmation(client, tx_id)
# display results
transaction_response = client.pending_transaction_info(tx_id)
print("OptIn to app-id: ",transaction_response['txn']['txn']['apid'])
# call application
def call_app(client, private_key, index, app_args) :
# declare sender
sender = account.address_from_private_key(private_key)
print("Call from account: ",sender)
# get node suggested parameters
params = client.suggested_params()
# comment out the next two (2) lines to use suggested fees
params.flat_fee = True
params.fee = 1000
# create unsigned transaction
txn = transaction.ApplicationNoOpTxn(sender, params, index, app_args)
# sign transaction
signed_txn = txn.sign(private_key)
tx_id = signed_txn.transaction.get_txid()
# send transaction
client.send_transactions([signed_txn])
# await confirmation
wait_for_confirmation(client, tx_id)
# display results
transaction_response = client.pending_transaction_info(tx_id)
print("Called app-id: ",transaction_response['txn']['txn']['apid'])
if "global-state-delta" in transaction_response :
print("Global State updated :\n",transaction_response['global-state-delta'])
if "local-state-delta" in transaction_response :
print("Local State updated :\n",transaction_response['local-state-delta'])
# read user local state
def read_local_state(client, addr, app_id) :
results = client.account_info(addr)
local_state = results['apps-local-state'][0]
for index in local_state :
if local_state[index] == app_id :
print(f"local_state of account {addr} for app_id {app_id}: ", local_state['key-value'])
# read app global state
def read_global_state(client, addr, app_id) :
results = client.account_info(addr)
apps_created = results['created-apps']
for app in apps_created :
if app['id'] == app_id :
print(f"global_state for app_id {app_id}: ", app['params']['global-state'])
# update existing application
def update_app(client, private_key, app_id, approval_program, clear_program) :
# declare sender
sender = account.address_from_private_key(private_key)
# # define initial value for key "timestamp"
# app_args = [b'initial value']
# get node suggested parameters
params = client.suggested_params()
# comment out the next two (2) lines to use suggested fees
params.flat_fee = True
params.fee = 1000
# create unsigned transaction
txn = transaction.ApplicationUpdateTxn(sender, params, app_id, \
approval_program, clear_program) #, app_args)
# sign transaction
signed_txn = txn.sign(private_key)
tx_id = signed_txn.transaction.get_txid()
# send transaction
client.send_transactions([signed_txn])
# await confirmation
wait_for_confirmation(client, tx_id)
# display results
transaction_response = client.pending_transaction_info(tx_id)
app_id = transaction_response['txn']['txn']['apid']
print("Updated existing app-id: ",app_id)
# delete application
def delete_app(client, private_key, index) :
# declare sender
sender = account.address_from_private_key(private_key)
# get node suggested parameters
params = client.suggested_params()
# comment out the next two (2) lines to use suggested fees
params.flat_fee = True
params.fee = 1000
# create unsigned transaction
txn = transaction.ApplicationDeleteTxn(sender, params, index)
# sign transaction
signed_txn = txn.sign(private_key)
tx_id = signed_txn.transaction.get_txid()
# send transaction
client.send_transactions([signed_txn])
# await confirmation
wait_for_confirmation(client, tx_id)
# display results
transaction_response = client.pending_transaction_info(tx_id)
print("Deleted app-id: ",transaction_response['txn']['txn']['apid'])
# close out from application
def close_out_app(client, private_key, index) :
# declare sender
sender = account.address_from_private_key(private_key)
# get node suggested parameters
params = client.suggested_params()
# comment out the next two (2) lines to use suggested fees
params.flat_fee = True
params.fee = 1000
# create unsigned transaction
txn = transaction.ApplicationCloseOutTxn(sender, params, index)
# sign transaction
signed_txn = txn.sign(private_key)
tx_id = signed_txn.transaction.get_txid()
# send transaction
client.send_transactions([signed_txn])
# await confirmation
wait_for_confirmation(client, tx_id)
# display results
transaction_response = client.pending_transaction_info(tx_id)
print("Closed out from app-id: ",transaction_response['txn']['txn']['apid'])
# clear application
def clear_app(client, private_key, index) :
# declare sender
sender = account.address_from_private_key(private_key)
# get node suggested parameters
params = client.suggested_params()
# comment out the next two (2) lines to use suggested fees
params.flat_fee = True
params.fee = 1000
# create unsigned transaction
txn = transaction.ApplicationClearStateTxn(sender, params, index)
# sign transaction
signed_txn = txn.sign(private_key)
tx_id = signed_txn.transaction.get_txid()
# send transaction
client.send_transactions([signed_txn])
# await confirmation
wait_for_confirmation(client, tx_id)
# display results
transaction_response = client.pending_transaction_info(tx_id)
print("Cleared app-id: ",transaction_response['txn']['txn']['apid'])
def main() :
# initialize an algodClient
algod_client = algod.AlgodClient(algod_token, algod_address)
# define private keys
creator_private_key = get_private_key_from_mnemonic(creator_mnemonic)
user_private_key = get_private_key_from_mnemonic(user_mnemonic)
# compile programs
approval_program = compile_program(algod_client, approval_program_source_initial)
clear_program = compile_program(algod_client, clear_program_source)
# create new application
app_id = create_app(algod_client, creator_private_key, approval_program, clear_program, global_schema, local_schema)
# opt-in to application
opt_in_app(algod_client, user_private_key, app_id)
# call application without arguments
call_app(algod_client, user_private_key, app_id, None)
# read local state of application from user account
read_local_state(algod_client, account.address_from_private_key(user_private_key), app_id)
# read global state of application
read_global_state(algod_client, account.address_from_private_key(creator_private_key), app_id)
# update application
approval_program = compile_program(algod_client, approval_program_source_refactored)
update_app(algod_client, creator_private_key, app_id, approval_program, clear_program)
# call application with arguments
now = datetime.datetime.now().strftime("%H:%M:%S")
app_args = [now.encode("utf-8")]
call_app(algod_client, user_private_key, app_id, app_args)
# read local state of application from user account
read_local_state(algod_client, account.address_from_private_key(user_private_key), app_id)
# close-out from application
close_out_app(algod_client, user_private_key, app_id)
# opt-in again to application
opt_in_app(algod_client, user_private_key, app_id)
# call application with arguments
call_app(algod_client, user_private_key, app_id, app_args)
# read local state of application from user account
read_local_state(algod_client, account.address_from_private_key(user_private_key), app_id)
# delete application
delete_app(algod_client, creator_private_key, app_id)
# clear application from user account
clear_app(algod_client, user_private_key, app_id)
main()
|
py | b40774ce8af07feb4ba364d6d8d6f7e0abf7faf2 | # -*- coding: utf-8 -*-
#
# Invoice Ninja documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 19 12:02:54 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Invoice Ninja'
copyright = u'2017, Invoice Ninja'
author = u'Invoice Ninja'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'4.5'
# The full version, including alpha/beta/rc tags.
release = u'4.5.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
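# For example, to switch to the Read the Docs theme (hedged sketch, assuming
# the sphinx_rtd_theme package is installed):
#
# import sphinx_rtd_theme
# html_theme = 'sphinx_rtd_theme'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]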
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Invoice Ninja v2.6.10'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'InvoiceNamedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'InvoiceName.tex', u'Invoice Ninja Documentation',
u'Hillel Coren', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'invoicename', u'Invoice Ninja Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'InvoiceName', u'Invoice Ninja Documentation',
author, 'InvoiceName', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
py | b40775c953a431e0427dcfffe1272f0fbf963d89 | from ryu import cfg
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet, arp, ipv4
from network_awareness import NetworkAwareness
CONF = cfg.CONF
class ShortestForward(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'network_awareness': NetworkAwareness
}
def __init__(self, *args, **kwargs):
super(ShortestForward, self).__init__(*args, **kwargs)
self.network_awareness = kwargs['network_awareness']
self.weight = CONF.weight
self.dpid_mac_port = {}
def add_flow(self, datapath, priority, match, actions, idle_timeout=0, hard_timeout=0):
dp = datapath
ofp = dp.ofproto
parser = dp.ofproto_parser
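        # Wrap the actions in an APPLY_ACTIONS instruction and push a flow
        # entry with the given priority and timeouts to the switch.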
inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = parser.OFPFlowMod(
datapath=dp, priority=priority,
idle_timeout=idle_timeout,
hard_timeout=hard_timeout,
match=match, instructions=inst)
dp.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
parser = dp.ofproto_parser
dpid = dp.id
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth_pkt = pkt.get_protocol(ethernet.ethernet)
arp_pkt = pkt.get_protocol(arp.arp)
ipv4_pkt = pkt.get_protocol(ipv4.ipv4)
pkt_type = eth_pkt.ethertype
# layer 2 self-learning
dst_mac = eth_pkt.dst
src_mac = eth_pkt.src
self.dpid_mac_port.setdefault(dpid, {})
self.dpid_mac_port[dpid][src_mac] = in_port
if isinstance(arp_pkt, arp.arp):
self.handle_arp(msg, in_port, dst_mac, pkt_type)
if isinstance(ipv4_pkt, ipv4.ipv4):
self.handle_ipv4(msg, ipv4_pkt.src, ipv4_pkt.dst, pkt_type)
def handle_arp(self, msg, in_port, dst_mac, pkt_type):
dp = msg.datapath
ofp = dp.ofproto
parser = dp.ofproto_parser
dpid = dp.id
if dst_mac in self.dpid_mac_port[dpid]:
out_port = self.dpid_mac_port[dpid][dst_mac]
actions = [parser.OFPActionOutput(out_port)]
out = parser.OFPPacketOut(
datapath=dp, buffer_id=msg.buffer_id, in_port=in_port, actions=actions, data=msg.data)
dp.send_msg(out)
else:
            # unknown destination: flood the ARP out of every host-facing switch port
for d, ports in self.network_awareness.port_info.items():
for p in ports:
# except the source host
if d == dpid and p == in_port:
continue
dp = self.network_awareness.switch_info[d]
actions = [parser.OFPActionOutput(p)]
out = parser.OFPPacketOut(
datapath=dp, buffer_id=msg.buffer_id, in_port=ofp.OFPP_CONTROLLER, actions=actions, data=msg.data)
dp.send_msg(out)
def handle_ipv4(self, msg, src_ip, dst_ip, pkt_type):
parser = msg.datapath.ofproto_parser
dpid_path = self.network_awareness.shortest_path(src_ip, dst_ip, weight=self.weight)
if not dpid_path:
return
# get port path: h1 -> in_port, s1, out_port -> h2
port_path = []
for i in range(1, len(dpid_path) - 1):
in_port = self.network_awareness.link_info[(dpid_path[i], dpid_path[i - 1])]
out_port = self.network_awareness.link_info[(dpid_path[i], dpid_path[i + 1])]
port_path.append((in_port, dpid_path[i], out_port))
# calc path delay
if self.weight == 'delay':
delay = 0
for i in range(len(dpid_path) - 1):
delay += self.network_awareness.topo_map[dpid_path[i]][dpid_path[i + 1]]['delay']
self.logger.info('total path delay: {}'.format(delay))
self.show_path(src_ip, dst_ip, port_path)
# send flow mod
for node in port_path:
in_port, dpid, out_port = node
self.send_flow_mod(parser, dpid, pkt_type, src_ip, dst_ip, in_port, out_port)
self.send_flow_mod(parser, dpid, pkt_type, dst_ip, src_ip, out_port, in_port)
# send packet_out
_, dpid, out_port = port_path[-1]
dp = self.network_awareness.switch_info[dpid]
actions = [parser.OFPActionOutput(out_port)]
out = parser.OFPPacketOut(
datapath=dp, buffer_id=msg.buffer_id, in_port=in_port, actions=actions, data=msg.data)
dp.send_msg(out)
def send_flow_mod(self, parser, dpid, pkt_type, src_ip, dst_ip, in_port, out_port):
dp = self.network_awareness.switch_info[dpid]
match = parser.OFPMatch(
in_port=in_port, eth_type=pkt_type, ipv4_src=src_ip, ipv4_dst=dst_ip)
actions = [parser.OFPActionOutput(out_port)]
self.add_flow(dp, 1, match, actions, 10, 30)
def show_path(self, src, dst, port_path):
self.logger.info('path: {} -> {}'.format(src, dst))
path = src + ' -> '
for node in port_path:
path += '{}:s{}:{}'.format(*node) + ' -> '
path += dst
self.logger.info(path)
self.logger.info('\n')
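# Illustrative launch sketch (file names are placeholders; exact options may
# differ depending on how NetworkAwareness registers its config):
#     ryu-manager --observe-links network_awareness.py shortest_forward.py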
|
py | b4077639f670f8c01cb55171916f17fb5d765081 | # Generated by Django 2.1.3 on 2018-12-07 08:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('spider', '0002_auto_20181207_1304'),
]
operations = [
migrations.CreateModel(
name='spiderLogModel',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('log', models.TextField(default='空', verbose_name='信息')),
('log_type', models.IntegerField(choices=[(0, 'Info'), (1, 'Warnings'), (2, 'Errors')], verbose_name='日志类型')),
('time', models.DateTimeField(auto_now=True, verbose_name='发生时间')),
('spider_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='spider.spiderModel')),
],
),
]
|
py | b407772775457755b4556c7e218d28f847ecd5d3 | from __future__ import annotations
import re
from typing import TYPE_CHECKING, List, Literal, Optional, Union
import requests
from bs4 import BeautifulSoup
__all__ = (
"scrape_discord_username",
"alliance_bank_withdraw",
"scrape_treaties",
"scrape_treaty_web",
)
if TYPE_CHECKING:
from typing import TypedDict
class TreatyData(TypedDict):
from_: int
to_: int
treaty_type: str
def scrape_discord_username(nation_id: int, /) -> Optional[str]:
"""Scrape a nation page for the discord username
Parameters
----------
nation_id : int
The nation ID to scrape.
Returns
-------
Optional[str]
The discord username, or None if not found.
"""
try:
response = requests.request(
"GET", f"https://politicsandwar.com/nation/id={nation_id}"
)
return [
i.contents[1].text # type: ignore
for i in BeautifulSoup(response.text, "html.parser").find_all(
"tr", class_="notranslate"
)
if any("Discord Username:" in str(j) for j in i.contents) # type: ignore
][0]
except IndexError:
return None
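# Illustrative usage sketch (the nation id is a made-up example):
#     username = scrape_discord_username(1234)
#     print(username)  # the Discord tag string, or None if the page has none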
def alliance_bank_withdraw(
email: str,
password: str,
alliance_id: int,
receiver: str,
receiver_type: Literal["alliance", "nation"],
note: Optional[str] = None,
**resources: Union[int, float, str],
) -> bool:
"""Send money from an alliance bank.
Parameters
----------
email : str
The email of the account to use.
password : str
The password of the account to use.
alliance_id : int
The alliance ID to send from.
receiver : str
The receiver of the withdrawal, must be a nation or alliance name.
receiver_type : Literal["alliance", "nation"]
The type of receiver, either "alliance" or "nation".
note : Optional[str], optional
The note to send with the withdrawal, by default no note is sent.
**resources : Union[int, float, str]
The resources to send, specified as kwargs. (i.e. money=100)
Returns
-------
bool
Whether or not the withdrawal was successful.
"""
with requests.Session() as session:
transaction_data = {f"with{key}": value for key, value in resources.items()}
transaction_data["withtype"] = receiver_type.capitalize()
if note is not None:
transaction_data["withnote"] = note
transaction_data["withrecipient"] = receiver
transaction_data["withsubmit"] = "Withdraw"
login_data = {
"email": email,
"password": password,
"loginform": "Login",
}
response = session.request(
"POST", "https://politicsandwar.com/login/", data=login_data
)
if "login failure" in response.text.lower():
return False
response = session.request(
"POST",
f"https://politicsandwar.com/alliance/id={alliance_id}&display=bank",
data=transaction_data,
)
content = response.text
if "Something went wrong" in content:
transaction_data["token"] = BeautifulSoup(content, "html.parser").find("input", {"name": "token"}).attrs["value"] # type: ignore
response = session.request(
"POST",
f"https://politicsandwar.com/alliance/id={alliance_id}&display=bank",
data=transaction_data,
)
content = response.text
return "successfully transferred" in content
def scrape_treaties(alliance_id: int, /) -> List[TreatyData]:
"""Scrape the treaties of an alliance.
Parameters
----------
alliance_id : int
The alliance ID of the alliance to scrape
Returns
-------
List[TreatyData]
A list of treaties, each treaty is a dict with the keys "from_", "to_", and "treaty_type".
"""
response = requests.request(
"GET", f"https://politicsandwar.com/alliance/id={alliance_id}"
)
text = response.text
matches = re.findall(
r"'from':(\d*), 'to':(\d*), 'color':'\#[\d|\w]*', 'length':\d*, 'title':'(\w*)'",
text,
)
return [
{"from_": int(i[0]), "to_": int(i[1]), "treaty_type": i[2]} for i in matches
]
def scrape_treaty_web() -> List[TreatyData]:
"""Scrape the treaty web
Returns
-------
List[TreatyData]
A list of treaties, each treaty is a dict with the keys "from_", "to_", and "treaty_type".
"""
response = requests.request(
"GET", "https://politicsandwar.com/alliances/treatyweb/all"
)
text = response.text
matches = re.findall(
r"'from':(\d*), 'to':(\d*), 'color':'\#[\d|\w]*', 'length':\d*, 'title':'(\w*)'",
text,
)
return [
{"from_": int(i[0]), "to_": int(i[1]), "treaty_type": i[2]} for i in matches
]
|
py | b40777bf5778dd7225e6b5c81d8cc014b23ab742 | from abc import ABC, abstractmethod
import os
from torch.utils.data import TensorDataset, DataLoader
import torch
class MusicDataset(ABC):
"""
Abstract Base Class for music datasets
"""
def __init__(self, cache_dir):
self._tensor_dataset = None
self.cache_dir = cache_dir
@abstractmethod
def iterator_gen(self):
"""
        :return: Iterator over the dataset
"""
pass
@abstractmethod
def make_tensor_dataset(self):
"""
:return: TensorDataset
"""
pass
@abstractmethod
def get_score_tensor(self, score):
"""
:param score: music21 score object
:return: torch tensor, with the score representation
as a tensor
"""
pass
@abstractmethod
def get_metadata_tensor(self, score):
"""
:param score: music21 score object
:return: torch tensor, with the metadata representation
as a tensor
"""
pass
@abstractmethod
def transposed_score_and_metadata_tensors(self, score, semi_tone):
"""
:param score: music21 score object
        :param semi_tone: int, +12 to -12, semitones to transpose
:return: Transposed score shifted by the semi-tone
"""
pass
@abstractmethod
def extract_score_tensor_with_padding(self,
tensor_score,
start_tick,
end_tick):
"""
:param tensor_score: torch tensor containing the score representation
:param start_tick:
:param end_tick:
:return: tensor_score[:, start_tick: end_tick]
with padding if necessary
i.e. if start_tick < 0 or end_tick > tensor_score length
"""
pass
@abstractmethod
def extract_metadata_with_padding(self,
tensor_metadata,
start_tick,
end_tick):
"""
:param tensor_metadata: torch tensor containing metadata
:param start_tick:
:param end_tick:
:return:
"""
pass
@abstractmethod
def empty_score_tensor(self, score_length):
"""
:param score_length: int, length of the score in ticks
:return: torch long tensor, initialized with start indices
"""
pass
@abstractmethod
def random_score_tensor(self, score_length):
"""
:param score_length: int, length of the score in ticks
:return: torch long tensor, initialized with random indices
"""
pass
@abstractmethod
def tensor_to_score(self, tensor_score):
"""
:param tensor_score: torch tensor, tensor representation
of the score
:return: music21 score object
"""
pass
@property
def tensor_dataset(self):
"""
Loads or computes TensorDataset
:return: TensorDataset
"""
if self._tensor_dataset is None:
if self.tensor_dataset_is_cached():
print(f'Loading TensorDataset for {self.__repr__()}')
self._tensor_dataset = torch.load(self.tensor_dataset_filepath)
else:
print(f'Creating {self.__repr__()} TensorDataset'
f' since it is not cached')
self._tensor_dataset = self.make_tensor_dataset()
torch.save(self._tensor_dataset, self.tensor_dataset_filepath)
print(f'TensorDataset for {self.__repr__()} '
f'saved in {self.tensor_dataset_filepath}')
return self._tensor_dataset
@tensor_dataset.setter
def tensor_dataset(self, value):
self._tensor_dataset = value
def tensor_dataset_is_cached(self):
return os.path.exists(self.tensor_dataset_filepath)
@property
def tensor_dataset_filepath(self):
tensor_datasets_cache_dir = os.path.join(
self.cache_dir,
'tensor_datasets')
if not os.path.exists(tensor_datasets_cache_dir):
os.mkdir(tensor_datasets_cache_dir)
fp = os.path.join(
tensor_datasets_cache_dir,
self.__repr__()
)
return fp
@property
def filepath(self):
tensor_datasets_cache_dir = os.path.join(
self.cache_dir,
'datasets')
if not os.path.exists(tensor_datasets_cache_dir):
os.mkdir(tensor_datasets_cache_dir)
return os.path.join(
self.cache_dir,
'datasets',
self.__repr__()
)
def data_loaders(self, batch_size, split=(0.85, 0.10)):
"""
Returns three data loaders obtained by splitting
self.tensor_dataset according to split
:param batch_size:
:param split:
:return:
"""
assert sum(split) < 1
dataset = self.tensor_dataset
num_examples = len(dataset)
a, b = split
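        # Slice the tensor dataset into contiguous train / validation / eval
        # chunks with proportions a, b and 1 - a - b respectively.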
train_dataset = TensorDataset(*dataset[: int(a * num_examples)])
val_dataset = TensorDataset(*dataset[int(a * num_examples):
int((a + b) * num_examples)])
eval_dataset = TensorDataset(*dataset[int((a + b) * num_examples):])
train_dl = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=4,
pin_memory=True,
drop_last=True,
)
val_dl = DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=0,
pin_memory=False,
drop_last=True,
)
eval_dl = DataLoader(
eval_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=0,
pin_memory=False,
drop_last=True,
)
return train_dl, val_dl, eval_dl
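# Illustrative usage sketch (FooDataset stands for a hypothetical concrete
# subclass implementing the abstract methods above):
#     dataset = FooDataset(cache_dir='cache')
#     train_dl, val_dl, eval_dl = dataset.data_loaders(batch_size=64)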
|
py | b4077834833ff15c146bae953d17a2cb20e09338 | """This module is made to parse osc metadata file version 2"""
from metadata_models import *
class MetadataParser:
"""MetadataParser is a class capable of parsing a Metadata File compatible with the
known MetadataItem versions"""
def __init__(self, file_path: str):
self.file_path = file_path
self._body_pointer: int = None
self._data_pointer: int = None
self._device_item: Device = None
self._metadata_version = None
self._alias_definitions: {str: MetadataItemDefinition} = {}
self._configure_headers()
def start_new_reading(self):
"""This method sets the reading file pointer to the body section of the metadata"""
self._data_pointer = self._body_pointer
def next_item_with_class(self, item_class) -> MetadataItem:
"""this method returns the next metadata item found in the current metadata file"""
with open(self.file_path) as metadata_file:
metadata_file.seek(self._data_pointer)
item = None
line = metadata_file.readline()
while line and "END" not in line:
timestamp, alias, item_data = self._timestamp_alias_data_from_row(line)
if alias == item_class.definition.alias:
definition = self._alias_definitions[alias]
item_parser = definition.parsers[0]
item = item_parser.parse(item_data, timestamp)
break
line = metadata_file.readline()
self._data_pointer = metadata_file.tell()
return item
return None
def all_photos(self):
"""this method returns all the photos from the current metadata file"""
return self._items_with_class(Photo)
def next_metadata_item(self) -> MetadataItem:
"""Device name is found in Device item"""
with open(self.file_path) as metadata_file:
metadata_file.seek(self._data_pointer)
line = metadata_file.readline()
timestamp, alias, item_data = self._timestamp_alias_data_from_row(line)
definition = self._alias_definitions[alias]
parser = definition.parsers[0]
self._data_pointer = metadata_file.tell()
return parser.parse(item_data, timestamp)
return None
def metadata_version(self) -> str:
"""According to the documentation the version is found in the first line
        e.g. METADATA:2.0"""
if self._metadata_version is not None:
return self._metadata_version
if "METADATA:" not in self.header_line:
return None
header_elements = self.header_line.split(":")
if len(header_elements) != 2:
return None
        return header_elements[1].strip()
def device_name(self) -> str:
"""Device name is found in Device item """
if self._device_item:
return self._device_item.device_raw_name
device_items = self._items_with_class(Device)
if device_items:
self._device_item = device_items[0]
return self._device_item.device_raw_name
def recording_type(self) -> str:
"""Recording type is found in Device item """
if self._device_item:
return self._device_item.recording_type
device_items = self._items_with_class(Device)
if device_items:
self._device_item = device_items[0]
return self._device_item.recording_type
def os_version(self) -> str:
"""OS version is found in Device item """
if self._device_item:
return self._device_item.os_version
device_items = self._items_with_class(Device)
if device_items:
self._device_item = device_items[0]
return self._device_item.os_version
# <editor-fold desc="Private">
def _configure_headers(self):
with open(self.file_path) as metadata_file:
self.header_line = metadata_file.readline()
line = metadata_file.readline()
if "HEADER" not in line:
return None
# find the definition lines
line = metadata_file.readline()
while line and "BODY" not in line:
if "ALIAS:" not in line:
return None
alias_line_elements = line.split(":")
if ";" not in alias_line_elements[1]:
return None
definition = MetadataItemDefinition.definition_from_row(line)
self._alias_definitions[definition.alias] = definition
line = metadata_file.readline()
self._body_pointer = metadata_file.tell()
self.start_new_reading()
@classmethod
def _timestamp_alias_data_from_row(cls, row) -> (str, str, str):
if ":" not in row:
return None
elements = row.split(":")
if len(elements) != 3:
return None
timestamp = elements[0]
item_alias = elements[1]
item_data = elements[2]
return timestamp, item_alias, item_data
def _items_with_class(self, item_class) -> [MetadataItem]:
alias = item_class.definition.alias
with open(self.file_path) as metadata_file:
metadata_file.seek(self._body_pointer)
item_instances = []
for line in metadata_file:
name = ":" + alias + ":"
if name in line and item_class.definition.parsers:
response = self._timestamp_alias_data_from_row(line)
timestamp = response[0]
item_data = response[2]
parser = item_class.definition.parsers[0]
item_instance = parser.parse(item_data, timestamp)
item_instances.append(item_instance)
return item_instances
# </editor-fold>
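# Illustrative usage sketch (the path is a placeholder for a real metadata file):
#     parser = MetadataParser("sequence/metadata.txt")
#     print(parser.metadata_version(), parser.device_name())
#     parser.start_new_reading()
#     photos = parser.all_photos()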
|
py | b40779f11d1d99a81b544beec44e2d4e9fe081ca | # Generated by Django 4.0.3 on 2022-03-31 05:46
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('utility', '0003_rename_contractmanagerphone_addcontractmanager_contractmanagerphoneno'),
]
operations = [
migrations.CreateModel(
name='booth_manager',
fields=[
('Id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=50, unique=True)),
('phone_no', phonenumber_field.modelfields.PhoneNumberField(max_length=12, region=None, unique=True)),
('aadhaar_no', models.CharField(max_length=50, unique=True)),
('password', models.CharField(max_length=50)),
('confirm_password', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='candidate',
fields=[
('candidate_id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('phone_no', models.IntegerField()),
('email', models.EmailField(max_length=200)),
('aadhaar_no', models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name='candidate_constituency',
fields=[
('Id', models.AutoField(primary_key=True, serialize=False)),
('candidate_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.candidate')),
],
),
migrations.CreateModel(
name='candidate_party',
fields=[
('Id', models.AutoField(primary_key=True, serialize=False)),
('candidate_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.candidate')),
],
),
migrations.CreateModel(
name='constituency',
fields=[
('constituency_id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
('name', models.CharField(max_length=200)),
('State', models.CharField(max_length=200)),
('total_voters', models.IntegerField()),
],
),
migrations.CreateModel(
name='constituency_type',
fields=[
('Id', models.AutoField(primary_key=True, serialize=False)),
('constituency_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.constituency')),
],
),
migrations.CreateModel(
name='contract_manager',
fields=[
('Id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=50)),
('phone_no', phonenumber_field.modelfields.PhoneNumberField(max_length=12, region=None, unique=True)),
('aadhaar_no', models.CharField(max_length=50, unique=True)),
('password', models.CharField(max_length=50)),
('confirm_password', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='election_type',
fields=[
('Id', models.AutoField(primary_key=True, serialize=False)),
('type_of_election', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='party',
fields=[
('Id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('symbol', models.ImageField(upload_to='images')),
],
),
migrations.CreateModel(
name='voter',
fields=[
('voter_id', models.IntegerField(primary_key=True, serialize=False)),
('voter_aadhaar_no', models.CharField(max_length=50, unique=True)),
('name', models.CharField(max_length=100)),
('age', models.IntegerField()),
('address', models.TextField(max_length=200)),
('email', models.EmailField(blank=True, max_length=50)),
('phone_no', phonenumber_field.modelfields.PhoneNumberField(max_length=12, region=None, unique=True)),
],
),
migrations.CreateModel(
name='voter_constituency',
fields=[
('Id', models.AutoField(primary_key=True, serialize=False)),
('loksabha_id', models.IntegerField()),
('vidhansabha_id', models.IntegerField()),
('voter_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.voter')),
],
),
migrations.CreateModel(
name='voter_vote_status',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('casted_vote', models.BooleanField()),
('voter_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.voter')),
],
),
migrations.CreateModel(
name='votes',
fields=[
('Id', models.AutoField(primary_key=True, serialize=False)),
('total_votes', models.IntegerField()),
('candidate_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.candidate')),
],
),
migrations.DeleteModel(
name='AddContractManager',
),
migrations.AddField(
model_name='constituency_type',
name='election_type_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.election_type'),
),
migrations.AddField(
model_name='candidate_party',
name='party_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.party'),
),
migrations.AddField(
model_name='candidate_constituency',
name='constituency_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.constituency'),
),
migrations.AddField(
model_name='candidate_constituency',
name='election_type_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.election_type'),
),
migrations.AddField(
model_name='booth_manager',
name='constituency_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='utility.constituency'),
),
]
|
py | b4077b74abde60b8c0e4199af4d2dba3d0dba585 | from flask import Blueprint, jsonify
from dashboard.db import get_db
bp = Blueprint('datalog', __name__, url_prefix='/datalog')
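# Each endpoint below returns a JSON object with parallel "date", "temp", "hum"
# and "soil" arrays: per-minute averages over the last hour, per-hour averages
# over the last day, and per-day averages over the last month respectively.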
@bp.route("/lasthour", methods=["GET"])
def lasthour():
db = get_db()
sensor_data = {"date": [], "temp": [], "hum": [], "soil": []}
try:
with db.cursor() as cursor:
select_cmd = "SELECT `date`, AVG(`temp`) AS `temp`, AVG(`hum`) AS `hum`, AVG(`soil`) AS `soil` FROM `sensor_log` WHERE `date` > NOW() - INTERVAL 1 HOUR GROUP BY YEAR(`date`), MONTH(`date`), DAY(`date`), HOUR(`date`), MINUTE(`date`)"
cursor.execute(select_cmd)
for point in cursor:
sensor_data["date"].append(point["date"].isoformat())
sensor_data["temp"].append(round(point["temp"], 2))
sensor_data["hum"].append(round(point["hum"], 2))
sensor_data["soil"].append(round(point["soil"], 2))
except Exception as e:
return "Error: " + str(e)
return jsonify(sensor_data)
@bp.route("/hourly", methods=["GET"])
def hourly():
db = get_db()
sensor_data = {"date": [], "temp": [], "hum": [], "soil": []}
try:
with db.cursor() as cursor:
select_cmd = "SELECT `date`, AVG(`temp`) AS `temp`, AVG(`hum`) AS `hum`, AVG(`soil`) AS `soil` FROM `sensor_log` WHERE `date` > NOW() - INTERVAL 1 DAY GROUP BY YEAR(`date`), MONTH(`date`), DAY(`date`), HOUR(`date`)"
cursor.execute(select_cmd)
for point in cursor:
sensor_data["date"].append(point["date"].isoformat())
sensor_data["temp"].append(round(point["temp"], 2))
sensor_data["hum"].append(round(point["hum"], 2))
sensor_data["soil"].append(round(point["soil"], 2))
except Exception as e:
return "Error: " + str(e)
return jsonify(sensor_data)
@bp.route("/daily", methods=["GET"])
def daily():
db = get_db()
sensor_data = {"date": [], "temp": [], "hum": [], "soil": []}
try:
with db.cursor() as cursor:
select_cmd = "SELECT `date`, AVG(`temp`) AS `temp`, AVG(`hum`) AS `hum`, AVG(`soil`) AS `soil` FROM `sensor_log` WHERE `date` > NOW() - INTERVAL 1 MONTH GROUP BY YEAR(`date`), MONTH(`date`), DAY(`date`)"
cursor.execute(select_cmd)
for point in cursor:
sensor_data["date"].append(point["date"].isoformat())
sensor_data["temp"].append(round(point["temp"], 2))
sensor_data["hum"].append(round(point["hum"], 2))
sensor_data["soil"].append(round(point["soil"], 2))
except Exception as e:
return "Error: " + str(e)
return jsonify(sensor_data)
|
py | b4077bf7ac0be8477fbaea8cda771cbd0d676fbe | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from os import fstat
from .utils import *
def ReadEncodedString(f, ID, limit, lensize=2):
if lensize == 2:
# 2 bytes length + string without \0
namelen = np.fromfile(f, dtype=np.uint16, count=1)[0]
elif lensize == 4:
# 2 bytes length + string without \0
namelen = np.fromfile(f, dtype=np.uint32, count=1)[0]
else:
print("CODING ERROR: bp4dbp_data.ReadEncodedString: "
"lensize must be 2 or 4")
return False, ""
if (namelen > limit):
print("ERROR: " + ID + " string length ({0}) is longer than the "
"limit to stay inside the block ({1})".format(
namelen, limit))
return False, ""
name = f.read(namelen).decode('ascii')
return True, name
def ReadEncodedStringArray(f, ID, limit, nStrings):
s = []
for i in range(nStrings):
# 2 bytes length + string
# !!! String here INCLUDES Terminating \0 !!!
namelen = np.fromfile(f, dtype=np.uint32, count=1)[0]
if (namelen > limit - 4):
print("ERROR: " + ID + " string length ({0}) is longer than the "
"limit to stay inside the block ({1})".format(
namelen, limit - 4))
return False, s
name = f.read(namelen).decode('ascii')
limit = limit - namelen - 4
s.append(name[0:-1]) # omit the terminating \0
return True, s
def readDataToNumpyArray(f, typeName, nElements):
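    # Map a BP type name to the matching numpy dtype and read nElements values
    # from the current file position.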
if typeName == 'byte':
return np.fromfile(f, dtype=np.int8, count=nElements)
elif typeName == 'short':
return np.fromfile(f, dtype=np.int16, count=nElements)
elif typeName == 'integer':
return np.fromfile(f, dtype=np.int32, count=nElements)
elif typeName == 'long':
return np.fromfile(f, dtype=np.int64, count=nElements)
elif typeName == 'unsigned_byte':
return np.fromfile(f, dtype=np.uint8, count=nElements)
elif typeName == 'unsigned_short':
return np.fromfile(f, dtype=np.uint16, count=nElements)
elif typeName == 'unsigned_integer':
return np.fromfile(f, dtype=np.uint32, count=nElements)
elif typeName == 'unsigned_long':
return np.fromfile(f, dtype=np.uint64, count=nElements)
elif typeName == 'real':
return np.fromfile(f, dtype=np.float32, count=nElements)
elif typeName == 'double':
return np.fromfile(f, dtype=np.float64, count=nElements)
elif typeName == 'long_double':
return np.fromfile(f, dtype=np.float128, count=nElements)
elif typeName == 'complex':
return np.fromfile(f, dtype=np.complex64, count=nElements)
elif typeName == 'double_complex':
return np.fromfile(f, dtype=np.complex128, count=nElements)
else:
return np.zeros(1, dtype=np.uint32)
def ReadCharacteristicsFromData(f, limit, typeID, ndim):
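    # Characteristics carry per-block metadata (value/min/max statistics,
    # payload offsets, time/file indices); each entry is a 1-byte type id
    # followed by type-specific data.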
cStartPosition = f.tell()
dataTypeName = GetTypeName(typeID)
# 1 byte NCharacteristics
nCharacteristics = np.fromfile(f, dtype=np.uint8, count=1)[0]
print(" # of Characteristics : {0}".format(nCharacteristics))
# 4 bytes length
charLen = np.fromfile(f, dtype=np.uint32, count=1)[0]
print(" Characteristics Length : {0}".format(charLen))
for i in range(nCharacteristics):
print(" Characteristics[{0}]".format(i))
# 1 byte TYPE
cID = np.fromfile(f, dtype=np.uint8, count=1)[0]
cName = GetCharacteristicName(cID)
print(" Type : {0} ({1}) ".format(cName, cID))
if cName == 'value' or cName == 'min' or cName == 'max':
if dataTypeName == 'string':
namelimit = limit - (f.tell() - cStartPosition)
status, s = ReadEncodedString(f, "String Value", namelimit)
if not status:
return False
print(" Value : '" + s + "'")
else:
data = readDataToNumpyArray(f, dataTypeName, 1)
print(" Value : {0}".format(data[0]))
elif cName == 'offset' or cName == 'payload_offset':
data = readDataToNumpyArray(f, 'unsigned_long', 1)
print(" Value : {0}".format(data[0]))
elif cName == 'time_index' or cName == 'file_index':
data = readDataToNumpyArray(f, 'unsigned_integer', 1)
print(" Value : {0}".format(data[0]))
elif cName == 'minmax':
nBlocks = np.fromfile(f,
dtype=np.uint16, count=1)[0]
print(" nBlocks : {0}".format(nBlocks))
bminmax = readDataToNumpyArray(f, dataTypeName, 2)
print(" Min/max : {0} / {1}".format(
bminmax[0], bminmax[1]))
if nBlocks > 1:
method = np.fromfile(f, dtype=np.uint8,
count=1)[0]
print(" Division method: {0}".format(method))
blockSize = np.fromfile(f, dtype=np.uint64,
count=1)[0]
print(" Block size : {0}".format(blockSize))
div = np.fromfile(f, dtype=np.uint16,
count=ndim)
print(" Division vector: (", end="")
for d in range(ndim):
print("{0}".format(div[d]), end="")
if d < ndim - 1:
print(", ", end="")
else:
print(")")
minmax = readDataToNumpyArray(
f, dataTypeName, 2 * nBlocks)
for i in range(nBlocks):
print(" Min/max : {0} / {1}".format(
minmax[2 * i], minmax[2 * i + 1]))
else:
print(" ERROR: could not understand this "
"characteristics type '{0}' id {1}".format(cName, cID))
return True
# Read String Variable data
def ReadStringVarData(f, expectedSize,
varsStartPosition):
# 2 bytes String Length
len = np.fromfile(f, dtype=np.uint16, count=1)[0]
if len != expectedSize - 2:
print("ERROR: Variable data block size does not equal the size "
"calculated from var block length")
print("Expected size = {0} calculated size "
"from encoded length info {1}".
format(expectedSize, len + 2))
return False
str = f.read(len).decode('ascii')
print(" Variable Data : '" + str + "'")
return True
# Read Variable data
def ReadVarData(f, nElements, typeID, ldims, varLen,
varsStartPosition, varsTotalLength):
if typeID == 9: # string type
return ReadStringVarData(f, varLen, varsStartPosition)
typeSize = GetTypeSize(typeID)
if (typeSize == 0):
print("ERROR: Cannot process variable data block with "
"unknown type size")
return False
currentPosition = f.tell()
print(" Payload offset : {0}".format(currentPosition))
if (currentPosition + varLen > varsStartPosition + varsTotalLength):
print("ERROR: Variable data block of size would reach beyond all "
"variable blocks")
print("VarsStartPosition = {0} varsTotalLength = {1}".format(
varsStartPosition, varsTotalLength))
print("current Position = {0} var block length = {1}".format(
currentPosition, varLen))
return False
nBytes = int(varLen.item())
if nElements == 1:
# single value. read and print
value = readDataToNumpyArray(f, GetTypeName(typeID),
nElements)
print(" Payload (value) : {0} ({1} bytes)".format(
value[0], nBytes))
else:
# seek instead of reading for now
# f.read(nBytes)
f.seek(nBytes, 1)
# data = readDataToNumpyArray(f, GetTypeName(typeID),
# nElements)
print(" Payload (array) : {0} bytes".format(nBytes))
return True
# Read a variable's metadata
def ReadVMD(f, varidx, varsStartPosition, varsTotalLength):
startPosition = f.tell()
print(" Var {0:5d}".format(varidx))
print(" Starting offset : {0}".format(startPosition))
# 4 bytes TAG
tag = f.read(4)
if (tag != b"[VMD"):
print(" Tag: " + str(tag))
print("ERROR: VAR group does not start with [VMD")
return False
print(" Tag : " + tag.decode('ascii'))
# 8 bytes VMD Length
vmdlen = np.fromfile(f, dtype=np.uint64, count=1)[0]
print(" Var block size : {0} bytes (+4 for Tag)".format(vmdlen))
expectedVarBlockLength = vmdlen + 4 # [VMD is not included in vmdlen
if (startPosition + expectedVarBlockLength >
varsStartPosition + varsTotalLength):
print("ERROR: There is not enough bytes inside this PG to read "
"this Var block")
print("VarsStartPosition = {0} varsTotalLength = {1}".format(
varsStartPosition, varsTotalLength))
print("current var's start position = {0} var block length = {1}".
format(startPosition, expectedVarBlockLength))
return False
# 4 bytes VAR MEMBER ID
memberID = np.fromfile(f, dtype=np.uint32, count=1)[0]
print(" Member ID : {0}".format(memberID))
# VAR NAME, 2 bytes length + string without \0
sizeLimit = expectedVarBlockLength - (f.tell() - startPosition)
status, varname = ReadEncodedString(f, "Var Name", sizeLimit)
if not status:
return False
print(" Var Name : " + varname)
# VAR PATH, 2 bytes length + string without \0
# sizeLimit = expectedVarBlockLength - (f.tell() - startPosition)
# status, varpath = ReadEncodedString(f, "Var Path", sizeLimit)
# if not status:
# return False
# print(" Var Path : " + varpath)
# 1 byte ORDER (K, C, F)
order = f.read(1)
if (order != b'K' and order != b'C' and order != b'F' and order != b'\x00'):
print(
"ERROR: Next byte for Order must be 'K', 'C', or 'F' "
"but it isn't = {0}".format(order))
return False
if (order == b'\x00'):
order = b'0'
print(" Order : " + order.decode('ascii'))
# 1 byte UNUSED
unused = f.read(1)
print(" Unused byte : {0}".format(ord(unused)))
# 1 byte TYPE
typeID = np.fromfile(f, dtype=np.uint8, count=1)[0]
print(" Type : {0} ({1}) ".format(
GetTypeName(typeID), typeID))
# ISDIMENSIONS 1 byte, 'y' or 'n'
isDimensionVar = f.read(1)
if (isDimensionVar != b'y' and isDimensionVar != b'n'):
print(
"ERROR: Next byte for isDimensionVar must be 'y' or 'n' "
"but it isn't = {0}".format(isDimensionVar))
return False
print(" isDimensionVar : " + isDimensionVar.decode('ascii'))
# 1 byte NDIMENSIONS
ndims = np.fromfile(f, dtype=np.uint8, count=1)[0]
print(" # of Dimensions : {0}".format(
ndims))
# DIMLENGTH
dimsLen = np.fromfile(f, dtype=np.uint16, count=1)[0]
print(" Dims Length : {0}".format(
dimsLen))
nElements = np.uint64(1)
ldims = np.zeros(ndims, dtype=np.uint64)
isLocalValueArray = False
for i in range(ndims):
print(" Dim[{0}]".format(i))
# Read Local Dimensions (1 byte flag + 8 byte value)
# Is Dimension a variable ID 1 byte, 'y' or 'n' or '\0'
isDimensionVarID = f.read(1)
if (isDimensionVarID != b'y' and isDimensionVarID != b'n' and
isDimensionVarID != b'\0'):
print(
"ERROR: Next byte for isDimensionVarID must be 'y' or 'n' "
"but it isn't = {0}".format(isDimensionVarID))
return False
if (isDimensionVarID == b'\0'):
isDimensionVarID = b'n'
ldims[i] = np.fromfile(f, dtype=np.uint64, count=1)[0]
print(" local dim : {0}".format(ldims[i]))
nElements = nElements * ldims[i]
# Read Global Dimensions (1 byte flag + 8 byte value)
# Is Dimension a variable ID 1 byte, 'y' or 'n' or '\0'
isDimensionVarID = f.read(1)
if (isDimensionVarID != b'y' and isDimensionVarID != b'n' and
isDimensionVarID != b'\0'):
print(
"ERROR: Next byte for isDimensionVarID must be 'y' or 'n' "
"but it isn't = {0}".format(isDimensionVarID))
return False
if (isDimensionVarID == b'\0'):
isDimensionVarID = b'n'
gdim = np.fromfile(f, dtype=np.uint64, count=1)[0]
if i == 0 and ldims[i] == 0 and gdim == LocalValueDim:
print(" global dim : LocalValueDim ({0})".format(gdim))
isLocalValueArray = True
else:
print(" global dim : {0}".format(gdim))
# Read Offset Dimensions (1 byte flag + 8 byte value)
# Is Dimension a variable ID 1 byte, 'y' or 'n' or '\0'
isDimensionVarID = f.read(1)
if (isDimensionVarID != b'y' and isDimensionVarID != b'n' and
isDimensionVarID != b'\0'):
print(
"ERROR: Next byte for isDimensionVarID must be 'y' or 'n' "
"but it isn't = {0}".format(isDimensionVarID))
return False
if (isDimensionVarID == b'\0'):
isDimensionVarID = b'n'
offset = np.fromfile(f, dtype=np.uint64, count=1)[0]
print(" offset dim : {0}".format(offset))
sizeLimit = expectedVarBlockLength - (f.tell() - startPosition)
status = ReadCharacteristicsFromData(f, sizeLimit, typeID, ndims)
if not status:
return False
# Padded end TAG
# 1 byte length of tag
endTagLen = np.fromfile(f, dtype=np.uint8, count=1)[0]
tag = f.read(endTagLen)
if (not tag.endswith(b"VMD]")):
print(" Tag: " + str(tag))
print("ERROR: VAR group metadata does not end with VMD]")
return False
print(" Tag (pad {0:2d}) : {1}".format(
endTagLen - 4, tag.decode('ascii')))
# special case: LocalValueDim: local values turned into 1D global array
# but it seems there is no data block at all for these variables
if isLocalValueArray:
ldims[0] = 1
nElements = np.uint64(1)
else:
expectedVarDataSize = expectedVarBlockLength - \
(f.tell() - startPosition)
status = ReadVarData(f, nElements, typeID, ldims, expectedVarDataSize,
varsStartPosition, varsTotalLength)
if not status:
return False
return True
# Read an attribute's metadata and value
def ReadAMD(f, attridx, attrsStartPosition, attrsTotalLength):
startPosition = f.tell()
print(" attr {0:5d}".format(attridx))
print(" Starting offset : {0}".format(startPosition))
# 4 bytes TAG
tag = f.read(4)
if (tag != b"[AMD"):
print(" Tag: " + str(tag))
print("ERROR: ATTR group does not start with [AMD")
return False
print(" Tag : " + tag.decode('ascii'))
# 8 bytes AMD Length
amdlen = np.fromfile(f, dtype=np.uint32, count=1)[0]
print(" Attr block size : {0} bytes (+4 for Tag)".format(amdlen))
expectedAttrBlockLength = amdlen + 4 # [AMD is not included in amdlen
if (startPosition + expectedAttrBlockLength >
attrsStartPosition + attrsTotalLength):
print("ERROR: There is not enough bytes inside this PG "
"to read this Attr block")
print("AttrsStartPosition = {0} attrsTotalLength = {1}".format(
attrsStartPosition, attrsTotalLength))
print("current attr's start position = {0} "
"attr block length = {1}".format(
startPosition, expectedAttrBlockLength))
return False
# 4 bytes ATTR MEMBER ID
memberID = np.fromfile(f, dtype=np.uint32, count=1)[0]
print(" Member ID : {0}".format(memberID))
# ATTR NAME, 2 bytes length + string without \0
sizeLimit = expectedAttrBlockLength - (f.tell() - startPosition)
status, attrname = ReadEncodedString(f, "Attr Name", sizeLimit)
if not status:
return False
print(" Attr Name : " + attrname)
# ATTR PATH, 2 bytes length + string without \0
sizeLimit = expectedAttrBlockLength - (f.tell() - startPosition)
status, attrpath = ReadEncodedString(f, "Attr Path", sizeLimit)
if not status:
return False
print(" Attr Path : " + attrpath)
# isAttrAVar 1 byte, 'y' or 'n'
isAttrAVar = f.read(1)
if (isAttrAVar != b'y' and isAttrAVar != b'n'):
print(
"ERROR: Next byte for isAttrAVar must be 'y' or 'n' "
"but it isn't = {0}".format(isAttrAVar))
return False
print(" Refers to Var? : " + isAttrAVar.decode('ascii'))
# 1 byte TYPE
typeID = np.fromfile(f, dtype=np.uint8, count=1)[0]
typeName = GetTypeName(typeID)
print(" Type : {0} ({1}) ".format(typeName, typeID))
# Read Attribute data
if typeName == 'string':
sizeLimit = expectedAttrBlockLength - (f.tell() - startPosition)
status, s = ReadEncodedString(
f, "Attribute String Value", sizeLimit, 4)
if not status:
return False
print(" Value : '" + s + "'")
elif typeName == 'string_array':
nElems = np.fromfile(f, dtype=np.uint32, count=1)[0]
sizeLimit = expectedAttrBlockLength - (f.tell() - startPosition)
status, strList = ReadEncodedStringArray(
f, "Attribute String Array", sizeLimit, nElems)
if not status:
return False
print(" Value : [", end="")
for j in range(len(strList)):
print("'" + strList[j] + "'", end="")
if j < len(strList) - 1:
print(", ", end="")
print("]")
else:
nBytes = np.fromfile(f, dtype=np.uint32, count=1)[0]
typeSize = GetTypeSize(typeID)
nElems = int(nBytes / typeSize)
data = readDataToNumpyArray(f, typeName, nElems)
print(" Value : [", end="")
for j in range(nElems):
print("{0}".format(data[j]), end="")
if j < nElems - 1:
print(", ", end="")
print("]")
# End TAG AMD]
tag = f.read(4)
if (tag != b"AMD]"):
print(" Tag: " + str(tag))
print("ERROR: PG group metadata does not end with AMD]")
return False
print(" Tag : {0}".format(tag.decode('ascii')))
return True
# Read one PG process group (variables and attributes from one process in
# one step)
def ReadPG(f, fileSize, pgidx):
pgStartPosition = f.tell()
if pgidx > 0:
print("========================================================")
print("Process Group {0}: ".format(pgidx))
print(" Starting offset : {0}".format(pgStartPosition))
tag = f.read(4)
if (tag != b"[PGI"):
print(" Tag: " + str(tag))
print("ERROR: PG group does not start with [PGI")
return False
print(" Tag : " + tag.decode('ascii'))
# 8 bytes PG Length
pglen = np.fromfile(f, dtype=np.uint64, count=1)[0]
print(" PG length : {0} bytes (+4 for Tag)".format(pglen))
# pglen does not include the opening tag 4 bytes:
expectedPGLength = pglen + 4
if (pgStartPosition + expectedPGLength > fileSize):
print("ERROR: There is not enough bytes in file to read this PG")
return False
# ColumnMajor (host language Fortran) 1 byte, 'y' or 'n'
isColumnMajor = f.read(1)
if (isColumnMajor != b'y' and isColumnMajor != b'n'):
print(
"ERROR: Next byte for isColumnMajor must be 'y' or 'n' "
"but it isn't = {0}".format(isColumnMajor))
return False
print(" isColumnMajor : " + isColumnMajor.decode('ascii'))
# PG Name, 2 bytes length + string without \0
sizeLimit = expectedPGLength - (f.tell() - pgStartPosition)
status, pgname = ReadEncodedString(f, "PG Name", sizeLimit)
if not status:
return False
print(" PG Name : " + pgname)
# 4 bytes unused (for Coordination variable)
tag = f.read(4)
print(" Unused 4 bytes : " + str(tag))
# Timestep name
sizeLimit = expectedPGLength - (f.tell() - pgStartPosition)
status, tsname = ReadEncodedString(f, "Timestep Name", sizeLimit)
if not status:
return False
print(" Step Name : " + tsname)
# STEP 4 bytes
step = np.fromfile(f, dtype=np.uint32, count=1)[0]
print(" Step Value : {0}".format(step))
# Method Count 1 byte1
nMethods = np.fromfile(f, dtype=np.uint8, count=1)[0]
print(" Methods count : {0}".format(nMethods))
# Method Length 2 byte1
lenMethods = np.fromfile(f, dtype=np.uint16, count=1)[0]
print(" Methods length : {0}".format(lenMethods))
print(" Methods info")
for i in range(nMethods):
# Method ID
methodID = np.fromfile(f, dtype=np.uint8, count=1)[0]
print(" Method ID : {0}".format(methodID))
sizeLimit = expectedPGLength - (f.tell() - pgStartPosition)
status, methodParams = ReadEncodedString(
f, "Method Parameters", sizeLimit)
if not status:
return False
print(' M. params : "' + methodParams + '"')
# VARIABLES
# VARS COUNT 4 bytes
nVars = np.fromfile(f, dtype=np.uint32, count=1)[0]
print(" # of Variables : {0}".format(nVars))
# VARS SIZE 8 bytes
varlen = np.fromfile(f, dtype=np.uint64, count=1)[0]
print(" Vars length : {0} bytes".format(varlen))
sizeLimit = expectedPGLength - (f.tell() - pgStartPosition)
expectedVarsLength = varlen # need to read this more
if (expectedVarsLength > sizeLimit):
print("ERROR: There is not enough bytes in PG to read the variables")
return False
varsStartPosition = f.tell()
for i in range(nVars):
# VMD block
status = ReadVMD(f, i, varsStartPosition, expectedVarsLength)
if not status:
return False
# ATTRIBUTES
# ATTRS COUNT 4 bytes
nAttrs = np.fromfile(f, dtype=np.uint32, count=1)[0]
print(" # of Attributes : {0}".format(nAttrs))
attrsStartPosition = f.tell()
# ATTS SIZE 8 bytes
# attlen includes the 8 bytes of itself, so remember position before this
attlen = np.fromfile(f, dtype=np.uint64, count=1)[0]
print(" Attrs length : {0} bytes".format(attlen))
sizeLimit = expectedPGLength - (attrsStartPosition - pgStartPosition) - 4
expectedAttrsLength = attlen # need to read this more before reaching PGI]
if (expectedAttrsLength > sizeLimit):
print("ERROR: There is not enough bytes in PG to read the attributes")
return False
attrsStartPosition = f.tell()
for i in range(nAttrs):
# AMD block
status = ReadAMD(f, i, attrsStartPosition, expectedAttrsLength)
if not status:
return False
# End TAG PGI]
tag = f.read(4)
if (tag != b"PGI]"):
print(" Tag: " + str(tag))
print("ERROR: PG group metadata does not end with PGI]")
return False
print(" Tag : {0}".format(tag.decode('ascii')))
return True
def DumpData(fileName):
print("========================================================")
print(" Data File: " + fileName)
print("========================================================")
with open(fileName, "rb") as f:
fileSize = fstat(f.fileno()).st_size
status = ReadHeader(f, fileSize, "Data")
if not status:
return status
pgidx = 0
while (f.tell() < fileSize - 12 and status):
status = ReadPG(f, fileSize, pgidx)
pgidx = pgidx + 1
return status
if __name__ == "__main__":
print("ERROR: Utility main program is bp4dbg.py")
|
py | b4077c7c59b1a9076ae0626fed8a4f96971ecfdc | """Top-level logic for the new semantic analyzer.
The semantic analyzer binds names, resolves imports, detects various
special constructs that don't have dedicated AST nodes after parse
(such as 'cast' which looks like a call), and performs various simple
consistency checks.
Semantic analysis of each SCC (strongly connected component; import
cycle) is performed in one unit. Each module is analyzed as multiple
separate *targets*; the module top level is one target and each function
is a target. Nested functions are not separate targets, however. This is
mostly identical to targets used by mypy daemon (but classes aren't
targets in semantic analysis).
We first analyze each module top level in an SCC. If we encounter some
names that we can't bind because the target of the name may not have
been processed yet, we *defer* the current target for further
processing. Deferred targets will be analyzed additional times until
everything can be bound, or we reach a maximum number of iterations.
We keep track of a set of incomplete namespaces, i.e. namespaces that we
haven't finished populating yet. References to these namespaces cause a
deferral if they can't be satisfied. Initially every module in the SCC
will be incomplete.
"""
import contextlib
from typing import List, Tuple, Optional, Union, Callable, Iterator
from typing_extensions import TYPE_CHECKING
from mypy.nodes import (
MypyFile, TypeInfo, FuncDef, Decorator, OverloadedFuncDef, Var
)
from mypy.newsemanal.semanal_typeargs import TypeArgumentAnalyzer
from mypy.state import strict_optional_set
from mypy.newsemanal.semanal import (
NewSemanticAnalyzer, apply_semantic_analyzer_patches, remove_imported_names_from_symtable
)
from mypy.newsemanal.semanal_classprop import (
calculate_class_abstract_status, calculate_class_vars, check_protocol_status,
add_type_promotion
)
from mypy.errors import Errors
from mypy.newsemanal.semanal_infer import infer_decorator_signature_if_simple
from mypy.checker import FineGrainedDeferredNode
from mypy.server.aststripnew import SavedAttributes
import mypy.build
if TYPE_CHECKING:
from mypy.build import Graph, State
Patches = List[Tuple[int, Callable[[], None]]]
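# Each patch is a (priority, callback) pair; apply_semantic_analyzer_patches()
# runs the callbacks in priority order after the main passes have finished.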
# If we perform this many iterations, raise an exception since we are likely stuck.
MAX_ITERATIONS = 20
# Number of passes over core modules before going on to the rest of the builtin SCC.
CORE_WARMUP = 2
core_modules = ['typing', 'builtins', 'abc', 'collections']
def semantic_analysis_for_scc(graph: 'Graph', scc: List[str], errors: Errors) -> None:
"""Perform semantic analysis for all modules in a SCC (import cycle).
Assume that reachability analysis has already been performed.
The scc will be processed roughly in the order the modules are included
in the list.
"""
patches = [] # type: Patches
# Note that functions can't define new module-level attributes
# using 'global x', since module top levels are fully processed
# before functions. This limitation is unlikely to go away soon.
process_top_levels(graph, scc, patches)
process_functions(graph, scc, patches)
# We use patch callbacks to fix up things when we expect relatively few
# callbacks to be required.
apply_semantic_analyzer_patches(patches)
# This pass might need fallbacks calculated above.
check_type_arguments(graph, scc, errors)
calculate_class_properties(graph, scc, errors)
check_blockers(graph, scc)
# Clean-up builtins, so that TypeVar etc. are not accessible without importing.
if 'builtins' in scc:
cleanup_builtin_scc(graph['builtins'])
def cleanup_builtin_scc(state: 'State') -> None:
"""Remove imported names from builtins namespace.
This way names imported from typing in builtins.pyi aren't available
by default (without importing them). We can only do this after processing
the whole SCC is finished, when the imported names aren't needed for
processing builtins.pyi itself.
"""
assert state.tree is not None
remove_imported_names_from_symtable(state.tree.names, 'builtins')
def semantic_analysis_for_targets(
state: 'State',
nodes: List[FineGrainedDeferredNode],
graph: 'Graph',
saved_attrs: SavedAttributes) -> None:
"""Semantically analyze only selected nodes in a given module.
This essentially mirrors the logic of semantic_analysis_for_scc()
except that we process only some targets. This is used in fine grained
incremental mode, when propagating an update.
The saved_attrs are implicitly declared instance attributes (attributes
defined on self) removed by AST stripper that may need to be reintroduced
here. They must be added before any methods are analyzed.
"""
patches = [] # type: Patches
if any(isinstance(n.node, MypyFile) for n in nodes):
# Process module top level first (if needed).
process_top_levels(graph, [state.id], patches)
restore_saved_attrs(saved_attrs)
analyzer = state.manager.new_semantic_analyzer
for n in nodes:
if isinstance(n.node, MypyFile):
# Already done above.
continue
process_top_level_function(analyzer, state, state.id,
n.node.fullname(), n.node, n.active_typeinfo, patches)
apply_semantic_analyzer_patches(patches)
check_type_arguments_in_targets(nodes, state, state.manager.errors)
calculate_class_properties(graph, [state.id], state.manager.errors)
def restore_saved_attrs(saved_attrs: SavedAttributes) -> None:
"""Restore instance variables removed during AST strip that haven't been added yet."""
for (cdef, name), sym in saved_attrs.items():
info = cdef.info
existing = info.get(name)
defined_in_this_class = name in info.names
assert isinstance(sym.node, Var)
# This needs to mimic the logic in SemanticAnalyzer.analyze_member_lvalue()
# regarding the existing variable in class body or in a superclass:
# If the attribute of self is not defined in superclasses, create a new Var.
if (existing is None or
# (An abstract Var is considered as not defined.)
(isinstance(existing.node, Var) and existing.node.is_abstract_var) or
# Also an explicit declaration on self creates a new Var unless
# there is already one defined in the class body.
sym.node.explicit_self_type and not defined_in_this_class):
info.names[name] = sym
def process_top_levels(graph: 'Graph', scc: List[str], patches: Patches) -> None:
# Process top levels until everything has been bound.
    # Reverse order of the scc so the first modules in the original list will
    # be processed first. This helps with performance.
scc = list(reversed(scc))
# Initialize ASTs and symbol tables.
for id in scc:
state = graph[id]
assert state.tree is not None
state.manager.new_semantic_analyzer.prepare_file(state.tree)
# Initially all namespaces in the SCC are incomplete (well they are empty).
state.manager.incomplete_namespaces.update(scc)
worklist = scc[:]
# HACK: process core stuff first. This is mostly needed to support defining
# named tuples in builtin SCC.
if all(m in worklist for m in core_modules):
worklist += list(reversed(core_modules)) * CORE_WARMUP
final_iteration = False
iteration = 0
analyzer = state.manager.new_semantic_analyzer
analyzer.deferral_debug_context.clear()
while worklist:
iteration += 1
if iteration > MAX_ITERATIONS:
# Just pick some module inside the current SCC for error context.
assert state.tree is not None
with analyzer.file_context(state.tree, state.options):
analyzer.report_hang()
break
if final_iteration:
# Give up. It's impossible to bind all names.
state.manager.incomplete_namespaces.clear()
all_deferred = [] # type: List[str]
any_progress = False
while worklist:
next_id = worklist.pop()
state = graph[next_id]
assert state.tree is not None
deferred, incomplete, progress = semantic_analyze_target(next_id, state,
state.tree,
None,
final_iteration,
patches)
all_deferred += deferred
any_progress = any_progress or progress
if not incomplete:
state.manager.incomplete_namespaces.discard(next_id)
if final_iteration:
assert not all_deferred, 'Must not defer during final iteration'
# Reverse to process the targets in the same order on every iteration. This avoids
# processing the same target twice in a row, which is inefficient.
worklist = list(reversed(all_deferred))
final_iteration = not any_progress
def process_functions(graph: 'Graph', scc: List[str], patches: Patches) -> None:
# Process functions.
for module in scc:
tree = graph[module].tree
assert tree is not None
analyzer = graph[module].manager.new_semantic_analyzer
# In principle, functions can be processed in arbitrary order,
# but _methods_ must be processed in the order they are defined,
# because some features (most notably partial types) depend on
# order of definitions on self.
#
# There can be multiple generated methods per line. Use target
# name as the second sort key to get a repeatable sort order on
# Python 3.5, which doesn't preserve dictionary order.
targets = sorted(get_all_leaf_targets(tree), key=lambda x: (x[1].line, x[0]))
for target, node, active_type in targets:
assert isinstance(node, (FuncDef, OverloadedFuncDef, Decorator))
process_top_level_function(analyzer,
graph[module],
module,
target,
node,
active_type,
patches)
def process_top_level_function(analyzer: 'NewSemanticAnalyzer',
state: 'State',
module: str,
target: str,
node: Union[FuncDef, OverloadedFuncDef, Decorator],
active_type: Optional[TypeInfo],
patches: Patches) -> None:
"""Analyze single top-level function or method.
Process the body of the function (including nested functions) again and again,
    until all names have been resolved (or the iteration limit is reached).
"""
# We need one more iteration after incomplete is False (e.g. to report errors, if any).
final_iteration = False
incomplete = True
# Start in the incomplete state (no missing names will be reported on first pass).
# Note that we use module name, since functions don't create qualified names.
deferred = [module]
analyzer.deferral_debug_context.clear()
analyzer.incomplete_namespaces.add(module)
iteration = 0
while deferred:
iteration += 1
if iteration == MAX_ITERATIONS:
# Just pick some module inside the current SCC for error context.
assert state.tree is not None
with analyzer.file_context(state.tree, state.options):
analyzer.report_hang()
break
if not (deferred or incomplete) or final_iteration:
# OK, this is one last pass, now missing names will be reported.
analyzer.incomplete_namespaces.discard(module)
deferred, incomplete, progress = semantic_analyze_target(target, state, node, active_type,
final_iteration, patches)
if final_iteration:
assert not deferred, 'Must not defer during final iteration'
if not progress:
final_iteration = True
analyzer.incomplete_namespaces.discard(module)
# After semantic analysis is done, discard local namespaces
# to avoid memory hoarding.
analyzer.saved_locals.clear()
TargetInfo = Tuple[str, Union[MypyFile, FuncDef, OverloadedFuncDef, Decorator], Optional[TypeInfo]]
def get_all_leaf_targets(file: MypyFile) -> List[TargetInfo]:
"""Return all leaf targets in a symbol table (module-level and methods)."""
result = [] # type: List[TargetInfo]
for fullname, node, active_type in file.local_definitions():
if isinstance(node.node, (FuncDef, OverloadedFuncDef, Decorator)):
result.append((fullname, node.node, active_type))
return result
def semantic_analyze_target(target: str,
state: 'State',
node: Union[MypyFile, FuncDef, OverloadedFuncDef, Decorator],
active_type: Optional[TypeInfo],
final_iteration: bool,
patches: Patches) -> Tuple[List[str], bool, bool]:
"""Semantically analyze a single target.
Return tuple with these items:
- list of deferred targets
- was some definition incomplete
    - were any new names defined (or placeholders replaced)
"""
state.manager.processed_targets.append(target)
tree = state.tree
assert tree is not None
analyzer = state.manager.new_semantic_analyzer
# TODO: Move initialization to somewhere else
analyzer.global_decls = [set()]
analyzer.nonlocal_decls = [set()]
analyzer.globals = tree.names
analyzer.progress = False
with state.wrap_context(check_blockers=False):
refresh_node = node
if isinstance(refresh_node, Decorator):
# Decorator expressions will be processed as part of the module top level.
refresh_node = refresh_node.func
analyzer.refresh_partial(refresh_node,
patches,
final_iteration,
file_node=tree,
options=state.options,
active_type=active_type)
if isinstance(node, Decorator):
infer_decorator_signature_if_simple(node, analyzer)
for dep in analyzer.imports:
state.dependencies.append(dep)
priority = mypy.build.PRI_LOW
if priority <= state.priorities.get(dep, priority):
state.priorities[dep] = priority
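    # If the analyzer deferred, return this target so the caller schedules it for another pass.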
if analyzer.deferred:
return [target], analyzer.incomplete, analyzer.progress
else:
return [], analyzer.incomplete, analyzer.progress
def check_type_arguments(graph: 'Graph', scc: List[str], errors: Errors) -> None:
for module in scc:
state = graph[module]
assert state.tree
analyzer = TypeArgumentAnalyzer(errors,
state.options,
errors.is_typeshed_file(state.path or ''))
with state.wrap_context():
with strict_optional_set(state.options.strict_optional):
state.tree.accept(analyzer)
def check_type_arguments_in_targets(targets: List[FineGrainedDeferredNode], state: 'State',
errors: Errors) -> None:
"""Check type arguments against type variable bounds and restrictions.
This mirrors the logic in check_type_arguments() except that we process only
some targets. This is used in fine grained incremental mode.
"""
analyzer = TypeArgumentAnalyzer(errors,
state.options,
errors.is_typeshed_file(state.path or ''))
with state.wrap_context():
with strict_optional_set(state.options.strict_optional):
for target in targets:
func = None # type: Optional[Union[FuncDef, OverloadedFuncDef]]
if isinstance(target.node, (FuncDef, OverloadedFuncDef)):
func = target.node
saved = (state.id, target.active_typeinfo, func) # module, class, function
with errors.scope.saved_scope(saved) if errors.scope else nothing():
analyzer.recurse_into_functions = func is not None
target.node.accept(analyzer)
def calculate_class_properties(graph: 'Graph', scc: List[str], errors: Errors) -> None:
for module in scc:
tree = graph[module].tree
assert tree
for _, node, _ in tree.local_definitions():
if isinstance(node.node, TypeInfo):
saved = (module, node.node, None) # module, class, function
with errors.scope.saved_scope(saved) if errors.scope else nothing():
calculate_class_abstract_status(node.node, tree.is_stub, errors)
check_protocol_status(node.node, errors)
calculate_class_vars(node.node)
add_type_promotion(node.node, tree.names, graph[module].options)
def check_blockers(graph: 'Graph', scc: List[str]) -> None:
for module in scc:
graph[module].check_blockers()
@contextlib.contextmanager
def nothing() -> Iterator[None]:
yield
|
py | b4077ca5c414815401bc12c7a3292adf4057c13e | def test_construction(viewservice_config):
assert 99 == viewservice_config.flag
|
py | b4077caeb47f9f33c55b229dff473ba03972699f | import pandas as pd
import numpy as np
import datetime
import time
import logging
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
import argparse
import json
from utils import load_datasets, load_target
from logs.logger import log_best
from models.lgbm import train_and_predict
timesteps = 7
startDay = 0
TrTestWin = 56
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='./configs/default.json')
options = parser.parse_args()
config = json.load(open(options.config))
now = datetime.datetime.now()
logging.basicConfig(
filename='./logs/log_{0:%Y%m%d%H%M%S}.log'.format(now), level=logging.DEBUG
)
logging.debug('./logs/log_{0:%Y%m%d%H%M%S}.log'.format(now))
feats = config['features']
logging.debug(feats)
# target_name = config['target_name']
feats_train, feats_test = load_datasets(feats)
y_train_all = load_target()
lr_Train = pd.concat([y_train_all, feats_train], axis = 1)
lr_Train.head()
sc = MinMaxScaler(feature_range = (0, 1))
lr_Train_scaled = sc.fit_transform(lr_Train)
X_Train = []
y_Train = []
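# Build sliding windows: each sample is timesteps consecutive scaled days; the target is the next day's 30490 item sales.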
for i in range(timesteps, 1913 - startDay):
    X_Train.append(lr_Train_scaled[i-timesteps:i]) # e.g. i = 14 -> rows [0:14]; i = 15 -> rows [1:15]
    y_Train.append(lr_Train_scaled[i][0:30490]) # e.g. i = 14 -> [14][0:30490]; i = 15 -> [15][0:30490]
X_Train = np.array(X_Train)
y_Train = np.array(y_Train)
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Initialising the RNN
regressor = Sequential()
# Adding the first LSTM layer and some Dropout regularisation
layer_1_units=40
regressor.add(LSTM(units = layer_1_units, return_sequences = True, input_shape = (X_Train.shape[1], X_Train.shape[2])))
regressor.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
layer_2_units=300
regressor.add(LSTM(units = layer_2_units, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
layer_3_units=300
regressor.add(LSTM(units = layer_3_units))
regressor.add(Dropout(0.2))
# Adding the output layer
regressor.add(Dense(units = 30490))
# Compiling the RNN
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fitting the RNN to the Training set
# epoch_no=32
epoch_no=3
batch_size_RNN=44
regressor.fit(X_Train, y_Train, epochs = epoch_no, batch_size = batch_size_RNN)
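# Seed the forecast with the last timesteps days of the training data.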
inputs = lr_Train[-timesteps:]
inputs = sc.transform(inputs)
X_Test = []
X_Test.append(inputs[0:timesteps])
X_Test = np.array(X_Test)
predictions = []
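# Forecast 28 days recursively: each day's prediction is appended to the input window and reused for the next step.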
for j in range(timesteps,timesteps + 28): # range(14,42)
#X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_Test[0,j - timesteps:j].reshape(1, timesteps, X_Train.shape[2]))
    # .reshape(1, timesteps, ~)): ~ must match the number of columns in X_Train; update it if more features are added.
    testInput = np.column_stack((np.array(predicted_stock_price), np.array(feats_test)[j - timesteps].reshape(1, X_Train.shape[2] - 30490))) # careful if the feature set changes
    # e.g. j = 14 -> ..(X_test[0,0:14].reshape(1, 14, 30494)); j = 15 -> ..(X_test[0,1:15].reshape(1, 14, 30494))
    # testInput = np.column_stack((np.array(testInput), pd.get_dummies(DateFlagTest[category_col].astype("category"), drop_first = True)[1913 + j - timesteps]))
    X_Test = np.append(X_Test, testInput).reshape(1,j + 1,X_Train.shape[2])
    # e.g. j = 14 -> ..reshape(1, 15, 30538)); j = 15 -> ..reshape(1, 16, 30538))
    predicted_stock_price = sc.inverse_transform(testInput)[:,0:30490] # undo the MinMax scaling applied earlier
predictions.append(predicted_stock_price)
submission = pd.DataFrame(data=np.array(predictions).reshape(28,30490))
submission = submission.T
submission = pd.concat((submission, submission), ignore_index=True)
sample_submission = pd.read_csv("data/input/sample_submission.csv")
idColumn = sample_submission[["id"]]
submission[["id"]] = idColumn
cols = list(submission.columns)
cols = cols[-1:] + cols[:-1]
submission = submission[cols]
colsdeneme = ["id"] + [f"F{i}" for i in range (1,29)]
submission.columns = colsdeneme
currentDateTime = time.strftime("%d%m%Y_%H%M%S")
submission.to_csv("./data/output/submission.csv", index=False) |
py | b4077d3e7b5653eaf18d2b8555abe72f10b3fb16 | from bs4 import BeautifulSoup
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import os
import pandas as pd
import requests
from tqdm.notebook import tqdm
# download and clean data
# ChIP-seq
## download
def _mkdir(path):
try:
os.mkdir(path)
except:
pass
def chip_seq(task):
'''download ChIP-seq data'''
    tasks = {'motif_discovery', 'motif_occupancy'}
    if task not in tasks:
        raise ValueError(f'task can only be in {tasks}, got \'{task}\'')
    r = requests.get(f'http://cnn.csail.mit.edu/{task}/')
    soup = BeautifulSoup(r.text)
    trs = soup.find('table').find_all('tr')[3:-1]
    folders = [tr.a.text for tr in trs]
    _mkdir(task)
for folder in tqdm(folders):
_mkdir(os.path.join(task, folder))
for data in ['train.data', 'test.data']:
r = requests.get(f'http://cnn.csail.mit.edu/{task}/{folder}/{data}')
with open(os.path.join(task, folder, data), 'w') as f:
f.write(r.text)
## transform
def load_chip_seq_as_df(path, file):
'''load the downloaded text files into a single DataFrame'''
dfs = []
for folder in tqdm(os.listdir(path)):
try:
df = pd.read_csv(os.path.join(path, folder, file), sep=' ', header=None)
dfs.append(df)
except:
print(f'Skip {folder}')
continue
result = pd.concat(dfs)
result.sort_index(inplace=True)
return result
def df_to_fasta(df, file, data_dir):
'''dump the DataFrame as a fasta file, skip sequenecs that have N'''
gen = (
SeqRecord(Seq(record[1]), id='', name='', description=str(record[2]))
for idx, record in df.iterrows()
if not 'N' in record[1]
)
with open(data_dir + file, 'w') as f:
SeqIO.write(tqdm(gen), f, 'fasta')
# histone
## download
def histone():
'''download histone data'''
links = [1, 2, 4, 5, 8, 10, 11, 12, 13, 14]
files = ['H3', 'H4', 'H3K9ac', 'H3K14ac', 'H4ac',
'H3K4me1', 'H3K4me2', 'H3K4me3', 'H3K36me3', 'H3K79me3']
files = [file + '.fasta' for file in files]
for link, file in zip(links, files):
r = requests.get(f'http://www.jaist.ac.jp/~tran/nucleosome/ten_dataset/dataset{link:02}.txt')
with open(file, 'w') as f:
f.write(r.text)
## transform
def clean(record):
'''separate the sequence and label'''
seq = record.seq._data
return SeqRecord(Seq(seq[:-1]), id='', name='', description=str(seq[-1]))
def clean_histone(files, files_):
'''clean the histone fasta file'''
for i in tqdm(range(len(files))):
with open(files[i], 'r') as f:
records = [clean(record) for record in SeqIO.parse(f, 'fasta')
if len(record) == 501]
with open(files_[i], 'w') as f:
SeqIO.write(records, f, 'fasta') |
py | b407810b78d7b8f62a7b12729ca3098f841ef366 | from opts.base_opts import Opts
class VizOpts(Opts):
def __init__(self):
super().__init__()
def init(self):
super().init()
self.parser.add_argument('-vizIgnoreMask', dest='vizIgnoreMask', action='store_true', help='Visualize Ignore Mask')
self.parser.add_argument('-vizHeatMap', dest='vizHeatMap', action='store_true', help='Visualize Heatmap')
self.parser.add_argument('-vizPaf', dest='vizPaf', action='store_true', help='Visualize PAF')
|
py | b407813cc77065fb35174a589b9479a066131c54 | '''
Solution to https://adventofcode.com/2019/day/1
Run with full or relative path to input file. Eg/
$ python fuel.py /tmp/input.txt
Input file should contain contents of https://adventofcode.com/2019/day/1/input
'''
import sys
def clean_input(mass):
mass = mass.replace('\n', '')
try:
return int(mass)
except ValueError:
return 0
def compute_fuel_requirement(mass):
    return max([mass // 3 - 2, 0])  # floor division: the fuel requirement is rounded down
def main():
with open(sys.argv[1]) as f:
masses = map(clean_input, f.readlines())
fuel_requirements = map(compute_fuel_requirement, masses)
return sum(fuel_requirements)
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Expected one argument: path to input file')
sys.exit(1)
print(main())
|
py | b40781b348e323fd09f5fdb659564305590e9c09 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class OnsMqttQueryClientByGroupIdRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ons', '2019-02-14', 'OnsMqttQueryClientByGroupId','ons')
def get_PreventCache(self):
return self.get_query_params().get('PreventCache')
def set_PreventCache(self,PreventCache):
self.add_query_param('PreventCache',PreventCache)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_GroupId(self):
return self.get_query_params().get('GroupId')
def set_GroupId(self,GroupId):
self.add_query_param('GroupId',GroupId) |
py | b407821c0d3b03335547cf696a820f3fb1dc5324 | # Create your tasks here
from __future__ import absolute_import, unicode_literals
from celery import shared_task
from policyengine.models import Proposal, LogAPICall, BooleanVote
from integrations.reddit.models import RedditCommunity, RedditUser, RedditMakePost
import datetime
import logging
import json
logger = logging.getLogger(__name__)
def is_policykit_action(community, name, call_type, test_a, test_b):
vote_post_id = Proposal.objects.filter(vote_post_id=name, action__community=community)
if vote_post_id.exists():
logger.info('approve PolicyKit post')
community.make_call('api/approve', {'id': name})
return True
else:
current_time_minus = datetime.datetime.now() - datetime.timedelta(minutes=2)
logs = LogAPICall.objects.filter(proposal_time__gte=current_time_minus,
call_type=call_type)
if logs.exists():
logger.info("checking API logging")
for log in logs:
j_info = json.loads(log.extra_info)
if test_a == j_info[test_b]:
logger.info("checking API logging FOUND")
return True
return False
@shared_task
def reddit_listener_actions():
for community in RedditCommunity.objects.all():
actions = []
res = community.make_call('r/policykit/about/unmoderated')
call_type = 'api/submit'
for item in res['data']['children']:
data = item['data']
if not is_policykit_action(community, data['name'], call_type, data['title'], 'title'):
post_exists = RedditMakePost.objects.filter(name=data['name'])
if not post_exists.exists():
logger.info('make new action')
new_api_action = RedditMakePost()
new_api_action.community = community
new_api_action.text = data['selftext']
new_api_action.title = data['title']
new_api_action.name = data['name']
u,_ = RedditUser.objects.get_or_create(username=data['author'],
community=community)
new_api_action.initiator = u
actions.append(new_api_action)
for action in actions:
action.community_origin = True
action.save() # save triggers policy proposal
# Manage proposals
pending_proposals = Proposal.objects.filter(
status=Proposal.PROPOSED,
action__community=community,
vote_post_id__isnull=False
)
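        # For each open proposal, fetch its Reddit comment thread and record \+1 / \-1 replies as BooleanVotes.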
for proposal in pending_proposals:
id = proposal.vote_post_id.split('_')[1]
call = 'r/policykit/comments/' + id + '.json'
res = community.make_call(call)
replies = res[1]['data']['children']
for reply in replies:
data = reply['data']
text = data['body']
logger.info(text)
val = None
if '\\-1' in text:
val = False
elif '\\+1' in text:
val = True
                if val is not None:
username = data['author']
u = RedditUser.objects.filter(username=username,
community=community)
if u.exists():
u = u[0]
bool_vote = BooleanVote.objects.filter(proposal=proposal,
user=u)
if bool_vote.exists():
vote = bool_vote[0]
if vote.boolean_value != val:
vote.boolean_value = val
vote.save()
else:
b = BooleanVote.objects.create(proposal=proposal,
user=u,
boolean_value=val)
logger.info('created vote')
|
py | b40782e8185e0471bac7fd90a49715d5c1080d1d | import awkward as ak
from coffea import hist, processor
from coffea import nanoevents
class NanoEventsProcessor(processor.ProcessorABC):
def __init__(self, columns=[], canaries=[]):
self._columns = columns
self._canaries = canaries
dataset_axis = hist.Cat("dataset", "Primary dataset")
mass_axis = hist.Bin("mass", r"$m_{\mu\mu}$ [GeV]", 30000, 0.25, 300)
pt_axis = hist.Bin("pt", r"$p_{T}$ [GeV]", 30000, 0.25, 300)
self.expected_usermeta = {
"ZJets": ("someusermeta", "hello"),
"Data": ("someusermeta2", "world"),
}
self._accumulator = processor.dict_accumulator(
{
"mass": hist.Hist("Counts", dataset_axis, mass_axis),
"pt": hist.Hist("Counts", dataset_axis, pt_axis),
"cutflow": processor.defaultdict_accumulator(int),
"worker": processor.set_accumulator(),
}
)
@property
def columns(self):
return self._columns
@property
def accumulator(self):
return self._accumulator
def process(self, events):
output = self.accumulator.identity()
dataset = events.metadata["dataset"]
print(events.metadata)
if "checkusermeta" in events.metadata:
metaname, metavalue = self.expected_usermeta[dataset]
assert metavalue == events.metadata[metaname]
mapping = events.behavior["__events_factory__"]._mapping
muon_pt = events.Muon.pt
if isinstance(mapping, nanoevents.mapping.CachedMapping):
keys_in_cache = list(mapping.cache.cache.keys())
has_canaries = [canary in keys_in_cache for canary in self._canaries]
if has_canaries:
try:
from distributed import get_worker
worker = get_worker()
output["worker"].add(worker.name)
except ValueError:
pass
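        # Build all unique muon pairs and sum their four-vectors to form dimuon candidates.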
dimuon = ak.combinations(events.Muon, 2)
dimuon = dimuon["0"] + dimuon["1"]
output["pt"].fill(dataset=dataset, pt=ak.flatten(muon_pt))
output["mass"].fill(dataset=dataset, mass=ak.flatten(dimuon.mass))
output["cutflow"]["%s_pt" % dataset] += sum(ak.num(events.Muon))
output["cutflow"]["%s_mass" % dataset] += sum(ak.num(dimuon))
return output
def postprocess(self, accumulator):
return accumulator
|
py | b40782ecba742adacec0612c7524dcd5996770d5 | from .cyclejoin import DeBruijnPoly, DeBruijnZech
from .fsr import FeedbackShiftRegister
from . import greedy
from . import helpers
__all__ = ['DeBruijnPoly', 'DeBruijnZech', 'FeedbackShiftRegister', 'greedy', 'helpers']
# TODO: write docstrings
# TODO: (not urgent) re-read paper and figure out a cleaner implementation?
|
py | b40784c19683911555c694534934dba3dec04253 | from eth_keys.datatypes import PrivateKey
from eth_typing import Address
from eth._utils.transactions import (
create_transaction_signature,
)
from eth.vm.forks.berlin.transactions import (
BerlinLegacyTransaction,
BerlinUnsignedLegacyTransaction,
BerlinTransactionBuilder,
)
class BusanLegacyTransaction(BerlinLegacyTransaction):
pass
class BusanUnsignedLegacyTransaction(BerlinUnsignedLegacyTransaction):
def as_signed_transaction(self,
private_key: PrivateKey,
chain_id: int = None) -> BerlinLegacyTransaction:
v, r, s = create_transaction_signature(self, private_key, chain_id=chain_id)
return BerlinLegacyTransaction(
nonce=self.nonce,
gas_price=self.gas_price,
gas=self.gas,
to=self.to,
value=self.value,
data=self.data,
v=v,
r=r,
s=s,
)
class BusanTransactionBuilder(BerlinTransactionBuilder):
# Override
legacy_signed = BusanLegacyTransaction
legacy_unsigned = BusanUnsignedLegacyTransaction |
py | b4078629de3864c42e6474d728617ffc3cebc4d9 | import logging
import time
import os
import sys
import pytest
from common.utilities import wait_until
from common.config_reload import config_reload
DUT_THERMAL_POLICY_FILE = '/usr/share/sonic/device/{}/thermal_policy.json'
DUT_THERMAL_POLICY_BACKUP_FILE = '/usr/share/sonic/device/{}/thermal_policy.json.bak'
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
FILES_DIR = os.path.join(BASE_DIR, 'files')
class BaseMocker:
"""
@summary: Base class for thermal control data mocker
This base class defines the basic interface to be provided by base mocker. Mockers implemented by each
vendor must be a subclass of this base class.
"""
# Mocker type dictionary. Vendor must register their concrete mocker class to this dictionary.
_mocker_type_dict = {}
def __init__(self, dut):
"""
Constructor of a mocker.
:param dut: DUT object representing a SONiC switch under test.
"""
self.dut = dut
def mock_data(self):
"""
Generate mock data.
:return:
"""
pass
def check_result(self, actual_data):
"""
Check actual data with mocked data.
:param actual_data: A dictionary contains actual command line data. Key of the dictionary is the unique id
of a line of command line data. For 'show platform fan', the key is FAN name. Value
of the dictionary is a list of field values for a line.
:return: True if actual data match mocked data else False
"""
pass
def deinit(self):
"""
Destructor. Vendor specific clean up work should do here.
:return:
"""
pass
@classmethod
def register_mocker_type(cls, name, mocker_type):
"""
Register mocker type with its name.
:param name: Name of a mocker type. For example: FanStatusMocker.
:param mocker_type: Class of a mocker.
:return:
"""
cls._mocker_type_dict[name] = mocker_type
@classmethod
def get_mocker_type(cls, name):
"""
Get mocker type by its name.
:param name: Name of a mocker type. For example: FanStatusMocker.
:return: Class of a mocker.
"""
return cls._mocker_type_dict[name] if name in cls._mocker_type_dict else None
def mocker(type_name):
"""
    Decorator for registering a mocker type.
:param type_name: Name of a mocker type.
:return:
"""
def wrapper(object_type):
BaseMocker.register_mocker_type(type_name, object_type)
return object_type
return wrapper
@pytest.fixture
def mocker_factory():
"""
Fixture for thermal control data mocker factory.
:return: A function for creating thermal control related data mocker.
"""
mockers = []
def _create_mocker(dut, mocker_name):
"""
Create vendor specified mocker object by mocker name.
:param dut: DUT object representing a SONiC switch under test.
:param mocker_name: Name of a mocker type.
:return: Created mocker instance.
"""
platform = dut.facts['platform']
mocker_object = None
if 'mlnx' in platform:
current_file_dir = os.path.dirname(os.path.realpath(__file__))
if current_file_dir not in sys.path:
sys.path.append(current_file_dir)
sub_folder_dir = os.path.join(current_file_dir, "mellanox")
if sub_folder_dir not in sys.path:
sys.path.append(sub_folder_dir)
import mellanox_thermal_control_test_helper
mocker_type = BaseMocker.get_mocker_type(mocker_name)
if mocker_type:
mocker_object = mocker_type(dut)
mockers.append(mocker_object)
else:
pytest.skip("No mocker defined for this platform %s")
return mocker_object
yield _create_mocker
for m in mockers:
m.deinit()
class FanStatusMocker(BaseMocker):
"""
Fan status mocker. Vendor should implement this class to provide a FAN mocker.
This class could mock speed, presence/absence and so on for all FANs and check
the actual data equal to the mocked data.
"""
def check_all_fan_speed(self, expected_speed):
"""
Check all fan speed with a given expect value.
:param expected_speed: Expect FAN speed percentage.
:return: True if match else False.
"""
pass
class SingleFanMocker(BaseMocker):
"""
Single FAN mocker. Vendor should implement this class to provide a FAN mocker.
This class could mock speed, presence/absence for one FAN, check LED color and
other information.
"""
def is_fan_removable(self):
"""
:return: True if FAN is removable else False
"""
pass
def mock_normal(self):
"""
Change the mocked FAN status to 'Present' and normal speed.
:return:
"""
pass
def mock_absence(self):
"""
Change the mocked FAN status to 'Not Present'.
:return:
"""
pass
def mock_presence(self):
"""
Change the mocked FAN status to 'Present'
:return:
"""
pass
def mock_status(self, status):
"""
Change the mocked FAN status to good or bad
:param status: bool value indicate the target status of the FAN.
:return:
"""
pass
def mock_normal_speed(self):
"""
Change the mocked FAN speed to a normal value.
:return:
"""
pass
def mock_under_speed(self):
"""
Change the mocked FAN speed to slower than target speed and exceed speed tolerance.
:return:
"""
pass
def mock_over_speed(self):
"""
Change the mocked FAN speed to faster than target speed and exceed speed tolerance.
:return:
"""
pass
class ThermalStatusMocker(BaseMocker):
"""
Thermal status mocker. Vendor should implement this class to provide a Thermal data mocker.
This class could mock temperature, high threshold, high critical threshold and so on for all
FANs and check the actual data equal to the mocked data.
"""
def check_thermal_algorithm_status(self, expected_status):
"""
Check thermal control algorithm status equal to the given value.
:param expected_status: Expected thermal control status. True means enable, false means disable.
:return: True if match else False.
"""
pass
def get_field_range(second_line):
"""
@summary: Utility function to help get field range from a simple tabulate output line.
Simple tabulate output looks like:
Head1 Head2 H3 H4
----- ------ ------- --
V1 V2 V3 V4
    @return: Return a list of field ranges. E.g. [(0,4), (6, 10)] means there are two fields for
each line, the first field is between position 0 and position 4, the second field is between
position 6 and position 10.
"""
field_ranges = []
begin = 0
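    # Walk the '-----' separator line: each dash run marks one column; record its (begin, end) character positions.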
while 1:
end = second_line.find(' ', begin)
if end == -1:
field_ranges.append((begin, len(second_line)))
break
field_ranges.append((begin, end))
begin = second_line.find('-', end)
if begin == -1:
break
return field_ranges
def get_fields(line, field_ranges):
"""
@summary: Utility function to help extract all fields from a simple tabulate output line
based on field ranges got from function get_field_range.
@return: A list of fields.
"""
fields = []
for field_range in field_ranges:
field = line[field_range[0]:field_range[1]].encode('utf-8')
fields.append(field.strip())
return fields
def check_cli_output_with_mocker(dut, mocker_object, command, max_wait_time, key_index=0):
"""
Check the command line output matches the mocked data.
:param dut: DUT object representing a SONiC switch under test.
:param mocker_object: A mocker instance.
:param command: The command to be executed. E.g, 'show platform fan'
:param max_wait_time: Max wait time.
:return: True if the actual data matches the mocked data.
"""
time.sleep(max_wait_time)
output = dut.command(command)
assert output["rc"] == 0, "Run command '%s' failed" % command
second_line = output["stdout_lines"][1]
field_ranges = get_field_range(second_line)
actual_data = {}
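    # Parse the tabulated CLI output into {key_field: [fields]} using the column ranges derived from the separator line.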
for line in output["stdout_lines"][2:]:
fields = get_fields(line, field_ranges)
actual_data[fields[key_index]] = fields
return mocker_object.check_result(actual_data)
def check_thermal_algorithm_status(dut, mocker_factory, expected_status):
"""
Check thermal control algorithm status.
:param dut: DUT object representing a SONiC switch under test.
:param mocker_factory: Mocker factory.
:param expected_status: Expect thermal control algorithm status.
:return: True if actual thermal control status match expect value.
"""
thermal_mocker = mocker_factory(dut, 'ThermalStatusMocker')
if thermal_mocker is not None:
return thermal_mocker.check_thermal_algorithm_status(expected_status)
return True # if vendor doesn't provide a thermal mocker, ignore this check by return True.
def restart_thermal_control_daemon(dut):
"""
Restart thermal control daemon by killing it and waiting supervisord to restart
it automatically.
:param dut: DUT object representing a SONiC switch under test.
:return:
"""
logging.info('Restarting thermal control daemon...')
find_thermalctld_pid_cmd = 'docker exec -i pmon bash -c \'pgrep thermalctld | sort\''
output = dut.command(find_thermalctld_pid_cmd)
assert output["rc"] == 0, "Run command '%s' failed" % find_thermalctld_pid_cmd
assert len(output["stdout_lines"]) == 2, "There should be 2 thermalctld process"
pid_0 = int(output["stdout_lines"][0].strip())
pid_1 = int(output["stdout_lines"][1].strip())
# find and kill the parent process
pid_to_kill = pid_0 if pid_0 < pid_1 else pid_1
logging.info('Killing old thermal control daemon with pid: {}'.format(pid_to_kill))
kill_thermalctld_cmd = 'docker exec -i pmon bash -c \'kill {}\''.format(pid_to_kill)
output = dut.command(kill_thermalctld_cmd) # kill thermalctld and wait supervisord auto reboot thermalctld
assert output["rc"] == 0, "Run command '%s' failed" % kill_thermalctld_cmd
# make sure thermalctld has restarted
max_wait_time = 30
while max_wait_time > 0:
max_wait_time -= 1
output = dut.command(find_thermalctld_pid_cmd)
assert output["rc"] == 0, "Run command '%s' failed" % find_thermalctld_pid_cmd
if len(output["stdout_lines"]) != 2:
time.sleep(1)
continue
new_pid_0 = int(output["stdout_lines"][0].strip())
new_pid_1 = int(output["stdout_lines"][1].strip())
parent_pid = new_pid_0 if new_pid_0 < new_pid_1 else new_pid_1
if parent_pid == pid_to_kill:
logging.info('Old thermal control daemon is still alive, waiting...')
time.sleep(1)
continue
else:
logging.info('New pid of thermal control daemon is {}'.format(parent_pid))
return
# try restore by config reload...
config_reload(dut)
assert 0, 'Wait thermal control daemon restart failed'
class ThermalPolicyFileContext:
"""
Context class to help replace thermal control policy file and restore it automatically.
"""
def __init__(self, dut, src):
"""
Constructor of ThermalPolicyFileContext.
:param dut: DUT object representing a SONiC switch under test.
:param src: Local policy file path.
"""
self.dut = dut
self.src = src
platform_str = dut.facts['platform']
self.thermal_policy_file_path = DUT_THERMAL_POLICY_FILE.format(platform_str)
self.thermal_policy_file_backup_path = DUT_THERMAL_POLICY_BACKUP_FILE.format(platform_str)
def __enter__(self):
"""
Back up original thermal control policy file and replace it with the given one. Restart
thermal control daemon to make it effect.
:return:
"""
self.dut.command('mv -f {} {}'.format(self.thermal_policy_file_path, self.thermal_policy_file_backup_path))
self.dut.copy(src=os.path.join(FILES_DIR, self.src), dest=self.thermal_policy_file_path)
restart_thermal_control_daemon(self.dut)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Restore original thermal control policy file. Restart thermal control daemon to make it effect.
:param exc_type: Not used.
:param exc_val: Not used.
:param exc_tb: Not used.
:return:
"""
self.dut.command('mv -f {} {}'.format(self.thermal_policy_file_backup_path, self.thermal_policy_file_path))
restart_thermal_control_daemon(self.dut)
|
py | b4078706cb929e098682c994eebcf087fc87cbfd | import random
from django.core.mail import BadHeaderError, send_mail
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from .forms import ContactForm, FeedbackForm
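# Both views embed a simple arithmetic spam check: the expected answer travels with the form and is compared in the POST handler before any mail is sent.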
def contact(request):
spam_check = False
if request.method != "POST":
test_val = random.randint(0, 20)
context = make_contact_context(request, spam_check, test_val)
return render(request, "contact/contact.html", context)
else:
contact_form = ContactForm(request.POST)
if contact_form.is_valid():
# spam check
if contact_form.get_right_answer() == contact_form.get_answer():
# Sends mail
subject, message, email = contact_form.process()
if not email:
email = "[email protected]"
try:
if contact_form.get_reciever() == "PostKom":
mailadress = "[email protected]"
elif contact_form.get_reciever() == "ITV ved IFY":
mailadress = "[email protected]"
elif contact_form.get_reciever() == "ITV ved IMF":
mailadress = "[email protected]"
else:
mailadress = "[email protected]"
send_mail(
subject, message, email, [mailadress], fail_silently=False
)
except BadHeaderError:
return HttpResponse("Invalid header found")
return HttpResponseRedirect("/contact/success/")
else:
spam_check = True
test_val = random.randint(0, 20)
context = make_contact_context(request, spam_check, test_val)
return render(request, "contact/contact.html", context)
def feedback(request, template="feedback.html", send_to="[email protected]"):
spam_check = False
if request.method != "POST":
test_val = random.randint(0, 20)
context = make_feedback_context(request, spam_check, test_val)
return render(request, "contact/" + template, context)
else:
feedback_form = FeedbackForm(request.POST)
if feedback_form.is_valid():
# spam check
if feedback_form.get_right_answer() == feedback_form.get_answer():
# Sends mail
subject, message, email = feedback_form.process()
try:
send_mail(subject, message, email, [send_to], fail_silently=False)
except BadHeaderError:
return HttpResponse("Invalid header found")
return HttpResponseRedirect("/contact/success/")
else:
spam_check = True
test_val = random.randint(0, 20)
context = make_feedback_context(request, spam_check, test_val)
return render(request, "contact/" + template, context)
def success(request):
return render(request, "contact/success.html")
#######################################################################
# The two functions below are not views, they return appropriate context for feedback and contact view
def make_contact_context(request, spam_check, test_val):
board_emails = (
("Hele styret", "nabla"),
("Leder", "leder"),
("Nestleder", "nestleder"),
("Faddersjef/sekretær", "sekretaer"),
("Kasserer", "kasserer"),
("Bedkomsjef", "bedriftskontakt"),
("Arrangementsjef", "arrsjef"),
("Kjellersjef", "kjellersjef"),
("Ambassadør", "ambassador"),
("Websjef", "websjef"),
("Redaktør", "redaktor"),
)
nabla_pos_emails = (
("Alle gruppeledere", "gruppeledere"),
("Leder av ProKom", "leder.prokom"),
("Leder av QuizKom", "quizkom"),
("Leder av Koreolis", "koreolis.kraften"),
("Leder av Reka", "reka"),
("Leder av Reven", "reven"),
("Leder av Skråttcast", "skraattcast"),
("Leder av Gravitones", "leder.gravitones"),
("Leder av the Stokes", "lederstokes"),
("Musikalsk leder - Maxwells Muntre Musikanter", "maxwells.muntre"),
("Økonomiansvarlig i bedriftskontakten", "bnokonomi"),
("Revysjef", "revy"),
("Bryggemester", "bryggemester"),
)
group_emails = (
("PostKom", "postkom"),
("Arrkom", "arrkom"),
("BN - Bedriftkontakten Nabla", "bedkom"),
("Eurekakom", "eureka"),
("Educom", "educom"),
("ProKom", "prokom"),
("Redaksjonen", "nabladet"),
("WebKom", "webkom"),
("Excom17", "ekskom2019"),
("Excom18", "excom18"),
("Kontorkom", "kontorkom"),
("Koreolis", "koreolis"),
("nablarevyen", "revy-alle"),
("the Gravitones", "gravitones"),
("the Stokes", "thestokes"),
("utfluks", "utfluks"),
("Kjellersamarbeidet (Nabla, HC, Janus)", "kjellern.hk18"),
)
if request.user.is_authenticated:
        # form with name and e-mail pre-filled (the user does not have to enter them)
contact_form = ContactForm(
initial={
"your_name": request.user.get_full_name(),
"email": request.user.email,
"right_answer": test_val,
"spam_check": "ditt svar",
}
)
else:
        # empty form
contact_form = ContactForm(initial={"right_answer": test_val})
context = {
"contact_form": contact_form,
"spam_check": spam_check,
"test_val": test_val,
}
contact_form.fields[
"spam_check"
].label = (
f"Hva er kvadratroten av {test_val} ganget med kvadratroten av {test_val}? "
)
contact_form.fields[
"spam_check"
].help_text = "Skriv inn svaret over for å verifisere at du er et menneske"
context["board_emails"] = board_emails
context["nabla_pos_emails"] = nabla_pos_emails
context["group_emails"] = group_emails
return context
def make_feedback_context(request, spam_check, test_val):
if request.user.is_authenticated:
        # form with name and e-mail pre-filled (the user does not have to enter them)
feedback_form = FeedbackForm(
initial={
"your_name": request.user.get_full_name(),
"email": request.user.email,
"right_answer": test_val,
}
)
else:
        # empty form
feedback_form = FeedbackForm(initial={"right_answer": test_val})
feedback_form.fields[
"spam_check"
].label = (
f"Hva er kvadratroten av {test_val} ganget med kvadratroten av {test_val}? "
)
feedback_form.fields[
"spam_check"
].help_text = "Skriv inn svaret over for å verifisere at du er et menneske"
context = {
"feedback_form": feedback_form,
"spam_check": spam_check,
"test_val": test_val,
}
return context
|
py | b4078771313754f5ec238dfd4fc2587fae8b5b64 | from financialmodelingprep.decorator import get_json_data
BASE_URL = 'https://financialmodelingprep.com'
class calendars():
BASE_URL = 'https://financialmodelingprep.com'
API_KEY = ''
def __init__(self, API_KEY):
self.API = API_KEY
@get_json_data
def earning_calendar(self):
'''
Earnings Calendar
'''
return f'{self.BASE_URL}/api/v3/earning_calendar?apikey={self.API}'
@get_json_data
def earning_calendar_period(self, datetime_from, datetime_to):
'''
Earnings Calendar with period
'''
return f'{self.BASE_URL}/api/v3/earning_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
@get_json_data
def company_historical_earnings_calendar(self, ticker: str, limit: int):
'''
        Historical earnings calendar for a ticker
'''
return f'{self.BASE_URL}/api/v3/historical/earning_calendar/{ticker}?limit={str(limit)}&apikey={self.API}'
@get_json_data
def company_historical_earnings_calender(self, datetime_from, datetime_to):
'''
        IPO calendar with period
'''
return f'{self.BASE_URL}/api/v3/ipo_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
@get_json_data
def ipo_calendar(self, datetime_from, datetime_to):
'''
        IPO calendar with period
'''
return f'{self.BASE_URL}/api/v3/ipo_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
@get_json_data
def stock_split_calendar(self, datetime_from, datetime_to):
'''
        Stock split calendar with period
'''
return f'{self.BASE_URL}/api/v3/stock_split_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
@get_json_data
def stock_dividend_calendar(self, datetime_from, datetime_to):
'''
        Stock dividend calendar with period
'''
return f'{self.BASE_URL}/api/v3/stock_dividend_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
@get_json_data
def economic_calendar(self, datetime_from, datetime_to):
'''
        Economic calendar with period
'''
return f'{self.BASE_URL}/api/v3/economic_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
|
py | b4078793a806fbe888f00ede2310cd4b1dcf0dab | from django.core.exceptions import PermissionDenied
from django import http, shortcuts
from django.utils.crypto import constant_time_compare
from rest_framework import exceptions, status
from rest_framework.mixins import CreateModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet
import olympia.core.logger
from olympia import amo
from olympia.amo.decorators import use_primary_db
from olympia.amo.utils import HttpResponseXSendFile
from olympia.api.authentication import JWTKeyAuthentication, WebTokenAuthentication
from olympia.api.permissions import AllowOwner, APIGatePermission
from olympia.api.throttling import file_upload_throttles
from olympia.devhub import tasks as devhub_tasks
from olympia.devhub.permissions import IsSubmissionAllowedFor
from .models import FileUpload
from .serializers import FileUploadSerializer
log = olympia.core.logger.getLogger('z.addons')
@use_primary_db
def serve_file_upload(request, uuid):
"""
This is to serve file uploads using authenticated download URLs. This is
currently used by the "scanner" services.
"""
upload = shortcuts.get_object_or_404(FileUpload, uuid=uuid)
access_token = request.GET.get('access_token')
if not access_token:
log.error('Denying access to %s, no token.', upload.id)
raise PermissionDenied
if not constant_time_compare(access_token, upload.access_token):
log.error('Denying access to %s, token invalid.', upload.id)
raise PermissionDenied
if not upload.path:
log.info('Preventing access to %s, upload path is falsey.' % upload.id)
return http.HttpResponseGone('upload path does not exist anymore')
return HttpResponseXSendFile(
request, upload.path, content_type='application/octet-stream'
)
class FileUploadViewSet(CreateModelMixin, ReadOnlyModelViewSet):
queryset = FileUpload.objects.all()
serializer_class = FileUploadSerializer
permission_classes = [
APIGatePermission('addon-submission-api'),
AllowOwner,
IsSubmissionAllowedFor,
]
authentication_classes = [JWTKeyAuthentication, WebTokenAuthentication]
lookup_field = 'uuid'
throttle_classes = file_upload_throttles
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def create(self, request):
if 'upload' in request.FILES:
filedata = request.FILES['upload']
else:
raise exceptions.ValidationError(
'Missing "upload" key in multipart file data.',
status.HTTP_400_BAD_REQUEST,
)
channel = amo.CHANNEL_CHOICES_LOOKUP.get(request.POST.get('channel'))
if not channel:
raise exceptions.ValidationError(
'Missing "channel" arg.',
status.HTTP_400_BAD_REQUEST,
)
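        # Persist the upload and run devhub validation for the requested channel.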
upload = FileUpload.from_post(
filedata,
filedata.name,
filedata.size,
channel=channel,
source=amo.UPLOAD_SOURCE_ADDON_API,
user=request.user,
)
devhub_tasks.validate(upload, listed=(channel == amo.RELEASE_CHANNEL_LISTED))
headers = self.get_success_headers({})
data = self.get_serializer(instance=upload).data
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
|
py | b4078809958bce4360a7adebd624a7539d27318d | """Top level driver functionality for processing a sequencing lane.
"""
import os
import copy
from bcbio.log import logger
from bcbio import utils
from bcbio.pipeline.fastq import get_fastq_files
from bcbio.pipeline.demultiplex import split_by_barcode
from bcbio.pipeline.alignment import align_to_sort_bam
from bcbio.ngsalign.split import split_read_files
from bcbio.bam.trim import brun_trim_fastq
def _prep_fastq_files(item, bc_files, dirs, config):
"""Potentially prepare input FASTQ files for processing.
"""
fastq1, fastq2 = bc_files[item["barcode_id"]]
split_size = config.get("distributed", {}).get("align_split_size",
config["algorithm"].get("align_split_size", None))
if split_size:
split_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_splitprep", item["description"]))
return split_read_files(fastq1, fastq2, split_size, split_dir, config)
else:
return [[fastq1, fastq2, None]]
def process_lane(lane_items, fc_name, fc_date, dirs, config):
"""Prepare lanes, potentially splitting based on barcodes.
"""
lane_name = "%s_%s_%s" % (lane_items[0]['lane'], fc_date, fc_name)
logger.info("Demulitplexing %s" % lane_name)
full_fastq1, full_fastq2 = get_fastq_files(dirs["fastq"], dirs["work"],
lane_items[0], fc_name,
config=_update_config_w_custom(config, lane_items[0]))
bc_files = split_by_barcode(full_fastq1, full_fastq2, lane_items,
lane_name, dirs, config)
out = []
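    # Build one work item per demultiplexed sample, optionally trimming reads and splitting fastqs when align_split_size is set.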
for item in lane_items:
config = _update_config_w_custom(config, item)
# Can specify all barcodes but might not have actual sequences
# Would be nice to have a good way to check this is okay here.
if bc_files.has_key(item["barcode_id"]):
for fastq1, fastq2, lane_ext in _prep_fastq_files(item, bc_files, dirs, config):
cur_lane_name = lane_name
cur_lane_desc = item["description"]
if item.get("name", "") and config["algorithm"].get("include_short_name", True):
cur_lane_desc = "%s : %s" % (item["name"], cur_lane_desc)
if item["barcode_id"] is not None:
cur_lane_name += "_%s" % (item["barcode_id"])
if lane_ext is not None:
cur_lane_name += "_s{0}".format(lane_ext)
if config["algorithm"].get("trim_reads", False):
trim_info = brun_trim_fastq([x for x in [fastq1, fastq2] if x is not None],
dirs, config)
fastq1 = trim_info[0]
if fastq2 is not None:
fastq2 = trim_info[1]
out.append((fastq1, fastq2, item, cur_lane_name, cur_lane_desc,
dirs, config))
return out
def process_alignment(fastq1, fastq2, info, lane_name, lane_desc,
dirs, config):
"""Do an alignment of fastq files, preparing a sorted BAM output file.
"""
aligner = config["algorithm"].get("aligner", None)
out_bam = ""
if os.path.exists(fastq1) and aligner:
logger.info("Aligning lane %s with %s aligner" % (lane_name, aligner))
out_bam = align_to_sort_bam(fastq1, fastq2, info["genome_build"], aligner,
lane_name, lane_desc, dirs, config)
elif os.path.exists(fastq1) and fastq1.endswith(".bam"):
out_bam = fastq1
return [{"fastq": [fastq1, fastq2], "out_bam": out_bam, "info": info,
"config": config}]
def _update_config_w_custom(config, lane_info):
"""Update the configuration for this lane if a custom analysis is specified.
"""
name_remaps = {"variant": ["SNP calling", "variant"],
"SNP calling": ["SNP calling", "variant"]}
config = copy.deepcopy(config)
base_name = lane_info.get("analysis")
for analysis_type in name_remaps.get(base_name, [base_name]):
custom = config["custom_algorithms"].get(analysis_type, None)
if custom:
for key, val in custom.iteritems():
config["algorithm"][key] = val
# apply any algorithm details specified with the lane
for key, val in lane_info.get("algorithm", {}).iteritems():
config["algorithm"][key] = val
return config
|
py | b4078a4b540a9d104afd68e45d41aa710b017b93 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
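    '''Convert an address string (IPv4, IPv6, .onion, or 0x little-endian IPv4) into a 16-byte IPv6-mapped bytearray.'''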
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 48591)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 48592)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
py | b4078a68893ea09f2154f4479d08f7a98d6318c5 | # coding: utf-8
"""
TGS API
A production scale tool for BYOND server management # noqa: E501
OpenAPI spec version: 9.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class InstanceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def instance_controller_create(self, body, api, user_agent, **kwargs): # noqa: E501
"""Create or attach an Tgstation.Server.Api.Models.Instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_create(body, api, user_agent, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body36 body: The Tgstation.Server.Api.Models.Request.InstanceCreateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:return: InstanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.instance_controller_create_with_http_info(body, api, user_agent, **kwargs) # noqa: E501
else:
(data) = self.instance_controller_create_with_http_info(body, api, user_agent, **kwargs) # noqa: E501
return data
def instance_controller_create_with_http_info(self, body, api, user_agent, **kwargs): # noqa: E501
"""Create or attach an Tgstation.Server.Api.Models.Instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_create_with_http_info(body, api, user_agent, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body36 body: The Tgstation.Server.Api.Models.Request.InstanceCreateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:return: InstanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'api', 'user_agent'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method instance_controller_create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `instance_controller_create`") # noqa: E501
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `instance_controller_create`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `instance_controller_create`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Instance', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InstanceResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def instance_controller_delete(self, api, user_agent, id, **kwargs): # noqa: E501
"""Detach an Tgstation.Server.Api.Models.Instance with the given id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_delete(api, user_agent, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int id: The Tgstation.Server.Api.Models.EntityId.Id of the instance to detach. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.instance_controller_delete_with_http_info(api, user_agent, id, **kwargs) # noqa: E501
else:
(data) = self.instance_controller_delete_with_http_info(api, user_agent, id, **kwargs) # noqa: E501
return data
def instance_controller_delete_with_http_info(self, api, user_agent, id, **kwargs): # noqa: E501
"""Detach an Tgstation.Server.Api.Models.Instance with the given id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_delete_with_http_info(api, user_agent, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int id: The Tgstation.Server.Api.Models.EntityId.Id of the instance to detach. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api', 'user_agent', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method instance_controller_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `instance_controller_delete`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `instance_controller_delete`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `instance_controller_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Instance/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def instance_controller_get_id(self, api, user_agent, id, **kwargs): # noqa: E501
"""Get a specific Tgstation.Server.Api.Models.Instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_get_id(api, user_agent, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int id: The instance Tgstation.Server.Api.Models.EntityId.Id to retrieve. (required)
:return: InstanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.instance_controller_get_id_with_http_info(api, user_agent, id, **kwargs) # noqa: E501
else:
(data) = self.instance_controller_get_id_with_http_info(api, user_agent, id, **kwargs) # noqa: E501
return data
def instance_controller_get_id_with_http_info(self, api, user_agent, id, **kwargs): # noqa: E501
"""Get a specific Tgstation.Server.Api.Models.Instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_get_id_with_http_info(api, user_agent, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int id: The instance Tgstation.Server.Api.Models.EntityId.Id to retrieve. (required)
:return: InstanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api', 'user_agent', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method instance_controller_get_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `instance_controller_get_id`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `instance_controller_get_id`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `instance_controller_get_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Instance/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InstanceResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def instance_controller_grant_permissions(self, api, user_agent, id, **kwargs): # noqa: E501
"""Gives the current user full permissions on a given instance id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_grant_permissions(api, user_agent, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int id: The instance Tgstation.Server.Api.Models.EntityId.Id to give permissions on. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.instance_controller_grant_permissions_with_http_info(api, user_agent, id, **kwargs) # noqa: E501
else:
(data) = self.instance_controller_grant_permissions_with_http_info(api, user_agent, id, **kwargs) # noqa: E501
return data
def instance_controller_grant_permissions_with_http_info(self, api, user_agent, id, **kwargs): # noqa: E501
"""Gives the current user full permissions on a given instance id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_grant_permissions_with_http_info(api, user_agent, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int id: The instance Tgstation.Server.Api.Models.EntityId.Id to give permissions on. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api', 'user_agent', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method instance_controller_grant_permissions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `instance_controller_grant_permissions`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `instance_controller_grant_permissions`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `instance_controller_grant_permissions`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Instance/{id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def instance_controller_list(self, api, user_agent, **kwargs): # noqa: E501
"""List Tgstation.Server.Api.Models.Instances. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_list(api, user_agent, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int page: The current page.
:param int page_size: The page size.
:return: PaginatedInstanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.instance_controller_list_with_http_info(api, user_agent, **kwargs) # noqa: E501
else:
(data) = self.instance_controller_list_with_http_info(api, user_agent, **kwargs) # noqa: E501
return data
def instance_controller_list_with_http_info(self, api, user_agent, **kwargs): # noqa: E501
"""List Tgstation.Server.Api.Models.Instances. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_list_with_http_info(api, user_agent, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int page: The current page.
:param int page_size: The page size.
:return: PaginatedInstanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api', 'user_agent', 'page', 'page_size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method instance_controller_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `instance_controller_list`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `instance_controller_list`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Instance/List', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PaginatedInstanceResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def instance_controller_update(self, body, api, user_agent, **kwargs): # noqa: E501
"""Modify an Tgstation.Server.Api.Models.Instance's settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_update(body, api, user_agent, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body40 body: The updated Tgstation.Server.Api.Models.Instance settings. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:return: InstanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.instance_controller_update_with_http_info(body, api, user_agent, **kwargs) # noqa: E501
else:
(data) = self.instance_controller_update_with_http_info(body, api, user_agent, **kwargs) # noqa: E501
return data
def instance_controller_update_with_http_info(self, body, api, user_agent, **kwargs): # noqa: E501
"""Modify an Tgstation.Server.Api.Models.Instance's settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.instance_controller_update_with_http_info(body, api, user_agent, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body40 body: The updated Tgstation.Server.Api.Models.Instance settings. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:return: InstanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'api', 'user_agent'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method instance_controller_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `instance_controller_update`") # noqa: E501
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `instance_controller_update`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `instance_controller_update`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Instance', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InstanceResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
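# Illustrative usage sketch (added for clarity; kept as comments because the
# client class name, API version string, and response fields below are
# assumptions about typical swagger-codegen output, not part of this module):
#
#   api = InstanceApi(api_client)                        # hypothetical client setup
#   api_version = 'Tgstation.Server.Api/9.3.0'           # example Api header value
#   user_agent = 'my-tgs-client/1.0'
#   page = api.instance_controller_list(api_version, user_agent, page=1, page_size=25)
#   first = api.instance_controller_get_id(api_version, user_agent, page.content[0].id)
#   api.instance_controller_grant_permissions(api_version, user_agent, first.id)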
|
py | b4078ac761eebb4264eb3bbdb1677fbd425d575b | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class TolaManagementConfig(AppConfig):
name = 'tola_management'
|
py | b4078b2bc368b836c75a72e7b3915bdc30735738 | ### python lib
import os, sys, random, math, cv2, pickle, subprocess
import numpy as np
from PIL import Image
### torch lib
import torch
from torch.utils.data.sampler import Sampler
from torch.utils.data import DataLoader
### custom lib
from networks.resample2d_package.resample2d import Resample2d
FLO_TAG = 202021.25
EPS = 1e-12
UNKNOWN_FLOW_THRESH = 1e7
SMALLFLOW = 0.0
LARGEFLOW = 1e8
######################################################################################
## Training utility
######################################################################################
def repackage_hidden(h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def normalize_ImageNet_stats(batch):
mean = torch.zeros_like(batch)
std = torch.zeros_like(batch)
mean[:, 0, :, :] = 0.485
mean[:, 1, :, :] = 0.456
mean[:, 2, :, :] = 0.406
std[:, 0, :, :] = 0.229
std[:, 1, :, :] = 0.224
std[:, 2, :, :] = 0.225
batch_out = (batch - mean) / std
return batch_out
def img2tensor(img):
img_t = np.expand_dims(img.transpose(2, 0, 1), axis=0)
img_t = torch.from_numpy(img_t.astype(np.float32))
return img_t
def tensor2img(img_t):
img = img_t[0].detach().to("cpu").numpy()
img = np.transpose(img, (1, 2, 0))
return img
def save_model(model, optimizer, opts):
# save opts
opts_filename = os.path.join(opts.model_dir, "opts.pth")
print("Save %s" %opts_filename)
with open(opts_filename, 'wb') as f:
pickle.dump(opts, f)
# serialize model and optimizer to dict
state_dict = {
'model': model.state_dict(),
'optimizer' : optimizer.state_dict(),
}
model_filename = os.path.join(opts.model_dir, "model_epoch_%d.pth" %model.epoch)
print("Save %s" %model_filename)
torch.save(state_dict, model_filename)
def load_model(model, optimizer, opts, epoch):
# load model
model_filename = os.path.join(opts.model_dir, "model_epoch_%d.pth" %epoch)
print("Load %s" %model_filename)
state_dict = torch.load(model_filename)
model.load_state_dict(state_dict['model'])
optimizer.load_state_dict(state_dict['optimizer'])
### move optimizer state to GPU
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
model.epoch = epoch ## reset model epoch
return model, optimizer
class SubsetSequentialSampler(Sampler):
def __init__(self, indices):
self.indices = indices
def __iter__(self):
return (self.indices[i] for i in range(len(self.indices)))
def __len__(self):
return len(self.indices)
def create_data_loader(data_set, opts, mode):
### generate random index
if mode == 'train':
total_samples = opts.train_epoch_size * opts.batch_size
else:
total_samples = opts.valid_epoch_size * opts.batch_size
num_epochs = int(math.ceil(float(total_samples) / len(data_set)))
indices = np.random.permutation(len(data_set))
indices = np.tile(indices, num_epochs)
indices = indices[:total_samples]
### generate data sampler and loader
sampler = SubsetSequentialSampler(indices)
data_loader = DataLoader(dataset=data_set, num_workers=opts.threads, batch_size=opts.batch_size, sampler=sampler, pin_memory=True)
return data_loader
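# Illustrative sketch (added for clarity, not part of the original module):
# create_data_loader() fixes the number of iterations per epoch by tiling a
# random permutation of the dataset indices until epoch_size * batch_size
# samples are covered. The sizes below are made up for demonstration.
def _demo_epoch_indices(dataset_len=7, epoch_size=4, batch_size=3):
    total_samples = epoch_size * batch_size                    # 12 samples per epoch
    num_repeats = int(math.ceil(float(total_samples) / dataset_len))
    indices = np.tile(np.random.permutation(dataset_len), num_repeats)
    return indices[:total_samples]                             # length == total_samples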
def learning_rate_decay(opts, epoch):
### epoch 0 ~ lr_step : lr_init
### lr_step ~ lr_step * 2 : lr_init * lr_drop^1
### lr_step * 2 ~ lr_step * 3 : lr_init * lr_drop^2
### ...
if opts.lr_drop == 0: # constant learning rate
decay = 0
else:
assert(opts.lr_step > 0)
decay = math.floor( float(epoch) / opts.lr_step )
decay = max(decay, 0) ## keep the decay exponent non-negative
lr = opts.lr_init * math.pow(opts.lr_drop, decay)
lr = max(lr, opts.lr_init * opts.lr_min)
return lr
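# Illustrative sketch (added for clarity, not part of the original module):
# shows how learning_rate_decay() steps the learning rate. The `opts` object
# here is a SimpleNamespace stand-in; the real training script passes its own
# parsed options.
def _demo_learning_rate_decay():
    from types import SimpleNamespace
    opts = SimpleNamespace(lr_init=1e-4, lr_drop=0.5, lr_step=10, lr_min=0.01)
    # epochs 0-9 keep lr_init, epochs 10-19 use lr_init * 0.5, epochs 20-29 use
    # lr_init * 0.25, and the value is clamped to never fall below lr_init * lr_min
    return [learning_rate_decay(opts, epoch) for epoch in (0, 5, 10, 25, 100)]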
def count_network_parameters(model):
parameters = filter(lambda p: p.requires_grad, model.parameters())
N = sum([np.prod(p.size()) for p in parameters])
return N
######################################################################################
## Image utility
######################################################################################
def rotate_image(img, degree, interp=cv2.INTER_LINEAR):
height, width = img.shape[:2]
image_center = (width/2, height/2)
rotation_mat = cv2.getRotationMatrix2D(image_center, degree, 1.)
abs_cos = abs(rotation_mat[0,0])
abs_sin = abs(rotation_mat[0,1])
bound_w = int(height * abs_sin + width * abs_cos)
bound_h = int(height * abs_cos + width * abs_sin)
rotation_mat[0, 2] += bound_w/2 - image_center[0]
rotation_mat[1, 2] += bound_h/2 - image_center[1]
img_out = cv2.warpAffine(img, rotation_mat, (bound_w, bound_h), flags=interp+cv2.WARP_FILL_OUTLIERS)
return img_out
def numpy_to_PIL(img_np):
## input image is numpy array in [0, 1]
## convert to PIL image in [0, 255]
img_PIL = np.uint8(img_np * 255)
img_PIL = Image.fromarray(img_PIL)
return img_PIL
def PIL_to_numpy(img_PIL):
img_np = np.asarray(img_PIL)
img_np = np.float32(img_np) / 255.0
return img_np
def read_img(filename, grayscale=0):
## read image and convert to RGB in [0, 1]
if grayscale:
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
if img is None:
raise Exception("Image %s does not exist" %filename)
img = np.expand_dims(img, axis=2)
else:
img = cv2.imread(filename)
if img is None:
raise Exception("Image %s does not exist" %filename)
img = img[:, :, ::-1] ## BGR to RGB
img = np.float32(img) / 255.0
return img
def save_img(img, filename):
# print("Save %s" %filename)
if img.ndim == 3:
img = img[:, :, ::-1] ### RGB to BGR
## clip to [0, 1]
img = np.clip(img, 0, 1)
## quantize to [0, 255]
img = np.uint8(img * 255.0)
cv2.imwrite(filename, img, [cv2.IMWRITE_PNG_COMPRESSION, 0])
######################################################################################
## Flow utility
######################################################################################
def read_flo(filename):
with open(filename, 'rb') as f:
tag = np.fromfile(f, np.float32, count=1)
if tag != FLO_TAG:
sys.exit('Wrong tag. Invalid .flo file %s' %filename)
else:
w = int(np.fromfile(f, np.int32, count=1))
h = int(np.fromfile(f, np.int32, count=1))
#print 'Reading %d x %d flo file' % (w, h)
data = np.fromfile(f, np.float32, count=2*w*h)
# Reshape data into 3D array (columns, rows, bands)
flow = np.resize(data, (h, w, 2))
return flow
def save_flo(flow, filename):
with open(filename, 'wb') as f:
tag = np.array([FLO_TAG], dtype=np.float32)
(height, width) = flow.shape[0:2]
w = np.array([width], dtype=np.int32)
h = np.array([height], dtype=np.int32)
tag.tofile(f)
w.tofile(f)
h.tofile(f)
flow.tofile(f)
def resize_flow(flow, W_out=0, H_out=0, scale=0):
if W_out == 0 and H_out == 0 and scale == 0:
raise Exception("(W_out, H_out) or scale should be non-zero")
H_in = flow.shape[0]
W_in = flow.shape[1]
if scale == 0:
y_scale = float(H_out) / H_in
x_scale = float(W_out) / W_in
else:
y_scale = scale
x_scale = scale
flow_out = cv2.resize(flow, None, fx=x_scale, fy=y_scale, interpolation=cv2.INTER_LINEAR)
flow_out[:, :, 0] = flow_out[:, :, 0] * x_scale
flow_out[:, :, 1] = flow_out[:, :, 1] * y_scale
return flow_out
def rotate_flow(flow, degree, interp=cv2.INTER_LINEAR):
## angle in radian
angle = math.radians(degree)
H = flow.shape[0]
W = flow.shape[1]
#rotation_matrix = cv2.getRotationMatrix2D((W/2, H/2), math.degrees(angle), 1)
#flow_out = cv2.warpAffine(flow, rotation_matrix, (W, H))
flow_out = rotate_image(flow, degree, interp)
fu = flow_out[:, :, 0] * math.cos(-angle) - flow_out[:, :, 1] * math.sin(-angle)
fv = flow_out[:, :, 0] * math.sin(-angle) + flow_out[:, :, 1] * math.cos(-angle)
flow_out[:, :, 0] = fu
flow_out[:, :, 1] = fv
return flow_out
def hflip_flow(flow):
flow_out = cv2.flip(flow, flipCode=0)
flow_out[:, :, 0] = flow_out[:, :, 0] * (-1)
return flow_out
def vflip_flow(flow):
flow_out = cv2.flip(flow, flipCode=1)
flow_out[:, :, 1] = flow_out[:, :, 1] * (-1)
return flow_out
def flow_to_rgb(flow):
"""
Convert flow into middlebury color code image
:param flow: optical flow map
:return: optical flow image in middlebury color
"""
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(-1, np.max(rad))
#print "max flow: %.4f\nflow range:\nu = %.3f .. %.3f\nv = %.3f .. %.3f" % (maxrad, minu,maxu, minv, maxv)
u = u/(maxrad + np.finfo(float).eps)
v = v/(maxrad + np.finfo(float).eps)
img = compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.float32(img) / 255.0
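# Illustrative sketch (added for clarity, not part of the original module):
# builds a tiny synthetic flow field and converts it with flow_to_rgb(); the
# result is an HxWx3 float32 image in [0, 1] using the Middlebury color code.
def _demo_flow_to_rgb():
    h, w = 32, 32
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    flow = np.zeros((h, w, 2), dtype=np.float32)
    flow[:, :, 0] = xs - w / 2.0   # horizontal component grows to the right
    flow[:, :, 1] = ys - h / 2.0   # vertical component grows downward
    return flow_to_rgb(flow)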
def compute_color(u, v):
"""
compute optical flow color map
:param u: optical flow horizontal map
:param v: optical flow vertical map
:return: optical flow in color code
"""
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u**2+v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols+1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel,1)):
tmp = colorwheel[:, i]
col0 = tmp[k0-1] / 255
col1 = tmp[k1-1] / 255
col = (1-f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1-rad[idx]*(1-col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
return img
def make_color_wheel():
"""
Generate color wheel according Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
colorwheel[col:col+YG, 1] = 255
col += YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
colorwheel[col:col+CB, 2] = 255
col += CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
col += BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col+MR, 0] = 255
return colorwheel
def compute_flow_magnitude(flow):
flow_mag = flow[:, :, 0] ** 2 + flow[:, :, 1] ** 2
return flow_mag
def compute_flow_gradients(flow):
H = flow.shape[0]
W = flow.shape[1]
flow_x_du = np.zeros((H, W))
flow_x_dv = np.zeros((H, W))
flow_y_du = np.zeros((H, W))
flow_y_dv = np.zeros((H, W))
flow_x = flow[:, :, 0]
flow_y = flow[:, :, 1]
flow_x_du[:, :-1] = flow_x[:, :-1] - flow_x[:, 1:]
flow_x_dv[:-1, :] = flow_x[:-1, :] - flow_x[1:, :]
flow_y_du[:, :-1] = flow_y[:, :-1] - flow_y[:, 1:]
flow_y_dv[:-1, :] = flow_y[:-1, :] - flow_y[1:, :]
return flow_x_du, flow_x_dv, flow_y_du, flow_y_dv
def detect_occlusion(fw_flow, bw_flow):
## fw-flow: img1 => img2
## bw-flow: img2 => img1
with torch.no_grad():
## convert to tensor
fw_flow_t = img2tensor(fw_flow).cuda()
bw_flow_t = img2tensor(bw_flow).cuda()
## warp fw-flow to img2
flow_warping = Resample2d().cuda()
fw_flow_w = flow_warping(fw_flow_t, bw_flow_t)
## convert to numpy array
fw_flow_w = tensor2img(fw_flow_w)
## occlusion
fb_flow_sum = fw_flow_w + bw_flow
fb_flow_mag = compute_flow_magnitude(fb_flow_sum)
fw_flow_w_mag = compute_flow_magnitude(fw_flow_w)
bw_flow_mag = compute_flow_magnitude(bw_flow)
mask1 = fb_flow_mag > 0.01 * (fw_flow_w_mag + bw_flow_mag) + 0.5
## motion boundary
fx_du, fx_dv, fy_du, fy_dv = compute_flow_gradients(bw_flow)
fx_mag = fx_du ** 2 + fx_dv ** 2
fy_mag = fy_du ** 2 + fy_dv ** 2
mask2 = (fx_mag + fy_mag) > 0.01 * bw_flow_mag + 0.002
## combine mask
mask = np.logical_or(mask1, mask2)
occlusion = np.zeros((fw_flow.shape[0], fw_flow.shape[1]))
occlusion[mask == 1] = 1
return occlusion
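# Illustrative usage sketch (added for clarity, not part of the original module):
# detect_occlusion() expects forward/backward flows of shape (H, W, 2) and needs
# a CUDA device plus the compiled Resample2d op, so this is only a call outline.
def _demo_occlusion_mask(fw_flow, bw_flow):
    occ = detect_occlusion(fw_flow, bw_flow)  # (H, W) array, 1 = occluded pixel
    noc_mask = 1.0 - occ                      # 1 where the backward warp is reliable
    return noc_mask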
######################################################################################
## Other utility
######################################################################################
def save_vector_to_txt(matrix, filename):
with open(filename, 'w') as f:
print("Save %s" %filename)
for i in range(matrix.size):
line = "%f" %matrix[i]
f.write("%s\n"%line)
def run_cmd(cmd):
print(cmd)
subprocess.call(cmd, shell=True)
def make_video(input_dir, img_fmt, video_filename, fps=24):
cmd = "ffmpeg -y -loglevel error -framerate %s -i %s/%s -vcodec libx264 -pix_fmt yuv420p -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" %s" \
%(fps, input_dir, img_fmt, video_filename)
run_cmd(cmd)
|
py | b4078bfc81405d62262cdbab03d3a8bb2638c3d9 | """
This module manages the webapp of the marxist press review.
"""
import logging
import os
import pandas as pd
from flask import Flask
from flask import render_template
from flask import request
from sqlalchemy import create_engine
from app_functions import select_articles_from_section, text_generator
logging.basicConfig(level=logging.INFO,
format='%(asctime)s: %(levelname)s: %(message)s')
# ENVIRONMENT VARIABLE
# PostgreSQL
POSTGRES_PSW = os.getenv("POSTGRES_PASSWORD")
POSTGRES_USER = "postgres"
HOST = "postgresdb"
PORT = "5432"
DATABASE_NAME = "pg_guardian"
app = Flask(__name__)
pg = create_engine(f'postgresql://{POSTGRES_USER}:{POSTGRES_PSW}@{HOST}:{PORT}/{DATABASE_NAME}')
try:
pg.connect()
logging.info(f'Connected to the postgres server on port {PORT}.')
except Exception:
logging.critical(f'Could not connect to server: connection refused.\nIs the server running on host "{HOST}"?\nIs it accepting TCP/IP connections on port {PORT}?\n\nExit.\n')
exit()
@app.route('/')
def start_page():
return render_template('start.html')
@app.route('/world')
def world_press_review():
articles = pg.execute(select_articles_from_section('world'))
df = pd.DataFrame(articles, columns=articles.keys())
return render_template('world.html', ar1=df.loc[0],ar2=df.loc[1],ar3=df.loc[2],ar4=df.loc[3],ar5=df.loc[4])
@app.route('/education')
def education_press_review():
articles = pg.execute(select_articles_from_section('education'))
df = pd.DataFrame(articles, columns=articles.keys())
return render_template('education.html', ar1=df.loc[0],ar2=df.loc[1],ar3=df.loc[2],ar4=df.loc[3],ar5=df.loc[4])
@app.route('/politics')
def politics_press_review():
articles = pg.execute(select_articles_from_section('politics'))
df = pd.DataFrame(articles, columns=articles.keys())
return render_template('politics.html', ar1=df.loc[0],ar2=df.loc[1],ar3=df.loc[2],ar4=df.loc[3],ar5=df.loc[4])
@app.route('/environment')
def environment_press_review():
articles = pg.execute(select_articles_from_section('environment'))
df = pd.DataFrame(articles, columns=articles.keys())
return render_template('environment.html', ar1=df.loc[0],ar2=df.loc[1],ar3=df.loc[2],ar4=df.loc[3],ar5=df.loc[4])
@app.route('/global-development')
def glob_dev_press_review():
articles = pg.execute(select_articles_from_section('global-development'))
df = pd.DataFrame(articles, columns=articles.keys())
return render_template('global-development.html', ar1=df.loc[0],ar2=df.loc[1],ar3=df.loc[2],ar4=df.loc[3],ar5=df.loc[4])
@app.route('/money')
def money_press_review():
articles = pg.execute(select_articles_from_section('money'))
df = pd.DataFrame(articles, columns=articles.keys())
return render_template('money.html', ar1=df.loc[0],ar2=df.loc[1],ar3=df.loc[2],ar4=df.loc[3],ar5=df.loc[4])
@app.route('/sport')
def sport_press_review():
articles = pg.execute(select_articles_from_section('sport'))
df = pd.DataFrame(articles, columns=articles.keys())
return render_template('sport.html', ar1=df.loc[0],ar2=df.loc[1],ar3=df.loc[2],ar4=df.loc[3],ar5=df.loc[4])
@app.route('/business')
def business_press_review():
articles = pg.execute(select_articles_from_section('business'))
df = pd.DataFrame(articles, columns=articles.keys())
return render_template('business.html', ar1=df.loc[0],ar2=df.loc[1],ar3=df.loc[2],ar4=df.loc[3],ar5=df.loc[4])
@app.route('/culture')
def culture_press_review():
articles = pg.execute(select_articles_from_section('culture'))
df = pd.DataFrame(articles, columns=articles.keys())
return render_template('culture.html', ar1=df.loc[0],ar2=df.loc[1],ar3=df.loc[2],ar4=df.loc[3],ar5=df.loc[4])
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/custom-generator')
def custom_generator():
if request.args:
html_data = dict(request.args)
prompt = html_data['prompt']
result = text_generator(prompt)
return render_template('speak.html',result=result)
else:
return render_template('speak.html')
|
py | b4078c154db8cbdc8eea5228b05cb18ca2245760 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/12/12
# @Author : github.com/guofei9987
import numpy as np
def get_data_from_pic():
pass
def get_data_from_csv():
pass
def get_data_from_func():
'''
Heart curve used as a demo
:return:
'''
n = 100
t = np.linspace(0, 2 * np.pi, n)
x = 16 * (np.sin(t)) ** 3
y = 13 * np.cos(t) - 5 * np.cos(2 * t) - 2 * np.cos(3 * t) - np.cos(4 * t)
X = x + 1j * y
return X
def sort_data(X):
'''
TODO: X must be ordered (shortest-path order); to be implemented later. For now, assume the input data is already ordered.
'''
return X
def clean_data(X):
X = sort_data(X)
x, y = X.real, X.imag
x = (x - x.min()) / (x.max() - x.min()) - 0.5
y = (y - y.min()) / (y.max() - y.min()) - 0.5
X = x + 1j * y
return X
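# Illustrative sketch (added for clarity, not part of the original module):
# the functions above pack the 2-D points into a single complex array
# (x + 1j * y); the real and imaginary parts recover the coordinates.
def _demo_unpack_points():
    X = clean_data(get_data_from_func())
    x, y = X.real, X.imag          # both normalized to [-0.5, 0.5]
    return x, y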
|
py | b4078cd657251ac233cbb58eab58fdde2c05035a | """This module shows how to load extensions from local code"""
import ebonite
def main():
# load extension
# use the plain module name if the extension is installed from pip,
# or import your extension classes directly
# to load extensions automatically on startup, set the EBONITE_EXTENSIONS env variable
ebonite.load_extensions('myext.extension_source')
# set up client
ebnt = ebonite.Ebonite.local(clear=True)
# create a model using myext extension
model = ebnt.create_model('my_extended_model', 'model', 1)
# your extension code is included in the docker image as source files if it lives locally,
# or as a pip requirement if you installed it from pip
image = ebnt.create_image(model, 'local_ext_model', builder_args={'force_overwrite': True})
ebnt.create_instance(image, 'local_ext_model').run(detach=False)
if __name__ == '__main__':
main()
|
py | b4078d7c7a0bb3fb5599f2bdd2ba0ee61614d7fd | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import unittest
from django.test import TestCase
from django.test.utils import override_settings
from test_haystack.core.models import (
AnotherMockModel,
CharPKMockModel,
MockModel,
UUIDMockModel,
)
from haystack import connections, indexes, reset_search_queries
from haystack.backends import SQ, BaseSearchQuery
from haystack.exceptions import FacetingError
from haystack.models import SearchResult
from haystack.query import (
EmptySearchQuerySet,
SearchQuerySet,
ValuesListSearchQuerySet,
ValuesSearchQuerySet,
)
from haystack.utils.loading import UnifiedIndex
from .mocks import (
MOCK_SEARCH_RESULTS,
CharPKMockSearchBackend,
MockSearchBackend,
MockSearchQuery,
ReadQuerySetMockSearchBackend,
UUIDMockSearchBackend,
)
from .test_indexes import (
GhettoAFifthMockModelSearchIndex,
TextReadQuerySetTestSearchIndex,
)
from .test_views import BasicAnotherMockModelSearchIndex, BasicMockModelSearchIndex
test_pickling = True
try:
import pickle
except ImportError:
test_pickling = False
class SQTestCase(TestCase):
def test_split_expression(self):
sq = SQ(foo="bar")
self.assertEqual(sq.split_expression("foo"), ("foo", "content"))
self.assertEqual(sq.split_expression("foo__exact"), ("foo", "exact"))
self.assertEqual(sq.split_expression("foo__content"), ("foo", "content"))
self.assertEqual(sq.split_expression("foo__contains"), ("foo", "contains"))
self.assertEqual(sq.split_expression("foo__lt"), ("foo", "lt"))
self.assertEqual(sq.split_expression("foo__lte"), ("foo", "lte"))
self.assertEqual(sq.split_expression("foo__gt"), ("foo", "gt"))
self.assertEqual(sq.split_expression("foo__gte"), ("foo", "gte"))
self.assertEqual(sq.split_expression("foo__in"), ("foo", "in"))
self.assertEqual(sq.split_expression("foo__startswith"), ("foo", "startswith"))
self.assertEqual(sq.split_expression("foo__endswith"), ("foo", "endswith"))
self.assertEqual(sq.split_expression("foo__range"), ("foo", "range"))
self.assertEqual(sq.split_expression("foo__fuzzy"), ("foo", "fuzzy"))
# Unrecognized filter. Fall back to exact.
self.assertEqual(sq.split_expression("foo__moof"), ("foo", "content"))
def test_repr(self):
self.assertEqual(repr(SQ(foo="bar")), "<SQ: AND foo__content=bar>")
self.assertEqual(repr(SQ(foo=1)), "<SQ: AND foo__content=1>")
self.assertEqual(
repr(SQ(foo=datetime.datetime(2009, 5, 12, 23, 17))),
"<SQ: AND foo__content=2009-05-12 23:17:00>",
)
def test_simple_nesting(self):
sq1 = SQ(foo="bar")
sq2 = SQ(foo="bar")
bigger_sq = SQ(sq1 & sq2)
self.assertEqual(
repr(bigger_sq), "<SQ: AND (foo__content=bar AND foo__content=bar)>"
)
another_bigger_sq = SQ(sq1 | sq2)
self.assertEqual(
repr(another_bigger_sq), "<SQ: AND (foo__content=bar OR foo__content=bar)>"
)
one_more_bigger_sq = SQ(sq1 & ~sq2)
self.assertEqual(
repr(one_more_bigger_sq),
"<SQ: AND (foo__content=bar AND NOT (foo__content=bar))>",
)
mega_sq = SQ(bigger_sq & SQ(another_bigger_sq | ~one_more_bigger_sq))
self.assertEqual(
repr(mega_sq),
"<SQ: AND ((foo__content=bar AND foo__content=bar) AND ((foo__content=bar OR foo__content=bar) OR NOT ((foo__content=bar AND NOT (foo__content=bar)))))>",
)
class BaseSearchQueryTestCase(TestCase):
fixtures = ["base_data.json", "bulk_data.json"]
def setUp(self):
super(BaseSearchQueryTestCase, self).setUp()
self.bsq = BaseSearchQuery()
def test_get_count(self):
self.bsq.add_filter(SQ(foo="bar"))
self.assertRaises(NotImplementedError, self.bsq.get_count)
def test_build_query(self):
self.bsq.add_filter(SQ(foo="bar"))
self.assertRaises(NotImplementedError, self.bsq.build_query)
def test_add_filter(self):
self.assertEqual(len(self.bsq.query_filter), 0)
self.bsq.add_filter(SQ(foo="bar"))
self.assertEqual(len(self.bsq.query_filter), 1)
self.bsq.add_filter(SQ(foo__lt="10"))
self.bsq.add_filter(~SQ(claris="moof"))
self.bsq.add_filter(SQ(claris="moof"), use_or=True)
self.assertEqual(
repr(self.bsq.query_filter),
"<SQ: OR ((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof)>",
)
self.bsq.add_filter(SQ(claris="moof"))
self.assertEqual(
repr(self.bsq.query_filter),
"<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof)>",
)
self.bsq.add_filter(SQ(claris="wtf mate"))
self.assertEqual(
repr(self.bsq.query_filter),
"<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof AND claris__content=wtf mate)>",
)
def test_add_order_by(self):
self.assertEqual(len(self.bsq.order_by), 0)
self.bsq.add_order_by("foo")
self.assertEqual(len(self.bsq.order_by), 1)
def test_clear_order_by(self):
self.bsq.add_order_by("foo")
self.assertEqual(len(self.bsq.order_by), 1)
self.bsq.clear_order_by()
self.assertEqual(len(self.bsq.order_by), 0)
def test_add_model(self):
self.assertEqual(len(self.bsq.models), 0)
self.assertRaises(AttributeError, self.bsq.add_model, object)
self.assertEqual(len(self.bsq.models), 0)
self.bsq.add_model(MockModel)
self.assertEqual(len(self.bsq.models), 1)
self.bsq.add_model(AnotherMockModel)
self.assertEqual(len(self.bsq.models), 2)
def test_set_limits(self):
self.assertEqual(self.bsq.start_offset, 0)
self.assertEqual(self.bsq.end_offset, None)
self.bsq.set_limits(10, 50)
self.assertEqual(self.bsq.start_offset, 10)
self.assertEqual(self.bsq.end_offset, 50)
def test_clear_limits(self):
self.bsq.set_limits(10, 50)
self.assertEqual(self.bsq.start_offset, 10)
self.assertEqual(self.bsq.end_offset, 50)
self.bsq.clear_limits()
self.assertEqual(self.bsq.start_offset, 0)
self.assertEqual(self.bsq.end_offset, None)
def test_add_boost(self):
self.assertEqual(self.bsq.boost, {})
self.bsq.add_boost("foo", 10)
self.assertEqual(self.bsq.boost, {"foo": 10})
def test_add_highlight(self):
self.assertEqual(self.bsq.highlight, False)
self.bsq.add_highlight()
self.assertEqual(self.bsq.highlight, True)
def test_more_like_this(self):
mock = MockModel()
mock.id = 1
msq = MockSearchQuery()
msq.backend = MockSearchBackend("mlt")
ui = connections["default"].get_unified_index()
bmmsi = BasicMockModelSearchIndex()
ui.build(indexes=[bmmsi])
bmmsi.update()
msq.more_like_this(mock)
self.assertEqual(msq.get_count(), 23)
self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)
def test_add_field_facet(self):
self.bsq.add_field_facet("foo")
self.assertEqual(self.bsq.facets, {"foo": {}})
self.bsq.add_field_facet("bar")
self.assertEqual(self.bsq.facets, {"foo": {}, "bar": {}})
def test_add_date_facet(self):
self.bsq.add_date_facet(
"foo",
start_date=datetime.date(2009, 2, 25),
end_date=datetime.date(2009, 3, 25),
gap_by="day",
)
self.assertEqual(
self.bsq.date_facets,
{
"foo": {
"gap_by": "day",
"start_date": datetime.date(2009, 2, 25),
"end_date": datetime.date(2009, 3, 25),
"gap_amount": 1,
}
},
)
self.bsq.add_date_facet(
"bar",
start_date=datetime.date(2008, 1, 1),
end_date=datetime.date(2009, 12, 1),
gap_by="month",
)
self.assertEqual(
self.bsq.date_facets,
{
"foo": {
"gap_by": "day",
"start_date": datetime.date(2009, 2, 25),
"end_date": datetime.date(2009, 3, 25),
"gap_amount": 1,
},
"bar": {
"gap_by": "month",
"start_date": datetime.date(2008, 1, 1),
"end_date": datetime.date(2009, 12, 1),
"gap_amount": 1,
},
},
)
def test_add_query_facet(self):
self.bsq.add_query_facet("foo", "bar")
self.assertEqual(self.bsq.query_facets, [("foo", "bar")])
self.bsq.add_query_facet("moof", "baz")
self.assertEqual(self.bsq.query_facets, [("foo", "bar"), ("moof", "baz")])
self.bsq.add_query_facet("foo", "baz")
self.assertEqual(
self.bsq.query_facets, [("foo", "bar"), ("moof", "baz"), ("foo", "baz")]
)
def test_add_stats(self):
self.bsq.add_stats_query("foo", ["bar"])
self.assertEqual(self.bsq.stats, {"foo": ["bar"]})
self.bsq.add_stats_query("moof", ["bar", "baz"])
self.assertEqual(self.bsq.stats, {"foo": ["bar"], "moof": ["bar", "baz"]})
def test_add_narrow_query(self):
self.bsq.add_narrow_query("foo:bar")
self.assertEqual(self.bsq.narrow_queries, set(["foo:bar"]))
self.bsq.add_narrow_query("moof:baz")
self.assertEqual(self.bsq.narrow_queries, set(["foo:bar", "moof:baz"]))
def test_set_result_class(self):
# Assert that we're defaulting to ``SearchResult``.
self.assertTrue(issubclass(self.bsq.result_class, SearchResult))
# Custom class.
class IttyBittyResult(object):
pass
self.bsq.set_result_class(IttyBittyResult)
self.assertTrue(issubclass(self.bsq.result_class, IttyBittyResult))
# Reset to default.
self.bsq.set_result_class(None)
self.assertTrue(issubclass(self.bsq.result_class, SearchResult))
def test_run(self):
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
msq = connections["default"].get_query()
self.assertEqual(len(msq.get_results()), 23)
self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)
# Restore.
connections["default"]._index = self.old_unified_index
def test_clone(self):
self.bsq.add_filter(SQ(foo="bar"))
self.bsq.add_filter(SQ(foo__lt="10"))
self.bsq.add_filter(~SQ(claris="moof"))
self.bsq.add_filter(SQ(claris="moof"), use_or=True)
self.bsq.add_order_by("foo")
self.bsq.add_model(MockModel)
self.bsq.add_boost("foo", 2)
self.bsq.add_highlight()
self.bsq.add_field_facet("foo")
self.bsq.add_date_facet(
"foo",
start_date=datetime.date(2009, 1, 1),
end_date=datetime.date(2009, 1, 31),
gap_by="day",
)
self.bsq.add_query_facet("foo", "bar")
self.bsq.add_stats_query("foo", "bar")
self.bsq.add_narrow_query("foo:bar")
clone = self.bsq._clone()
self.assertTrue(isinstance(clone, BaseSearchQuery))
self.assertEqual(len(clone.query_filter), 2)
self.assertEqual(len(clone.order_by), 1)
self.assertEqual(len(clone.models), 1)
self.assertEqual(len(clone.boost), 1)
self.assertEqual(clone.highlight, True)
self.assertEqual(len(clone.facets), 1)
self.assertEqual(len(clone.date_facets), 1)
self.assertEqual(len(clone.query_facets), 1)
self.assertEqual(len(clone.narrow_queries), 1)
self.assertEqual(clone.start_offset, self.bsq.start_offset)
self.assertEqual(clone.end_offset, self.bsq.end_offset)
self.assertEqual(clone.backend.__class__, self.bsq.backend.__class__)
def test_log_query(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
self.bmmsi.update()
with self.settings(DEBUG=False):
msq = connections["default"].get_query()
self.assertEqual(len(msq.get_results()), 23)
self.assertEqual(len(connections["default"].queries), 0)
with self.settings(DEBUG=True):
# Redefine it to clear out the cached results.
msq2 = connections["default"].get_query()
self.assertEqual(len(msq2.get_results()), 23)
self.assertEqual(len(connections["default"].queries), 1)
self.assertEqual(connections["default"].queries[0]["query_string"], "")
msq3 = connections["default"].get_query()
msq3.add_filter(SQ(foo="bar"))
len(msq3.get_results())
self.assertEqual(len(connections["default"].queries), 2)
self.assertEqual(connections["default"].queries[0]["query_string"], "")
self.assertEqual(connections["default"].queries[1]["query_string"], "")
# Restore.
connections["default"]._index = self.old_unified_index
class CharPKMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr="key")
def get_model(self):
return CharPKMockModel
class SimpleMockUUIDModelIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr="characteristics")
def get_model(self):
return UUIDMockModel
@override_settings(DEBUG=True)
class SearchQuerySetTestCase(TestCase):
fixtures = ["base_data.json", "bulk_data.json"]
def setUp(self):
super(SearchQuerySetTestCase, self).setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.cpkmmsi = CharPKMockModelSearchIndex()
self.uuidmmsi = SimpleMockUUIDModelIndex()
self.ui.build(indexes=[self.bmmsi, self.cpkmmsi, self.uuidmmsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
self.msqs = SearchQuerySet()
# Stow.
reset_search_queries()
def tearDown(self):
# Restore.
connections["default"]._index = self.old_unified_index
super(SearchQuerySetTestCase, self).tearDown()
def test_len(self):
self.assertEqual(len(self.msqs), 23)
def test_repr(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
self.assertRegexpMatches(
repr(self.msqs),
r"^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object"
r" at 0x[0-9A-Fa-f]+>, using=None>$",
)
def test_iter(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
msqs = self.msqs.all()
results = [int(res.pk) for res in iter(msqs)]
self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
self.assertEqual(len(connections["default"].queries), 3)
def test_slice(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
results = self.msqs.all()
self.assertEqual(
[int(res.pk) for res in results[1:11]],
[res.pk for res in MOCK_SEARCH_RESULTS[1:11]],
)
self.assertEqual(len(connections["default"].queries), 1)
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
results = self.msqs.all()
self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
self.assertEqual(len(connections["default"].queries), 1)
def test_manual_iter(self):
results = self.msqs.all()
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
check = [result.pk for result in results._manual_iter()]
self.assertEqual(
check,
[
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"20",
"21",
"22",
"23",
],
)
self.assertEqual(len(connections["default"].queries), 3)
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
# Test to ensure we properly fill the cache, even if we get fewer
# results back (not a handled model) than the hit count indicates.
# This will hang indefinitely if broken.
# CharPK testing
old_ui = self.ui
self.ui.build(indexes=[self.cpkmmsi])
connections["default"]._index = self.ui
self.cpkmmsi.update()
results = self.msqs.all()
loaded = [result.pk for result in results._manual_iter()]
self.assertEqual(loaded, ["sometext", "1234"])
self.assertEqual(len(connections["default"].queries), 1)
# UUID testing
self.ui.build(indexes=[self.uuidmmsi])
connections["default"]._index = self.ui
self.uuidmmsi.update()
results = self.msqs.all()
loaded = [result.pk for result in results._manual_iter()]
self.assertEqual(
loaded,
[
"53554c58-7051-4350-bcc9-dad75eb248a9",
"77554c58-7051-4350-bcc9-dad75eb24888",
],
)
connections["default"]._index = old_ui
def test_cache_is_full(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
self.assertEqual(self.msqs._cache_is_full(), False)
results = self.msqs.all()
fire_the_iterator_and_fill_cache = list(results)
self.assertEqual(23, len(fire_the_iterator_and_fill_cache))
self.assertEqual(results._cache_is_full(), True)
self.assertEqual(len(connections["default"].queries), 4)
def test_all(self):
sqs = self.msqs.all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
def test_filter(self):
sqs = self.msqs.filter(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_exclude(self):
sqs = self.msqs.exclude(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_order_by(self):
sqs = self.msqs.order_by("foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertTrue("foo" in sqs.query.order_by)
def test_models(self):
# Stow.
old_unified_index = connections["default"]._index
ui = UnifiedIndex()
bmmsi = BasicMockModelSearchIndex()
bammsi = BasicAnotherMockModelSearchIndex()
ui.build(indexes=[bmmsi, bammsi])
connections["default"]._index = ui
msqs = SearchQuerySet()
sqs = msqs.all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 0)
sqs = msqs.models(MockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 1)
sqs = msqs.models(MockModel, AnotherMockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 2)
# This will produce a warning.
ui.build(indexes=[bmmsi])
sqs = msqs.models(AnotherMockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 1)
def test_result_class(self):
sqs = self.msqs.all()
self.assertTrue(issubclass(sqs.query.result_class, SearchResult))
# Custom class.
class IttyBittyResult(object):
pass
sqs = self.msqs.result_class(IttyBittyResult)
self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))
# Reset to default.
sqs = self.msqs.result_class(None)
self.assertTrue(issubclass(sqs.query.result_class, SearchResult))
def test_boost(self):
sqs = self.msqs.boost("foo", 10)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.boost.keys()), 1)
def test_highlight(self):
sqs = self.msqs.highlight()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.query.highlight, True)
def test_spelling_override(self):
sqs = self.msqs.filter(content="not the spellchecking query")
self.assertEqual(sqs.query.spelling_query, None)
sqs = self.msqs.set_spelling_query("override")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.query.spelling_query, "override")
def test_spelling_suggestions(self):
# Test the case where spelling support is disabled.
sqs = self.msqs.filter(content="Indx")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.spelling_suggestion(), None)
self.assertEqual(sqs.spelling_suggestion("indexy"), None)
def test_raw_search(self):
self.assertEqual(len(self.msqs.raw_search("foo")), 23)
self.assertEqual(
len(
self.msqs.raw_search("(content__exact:hello AND content__exact:world)")
),
23,
)
def test_load_all(self):
# Models with character primary keys.
sqs = SearchQuerySet()
sqs.query.backend = CharPKMockSearchBackend("charpk")
results = sqs.load_all().all()
self.assertEqual(len(results._result_cache), 0)
results._fill_cache(0, 2)
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 2
)
# Models with uuid primary keys.
sqs = SearchQuerySet()
sqs.query.backend = UUIDMockSearchBackend("uuid")
results = sqs.load_all().all()
self.assertEqual(len(results._result_cache), 0)
results._fill_cache(0, 2)
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 2
)
# If nothing is handled, you get nothing.
old_ui = connections["default"]._index
ui = UnifiedIndex()
ui.build(indexes=[])
connections["default"]._index = ui
sqs = self.msqs.load_all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs), 0)
connections["default"]._index = old_ui
# For full tests, see the solr_backend.
def test_load_all_read_queryset(self):
# Stow.
old_ui = connections["default"]._index
ui = UnifiedIndex()
gafmmsi = GhettoAFifthMockModelSearchIndex()
ui.build(indexes=[gafmmsi])
connections["default"]._index = ui
gafmmsi.update()
sqs = SearchQuerySet()
results = sqs.load_all().all()
results.query.backend = ReadQuerySetMockSearchBackend("default")
results._fill_cache(0, 2)
# The deleted result isn't returned
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 1
)
# Register a SearchIndex with a read_queryset that returns deleted items
rqstsi = TextReadQuerySetTestSearchIndex()
ui.build(indexes=[rqstsi])
rqstsi.update()
sqs = SearchQuerySet()
results = sqs.load_all().all()
results.query.backend = ReadQuerySetMockSearchBackend("default")
results._fill_cache(0, 2)
# Both the deleted and not deleted items are returned
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 2
)
# Restore.
connections["default"]._index = old_ui
def test_auto_query(self):
sqs = self.msqs.auto_query("test search -stuff")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
"<SQ: AND content__content=test search -stuff>",
)
sqs = self.msqs.auto_query('test "my thing" search -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
'<SQ: AND content__content=test "my thing" search -stuff>',
)
sqs = self.msqs.auto_query("test \"my thing\" search 'moar quotes' -stuff")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
"<SQ: AND content__content=test \"my thing\" search 'moar quotes' -stuff>",
)
sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
'<SQ: AND content__content=test "my thing" search \'moar quotes\' "foo -stuff>',
)
sqs = self.msqs.auto_query("test - stuff")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter), "<SQ: AND content__content=test - stuff>"
)
# Ensure bits in exact matches get escaped properly as well.
sqs = self.msqs.auto_query('"pants:rule"')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter), '<SQ: AND content__content="pants:rule">'
)
# Now with a different fieldname
sqs = self.msqs.auto_query("test search -stuff", fieldname="title")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter), "<SQ: AND title__content=test search -stuff>"
)
sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname="title")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
'<SQ: AND title__content=test "my thing" search -stuff>',
)
def test_count(self):
self.assertEqual(self.msqs.count(), 23)
def test_facet_counts(self):
self.assertEqual(self.msqs.facet_counts(), {})
def test_best_match(self):
self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))
def test_latest(self):
self.assertTrue(isinstance(self.msqs.latest("pub_date"), SearchResult))
def test_more_like_this(self):
mock = MockModel()
mock.id = 1
self.assertEqual(len(self.msqs.more_like_this(mock)), 23)
def test_facets(self):
sqs = self.msqs.facet("foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.facets), 1)
sqs2 = self.msqs.facet("foo").facet("bar")
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.facets), 2)
def test_date_facets(self):
try:
sqs = self.msqs.date_facet(
"foo",
start_date=datetime.date(2008, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="smarblaph",
)
self.fail()
except FacetingError as e:
self.assertEqual(
str(e),
"The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.",
)
sqs = self.msqs.date_facet(
"foo",
start_date=datetime.date(2008, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="month",
)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.date_facets), 1)
sqs2 = self.msqs.date_facet(
"foo",
start_date=datetime.date(2008, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="month",
).date_facet(
"bar",
start_date=datetime.date(2007, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="year",
)
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.date_facets), 2)
def test_query_facets(self):
sqs = self.msqs.query_facet("foo", "[bar TO *]")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_facets), 1)
sqs2 = self.msqs.query_facet("foo", "[bar TO *]").query_facet(
"bar", "[100 TO 499]"
)
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.query_facets), 2)
# Test multiple query facets on a single field
sqs3 = (
self.msqs.query_facet("foo", "[bar TO *]")
.query_facet("bar", "[100 TO 499]")
.query_facet("foo", "[1000 TO 1499]")
)
self.assertTrue(isinstance(sqs3, SearchQuerySet))
self.assertEqual(len(sqs3.query.query_facets), 3)
def test_stats(self):
sqs = self.msqs.stats_facet("foo", "bar")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.stats), 1)
sqs2 = self.msqs.stats_facet("foo", "bar").stats_facet("foo", "baz")
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.stats), 1)
sqs3 = self.msqs.stats_facet("foo", "bar").stats_facet("moof", "baz")
self.assertTrue(isinstance(sqs3, SearchQuerySet))
self.assertEqual(len(sqs3.query.stats), 2)
def test_narrow(self):
sqs = self.msqs.narrow("foo:moof")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.narrow_queries), 1)
def test_clone(self):
results = self.msqs.filter(foo="bar", foo__lt="10")
clone = results._clone()
self.assertTrue(isinstance(clone, SearchQuerySet))
self.assertEqual(str(clone.query), str(results.query))
self.assertEqual(clone._result_cache, [])
self.assertEqual(clone._result_count, None)
self.assertEqual(clone._cache_full, False)
self.assertEqual(clone._using, results._using)
def test_using(self):
sqs = SearchQuerySet(using="default")
self.assertNotEqual(sqs.query, None)
self.assertEqual(sqs.query._using, "default")
def test_chaining(self):
sqs = self.msqs.filter(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
# A second instance should inherit none of the changes from above.
sqs = self.msqs.filter(content="bar")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_none(self):
sqs = self.msqs.none()
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test___and__(self):
sqs1 = self.msqs.filter(content="foo")
sqs2 = self.msqs.filter(content="bar")
sqs = sqs1 & sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
def test___or__(self):
sqs1 = self.msqs.filter(content="foo")
sqs2 = self.msqs.filter(content="bar")
sqs = sqs1 | sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
def test_and_or(self):
"""
Combining AND queries with OR should give
        OR(AND(a, b), AND(c, d))
"""
sqs1 = self.msqs.filter(content="foo").filter(content="oof")
sqs2 = self.msqs.filter(content="bar").filter(content="rab")
sqs = sqs1 | sqs2
self.assertEqual(sqs.query.query_filter.connector, "OR")
self.assertEqual(
repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)
)
self.assertEqual(
repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)
)
def test_or_and(self):
"""
Combining OR queries with AND should give
        AND(OR(a, b), OR(c, d))
"""
sqs1 = self.msqs.filter(content="foo").filter_or(content="oof")
sqs2 = self.msqs.filter(content="bar").filter_or(content="rab")
sqs = sqs1 & sqs2
self.assertEqual(sqs.query.query_filter.connector, "AND")
self.assertEqual(
repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)
)
self.assertEqual(
repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)
)
class ValuesQuerySetTestCase(SearchQuerySetTestCase):
def test_values_sqs(self):
sqs = self.msqs.auto_query("test").values("id")
self.assert_(isinstance(sqs, ValuesSearchQuerySet))
# We'll do a basic test to confirm that slicing works as expected:
self.assert_(isinstance(sqs[0], dict))
self.assert_(isinstance(sqs[0:5][0], dict))
def test_valueslist_sqs(self):
sqs = self.msqs.auto_query("test").values_list("id")
self.assert_(isinstance(sqs, ValuesListSearchQuerySet))
self.assert_(isinstance(sqs[0], (list, tuple)))
self.assert_(isinstance(sqs[0:1][0], (list, tuple)))
self.assertRaises(
TypeError,
self.msqs.auto_query("test").values_list,
"id",
"score",
flat=True,
)
flat_sqs = self.msqs.auto_query("test").values_list("id", flat=True)
        self.assert_(isinstance(flat_sqs, ValuesListSearchQuerySet))
# Note that this will actually be None because a mocked sqs lacks
# anything else:
self.assert_(flat_sqs[0] is None)
self.assert_(flat_sqs[0:1][0] is None)
class EmptySearchQuerySetTestCase(TestCase):
def setUp(self):
super(EmptySearchQuerySetTestCase, self).setUp()
self.esqs = EmptySearchQuerySet()
def test_get_count(self):
self.assertEqual(self.esqs.count(), 0)
self.assertEqual(len(self.esqs.all()), 0)
def test_filter(self):
sqs = self.esqs.filter(content="foo")
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test_exclude(self):
sqs = self.esqs.exclude(content="foo")
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test_slice(self):
sqs = self.esqs.filter(content="foo")
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
self.assertEqual(sqs[:10], [])
try:
sqs[4]
self.fail()
except IndexError:
pass
def test_dictionary_lookup(self):
"""
Ensure doing a dictionary lookup raises a TypeError so
EmptySearchQuerySets can be used in templates.
"""
self.assertRaises(TypeError, lambda: self.esqs["count"])
@unittest.skipUnless(test_pickling, "Skipping pickling tests")
@override_settings(DEBUG=True)
class PickleSearchQuerySetTestCase(TestCase):
fixtures = ["base_data"]
def setUp(self):
super(PickleSearchQuerySetTestCase, self).setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.cpkmmsi = CharPKMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
self.msqs = SearchQuerySet()
# Stow.
reset_search_queries()
def tearDown(self):
# Restore.
connections["default"]._index = self.old_unified_index
super(PickleSearchQuerySetTestCase, self).tearDown()
def test_pickling(self):
results = self.msqs.all()
for res in results:
# Make sure the cache is full.
pass
in_a_pickle = pickle.dumps(results)
like_a_cuke = pickle.loads(in_a_pickle)
self.assertEqual(len(like_a_cuke), len(results))
self.assertEqual(like_a_cuke[0].id, results[0].id)
|
py | b4078e19606f938c2d919d1f05161cb215f977e4 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add max tries column to task instance
Revision ID: cc1e65623dc7
Revises: 127d2bf2dfa7
Create Date: 2017-06-19 16:53:12.851141
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy import Column, Integer, String
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.ext.declarative import declarative_base
from airflow import settings
from airflow.models import DagBag
# revision identifiers, used by Alembic.
revision = 'cc1e65623dc7'
down_revision = '127d2bf2dfa7'
branch_labels = None
depends_on = None
Base = declarative_base()
BATCH_SIZE = 5000
class TaskInstance(Base): # type: ignore
"""Task Instance class."""
__tablename__ = "task_instance"
task_id = Column(String(), primary_key=True)
dag_id = Column(String(), primary_key=True)
execution_date = Column(sa.DateTime, primary_key=True)
max_tries = Column(Integer)
try_number = Column(Integer, default=0)
def upgrade():
op.add_column('task_instance', sa.Column('max_tries', sa.Integer, server_default="-1"))
    # Check that the task_instance table exists before the data migration. This
    # check is needed for databases that do not create the table until the
    # migration finishes. Checking that the task_instance table exists prevents
    # errors when querying a non-existent task_instance table.
connection = op.get_bind()
inspector = Inspector.from_engine(connection)
tables = inspector.get_table_names()
if 'task_instance' in tables:
# Get current session
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
if not bool(session.query(TaskInstance).first()):
return
dagbag = DagBag(settings.DAGS_FOLDER)
query = session.query(sa.func.count(TaskInstance.max_tries)).filter(TaskInstance.max_tries == -1)
        # Query the db in batches to avoid loading the entire table
        # into memory and causing an out-of-memory error.
while query.scalar():
tis = session.query(TaskInstance).filter(TaskInstance.max_tries == -1).limit(BATCH_SIZE).all()
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
if not dag or not dag.has_task(ti.task_id):
                    # The task_instance table might not have up-to-date
                    # information, i.e. the dag or task might have been modified
                    # or deleted in the dagbag but still be reflected in the
                    # task_instance table. In this case we do not retry the
                    # tasks that can't be parsed.
ti.max_tries = ti.try_number
else:
task = dag.get_task(ti.task_id)
if task.retries:
ti.max_tries = task.retries
else:
ti.max_tries = ti.try_number
session.merge(ti)
session.commit()
# Commit the current session.
session.commit()
def downgrade():
engine = settings.engine
if engine.dialect.has_table(engine, 'task_instance'):
connection = op.get_bind()
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
dagbag = DagBag(settings.DAGS_FOLDER)
query = session.query(sa.func.count(TaskInstance.max_tries)).filter(TaskInstance.max_tries != -1)
while query.scalar():
tis = session.query(TaskInstance).filter(TaskInstance.max_tries != -1).limit(BATCH_SIZE).all()
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
if not dag or not dag.has_task(ti.task_id):
ti.try_number = 0
else:
task = dag.get_task(ti.task_id)
# max_tries - try_number is number of times a task instance
# left to retry by itself. So the current try_number should be
# max number of self retry (task.retries) minus number of
# times left for task instance to try the task.
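                    # Worked example with hypothetical numbers: if task.retries
                    # is 3, ti.max_tries is 4 and ti.try_number is 2, then
                    # 4 - 2 = 2 self-retries remain, so try_number is restored
                    # to max(0, 3 - 2) = 1.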
ti.try_number = max(0, task.retries - (ti.max_tries - ti.try_number))
ti.max_tries = -1
session.merge(ti)
session.commit()
session.commit()
op.drop_column('task_instance', 'max_tries')
|
py | b4078e9cf67e771e4a24b96c69d77022875ebde0 | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3192
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ResourceId(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'scope': 'str',
'code': 'str'
}
attribute_map = {
'scope': 'scope',
'code': 'code'
}
required_map = {
'scope': 'required',
'code': 'required'
}
def __init__(self, scope=None, code=None): # noqa: E501
"""
ResourceId - a model defined in OpenAPI
:param scope: (required)
:type scope: str
:param code: (required)
:type code: str
""" # noqa: E501
self._scope = None
self._code = None
self.discriminator = None
self.scope = scope
self.code = code
@property
def scope(self):
"""Gets the scope of this ResourceId. # noqa: E501
:return: The scope of this ResourceId. # noqa: E501
:rtype: str
"""
return self._scope
@scope.setter
def scope(self, scope):
"""Sets the scope of this ResourceId.
:param scope: The scope of this ResourceId. # noqa: E501
:type: str
"""
if scope is None:
raise ValueError("Invalid value for `scope`, must not be `None`") # noqa: E501
if scope is not None and len(scope) > 512:
raise ValueError("Invalid value for `scope`, length must be less than or equal to `512`") # noqa: E501
if scope is not None and len(scope) < 1:
raise ValueError("Invalid value for `scope`, length must be greater than or equal to `1`") # noqa: E501
self._scope = scope
@property
def code(self):
"""Gets the code of this ResourceId. # noqa: E501
:return: The code of this ResourceId. # noqa: E501
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this ResourceId.
:param code: The code of this ResourceId. # noqa: E501
:type: str
"""
if code is None:
raise ValueError("Invalid value for `code`, must not be `None`") # noqa: E501
if code is not None and len(code) > 512:
raise ValueError("Invalid value for `code`, length must be less than or equal to `512`") # noqa: E501
if code is not None and len(code) < 1:
raise ValueError("Invalid value for `code`, length must be greater than or equal to `1`") # noqa: E501
self._code = code
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceId):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
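

# Minimal usage sketch (illustrative only, not part of the generated client);
# the scope/code values below are made-up examples, not real LUSID identifiers.
if __name__ == "__main__":
    example = ResourceId(scope="example-scope", code="example-code")
    print(example.to_dict())  # {'scope': 'example-scope', 'code': 'example-code'}
    print(example == ResourceId(scope="example-scope", code="example-code"))  # True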
|
py | b4078f66432a198294f2cfae77d59ce66694cc7b | import tornado.web
import tornado.gen
from cloudtunes.services.handlers import ServiceAuthHandler
from cloudtunes.users.models import User
from .client import AsyncLastfmClient
from .models import LastfmAccount
class LastfmAuthHandler(ServiceAuthHandler):
@tornado.gen.coroutine
def get(self):
token = self.get_argument('token', None)
client = AsyncLastfmClient()
if not token:
callback_url = self.get_absolute_url(self.reverse_url(
'lastfm' if not self.popup else 'lastfm_popup'))
self.redirect(client.get_auth_url(callback_url))
else:
session = yield client.auth.get_session(token=token)
client.session_key = session['key']
profile = yield client.user.get_info()
try:
user = User.objects.get(lastfm__name=session['name'])
except User.DoesNotExist:
if self.current_user:
# Connect
user = self.current_user
user.lastfm = LastfmAccount()
else:
user = User(
name=profile.get('realname', ''),
lastfm=LastfmAccount()
)
user.lastfm.session_key = session['key']
profile['subscriber'] = bool(int(profile['subscriber']))
user.lastfm.update_fields(**profile)
if not user.picture:
# noinspection PyUnresolvedReferences
user.picture = user.lastfm.get_picture()
user.save()
self.service_connected(user)
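

# Flow sketch (as implemented above): the first GET arrives without a ``token``
# and redirects the browser to Last.fm's auth page with our callback URL;
# Last.fm then redirects back with ``?token=...``, which is exchanged for a
# session key. The profile is fetched and either attached to the logged-in
# user (connect) or used to create a new user before completing the service
# connection.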
|
py | b4078f7eab75cb408a885e242cda2d4a1f4d25c7 | def countSort(arr):
    # Counting sort for characters, assuming ordinal values in the range 0-255.
    output = [0 for i in range(len(arr))]
    count = [0 for i in range(256)]
    ans = ["" for _ in arr]
    # Count occurrences of each character.
    for i in arr:
        count[ord(i)] += 1
    # Prefix sums: count[c] becomes one past the last output slot for character c.
    for i in range(1, 256):
        count[i] += count[i-1]
    # Place each character at its sorted position.
    for i in range(len(arr)):
        output[count[ord(arr[i])]-1] = arr[i]
        count[ord(arr[i])] -= 1
    # Copy into the result list.
    for i in range(len(arr)):
        ans[i] = output[i]
    return ans
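# Example: countSort(list("geeks")) returns ['e', 'e', 'g', 'k', 's'].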
n = int(input())
arr = list(input())
ans = countSort(arr)
print(ans) |
py | b40790a0c24700f1f561bd546147842d30720d56 | import numpy as np
import theano.tensor as T
import theano
import lasagne
import sys
from .. import Convolution2DSwitchLayer
from .. import DensePartialSwitchLayer
from .. import DenseSwitchLayer
from .. import DeepNetworks
from .. import DeepQTransferNetwork
def buildNeuralNetwork(batchSize, inputState, numOutputs, numTasks, convImplementation = "conv", layerNonlinearity = lasagne.nonlinearities.rectify):
transferLayers = []
networkInput = lasagne.layers.InputLayer(shape=((None,)+inputState))
if convImplementation == "conv" or convImplementation == "dnn":
if convImplementation == "conv":
convFunction = lasagne.layers.conv.Conv2DLayer
elif convImplementation == "dnn":
from lasagne.layers import dnn
convFunction = dnn.Conv2DDNNLayer
conv1 = convFunction(
networkInput,
num_filters = 32,
filter_size = (8,8),
stride=(4,4),
nonlinearity=layerNonlinearity,
W = lasagne.init.HeUniform(),
b = lasagne.init.Constant(.1))
conv2 = convFunction(
conv1,
num_filters = 64,
filter_size = (4,4),
stride=(2,2),
nonlinearity=layerNonlinearity,
W = lasagne.init.HeUniform(),
b = lasagne.init.Constant(.1))
conv3 = convFunction(
conv2,
num_filters = 64,
filter_size = (3,3),
stride=(1,1),
nonlinearity=layerNonlinearity,
W = lasagne.init.HeUniform(),
b = lasagne.init.Constant(.1))
elif convImplementation == "cuda":
from lasagne.layers import cuda_convnet
convFunction = cuda_convnet.Conv2DCCLayer
dimshuffle = True
c01b=True
conv1 = convFunction(
networkInput,
num_filters = 32,
filter_size = (8,8),
stride=(4,4),
nonlinearity=layerNonlinearity,
W = lasagne.init.HeUniform(c01b),
b = lasagne.init.Constant(.1),
dimshuffle=dimshuffle)
conv2 = convFunction(
conv1,
num_filters = 64,
filter_size = (4,4),
stride=(2,2),
nonlinearity=layerNonlinearity,
W = lasagne.init.HeUniform(c01b),
b = lasagne.init.Constant(.1),
dimshuffle=dimshuffle)
conv3 = convFunction(
conv2,
num_filters = 64,
filter_size = (3,3),
stride=(1,1),
nonlinearity=layerNonlinearity,
W = lasagne.init.HeUniform(c01b),
b = lasagne.init.Constant(.1),
dimshuffle=dimshuffle)
hiddenLayer = lasagne.layers.DenseLayer(
conv3,
num_units=512,
nonlinearity=layerNonlinearity,
W = lasagne.init.HeUniform(),
b = lasagne.init.Constant(.1))
outputLayer = DenseSwitchLayer.DenseSwitchLayer(
hiddenLayer,
numSwitchOptions = numTasks,
numSwitchedUnits = numOutputs,
switchDefault = 0,
W = lasagne.init.HeUniform(),
b = lasagne.init.Constant(.1),
nonlinearity=None)
transferLayers.append(outputLayer)
return outputLayer, transferLayers |
py | b40790d1082e557ce07be9e2761a2f055fea023c | from django.db import models
from django.contrib.auth import get_user_model
import django
from django.conf import settings
from django.urls.base import reverse_lazy
User = get_user_model()
class Article(models.Model):
NOT_APPROVED = 'Not Approved'
GOT_APPROVED = 'Approved'
    REJECT = 'Rejected'
STATUS_CHOICES = (
('u', NOT_APPROVED),
('a', GOT_APPROVED),
('r', REJECT),
)
title = models.CharField(max_length=255)
url = models.URLField()
description = models.TextField(blank=True)
    status = models.CharField(choices=STATUS_CHOICES, default='u',
                              max_length=1)
owner = models.ForeignKey(User, on_delete=models.CASCADE,
related_name='articles')
def get_absolute_url(self):
url = settings.SITE_URL
path = reverse_lazy('article-edit', args=[self.id])
return '{0}{1}'.format(url, path)
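

# Illustrative note (not part of the model): with a hypothetical
# settings.SITE_URL of "https://example.com", Article(pk=42).get_absolute_url()
# returns SITE_URL joined with whatever path the 'article-edit' URL pattern
# resolves to for pk=42.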
|
py | b407918c8b8cb9f52237a37d900035d054ccdcb3 | import random
import DistributedLawOfficeAI
import DistributedStageAI
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import DirectObject
from toontown.coghq import StageLayout
from toontown.toonbase import ToontownGlobals
StageId2Layouts = {
ToontownGlobals.LawbotStageIntA: (0, 1, 2),
ToontownGlobals.LawbotStageIntB: (3, 4, 5),
ToontownGlobals.LawbotStageIntC: (6, 7, 8),
ToontownGlobals.LawbotStageIntD: (9, 10, 11)
}
class LawOfficeManagerAI(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('LawOfficeManagerAI')
lawOfficeId = None
def __init__(self, air):
DirectObject.DirectObject.__init__(self)
self.air = air
def getDoId(self):
return 0
def createLawOffice(self, StageId, entranceId, players):
for avId in players:
if bboard.has('StageId-%s' % avId):
StageId = bboard.get('StageId-%s' % avId)
break
floor = 0
layoutIndex = None
for avId in players:
if bboard.has('stageRoom-%s' % avId):
roomId = bboard.get('stageRoom-%s' % avId)
for lt in StageId2Layouts[StageId]:
for i in xrange(StageLayout.getNumFloors(lt)):
layout = StageLayout.StageLayout(StageId, i, stageLayout = lt)
if roomId in layout.getRoomIds():
layoutIndex = lt
floor = i
else:
StageRoomSpecs = StageRoomSpecs
roomName = StageRoomSpecs.CashbotStageRoomId2RoomName[roomId]
LawOfficeManagerAI.notify.warning('room %s (%s) not found in any floor of Stage %s' % (roomId, roomName, StageId))
StageZone = self.air.allocateZone()
if layoutIndex is None:
layoutIndex = random.choice(StageId2Layouts[StageId])
Stage = DistributedStageAI.DistributedStageAI(self.air, StageId, StageZone, floor, players, layoutIndex)
Stage.generateWithRequired(StageZone)
return StageZone
|
py | b407920bd5978c121be4cc7ee032588712cf4e82 | """
Code ideas from https://github.com/Newmu/dcgan and tensorflow mnist dataset reader
"""
import numpy as np
import scipy.misc as misc
class BatchDatset:
files = []
images = []
annotations = []
image_options = {}
batch_offset = 0
epochs_completed = 0
def __init__(self, records_list, image_options={}):
"""
Intialize a generic file reader with batching for list of files
:param records_list: list of file records to read -
sample record: {'image': f, 'annotation': annotation_file, 'filename': filename}
:param image_options: A dictionary of options for modifying the output image
Available options:
resize = True/ False
        resize_shape = shape of output image - resized with nearest-neighbour interpolation
        color = True/False
        infer = True/False - inference mode; if True, annotations are all zeros
"""
print("Initializing Batch Dataset Reader...")
print(image_options)
self.files = records_list
self.image_options = image_options
self._read_images()
self.reset_batch_offset()
def _read_images(self):
self.__channels = True
self.images = np.array([self._transform(filename['image']) for filename in self.files])
print ('images shape: ', self.images.shape)
if self.image_options.get('infer', False):
self.annotations = np.zeros(self.images.shape[:3])
return
self.__channels = False
self.annotations = np.array(
[np.expand_dims(self._transform(filename['annotation']), axis=3) for filename in self.files])
print ('annotations shape:', self.annotations.shape)
def _transform(self, filename):
image = misc.imread(filename)
if self.__channels and len(image.shape) < 3: # make sure images are of shape(h,w,3)
image = np.array([image for i in range(3)])
if self.image_options.get("resize", False):
if 'resize_shape' not in self.image_options:
                h_ = image.shape[0] // 32 * 32  # floor to multiple of 32
                w_ = image.shape[1] // 32 * 32
                self.image_options['resize_shape'] = (h_, w_)
                print('input image shape preprocess:', image.shape, '->', (h_, w_))
toShape = self.image_options['resize_shape']
resize_image = misc.imresize(image,
[toShape[0], toShape[1]], interp='nearest')
else:
resize_image = image
return np.array(resize_image)
def get_records(self):
return self.images, self.annotations
def reset_batch_offset(self, offset=0):
self.batch_offset = offset
def next_batch(self, batch_size):
start = self.batch_offset
self.batch_offset += batch_size
if self.batch_offset > self.images.shape[0]:
# Finished epoch
self.epochs_completed += 1
print("****************** Epochs completed: " + str(self.epochs_completed) + "******************")
# Shuffle the data
perm = np.arange(self.images.shape[0])
np.random.shuffle(perm)
self.images = self.images[perm]
self.annotations = self.annotations[perm]
# Start next epoch
start = 0
self.batch_offset = batch_size
end = self.batch_offset
return self.images[start:end], self.annotations[start:end]
def get_random_batch(self, batch_size):
indexes = np.random.randint(0, self.images.shape[0], size=[batch_size]).tolist()
return self.images[indexes], self.annotations[indexes]
def next_sequential_batch(self, batch_size):
start = self.batch_offset
self.batch_offset += batch_size
last_batch = (self.batch_offset >= self.images.shape[0])
if last_batch:
self.batch_offset = self.images.shape[0]
return self.images[start:self.batch_offset], self.annotations[start:self.batch_offset], last_batch
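

# Minimal usage sketch (illustrative only; the record paths are hypothetical
# and must point at real image/annotation files on disk):
#
#   records = [{'image': 'img_0.png', 'annotation': 'ann_0.png', 'filename': 'img_0'}]
#   reader = BatchDatset(records, image_options={'resize': True,
#                                                'resize_shape': (224, 224)})
#   images, annotations = reader.next_batch(batch_size=1)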
|
py | b4079235536f886280ae3188a08c38c0c7d610fa | import requests
import pytest
from kubernetes.client.rest import ApiException
from settings import TEST_DATA
from suite.custom_assertions import assert_event_and_get_count, assert_event_count_increased, assert_response_codes, \
assert_event, assert_event_starts_with_text_and_contains_errors, assert_vs_conf_not_exists
from suite.custom_resources_utils import get_vs_nginx_template_conf, patch_virtual_server_from_yaml, \
patch_virtual_server, generate_item_with_upstream_options
from suite.resources_utils import get_first_pod_name, wait_before_test, replace_configmap_from_yaml, get_events
@pytest.mark.vs
@pytest.mark.parametrize('crd_ingress_controller, virtual_server_setup',
[({"type": "complete", "extra_args": [f"-enable-custom-resources"]},
{"example": "virtual-server-upstream-options", "app_type": "simple"})],
indirect=True)
class TestVirtualServerUpstreamOptions:
def test_nginx_config_defaults(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup):
print("Case 1: no ConfigMap key, no options in VS")
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
config = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
assert "random two least_conn;" in config
assert "ip_hash;" not in config
assert "hash " not in config
assert "least_time " not in config
assert "proxy_connect_timeout 60s;" in config
assert "proxy_read_timeout 60s;" in config
assert "proxy_send_timeout 60s;" in config
assert "max_fails=1 fail_timeout=10s max_conns=0;" in config
assert "slow_start" not in config
assert "keepalive" not in config
assert 'set $default_connection_header "";' not in config
assert 'set $default_connection_header close;' in config
assert "proxy_set_header Upgrade $http_upgrade;" in config
assert "proxy_set_header Connection $vs_connection_header;" in config
assert "proxy_http_version 1.1;" in config
assert "proxy_next_upstream error timeout;" in config
assert "proxy_next_upstream_timeout 0s;" in config
assert "proxy_next_upstream_tries 0;" in config
assert "client_max_body_size 1m;" in config
assert "proxy_buffer_size" not in config
assert "proxy_buffering on;" in config
assert "proxy_buffers" not in config
assert "sticky cookie" not in config
@pytest.mark.parametrize('options, expected_strings', [
({"lb-method": "least_conn", "max-fails": 8,
"fail-timeout": "13s", "connect-timeout": "55s", "read-timeout": "1s", "send-timeout": "1h",
"keepalive": 54, "max-conns": 1048, "client-max-body-size": "1048K",
"buffering": True, "buffer-size": "2k", "buffers": {"number": 4, "size": "2k"}},
["least_conn;", "max_fails=8 ",
"fail_timeout=13s ", "proxy_connect_timeout 55s;", "proxy_read_timeout 1s;",
"proxy_send_timeout 1h;", "keepalive 54;", 'set $default_connection_header "";', "max_conns=1048;",
"client_max_body_size 1048K;",
"proxy_buffering on;", "proxy_buffer_size 2k;", "proxy_buffers 4 2k;"]),
({"lb-method": "ip_hash", "connect-timeout": "75", "read-timeout": "15", "send-timeout": "1h"},
["ip_hash;", "proxy_connect_timeout 75;", "proxy_read_timeout 15;", "proxy_send_timeout 1h;"]),
({"connect-timeout": "1m", "read-timeout": "1m", "send-timeout": "1s"},
["proxy_connect_timeout 1m;", "proxy_read_timeout 1m;", "proxy_send_timeout 1s;"]),
({"next-upstream": "error timeout non_idempotent", "next-upstream-timeout": "5s", "next-upstream-tries": 10},
["proxy_next_upstream error timeout non_idempotent;",
"proxy_next_upstream_timeout 5s;", "proxy_next_upstream_tries 10;"])
])
def test_when_option_in_v_s_only(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup,
options, expected_strings):
text = f"{virtual_server_setup.namespace}/{virtual_server_setup.vs_name}"
vs_event_text = f"Configuration for {text} was added or updated"
events_vs = get_events(kube_apis.v1, virtual_server_setup.namespace)
initial_count = assert_event_and_get_count(vs_event_text, events_vs)
print(f"Case 2: no key in ConfigMap , option specified in VS")
new_body = generate_item_with_upstream_options(
f"{TEST_DATA}/virtual-server-upstream-options/standard/virtual-server.yaml",
options)
patch_virtual_server(kube_apis.custom_objects,
virtual_server_setup.vs_name, virtual_server_setup.namespace, new_body)
wait_before_test(1)
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
config = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
resp_1 = requests.get(virtual_server_setup.backend_1_url,
headers={"host": virtual_server_setup.vs_host})
resp_2 = requests.get(virtual_server_setup.backend_2_url,
headers={"host": virtual_server_setup.vs_host})
vs_events = get_events(kube_apis.v1, virtual_server_setup.namespace)
assert_event_count_increased(vs_event_text, initial_count, vs_events)
for _ in expected_strings:
assert _ in config
assert_response_codes(resp_1, resp_2)
@pytest.mark.parametrize('config_map_file, expected_strings, unexpected_strings', [
(f"{TEST_DATA}/virtual-server-upstream-options/configmap-with-keys.yaml",
["max_fails=3 ", "fail_timeout=33s ", "max_conns=0;",
"proxy_connect_timeout 44s;", "proxy_read_timeout 22s;", "proxy_send_timeout 55s;",
"keepalive 1024;", 'set $default_connection_header "";',
"client_max_body_size 3m;",
"proxy_buffering off;", "proxy_buffer_size 1k;", "proxy_buffers 8 1k;"],
["ip_hash;", "least_conn;", "random ", "hash", "least_time ",
"max_fails=1 ", "fail_timeout=10s ", "max_conns=1000;",
"proxy_connect_timeout 60s;", "proxy_read_timeout 60s;", "proxy_send_timeout 60s;",
"client_max_body_size 1m;", "slow_start=0s",
"proxy_buffering on;"]),
])
def test_when_option_in_config_map_only(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup, restore_configmap,
config_map_file, expected_strings, unexpected_strings):
text = f"{virtual_server_setup.namespace}/{virtual_server_setup.vs_name}"
vs_event_text = f"Configuration for {text} was updated"
print(f"Case 3: key specified in ConfigMap, no option in VS")
patch_virtual_server_from_yaml(kube_apis.custom_objects, virtual_server_setup.vs_name,
f"{TEST_DATA}/virtual-server-upstream-options/standard/virtual-server.yaml",
virtual_server_setup.namespace)
config_map_name = ingress_controller_prerequisites.config_map["metadata"]["name"]
replace_configmap_from_yaml(kube_apis.v1, config_map_name,
ingress_controller_prerequisites.namespace,
config_map_file)
wait_before_test(1)
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
config = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
resp_1 = requests.get(virtual_server_setup.backend_1_url,
headers={"host": virtual_server_setup.vs_host})
resp_2 = requests.get(virtual_server_setup.backend_2_url,
headers={"host": virtual_server_setup.vs_host})
vs_events = get_events(kube_apis.v1, virtual_server_setup.namespace)
assert_event(vs_event_text, vs_events)
for _ in expected_strings:
assert _ in config
for _ in unexpected_strings:
assert _ not in config
assert_response_codes(resp_1, resp_2)
@pytest.mark.parametrize('options, expected_strings, unexpected_strings', [
({"lb-method": "least_conn", "max-fails": 12,
"fail-timeout": "1m", "connect-timeout": "1m", "read-timeout": "77s", "send-timeout": "23s",
"keepalive": 48, "client-max-body-size": "0",
"buffering": True, "buffer-size": "2k", "buffers": {"number": 4, "size": "2k"}},
["least_conn;", "max_fails=12 ",
"fail_timeout=1m ", "max_conns=0;", "proxy_connect_timeout 1m;", "proxy_read_timeout 77s;",
"proxy_send_timeout 23s;", "keepalive 48;", 'set $default_connection_header "";',
"client_max_body_size 0;",
"proxy_buffering on;", "proxy_buffer_size 2k;", "proxy_buffers 4 2k;"],
["ip_hash;", "random ", "hash", "least_time ", "max_fails=1 ",
"fail_timeout=10s ", "proxy_connect_timeout 44s;", "proxy_read_timeout 22s;",
"proxy_send_timeout 55s;", "keepalive 1024;",
"client_max_body_size 3m;", "client_max_body_size 1m;",
"proxy_buffering off;", "proxy_buffer_size 1k;", "proxy_buffers 8 1k;"])
])
def test_v_s_overrides_config_map(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup, restore_configmap,
options, expected_strings, unexpected_strings):
text = f"{virtual_server_setup.namespace}/{virtual_server_setup.vs_name}"
vs_event_text = f"Configuration for {text} was added or updated"
events_vs = get_events(kube_apis.v1, virtual_server_setup.namespace)
initial_count = assert_event_and_get_count(vs_event_text, events_vs)
print(f"Case 4: key in ConfigMap, option specified in VS")
new_body = generate_item_with_upstream_options(
f"{TEST_DATA}/virtual-server-upstream-options/standard/virtual-server.yaml",
options)
patch_virtual_server(kube_apis.custom_objects,
virtual_server_setup.vs_name, virtual_server_setup.namespace, new_body)
config_map_name = ingress_controller_prerequisites.config_map["metadata"]["name"]
replace_configmap_from_yaml(kube_apis.v1, config_map_name,
ingress_controller_prerequisites.namespace,
f"{TEST_DATA}/virtual-server-upstream-options/configmap-with-keys.yaml")
wait_before_test(1)
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
config = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
resp_1 = requests.get(virtual_server_setup.backend_1_url,
headers={"host": virtual_server_setup.vs_host})
resp_2 = requests.get(virtual_server_setup.backend_2_url,
headers={"host": virtual_server_setup.vs_host})
vs_events = get_events(kube_apis.v1, virtual_server_setup.namespace)
assert_event_count_increased(vs_event_text, initial_count, vs_events)
for _ in expected_strings:
assert _ in config
for _ in unexpected_strings:
assert _ not in config
assert_response_codes(resp_1, resp_2)
@pytest.mark.vs
@pytest.mark.parametrize('crd_ingress_controller, virtual_server_setup',
[({"type": "complete", "extra_args": [f"-enable-custom-resources"]},
{"example": "virtual-server-upstream-options", "app_type": "simple"})],
indirect=True)
class TestVirtualServerUpstreamOptionValidation:
def test_event_message_and_config(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup):
invalid_fields = [
"upstreams[0].lb-method", "upstreams[0].fail-timeout",
"upstreams[0].max-fails", "upstreams[0].connect-timeout",
"upstreams[0].read-timeout", "upstreams[0].send-timeout",
"upstreams[0].keepalive", "upstreams[0].max-conns",
"upstreams[0].next-upstream",
"upstreams[0].next-upstream-timeout", "upstreams[0].next-upstream-tries",
"upstreams[0].client-max-body-size",
"upstreams[0].buffers.number", "upstreams[0].buffers.size", "upstreams[0].buffer-size",
"upstreams[1].lb-method", "upstreams[1].fail-timeout",
"upstreams[1].max-fails", "upstreams[1].connect-timeout",
"upstreams[1].read-timeout", "upstreams[1].send-timeout",
"upstreams[1].keepalive", "upstreams[1].max-conns",
"upstreams[1].next-upstream",
"upstreams[1].next-upstream-timeout", "upstreams[1].next-upstream-tries",
"upstreams[1].client-max-body-size",
"upstreams[1].buffers.number", "upstreams[1].buffers.size", "upstreams[1].buffer-size"
]
text = f"{virtual_server_setup.namespace}/{virtual_server_setup.vs_name}"
vs_event_text = f"VirtualServer {text} is invalid and was rejected: "
vs_file = f"{TEST_DATA}/virtual-server-upstream-options/virtual-server-with-invalid-keys.yaml"
patch_virtual_server_from_yaml(kube_apis.custom_objects,
virtual_server_setup.vs_name,
vs_file,
virtual_server_setup.namespace)
wait_before_test(2)
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
vs_events = get_events(kube_apis.v1, virtual_server_setup.namespace)
assert_event_starts_with_text_and_contains_errors(vs_event_text, vs_events, invalid_fields)
assert_vs_conf_not_exists(kube_apis, ic_pod_name, ingress_controller_prerequisites.namespace,
virtual_server_setup)
def test_openapi_validation_flow(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup):
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
invalid_fields = [
"upstreams.lb-method", "upstreams.fail-timeout",
"upstreams.max-fails", "upstreams.connect-timeout",
"upstreams.read-timeout", "upstreams.send-timeout",
"upstreams.keepalive", "upstreams.max-conns",
"upstreams.next-upstream",
"upstreams.next-upstream-timeout", "upstreams.next-upstream-tries",
"upstreams.client-max-body-size",
"upstreams.buffers.number", "upstreams.buffers.size", "upstreams.buffer-size",
"upstreams.buffering", "upstreams.tls"
]
config_old = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
vs_file = f"{TEST_DATA}/virtual-server-upstream-options/virtual-server-with-invalid-keys-openapi.yaml"
try:
patch_virtual_server_from_yaml(kube_apis.custom_objects,
virtual_server_setup.vs_name,
vs_file,
virtual_server_setup.namespace)
except ApiException as ex:
assert ex.status == 422
for item in invalid_fields:
assert item in ex.body
except Exception as ex:
pytest.fail(f"An unexpected exception is raised: {ex}")
else:
pytest.fail("Expected an exception but there was none")
wait_before_test(2)
config_new = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
assert config_old == config_new, "Expected: config doesn't change"
@pytest.mark.vs
@pytest.mark.skip_for_nginx_oss
@pytest.mark.parametrize('crd_ingress_controller, virtual_server_setup',
[({"type": "complete", "extra_args": [f"-enable-custom-resources"]},
{"example": "virtual-server-upstream-options", "app_type": "simple"})],
indirect=True)
class TestOptionsSpecificForPlus:
@pytest.mark.parametrize('options, expected_strings', [
({"lb-method": "least_conn",
"healthCheck": {"enable": True, "port": 8080},
"slow-start": "3h",
"queue": {"size": 100},
"sessionCookie": {"enable": True,
"name": "TestCookie",
"path": "/some-valid/path",
"expires": "max",
"domain": "virtual-server-route.example.com", "httpOnly": True, "secure": True}},
["health_check uri=/ port=8080 interval=5s jitter=0s", "fails=1 passes=1;",
"slow_start=3h", "queue 100 timeout=60s;",
"sticky cookie TestCookie expires=max domain=virtual-server-route.example.com httponly secure path=/some-valid/path;"]),
({"lb-method": "least_conn",
"healthCheck": {"enable": True, "path": "/health",
"interval": "15s", "jitter": "3",
"fails": 2, "passes": 2, "port": 8080,
"tls": {"enable": True}, "statusMatch": "200",
"connect-timeout": "35s", "read-timeout": "45s", "send-timeout": "55s",
"headers": [{"name": "Host", "value": "virtual-server.example.com"}]},
"queue": {"size": 1000, "timeout": "66s"},
"slow-start": "0s"},
["health_check uri=/health port=8080 interval=15s jitter=3", "fails=2 passes=2 match=",
"proxy_pass https://vs", "status 200;",
"proxy_connect_timeout 35s;", "proxy_read_timeout 45s;", "proxy_send_timeout 55s;",
'proxy_set_header Host "virtual-server.example.com";',
"slow_start=0s", "queue 1000 timeout=66s;"])
])
def test_config_and_events(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup,
options, expected_strings):
expected_strings.append(f"location @hc-vs_"
f"{virtual_server_setup.namespace}_{virtual_server_setup.vs_name}_backend1")
expected_strings.append(f"location @hc-vs_"
f"{virtual_server_setup.namespace}_{virtual_server_setup.vs_name}_backend2")
text = f"{virtual_server_setup.namespace}/{virtual_server_setup.vs_name}"
vs_event_text = f"Configuration for {text} was added or updated"
events_vs = get_events(kube_apis.v1, virtual_server_setup.namespace)
initial_count = assert_event_and_get_count(vs_event_text, events_vs)
print(f"Case 1: option specified in VS")
new_body = generate_item_with_upstream_options(
f"{TEST_DATA}/virtual-server-upstream-options/standard/virtual-server.yaml",
options)
patch_virtual_server(kube_apis.custom_objects,
virtual_server_setup.vs_name, virtual_server_setup.namespace, new_body)
wait_before_test(1)
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
config = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
resp_1 = requests.get(virtual_server_setup.backend_1_url,
headers={"host": virtual_server_setup.vs_host})
resp_2 = requests.get(virtual_server_setup.backend_2_url,
headers={"host": virtual_server_setup.vs_host})
vs_events = get_events(kube_apis.v1, virtual_server_setup.namespace)
assert_event_count_increased(vs_event_text, initial_count, vs_events)
for _ in expected_strings:
assert _ in config
assert_response_codes(resp_1, resp_2)
@pytest.mark.parametrize('options', [{"slow-start": "0s"}])
def test_slow_start_warning(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup, options):
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
text = f"{virtual_server_setup.namespace}/{virtual_server_setup.vs_name}"
vs_event_text = f"Configuration for {text} was added or updated with warning(s): Slow start will be disabled"
print(f"Case 0: verify a warning")
new_body = generate_item_with_upstream_options(
f"{TEST_DATA}/virtual-server-upstream-options/standard/virtual-server.yaml",
options)
patch_virtual_server(kube_apis.custom_objects,
virtual_server_setup.vs_name, virtual_server_setup.namespace, new_body)
wait_before_test(1)
config = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
vs_events = get_events(kube_apis.v1, virtual_server_setup.namespace)
assert_event(vs_event_text, vs_events)
assert "slow_start" not in config
def test_validation_flow(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup):
invalid_fields = [
"upstreams[0].healthCheck.path", "upstreams[0].healthCheck.interval", "upstreams[0].healthCheck.jitter",
"upstreams[0].healthCheck.fails", "upstreams[0].healthCheck.passes",
"upstreams[0].healthCheck.connect-timeout",
"upstreams[0].healthCheck.read-timeout", "upstreams[0].healthCheck.send-timeout",
"upstreams[0].healthCheck.headers[0].name", "upstreams[0].healthCheck.headers[0].value",
"upstreams[0].healthCheck.statusMatch",
"upstreams[0].slow-start",
"upstreams[0].queue.size", "upstreams[0].queue.timeout",
"upstreams[0].sessionCookie.name", "upstreams[0].sessionCookie.path",
"upstreams[0].sessionCookie.expires", "upstreams[0].sessionCookie.domain",
"upstreams[1].healthCheck.path", "upstreams[1].healthCheck.interval", "upstreams[1].healthCheck.jitter",
"upstreams[1].healthCheck.fails", "upstreams[1].healthCheck.passes",
"upstreams[1].healthCheck.connect-timeout",
"upstreams[1].healthCheck.read-timeout", "upstreams[1].healthCheck.send-timeout",
"upstreams[1].healthCheck.headers[0].name", "upstreams[1].healthCheck.headers[0].value",
"upstreams[1].healthCheck.statusMatch",
"upstreams[1].slow-start",
"upstreams[1].queue.size", "upstreams[1].queue.timeout",
"upstreams[1].sessionCookie.name", "upstreams[1].sessionCookie.path",
"upstreams[1].sessionCookie.expires", "upstreams[1].sessionCookie.domain"
]
text = f"{virtual_server_setup.namespace}/{virtual_server_setup.vs_name}"
vs_event_text = f"VirtualServer {text} is invalid and was rejected: "
vs_file = f"{TEST_DATA}/virtual-server-upstream-options/plus-virtual-server-with-invalid-keys.yaml"
patch_virtual_server_from_yaml(kube_apis.custom_objects,
virtual_server_setup.vs_name,
vs_file,
virtual_server_setup.namespace)
wait_before_test(2)
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
vs_events = get_events(kube_apis.v1, virtual_server_setup.namespace)
assert_event_starts_with_text_and_contains_errors(vs_event_text, vs_events, invalid_fields)
assert_vs_conf_not_exists(kube_apis, ic_pod_name, ingress_controller_prerequisites.namespace,
virtual_server_setup)
def test_openapi_validation_flow(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup):
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
invalid_fields = [
"upstreams.healthCheck.enable", "upstreams.healthCheck.path",
"upstreams.healthCheck.interval", "upstreams.healthCheck.jitter",
"upstreams.healthCheck.fails", "upstreams.healthCheck.passes",
"upstreams.healthCheck.port", "upstreams.healthCheck.connect-timeout",
"upstreams.healthCheck.read-timeout", "upstreams.healthCheck.send-timeout",
"upstreams.healthCheck.headers.name", "upstreams.healthCheck.headers.value",
"upstreams.healthCheck.statusMatch",
"upstreams.slow-start",
"upstreams.queue.size", "upstreams.queue.timeout",
"upstreams.sessionCookie.name", "upstreams.sessionCookie.path",
"upstreams.sessionCookie.expires", "upstreams.sessionCookie.domain",
"upstreams.sessionCookie.httpOnly", "upstreams.sessionCookie.secure"
]
config_old = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
vs_file = f"{TEST_DATA}/virtual-server-upstream-options/plus-virtual-server-with-invalid-keys-openapi.yaml"
try:
patch_virtual_server_from_yaml(kube_apis.custom_objects,
virtual_server_setup.vs_name,
vs_file,
virtual_server_setup.namespace)
except ApiException as ex:
assert ex.status == 422
for item in invalid_fields:
assert item in ex.body
except Exception as ex:
pytest.fail(f"An unexpected exception is raised: {ex}")
else:
pytest.fail("Expected an exception but there was none")
wait_before_test(2)
config_new = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
assert config_old == config_new, "Expected: config doesn't change"
|
py | b40792d072bef9db760d5b1ba8ef5783c2a020ab | ###########################
#
# #285 Pythagorean odds - Project Euler
# https://projecteuler.net/problem=285
#
# Code by Kevin Marciniak
#
###########################
|
py | b407937236e2bd0873edc044862f349cbf607aca | from __future__ import unicode_literals
from django.apps import AppConfig
class ElfinderConfig(AppConfig):
name = 'elfinder'
|
py | b407939bf7fe503e41b6c36432a958834080b776 | __version__ = "0.1.4"
__title__ = "pytest-it"
__description__ = "Pytest plugin to display test reports as a plaintext spec, inspired by Rspec: https://github.com/mattduck/pytest-it."
__doc__ = __description__
__uri__ = "https://github.com/mattduck/pytest-it"
__author__ = "Matt Duck"
__email__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2019 Matthew Duck"
|
py | b40793bc6d7bf0fc6b00289f0bb2b4185e925e7a | """"The nbgitpuller PyPI package SemVer version."""
__version__ = '0.7.2'
|
py | b40793e71e29087b0f39922b08415e9c4c552b68 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rajvi Dhimar <[email protected]>`
'''
# Import python libs
from __future__ import absolute_import, print_function
# Import test libs
from tests.support.mixins import LoaderModuleMockMixin, XMLEqualityMixin
from tests.support.mock import patch, mock_open
from tests.support.unit import skipIf, TestCase
# Import 3rd-party libs
try:
from lxml import etree
except ImportError:
from salt._compat import ElementTree as etree
try:
from jnpr.junos.utils.config import Config
from jnpr.junos.utils.sw import SW
from jnpr.junos.device import Device
HAS_JUNOS = True
except ImportError:
HAS_JUNOS = False
# Import salt modules
import salt.modules.junos as junos
@skipIf(not HAS_JUNOS, 'Install junos-eznc to be able to run this test.')
class Test_Junos_Module(TestCase, LoaderModuleMockMixin, XMLEqualityMixin):
def setup_loader_modules(self):
return {
junos: {
'__proxy__': {
'junos.conn': self.make_connect,
'junos.get_serialized_facts': self.get_facts
},
'__salt__': {'cp.get_template': self.mock_cp,
'cp.get_file': self.mock_cp}
}
}
def mock_cp(self, *args, **kwargs):
pass
def make_connect(self):
with patch('ncclient.manager.connect') as mock_connect:
            self.dev = Device(
host='1.1.1.1',
user='test',
password='test123',
gather_facts=False)
self.dev.open()
self.dev.timeout = 30
self.dev.bind(cu=Config)
self.dev.bind(sw=SW)
self.addCleanup(delattr, self, 'dev')
return self.dev
def raise_exception(self, *args, **kwargs):
raise Exception('Test exception')
def get_facts(self):
facts = {'2RE': True,
'HOME': '/var/home/regress',
'RE0': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'master',
'model': 'RE-VMX',
'status': 'OK',
'up_time': '11 days, 23 hours, 16 minutes, 54 seconds'},
'RE1': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'backup',
'model': 'RE-VMX',
'status': 'OK',
'up_time': '11 days, 23 hours, 16 minutes, 41 seconds'},
'RE_hw_mi': False,
'current_re': ['re0', 'master', 'node', 'fwdd', 'member', 'pfem'],
'domain': 'englab.juniper.net',
'fqdn': 'R1_re0.englab.juniper.net',
'hostname': 'R1_re0',
'hostname_info': {'re0': 'R1_re0', 're1': 'R1_re01'},
'ifd_style': 'CLASSIC',
'junos_info': {'re0': {'object': {'build': None,
'major': (16, 1),
'minor': '20160413_0837_aamish',
'type': 'I'},
'text': '16.1I20160413_0837_aamish'},
're1': {'object': {'build': None,
'major': (16, 1),
'minor': '20160413_0837_aamish',
'type': 'I'},
'text': '16.1I20160413_0837_aamish'}},
'master': 'RE0',
'model': 'MX240',
'model_info': {'re0': 'MX240', 're1': 'MX240'},
'personality': 'MX',
're_info': {'default': {'0': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'master',
'model': 'RE-VMX',
'status': 'OK'},
'1': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'backup',
'model': 'RE-VMX',
'status': 'OK'},
'default': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'master',
'model': 'RE-VMX',
'status': 'OK'}}},
're_master': {'default': '0'},
'serialnumber': 'VMX4eaf',
'srx_cluster': None,
'switch_style': 'BRIDGE_DOMAIN',
'vc_capable': False,
'vc_fabric': None,
'vc_master': None,
'vc_mode': None,
'version': '16.1I20160413_0837_aamish',
'version_RE0': '16.1I20160413_0837_aamish',
'version_RE1': '16.1I20160413_0837_aamish',
'version_info': {'build': None,
'major': (16, 1),
'minor': '20160413_0837_aamish',
'type': 'I'},
'virtual': True}
return facts
def test_facts_refresh(self):
with patch('salt.modules.saltutil.sync_grains') as mock_sync_grains:
ret = dict()
ret['facts'] = {'2RE': True,
'HOME': '/var/home/regress',
'RE0': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'master',
'model': 'RE-VMX',
'status': 'OK',
'up_time': '11 days, 23 hours, 16 minutes, 54 seconds'},
'RE1': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'backup',
'model': 'RE-VMX',
'status': 'OK',
'up_time': '11 days, 23 hours, 16 minutes, 41 seconds'},
'RE_hw_mi': False,
'current_re': ['re0', 'master', 'node', 'fwdd', 'member', 'pfem'],
'domain': 'englab.juniper.net',
'fqdn': 'R1_re0.englab.juniper.net',
'hostname': 'R1_re0',
'hostname_info': {'re0': 'R1_re0', 're1': 'R1_re01'},
'ifd_style': 'CLASSIC',
'junos_info': {'re0': {'object': {'build': None,
'major': (16, 1),
'minor': '20160413_0837_aamish',
'type': 'I'},
'text': '16.1I20160413_0837_aamish'},
're1': {'object': {'build': None,
'major': (16, 1),
'minor': '20160413_0837_aamish',
'type': 'I'},
'text': '16.1I20160413_0837_aamish'}},
'master': 'RE0',
'model': 'MX240',
'model_info': {'re0': 'MX240', 're1': 'MX240'},
'personality': 'MX',
're_info': {'default': {'0': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'master',
'model': 'RE-VMX',
'status': 'OK'},
'1': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'backup',
'model': 'RE-VMX',
'status': 'OK'},
'default': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'master',
'model': 'RE-VMX',
'status': 'OK'}}},
're_master': {'default': '0'},
'serialnumber': 'VMX4eaf',
'srx_cluster': None,
'switch_style': 'BRIDGE_DOMAIN',
'vc_capable': False,
'vc_fabric': None,
'vc_master': None,
'vc_mode': None,
'version': '16.1I20160413_0837_aamish',
'version_RE0': '16.1I20160413_0837_aamish',
'version_RE1': '16.1I20160413_0837_aamish',
'version_info': {'build': None,
'major': (16, 1),
'minor': '20160413_0837_aamish',
'type': 'I'},
'virtual': True}
ret['out'] = True
self.assertEqual(junos.facts_refresh(), ret)
def test_facts_refresh_exception(self):
with patch('jnpr.junos.device.Device.facts_refresh') as mock_facts_refresh:
mock_facts_refresh.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Execution failed due to "Test exception"'
ret['out'] = False
self.assertEqual(junos.facts_refresh(), ret)
def test_facts(self):
ret = dict()
ret['facts'] = {'2RE': True,
'HOME': '/var/home/regress',
'RE0': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'master',
'model': 'RE-VMX',
'status': 'OK',
'up_time': '11 days, 23 hours, 16 minutes, 54 seconds'},
'RE1': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'backup',
'model': 'RE-VMX',
'status': 'OK',
'up_time': '11 days, 23 hours, 16 minutes, 41 seconds'},
'RE_hw_mi': False,
'current_re': ['re0', 'master', 'node', 'fwdd', 'member', 'pfem'],
'domain': 'englab.juniper.net',
'fqdn': 'R1_re0.englab.juniper.net',
'hostname': 'R1_re0',
'hostname_info': {'re0': 'R1_re0', 're1': 'R1_re01'},
'ifd_style': 'CLASSIC',
'junos_info': {'re0': {'object': {'build': None,
'major': (16, 1),
'minor': '20160413_0837_aamish',
'type': 'I'},
'text': '16.1I20160413_0837_aamish'},
're1': {'object': {'build': None,
'major': (16, 1),
'minor': '20160413_0837_aamish',
'type': 'I'},
'text': '16.1I20160413_0837_aamish'}},
'master': 'RE0',
'model': 'MX240',
'model_info': {'re0': 'MX240', 're1': 'MX240'},
'personality': 'MX',
're_info': {'default': {'0': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'master',
'model': 'RE-VMX',
'status': 'OK'},
'1': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'backup',
'model': 'RE-VMX',
'status': 'OK'},
'default': {'last_reboot_reason': '0x200:normal shutdown',
'mastership_state': 'master',
'model': 'RE-VMX',
'status': 'OK'}}},
're_master': {'default': '0'},
'serialnumber': 'VMX4eaf',
'srx_cluster': None,
'switch_style': 'BRIDGE_DOMAIN',
'vc_capable': False,
'vc_fabric': None,
'vc_master': None,
'vc_mode': None,
'version': '16.1I20160413_0837_aamish',
'version_RE0': '16.1I20160413_0837_aamish',
'version_RE1': '16.1I20160413_0837_aamish',
'version_info': {'build': None,
'major': (16, 1),
'minor': '20160413_0837_aamish',
'type': 'I'},
'virtual': True}
ret['out'] = True
self.assertEqual(junos.facts(), ret)
def test_facts_exception(self):
with patch.dict(junos.__proxy__, {'junos.get_serialized_facts': self.raise_exception}):
ret = dict()
ret['message'] = 'Could not display facts due to "Test exception"'
ret['out'] = False
self.assertEqual(junos.facts(), ret)
def test_set_hostname_without_args(self):
ret = dict()
ret['message'] = 'Please provide the hostname.'
ret['out'] = False
self.assertEqual(junos.set_hostname(), ret)
def test_set_hostname_load_called_with_valid_name(self):
with patch('jnpr.junos.utils.config.Config.load') as mock_load:
junos.set_hostname('test-name')
mock_load.assert_called_with(
'set system host-name test-name', format='set')
def test_set_hostname_raise_exception_for_load(self):
with patch('jnpr.junos.utils.config.Config.load') as mock_load:
mock_load.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Could not load configuration due to error "Test exception"'
ret['out'] = False
self.assertEqual(junos.set_hostname('Test-name'), ret)
def test_set_hostname_raise_exception_for_commit_check(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check:
mock_commit_check.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Could not commit check due to error "Test exception"'
ret['out'] = False
self.assertEqual(junos.set_hostname('test-name'), ret)
def test_set_hostname_one_arg_parsed_correctly(self):
with patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit:
mock_commit_check.return_value = True
args = {'comment': 'Committed via salt', '__pub_user': 'root',
'__pub_arg': ['test-name', {'comment': 'Committed via salt'}],
'__pub_fun': 'junos.set_hostname', '__pub_jid':
'20170220210915624885', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
junos.set_hostname('test-name', **args)
mock_commit.assert_called_with(comment='Committed via salt')
def test_set_hostname_more_than_one_args_parsed_correctly(self):
with patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit:
mock_commit_check.return_value = True
args = {'comment': 'Committed via salt',
'__pub_user': 'root',
'__pub_arg': ['test-name',
{'comment': 'Committed via salt',
'confirm': 5}],
'__pub_fun': 'junos.set_hostname',
'__pub_jid': '20170220210915624885',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
junos.set_hostname('test-name', **args)
mock_commit.assert_called_with(comment='Committed via salt', confirm=5)
def test_set_hostname_successful_return_message(self):
with patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit:
mock_commit_check.return_value = True
args = {'comment': 'Committed via salt',
'__pub_user': 'root',
'__pub_arg': ['test-name',
{'comment': 'Committed via salt'}],
'__pub_fun': 'junos.set_hostname',
'__pub_jid': '20170220210915624885',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
ret = dict()
ret['message'] = 'Successfully changed hostname.'
ret['out'] = True
self.assertEqual(junos.set_hostname('test-name', **args), ret)
def test_set_hostname_raise_exception_for_commit(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit:
mock_commit.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Successfully loaded host-name but commit failed with "Test exception"'
ret['out'] = False
self.assertEqual(junos.set_hostname('test-name'), ret)
def test_set_hostname_fail_commit_check(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('salt.modules.junos.rollback') as mock_rollback:
mock_commit_check.return_value = False
ret = dict()
ret['out'] = False
ret['message'] = 'Successfully loaded host-name but pre-commit check failed.'
self.assertEqual(junos.set_hostname('test'), ret)
def test_commit_without_args(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit:
mock_commit.return_value = True
mock_commit_check.return_value = True
ret = dict()
ret['message'] = 'Commit Successful.'
ret['out'] = True
self.assertEqual(junos.commit(), ret)
def test_commit_raise_commit_check_exeception(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check:
mock_commit_check.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Could not perform commit check due to "Test exception"'
ret['out'] = False
self.assertEqual(junos.commit(), ret)
def test_commit_raise_commit_exception(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit:
mock_commit_check.return_value = True
mock_commit.side_effect = self.raise_exception
ret = dict()
ret['out'] = False
ret['message'] = \
'Commit check succeeded but actual commit failed with "Test exception"'
self.assertEqual(junos.commit(), ret)
def test_commit_with_single_argument(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit:
mock_commit_check.return_value = True
args = {'__pub_user': 'root',
'__pub_arg': [{'sync': True}],
'sync': True,
'__pub_fun': 'junos.commit',
'__pub_jid': '20170221182531323467',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
junos.commit(**args)
mock_commit.assert_called_with(detail=False, sync=True)
def test_commit_with_multiple_arguments(
self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit:
mock_commit_check.return_value = True
args = {'comment': 'comitted via salt',
'__pub_user': 'root',
'__pub_arg': [{'comment': 'comitted via salt',
'confirm': 3,
'detail': True}],
'confirm': 3,
'detail': True,
'__pub_fun': 'junos.commit',
'__pub_jid': '20170221182856987820',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
junos.commit(**args)
mock_commit.assert_called_with(
comment='comitted via salt', detail=True, confirm=3)
def test_commit_pyez_commit_returning_false(
self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit:
mock_commit.return_value = False
mock_commit_check.return_value = True
ret = dict()
ret['message'] = 'Commit failed.'
ret['out'] = False
self.assertEqual(junos.commit(), ret)
def test_commit_pyez_commit_check_returns_false(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check:
mock_commit_check.return_value = False
ret = dict()
ret['out'] = False
ret['message'] = 'Pre-commit check failed.'
self.assertEqual(junos.commit(), ret)
def test_rollback_exception(self):
with patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_rollback.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Rollback failed due to "Test exception"'
ret['out'] = False
self.assertEqual(junos.rollback(), ret)
def test_rollback_without_args_success(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_commit_check.return_value = True
mock_rollback.return_value = True
ret = dict()
ret['message'] = 'Rollback successful'
ret['out'] = True
self.assertEqual(junos.rollback(), ret)
def test_rollback_without_args_fail(self):
with patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_rollback.return_value = False
ret = dict()
ret['message'] = 'Rollback failed'
ret['out'] = False
self.assertEqual(junos.rollback(), ret)
def test_rollback_with_id(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_commit_check.return_value = True
junos.rollback(id=5)
mock_rollback.assert_called_with(5)
def test_rollback_with_id_and_single_arg(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_commit_check.return_value = True
args = {'__pub_user': 'root', '__pub_arg': [2, {'confirm': 2}],
'confirm': 2, '__pub_fun': 'junos.rollback',
'__pub_jid': '20170221184518526067', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
junos.rollback(2, **args)
mock_rollback.assert_called_with(2)
mock_commit.assert_called_with(confirm=2)
def test_rollback_with_id_and_multiple_args(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_commit_check.return_value = True
args = {'comment': 'Comitted via salt',
'__pub_user': 'root',
'dev_timeout': 40,
'__pub_arg': [2,
{'comment': 'Comitted via salt',
'timeout': 40,
'confirm': 1}],
'confirm': 1,
'__pub_fun': 'junos.rollback',
'__pub_jid': '20170221192708251721',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
junos.rollback(id=2, **args)
mock_rollback.assert_called_with(2)
mock_commit.assert_called_with(
comment='Comitted via salt', confirm=1, timeout=40)
def test_rollback_with_only_single_arg(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_commit_check.return_value = True
args = {'__pub_user': 'root',
'__pub_arg': [{'sync': True}],
'sync': True,
'__pub_fun': 'junos.rollback',
'__pub_jid': '20170221193615696475',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
junos.rollback(**args)
mock_rollback.assert_called_once_with(0)
mock_commit.assert_called_once_with(sync=True)
def test_rollback_with_only_multiple_args_no_id(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_commit_check.return_value = True
args = {'comment': 'Comitted via salt',
'__pub_user': 'root',
'__pub_arg': [{'comment': 'Comitted via salt',
'confirm': 3,
'sync': True}],
'confirm': 3,
'sync': True,
'__pub_fun': 'junos.rollback',
'__pub_jid': '20170221193945996362',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
junos.rollback(**args)
mock_rollback.assert_called_with(0)
mock_commit.assert_called_once_with(
sync=True, confirm=3, comment='Comitted via salt')
def test_rollback_with_diffs_file_option_when_diff_is_None(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback, \
patch('salt.modules.junos.fopen') as mock_fopen, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff:
mock_commit_check.return_value = True
mock_diff.return_value = 'diff'
args = {'__pub_user': 'root',
'__pub_arg': [{'diffs_file': '/home/regress/diff',
'confirm': 2}],
'confirm': 2,
'__pub_fun': 'junos.rollback',
'__pub_jid': '20170221205153884009',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': '',
'diffs_file': '/home/regress/diff'}
junos.rollback(**args)
mock_fopen.assert_called_with('/home/regress/diff', 'w')
def test_rollback_with_diffs_file_option(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback, \
patch('salt.modules.junos.fopen') as mock_fopen, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff:
mock_commit_check.return_value = True
mock_diff.return_value = None
args = {'__pub_user': 'root',
'__pub_arg': [{'diffs_file': '/home/regress/diff',
'confirm': 2}],
'confirm': 2,
'__pub_fun': 'junos.rollback',
'__pub_jid': '20170221205153884009',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': '',
'diffs_file': '/home/regress/diff'}
junos.rollback(**args)
assert not mock_fopen.called
def test_rollback_commit_check_exception(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_commit_check.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Could not commit check due to "Test exception"'
ret['out'] = False
self.assertEqual(junos.rollback(), ret)
def test_rollback_commit_exception(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_commit_check.return_value = True
mock_commit.side_effect = self.raise_exception
ret = dict()
ret['message'] = \
'Rollback successful but commit failed with error "Test exception"'
ret['out'] = False
self.assertEqual(junos.rollback(), ret)
def test_rollback_commit_check_fails(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.rollback') as mock_rollback:
mock_commit_check.return_value = False
ret = dict()
ret['message'] = 'Rollback succesfull but pre-commit check failed.'
ret['out'] = False
self.assertEqual(junos.rollback(), ret)
def test_diff_without_args(self):
with patch('jnpr.junos.utils.config.Config.diff') as mock_diff:
junos.diff()
mock_diff.assert_called_with(rb_id=0)
def test_diff_with_arg(self):
with patch('jnpr.junos.utils.config.Config.diff') as mock_diff:
junos.diff(2)
mock_diff.assert_called_with(rb_id=2)
def test_diff_exception(self):
with patch('jnpr.junos.utils.config.Config.diff') as mock_diff:
mock_diff.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Could not get diff with error "Test exception"'
ret['out'] = False
self.assertEqual(junos.diff(), ret)
def test_ping_without_args(self):
ret = dict()
ret['message'] = 'Please specify the destination ip to ping.'
ret['out'] = False
self.assertEqual(junos.ping(), ret)
def test_ping(self):
with patch('jnpr.junos.device.Device.execute') as mock_execute:
junos.ping('1.1.1.1')
args = mock_execute.call_args
rpc = '<ping><count>5</count><host>1.1.1.1</host></ping>'
self.assertEqualXML(args[0][0], rpc)
def test_ping_ttl(self):
with patch('jnpr.junos.device.Device.execute') as mock_execute:
args = {'__pub_user': 'sudo_drajvi',
'__pub_arg': ['1.1.1.1',
{'ttl': 3}],
'__pub_fun': 'junos.ping',
'__pub_jid': '20170306165237683279',
'__pub_tgt': 'mac_min',
'ttl': 3,
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
junos.ping('1.1.1.1', **args)
exec_args = mock_execute.call_args
rpc = '<ping><count>5</count><host>1.1.1.1</host><ttl>3</ttl></ping>'
self.assertEqualXML(exec_args[0][0], rpc)
def test_ping_exception(self):
with patch('jnpr.junos.device.Device.execute') as mock_execute:
mock_execute.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Execution failed due to "Test exception"'
ret['out'] = False
self.assertEqual(junos.ping('1.1.1.1'), ret)
def test_cli_without_args(self):
ret = dict()
ret['message'] = 'Please provide the CLI command to be executed.'
ret['out'] = False
self.assertEqual(junos.cli(), ret)
def test_cli_with_format_as_empty_string(self):
with patch('jnpr.junos.device.Device.cli') as mock_cli:
junos.cli('show version', '')
mock_cli.assert_called_with('show version', 'text', warning=False)
def test_cli(self):
with patch('jnpr.junos.device.Device.cli') as mock_cli:
            mock_cli.return_value = 'CLI result'
ret = dict()
ret['message'] = 'CLI result'
ret['out'] = True
junos.cli('show version')
mock_cli.assert_called_with('show version', 'text', warning=False)
def test_cli_format_xml(self):
with patch('salt.modules.junos.jxmlease.parse') as mock_jxml, \
patch('salt.modules.junos.etree.tostring') as mock_to_string, \
patch('jnpr.junos.device.Device.cli') as mock_cli:
mock_cli.return_value = '<root><a>test</a></root>'
mock_jxml.return_value = '<root><a>test</a></root>'
args = {'__pub_user': 'root',
'__pub_arg': [{'format': 'xml'}],
'format': 'xml',
'__pub_fun': 'junos.cli',
'__pub_jid': '20170221182531323467',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
ret = dict()
ret['message'] = '<root><a>test</a></root>'
ret['out'] = True
self.assertEqual(junos.cli('show version', **args), ret)
mock_cli.assert_called_with('show version', 'xml', warning=False)
mock_to_string.assert_called_once_with('<root><a>test</a></root>')
assert mock_jxml.called
def test_cli_exception_in_cli(self):
with patch('jnpr.junos.device.Device.cli') as mock_cli:
mock_cli.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Execution failed due to "Test exception"'
ret['out'] = False
self.assertEqual(junos.cli('show version'), ret)
def test_cli_write_output(self):
with patch('salt.modules.junos.fopen') as mock_fopen, \
patch('jnpr.junos.device.Device.cli') as mock_cli:
            mock_cli.return_value = 'cli text output'
args = {'__pub_user': 'root',
'__pub_arg': [{'dest': 'copy/output/here'}],
'dest': 'copy/output/here',
'__pub_fun': 'junos.cli',
'__pub_jid': '20170221182531323467',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
ret = dict()
ret['message'] = 'cli text output'
ret['out'] = True
junos.cli('show version', **args)
mock_fopen.assert_called_with('copy/output/here', 'w')
def test_shutdown_without_args(self):
ret = dict()
ret['message'] = \
'Provide either one of the arguments: shutdown or reboot.'
ret['out'] = False
self.assertEqual(junos.shutdown(), ret)
def test_shutdown_with_reboot_args(self):
with patch('salt.modules.junos.SW.reboot') as mock_reboot:
ret = dict()
ret['message'] = 'Successfully powered off/rebooted.'
ret['out'] = True
args = {'__pub_user': 'root', '__pub_arg': [{'reboot': True}],
'reboot': True, '__pub_fun': 'junos.shutdown',
'__pub_jid': '20170222213858582619', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
self.assertEqual(junos.shutdown(**args), ret)
assert mock_reboot.called
def test_shutdown_with_poweroff_args(self):
with patch('salt.modules.junos.SW.poweroff') as mock_poweroff:
ret = dict()
ret['message'] = 'Successfully powered off/rebooted.'
ret['out'] = True
args = {'__pub_user': 'root', '__pub_arg': [{'shutdown': True}],
'reboot': True, '__pub_fun': 'junos.shutdown',
'__pub_jid': '20170222213858582619', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
self.assertEqual(junos.shutdown(**args), ret)
assert mock_poweroff.called
def test_shutdown_with_shutdown_as_false(self):
ret = dict()
ret['message'] = 'Nothing to be done.'
ret['out'] = False
args = {'__pub_user': 'root', '__pub_arg': [{'shutdown': False}],
'reboot': True, '__pub_fun': 'junos.shutdown',
'__pub_jid': '20170222213858582619', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
self.assertEqual(junos.shutdown(**args), ret)
def test_shutdown_with_in_min_arg(self):
with patch('salt.modules.junos.SW.poweroff') as mock_poweroff:
args = {'__pub_user': 'root',
'in_min': 10,
'__pub_arg': [{'in_min': 10,
'shutdown': True}],
'reboot': True,
'__pub_fun': 'junos.shutdown',
'__pub_jid': '20170222231445709212',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
junos.shutdown(**args)
mock_poweroff.assert_called_with(in_min=10)
def test_shutdown_with_at_arg(self):
with patch('salt.modules.junos.SW.reboot') as mock_reboot:
args = {'__pub_user': 'root',
'__pub_arg': [{'at': '12:00 pm',
'reboot': True}],
'reboot': True,
'__pub_fun': 'junos.shutdown',
'__pub_jid': '201702276857',
'at': '12:00 pm',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
junos.shutdown(**args)
mock_reboot.assert_called_with(at='12:00 pm')
def test_shutdown_fail_with_exception(self):
with patch('salt.modules.junos.SW.poweroff') as mock_poweroff:
mock_poweroff.side_effect = self.raise_exception
args = {'__pub_user': 'root', '__pub_arg': [{'shutdown': True}],
'shutdown': True, '__pub_fun': 'junos.shutdown',
'__pub_jid': '20170222213858582619', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
ret = dict()
ret['message'] = 'Could not poweroff/reboot beacause "Test exception"'
ret['out'] = False
self.assertEqual(junos.shutdown(**args), ret)
def test_install_config_without_args(self):
ret = dict()
ret['message'] = \
'Please provide the salt path where the configuration is present'
ret['out'] = False
self.assertEqual(junos.install_config(), ret)
def test_install_config_cp_fails(self):
with patch('os.path.isfile') as mock_isfile:
mock_isfile.return_value = False
ret = dict()
ret['message'] = 'Invalid file path.'
ret['out'] = False
self.assertEqual(junos.install_config('path'), ret)
def test_install_config_file_cp_fails(self):
with patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 0
ret = dict()
ret['message'] = 'Template failed to render'
ret['out'] = False
self.assertEqual(junos.install_config('path'), ret)
def test_install_config(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = True
ret = dict()
ret['message'] = 'Successfully loaded and committed!'
ret['out'] = True
self.assertEqual(junos.install_config('actual/path/config.set'), ret)
mock_load.assert_called_with(path='test/path/config', format='set')
def test_install_config_xml_file(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = True
ret = dict()
ret['message'] = 'Successfully loaded and committed!'
ret['out'] = True
self.assertEqual(junos.install_config('actual/path/config.xml'), ret)
mock_load.assert_called_with(path='test/path/config', format='xml')
def test_install_config_text_file(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = True
ret = dict()
ret['message'] = 'Successfully loaded and committed!'
ret['out'] = True
self.assertEqual(junos.install_config('actual/path/config'), ret)
mock_load.assert_called_with(path='test/path/config', format='text')
def test_install_config_replace(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = True
args = {'__pub_user': 'root', '__pub_arg': [{'replace': True}],
'replace': True, '__pub_fun': 'junos.install_config',
'__pub_jid': '20170222213858582619', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
ret = dict()
ret['message'] = 'Successfully loaded and committed!'
ret['out'] = True
self.assertEqual(
junos.install_config(
'actual/path/config.set',
**args),
ret)
mock_load.assert_called_with(
path='test/path/config',
format='set',
merge=False)
def test_install_config_overwrite(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = True
args = {'__pub_user': 'root', '__pub_arg': [{'overwrite': True}],
'overwrite': True, '__pub_fun': 'junos.install_config',
'__pub_jid': '20170222213858582619', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
ret = dict()
ret['message'] = 'Successfully loaded and committed!'
ret['out'] = True
self.assertEqual(
junos.install_config(
'actual/path/config.xml',
**args),
ret)
mock_load.assert_called_with(
path='test/path/config',
format='xml',
overwrite=True)
def test_install_config_overwrite_false(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = True
args = {'__pub_user': 'root', '__pub_arg': [{'overwrite': False}],
'overwrite': False, '__pub_fun': 'junos.install_config',
'__pub_jid': '20170222213858582619', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
ret = dict()
ret['message'] = 'Successfully loaded and committed!'
ret['out'] = True
self.assertEqual(
junos.install_config(
'actual/path/config',
**args),
ret)
mock_load.assert_called_with(
path='test/path/config', format='text', merge=True)
def test_install_config_load_causes_exception(self):
with patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_load.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Could not load configuration due to : "Test exception"'
ret['format'] = 'set'
ret['out'] = False
self.assertEqual(
junos.install_config(
path='actual/path/config.set'), ret)
def test_install_config_no_diff(self):
with patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = None
ret = dict()
ret['message'] = 'Configuration already applied!'
ret['out'] = True
self.assertEqual(junos.install_config('actual/path/config'), ret)
def test_install_config_write_diff(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('salt.modules.junos.fopen') as mock_fopen, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = True
args = {'__pub_user': 'root',
'__pub_arg': [{'diffs_file': 'copy/config/here'}],
'diffs_file': 'copy/config/here',
'__pub_fun': 'junos.install_config',
'__pub_jid': '20170222213858582619',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
ret = dict()
ret['message'] = 'Successfully loaded and committed!'
ret['out'] = True
self.assertEqual(
junos.install_config(
'actual/path/config',
**args),
ret)
mock_fopen.assert_called_with('copy/config/here', 'w')
def test_install_config_write_diff_exception(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('salt.modules.junos.fopen') as mock_fopen, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = True
mock_fopen.side_effect = self.raise_exception
args = {'__pub_user': 'root',
'__pub_arg': [{'diffs_file': 'copy/config/here'}],
'diffs_file': 'copy/config/here',
'__pub_fun': 'junos.install_config',
'__pub_jid': '20170222213858582619',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
ret = dict()
ret['message'] = 'Could not write into diffs_file due to: "Test exception"'
ret['out'] = False
self.assertEqual(
junos.install_config(
'actual/path/config',
**args),
ret)
mock_fopen.assert_called_with('copy/config/here', 'w')
def test_install_config_commit_params(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = True
args = {'comment': 'comitted via salt',
'__pub_user': 'root',
'__pub_arg': [{'comment': 'comitted via salt',
'confirm': 3}],
'confirm': 3,
'__pub_fun': 'junos.commit',
'__pub_jid': '20170221182856987820',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
ret = dict()
ret['message'] = 'Successfully loaded and committed!'
ret['out'] = True
self.assertEqual(
junos.install_config(
'actual/path/config',
**args),
ret)
mock_commit.assert_called_with(comment='comitted via salt', confirm=3)
def test_install_config_commit_check_exception(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Commit check threw the following exception: "Test exception"'
ret['out'] = False
self.assertEqual(junos.install_config('actual/path/config.xml'), ret)
def test_install_config_commit_check_fails(self):
with patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = False
ret = dict()
ret['message'] = 'Loaded configuration but commit check failed.'
ret['out'] = False
self.assertEqual(junos.install_config('actual/path/config.xml'), ret)
def test_install_config_commit_exception(self):
with patch('jnpr.junos.utils.config.Config.commit') as mock_commit, \
patch('jnpr.junos.utils.config.Config.commit_check') as mock_commit_check, \
patch('jnpr.junos.utils.config.Config.diff') as mock_diff, \
patch('jnpr.junos.utils.config.Config.load') as mock_load, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_isfile.return_value = True
mock_getsize.return_value = 10
mock_mkstemp.return_value = 'test/path/config'
mock_diff.return_value = 'diff'
mock_commit_check.return_value = True
mock_commit.side_effect = self.raise_exception
ret = dict()
ret['message'] = \
'Commit check successful but commit failed with "Test exception"'
ret['out'] = False
self.assertEqual(junos.install_config('actual/path/config'), ret)
def test_zeroize(self):
with patch('jnpr.junos.device.Device.cli') as mock_cli:
result = junos.zeroize()
ret = dict()
ret['out'] = True
ret['message'] = 'Completed zeroize and rebooted'
mock_cli.assert_called_once_with('request system zeroize')
self.assertEqual(result, ret)
def test_zeroize_throw_exception(self):
with patch('jnpr.junos.device.Device.cli') as mock_cli:
mock_cli.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Could not zeroize due to : "Test exception"'
ret['out'] = False
self.assertEqual(junos.zeroize(), ret)
def test_install_os_without_args(self):
ret = dict()
ret['message'] = \
'Please provide the salt path where the junos image is present.'
ret['out'] = False
self.assertEqual(junos.install_os(), ret)
def test_install_os_cp_fails(self):
with patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_getsize.return_value = 10
mock_isfile.return_value = False
ret = dict()
ret['message'] = 'Invalid image path.'
ret['out'] = False
self.assertEqual(junos.install_os('/image/path/'), ret)
def test_install_os_image_cp_fails(self):
with patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_getsize.return_value = 0
mock_isfile.return_value = True
ret = dict()
ret['message'] = 'Failed to copy image'
ret['out'] = False
self.assertEqual(junos.install_os('/image/path/'), ret)
def test_install_os(self):
with patch('jnpr.junos.utils.sw.SW.install') as mock_install, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_getsize.return_value = 10
mock_isfile.return_value = True
ret = dict()
ret['out'] = True
ret['message'] = 'Installed the os.'
self.assertEqual(junos.install_os('path'), ret)
def test_install_os_with_reboot_arg(self):
with patch('jnpr.junos.utils.sw.SW.install') as mock_install, \
patch('jnpr.junos.utils.sw.SW.reboot') as mock_reboot, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_getsize.return_value = 10
mock_isfile.return_value = True
args = {'__pub_user': 'root', '__pub_arg': [{'reboot': True}],
'reboot': True, '__pub_fun': 'junos.install_os',
'__pub_jid': '20170222213858582619', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
ret = dict()
ret['message'] = 'Successfully installed and rebooted!'
ret['out'] = True
self.assertEqual(junos.install_os('path', **args), ret)
def test_install_os_pyez_install_throws_exception(self):
with patch('jnpr.junos.utils.sw.SW.install') as mock_install, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_getsize.return_value = 10
mock_isfile.return_value = True
mock_install.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Installation failed due to: "Test exception"'
ret['out'] = False
self.assertEqual(junos.install_os('path'), ret)
def test_install_os_with_reboot_raises_exception(self):
with patch('jnpr.junos.utils.sw.SW.install') as mock_install, \
patch('jnpr.junos.utils.sw.SW.reboot') as mock_reboot, \
patch('salt.modules.junos.safe_rm') as mock_safe_rm, \
patch('salt.modules.junos.files.mkstemp') as mock_mkstemp, \
patch('os.path.isfile') as mock_isfile, \
patch('os.path.getsize') as mock_getsize:
mock_getsize.return_value = 10
mock_isfile.return_value = True
mock_reboot.side_effect = self.raise_exception
args = {'__pub_user': 'root', '__pub_arg': [{'reboot': True}],
'reboot': True, '__pub_fun': 'junos.install_os',
'__pub_jid': '20170222213858582619', '__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob', '__pub_ret': ''}
ret = dict()
ret['message'] = \
'Installation successful but reboot failed due to : "Test exception"'
ret['out'] = False
self.assertEqual(junos.install_os('path', **args), ret)
def test_file_copy_without_args(self):
ret = dict()
ret['message'] = \
'Please provide the absolute path of the file to be copied.'
ret['out'] = False
self.assertEqual(junos.file_copy(), ret)
def test_file_copy_invalid_src(self):
with patch('os.path.isfile') as mock_isfile:
mock_isfile.return_value = False
ret = dict()
ret['message'] = 'Invalid source file path'
ret['out'] = False
self.assertEqual(junos.file_copy('invalid/file/path', 'file'), ret)
def test_file_copy_without_dest(self):
ret = dict()
ret['message'] = \
'Please provide the absolute path of the destination where the file is to be copied.'
ret['out'] = False
with patch('salt.modules.junos.os.path.isfile') as mck:
mck.return_value = True
self.assertEqual(junos.file_copy('/home/user/config.set'), ret)
def test_file_copy(self):
with patch('salt.modules.junos.SCP') as mock_scp, \
patch('os.path.isfile') as mock_isfile:
mock_isfile.return_value = True
ret = dict()
ret['message'] = 'Successfully copied file from test/src/file to file'
ret['out'] = True
self.assertEqual(
junos.file_copy(
dest='file',
src='test/src/file'),
ret)
def test_file_copy_exception(self):
with patch('salt.modules.junos.SCP') as mock_scp, \
patch('os.path.isfile') as mock_isfile:
mock_isfile.return_value = True
mock_scp.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'Could not copy file : "Test exception"'
ret['out'] = False
self.assertEqual(
junos.file_copy(
dest='file',
src='test/src/file'),
ret)
# These test cases test the __virtual__ function, used internally by salt
# to check if the given module is loadable. This function is not used by
# an external user.
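    # (Illustrative context note, not part of the original tests: when __virtual__()
    # returns the virtual name string, the salt loader exposes this module as `junos`;
    # when it returns a (False, reason) tuple, the loader skips it and reports the reason.)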
def test_virtual_proxy_unavailable(self):
with patch.dict(junos.__opts__, {}):
res = (False, 'The junos module could not be '
'loaded: junos-eznc or jxmlease or proxy could not be loaded.')
self.assertEqual(junos.__virtual__(), res)
def test_virtual_all_true(self):
with patch.dict(junos.__opts__, {'proxy': 'test'}):
self.assertEqual(junos.__virtual__(), 'junos')
def test_rpc_without_args(self):
ret = dict()
ret['message'] = 'Please provide the rpc to execute.'
ret['out'] = False
self.assertEqual(junos.rpc(), ret)
def test_rpc_get_config_exception(self):
with patch('jnpr.junos.device.Device.execute') as mock_execute:
mock_execute.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'RPC execution failed due to "Test exception"'
ret['out'] = False
self.assertEqual(junos.rpc('get_config'), ret)
def test_rpc_get_config_filter(self):
with patch('jnpr.junos.device.Device.execute') as mock_execute:
mock_execute.return_value = etree.XML('<reply><rpc/></reply>')
args = {'__pub_user': 'root',
'__pub_arg': ['get-config',
{'filter': '<configuration><system/></configuration>'}],
'__pub_fun': 'junos.rpc',
'__pub_jid': '20170314162715866528',
'__pub_tgt': 'mac_min',
'__pub_tgt_type': 'glob',
'filter': '<configuration><system/></configuration>',
'__pub_ret': ''}
junos.rpc('get-config', **args)
exec_args = mock_execute.call_args
expected_rpc = '<get-configuration dev_timeout="30" ' \
'format="xml"><configuration><system/></configuration></get-configuration>'
self.assertEqualXML(exec_args[0][0], expected_rpc)
def test_rpc_get_interface_information(self):
with patch('jnpr.junos.device.Device.execute') as mock_execute:
junos.rpc('get-interface-information', format='json')
args = mock_execute.call_args
expected_rpc = '<get-interface-information format="json"/>'
self.assertEqualXML(args[0][0], expected_rpc)
self.assertEqual(args[1], {'dev_timeout': 30})
def test_rpc_get_interface_information_with_kwargs(self):
with patch('jnpr.junos.device.Device.execute') as mock_execute:
args = {'__pub_user': 'root',
'__pub_arg': ['get-interface-information',
'',
'text',
{'terse': True,
'interface_name': 'lo0'}],
'terse': True,
'__pub_fun': 'junos.rpc',
'__pub_jid': '20170314160943363563',
'__pub_tgt': 'mac_min',
'interface_name': 'lo0',
'__pub_tgt_type': 'glob',
'__pub_ret': ''}
junos.rpc('get-interface-information', format='text', **args)
args = mock_execute.call_args
expected_rpc = (
'<get-interface-information format="text">'
'<terse/><interface-name>lo0</interface-name></get-interface-information>'
)
self.assertEqualXML(etree.tostring(args[0][0]), expected_rpc)
def test_rpc_get_chassis_inventory_filter_as_arg(self):
with patch('salt.modules.junos.jxmlease.parse') as mock_jxmlease, \
patch('salt.modules.junos.etree.tostring') as mock_tostring, \
patch('salt.modules.junos.logging.Logger.warning') as mock_warning, \
patch('jnpr.junos.device.Device.execute') as mock_execute:
junos.rpc(
'get-chassis-inventory',
filter='<configuration><system/></configuration>')
mock_warning.assert_called_with(
'Filter ignored as it is only used with "get-config" rpc')
def test_rpc_get_interface_information_exception(self):
with patch('jnpr.junos.device.Device.execute') as mock_execute:
mock_execute.side_effect = self.raise_exception
ret = dict()
ret['message'] = 'RPC execution failed due to "Test exception"'
ret['out'] = False
self.assertEqual(junos.rpc('get_interface_information'), ret)
def test_rpc_write_file_format_text(self):
with patch('jnpr.junos.device.Device.execute') as mock_execute:
mock_execute.return_value = etree.XML(
'<rpc-reply>text rpc reply</rpc-reply>')
m = mock_open()
with patch('salt.modules.junos.fopen', m, create=True):
junos.rpc('get-chassis-inventory', '/path/to/file', 'text')
handle = m()
handle.write.assert_called_with('text rpc reply')
def test_rpc_write_file_format_json(self):
with patch('jnpr.junos.device.Device.execute') as mock_execute, \
patch('salt.modules.junos.json.dumps') as mock_dumps:
mock_dumps.return_value = 'json rpc reply'
m = mock_open()
with patch('salt.modules.junos.fopen', m, create=True):
junos.rpc('get-chassis-inventory', '/path/to/file', format='json')
handle = m()
handle.write.assert_called_with('json rpc reply')
def test_rpc_write_file(self):
with patch('salt.modules.junos.jxmlease.parse') as mock_parse, \
patch('salt.modules.junos.etree.tostring') as mock_tostring, \
patch('jnpr.junos.device.Device.execute') as mock_execute:
mock_tostring.return_value = 'xml rpc reply'
m = mock_open()
with patch('salt.modules.junos.fopen', m, create=True):
junos.rpc('get-chassis-inventory', '/path/to/file')
handle = m()
handle.write.assert_called_with('xml rpc reply')
|
py | b40796472224312626ca68b201e7897e43abe589 |
from typing import List
class Solution:
    def commonPrefix(self, a: str, b: str) -> str:
        # Compare only up to the length of the shorter string to avoid an IndexError.
        length = min(len(a), len(b))
        for i in range(length):
            if a[i] != b[i]:
                # Mismatch: the common prefix ends just before index i.
                return a[:i]
        # No mismatch found: the shorter string is itself the common prefix.
        return a[:length]
    def longestCommonPrefix(self, strs: List[str]) -> str:
        if not strs:
            return ""
        common = strs[0]
        for i in range(1, len(strs)):
            common = self.commonPrefix(strs[i], common)
            if common == "":
                return common
        return common
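# Illustrative usage sketch (the sample inputs below are arbitrary and not part of
# the original solution):
if __name__ == "__main__":
    solver = Solution()
    print(solver.longestCommonPrefix(["flower", "flow", "flight"]))  # -> "fl"
    print(solver.longestCommonPrefix(["dog", "racecar", "car"]))     # -> ""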
|
py | b40796bf9ed390f79318714faf5872c9e558a3e3 |
from ..utils import Object
class ChatListMain(Object):
"""
A main list of chats
Attributes:
ID (:obj:`str`): ``ChatListMain``
No parameters required.
Returns:
ChatList
Raises:
:class:`telegram.Error`
"""
ID = "chatListMain"
def __init__(self, **kwargs):
pass
@staticmethod
def read(q: dict, *args) -> "ChatListMain":
return ChatListMain()
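# Usage note (illustrative): the wrapper's deserializer is expected to call
# ChatListMain.read(...) when it meets {"@type": "chatListMain"}; since the type
# has no parameters, the call simply returns an empty ChatListMain() instance.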
|
py | b40797397d33f0a8c385ecb09957b276fddb12d8 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from copy import deepcopy
import numpy as np
from extensions.back.ChangeOutputTypeAttributes import ChangeOutputTypeAttributes
from extensions.ops.Cast import Cast
from extensions.ops.range import Range
from mo.front.common.partial_infer.utils import float32_array
from mo.middle.passes.convert_data_type import convert_blobs, data_type_str_to_np
from mo.middle.passes.infer import partial_infer
from mo.utils.error import Error
from mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph, result, regular_op_with_empty_data, connect
from unit_tests.utils.graph import valued_const_with_data
class ChangeOutputTypeAttributesTests(unittest.TestCase):
def test_range_correct_case(self):
graph, graph_ref = build_range_test_graphs(start=0, limit=10, delta=1, dst_type_str='FP16')
ChangeOutputTypeAttributes().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True)
self.assertTrue(flag, resp)
    # Starting from 2048 the absolute difference between neighbouring representable
    # FP16 values is more than 1, so a long integer range cannot be reproduced
    # exactly in FP16; the transformation fails because of the shape inconsistency.
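    # For example, np.float16(2049) rounds to 2048.0 (the spacing in [2048, 4096) is 2)
    # and np.float16(100000) overflows to inf, since the largest finite FP16 value is 65504.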
def test_range_different_values(self):
graph, graph_ref = build_range_test_graphs(start=0, limit=50000, delta=1, dst_type_str='FP16')
self.assertRaises(Error, ChangeOutputTypeAttributes().find_and_replace_pattern, graph)
def test_range_out_of_fp16_max(self):
graph, graph_ref = build_range_test_graphs(start=0, limit=100000, delta=1, dst_type_str='FP16')
self.assertRaises(Error, ChangeOutputTypeAttributes().find_and_replace_pattern, graph)
def test_range_out_of_fp16_min(self):
graph, graph_ref = build_range_test_graphs(start=0, limit=-100000, delta=-1, dst_type_str='FP16')
self.assertRaises(Error, ChangeOutputTypeAttributes().find_and_replace_pattern, graph)
def test_cast_correct_case(self):
input_data = np.array([0, 1000, 4, 9, 0])
graph, graph_ref = build_cast_test_graphs(input_data, dst_type_str='FP16')
ChangeOutputTypeAttributes().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_cast_out_of_fp16_max(self):
input_data = np.array([0, 100000, 4, 9, 0])
graph, graph_ref = build_cast_test_graphs(input_data, dst_type_str='FP16')
self.assertRaises(Error, ChangeOutputTypeAttributes().find_and_replace_pattern, graph)
def test_cast_out_of_fp16_min(self):
input_data = np.array([0, -100000, 4, 9, 0])
graph, graph_ref = build_cast_test_graphs(input_data, dst_type_str='FP16')
self.assertRaises(Error, ChangeOutputTypeAttributes().find_and_replace_pattern, graph)
def build_range_test_graphs(start=0, limit=10, delta=1, dst_type_str='FP16'):
nodes = {
**valued_const_with_data('start', float32_array(start)),
**valued_const_with_data('limit', float32_array(limit)),
**valued_const_with_data('delta', float32_array(delta)),
**regular_op_with_empty_data('range', {'type': 'Range', 'op': 'Range',
'output_type': np.float32,
'infer': Range.infer}),
**result('res'),
}
nodes_ref = deepcopy(nodes)
nodes_ref.update({
**regular_op_with_empty_data('range', {'type': 'Range', 'op': 'Range',
'output_type': data_type_str_to_np(dst_type_str),
'infer': Range.infer}),
})
edges = [
*connect('start', '0:range'),
*connect('limit', '1:range'),
*connect('delta', '2:range'),
*connect('range', 'res'),
]
graph = build_graph(nodes, edges)
graph_ref = build_graph(nodes_ref, edges)
graph = partial_infer(graph)
graph.graph['cmd_params'].data_type = dst_type_str
convert_blobs(graph, dst_type_str)
return graph, graph_ref
def build_cast_test_graphs(input_data, dst_type_str='FP16'):
nodes = {
**valued_const_with_data('input', float32_array(input_data)),
**regular_op_with_empty_data('cast', {'type': 'Convert', 'op': 'Cast',
'dst_type': np.float32,
'infer': Cast.infer}),
**result('res'),
}
nodes_ref = deepcopy(nodes)
nodes_ref.update({
**regular_op_with_empty_data('cast', {'type': 'Convert', 'op': 'Cast',
'dst_type': data_type_str_to_np(dst_type_str),
'infer': Cast.infer}),
})
edges = [
*connect('input', 'cast'),
*connect('cast', 'res'),
]
graph = build_graph(nodes, edges)
graph_ref = build_graph(nodes_ref, edges)
graph = partial_infer(graph)
graph.graph['cmd_params'].data_type = dst_type_str
convert_blobs(graph, dst_type_str)
return graph, graph_ref
|
py | b40797afa3ef95731f35730f3d1a777d8327a004 | import cv2
import numpy as np
img = cv2.imread('Soru2.tif',0)
# global thresholding
ret1,th1 = cv2.threshold(img,200,255,cv2.THRESH_BINARY)
# show the thresholded image
cv2.imshow("threshold", th1)
# create a 5x5 structuring element (kernel)
kernel = np.ones((5,5),np.uint8)
# erode once with the 5x5 kernel, then label the remaining blobs using 8-connectivity
erosion = cv2.erode(th1,kernel,iterations = 1)
output = cv2.connectedComponentsWithStats(erosion, connectivity=8)
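# Descriptive note: output is (num_labels, label_image, stats, centroids); stats is an
# N x 5 array whose rows are [x, y, width, height, area] and whose row 0 is the background.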
# counter for identical components (those with a significant pixel count)
count = 0
# print the pixel count of each connected component (offset by 1 to skip the background row)
print("Connected \t\t Num of\nComponent \t\t Pixel")
for i in range(output[0]-1):
    print(i+1, "\t\t\t\t", output[2][i + 1][4])
    # a component covering more than 100 pixels is counted as identical
    if output[2][i + 1][4] > 100:
        count += 1
print("\n\nNumber of identical components is ", count)
cv2.imshow("erosion",erosion)
cv2.imwrite("answer2-erosion.png", erosion)
cv2.imwrite("answer2-threshold.png",th1)
cv2.waitKey(0)
|
py | b4079810aebc03afcda1c887089b666a79fc0fde | import time
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common.base_class import BaseAlgorithm
from stable_baselines3.common.buffers import DictRolloutBuffer, RolloutBuffer
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.policies import ActorCriticPolicy, BasePolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import obs_as_tensor, safe_mean
from stable_baselines3.common.vec_env import VecEnv
class OnPolicyAlgorithm(BaseAlgorithm):
"""
The base for On-Policy algorithms (ex: A2C/PPO).
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator.
Equivalent to classic advantage when set to 1.
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param policy_base: The base policy used by this method
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param monitor_wrapper: When creating an environment, whether to wrap it
or not in a Monitor wrapper.
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
:param supported_action_spaces: The action spaces supported by the algorithm.
"""
def __init__(
self,
policy: Union[str, Type[ActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule],
n_steps: int,
gamma: float,
gae_lambda: float,
use_n_step_advantage: bool,
ent_coef: float,
vf_coef: float,
max_grad_norm: float,
use_sde: bool,
sde_sample_freq: int,
policy_base: Type[BasePolicy] = ActorCriticPolicy,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
monitor_wrapper: bool = True,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
supported_action_spaces: Optional[Tuple[gym.spaces.Space, ...]] = None,
):
super(OnPolicyAlgorithm, self).__init__(
policy=policy,
env=env,
policy_base=policy_base,
learning_rate=learning_rate,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
create_eval_env=create_eval_env,
support_multi_env=True,
seed=seed,
tensorboard_log=tensorboard_log,
supported_action_spaces=supported_action_spaces,
)
self.n_steps = n_steps
self.gamma = gamma
self.gae_lambda = gae_lambda
self.use_n_step_advantage = use_n_step_advantage
self.ent_coef = ent_coef
self.vf_coef = vf_coef
self.max_grad_norm = max_grad_norm
self.rollout_buffer = None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
self._setup_lr_schedule()
self.set_random_seed(self.seed)
buffer_cls = DictRolloutBuffer if isinstance(self.observation_space, gym.spaces.Dict) else RolloutBuffer
self.rollout_buffer = buffer_cls(
self.n_steps,
self.observation_space,
self.action_space,
self.device,
gamma=self.gamma,
gae_lambda=self.gae_lambda,
use_n_step_advantage=self.use_n_step_advantage,
n_envs=self.n_envs,
)
self.policy = self.policy_class( # pytype:disable=not-instantiable
self.observation_space,
self.action_space,
self.lr_schedule,
use_sde=self.use_sde,
**self.policy_kwargs # pytype:disable=not-instantiable
)
self.policy = self.policy.to(self.device)
def collect_rollouts(
self,
env: VecEnv,
callback: BaseCallback,
rollout_buffer: RolloutBuffer,
n_rollout_steps: int,
) -> bool:
"""
Collect experiences using the current policy and fill a ``RolloutBuffer``.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
:param env: The training environment
:param callback: Callback that will be called at each step
(and at the beginning and end of the rollout)
:param rollout_buffer: Buffer to fill with rollouts
:param n_rollout_steps: Number of experiences to collect per environment
:return: True if function returned with at least `n_rollout_steps`
collected, False if callback terminated rollout prematurely.
"""
assert self._last_obs is not None, "No previous observation was provided"
# Switch to eval mode (this affects batch norm / dropout)
self.policy.set_training_mode(False)
n_steps = 0
rollout_buffer.reset()
# Sample new weights for the state dependent exploration
if self.use_sde:
self.policy.reset_noise(env.num_envs)
callback.on_rollout_start()
while n_steps < n_rollout_steps:
if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:
# Sample a new noise matrix
self.policy.reset_noise(env.num_envs)
with th.no_grad():
# Convert to pytorch tensor or to TensorDict
obs_tensor = obs_as_tensor(self._last_obs, self.device)
actions, values, log_probs = self.policy.forward(obs_tensor)
actions = actions.cpu().numpy()
# Rescale and perform action
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)
new_obs, rewards, dones, infos = env.step(clipped_actions)
self.num_timesteps += env.num_envs
# Give access to local variables
callback.update_locals(locals())
if callback.on_step() is False:
return False
self._update_info_buffer(infos)
n_steps += 1
if isinstance(self.action_space, gym.spaces.Discrete):
# Reshape in case of discrete action
actions = actions.reshape(-1, 1)
rollout_buffer.add(self._last_obs, actions, rewards, self._last_episode_starts, values, log_probs)
self._last_obs = new_obs
self._last_episode_starts = dones
with th.no_grad():
# Compute value for the last timestep
obs_tensor = obs_as_tensor(new_obs, self.device)
_, values, _ = self.policy.forward(obs_tensor)
rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones)
callback.on_rollout_end()
return True
def train(self) -> None:
"""
Consume current rollout data and update policy parameters.
Implemented by individual algorithms.
"""
raise NotImplementedError
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 1,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "OnPolicyAlgorithm",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> "OnPolicyAlgorithm":
iteration = 0
total_timesteps, callback = self._setup_learn(
total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, eval_log_path, reset_num_timesteps, tb_log_name
)
callback.on_training_start(locals(), globals())
while self.num_timesteps < total_timesteps:
continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps)
if continue_training is False:
break
iteration += 1
self._update_current_progress_remaining(self.num_timesteps, total_timesteps)
# Display training infos
if log_interval is not None and iteration % log_interval == 0:
fps = int(self.num_timesteps / (time.time() - self.start_time))
self.logger.record("time/iterations", iteration, exclude="tensorboard")
if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
self.logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]))
self.logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]))
self.logger.record("time/fps", fps)
self.logger.record("time/time_elapsed", int(time.time() - self.start_time), exclude="tensorboard")
self.logger.record("time/total_timesteps", self.num_timesteps, exclude="tensorboard")
self.logger.dump(step=self.num_timesteps)
self.train()
callback.on_training_end()
return self
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "policy.optimizer"]
return state_dicts, []
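# --- Illustrative sketch (not part of stable-baselines3) ----------------------
# A minimal A2C-style subclass showing how `train()` is expected to consume the
# rollout buffer filled by `collect_rollouts()`. The class name and the
# hyper-parameter values below are assumptions chosen for the example, not
# library defaults; `use_n_step_advantage` is simply passed through to the buffer.
class MinimalA2C(OnPolicyAlgorithm):
    def __init__(self, policy, env, learning_rate=7e-4, n_steps=5, **kwargs):
        super().__init__(
            policy,
            env,
            learning_rate=learning_rate,
            n_steps=n_steps,
            gamma=0.99,
            gae_lambda=1.0,
            use_n_step_advantage=False,
            ent_coef=0.0,
            vf_coef=0.5,
            max_grad_norm=0.5,
            use_sde=False,
            sde_sample_freq=-1,
            **kwargs,
        )

    def train(self) -> None:
        # Switch to train mode and refresh the learning rate
        self.policy.set_training_mode(True)
        self._update_learning_rate(self.policy.optimizer)
        # A2C-style update: one gradient step over the whole rollout (batch_size=None)
        for rollout_data in self.rollout_buffer.get(batch_size=None):
            actions = rollout_data.actions
            if isinstance(self.action_space, gym.spaces.Discrete):
                # The buffer stores discrete actions as floats of shape (n, 1)
                actions = actions.long().flatten()
            values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
            values = values.flatten()
            advantages = rollout_data.advantages
            # Policy gradient loss, value regression loss and entropy bonus
            policy_loss = -(advantages * log_prob).mean()
            value_loss = th.nn.functional.mse_loss(rollout_data.returns, values)
            entropy_loss = -th.mean(entropy) if entropy is not None else -th.mean(-log_prob)
            loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss
            # Optimization step with gradient clipping
            self.policy.optimizer.zero_grad()
            loss.backward()
            th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
            self.policy.optimizer.step()

# Example usage (assuming gym's CartPole-v1 is available locally):
#   model = MinimalA2C("MlpPolicy", "CartPole-v1", verbose=1)
#   model.learn(total_timesteps=10_000)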
|
py | b407983503d652b24285404cbaf487fd542805c3 | """empty message
Revision ID: 051062615c0a
Revises: 62f81f2999c9
Create Date: 2020-09-22 13:05:22.093628
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '051062615c0a'
down_revision = '62f81f2999c9'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('about_me', sa.Text(), nullable=True))
op.add_column('users', sa.Column('avatar_hash', sa.String(length=32), nullable=True))
op.add_column('users', sa.Column('location', sa.String(length=64), nullable=True))
op.add_column('users', sa.Column('name', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'name')
op.drop_column('users', 'location')
op.drop_column('users', 'avatar_hash')
op.drop_column('users', 'about_me')
# ### end Alembic commands ###
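# Illustrative usage sketch (assumption: the standard Alembic CLI workflow; the
# revision identifiers below are taken from this file):
#
#   alembic upgrade 051062615c0a      # apply this migration
#   alembic downgrade 62f81f2999c9    # revert to the previous revision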
|
py | b4079850ff7d5f6712cc33b5117fbb0609f5a4ac | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import pkg_resources
import six
import tests
class SanityTest(unittest.TestCase):
maxDiff = 32768
def testTestsJsonUpToDate(self):
"""Autodiscovers all test suites and checks that tests.json is up to date"""
loader = tests.Loader()
loader.loadTestsFromNames(['tests'])
test_suite_names = sorted({
test_case_class.id().rsplit('.', 1)[0]
for test_case_class in tests._loader.iterate_suite_cases(
loader.suite)
})
tests_json_string = pkg_resources.resource_string('tests', 'tests.json')
tests_json = json.loads(tests_json_string.decode()
if six.PY3 else tests_json_string)
self.assertSequenceEqual(tests_json, test_suite_names)
if __name__ == '__main__':
unittest.main(verbosity=2)
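# Illustrative sketch (assumption, not part of the gRPC repository): if this
# sanity check fails, tests.json can be regenerated from the autodiscovered
# suite names using the same APIs the test itself uses; the output path below
# is hypothetical and depends on the source checkout layout.
#
#   loader = tests.Loader()
#   loader.loadTestsFromNames(['tests'])
#   names = sorted({
#       case.id().rsplit('.', 1)[0]
#       for case in tests._loader.iterate_suite_cases(loader.suite)
#   })
#   with open('tests/tests.json', 'w') as f:  # hypothetical path
#       json.dump(names, f, indent=2)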
|