filename | text
---|---
the-stack_0_3493 | # Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Kuhn Poker implemented in Python.
This is a simple demonstration of implementing a game in Python, featuring
chance and imperfect information.
Python games are significantly slower than C++, but it may still be suitable
for prototyping or for small games.
It is possible to run C++ algorithms on Python implemented games, This is likely
to have good performance if the algorithm simply extracts a game tree and then
works with that. It is likely to be poor if the algorithm relies on processing
and updating states as it goes, e.g. MCTS.
"""
import enum
import numpy as np
import pyspiel
from open_spiel.python.games.tt_utils import *
_NUM_PLAYERS = 2
_NUM_ACTIONS = (len(TITAN_IDS) + NUM_TILES)*MAX_TITANS
_MAX_GAME_LENGTH = 48
# r1: (2 titans + 2 tiles) * 2 players
# r2: (1 titan + 3 tiles) * 2 players
# r3: (1 titan + 4 tiles) * 2 players
# r4: (1 titan + 5 tiles) * 2 players
# r5: (5 tiles) * 2 players
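# total: (2+2)*2 + (1+3)*2 + (1+4)*2 + (1+5)*2 + 5*2 = 8 + 8 + 10 + 12 + 10 = 48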
_GAME_TYPE = pyspiel.GameType(
short_name="tt",
long_name="Tiny Titans",
dynamics=pyspiel.GameType.Dynamics.SEQUENTIAL,
chance_mode=pyspiel.GameType.ChanceMode.DETERMINISTIC,
information=pyspiel.GameType.Information.IMPERFECT_INFORMATION,
utility=pyspiel.GameType.Utility.ZERO_SUM,
reward_model=pyspiel.GameType.RewardModel.TERMINAL,
max_num_players=_NUM_PLAYERS,
min_num_players=_NUM_PLAYERS,
provides_information_state_string=True,
provides_information_state_tensor=True,
provides_observation_string=True,
provides_observation_tensor=True,
provides_factored_observation_string=True)
_GAME_INFO = pyspiel.GameInfo(
num_distinct_actions=_NUM_ACTIONS,
max_chance_outcomes=0,
num_players=_NUM_PLAYERS,
min_utility=-1.03,
max_utility=1.03,
utility_sum=0.0,
max_game_length=_MAX_GAME_LENGTH)
class TTGame(pyspiel.Game):
"""A Python version of Tiny Titans."""
def __init__(self, params=None):
super().__init__(_GAME_TYPE, _GAME_INFO, params or dict())
def new_initial_state(self):
"""Returns a state corresponding to the start of a game."""
return TTState(self)
def make_py_observer(self, iig_obs_type=None, params=None):
"""Returns an object used for observing game state."""
return TTObserver(
iig_obs_type or pyspiel.IIGObservationType(perfect_recall=False),
params)
class TTState(pyspiel.State):
"""A python version of the tt state."""
def __init__(self, game):
"""Constructor; should only be called by Game.new_initial_state."""
super().__init__(game)
self.score = [0, 0]
self.titans = [[], []]
self.tiles = [[], []]
self.last_tiles = [[], []] # needed because we wipe placements on new rounds
self.round = 0 # represents the group of turns that leads into a battle
self.actions = []
self._next_player = 0
self._game_over = False
def _cur_max_titans(self):
return min(self.round+2, MAX_TITANS)
# OpenSpiel (PySpiel) API functions are below. This is the standard set that
# should be implemented by every sequential-move game with chance.
def current_player(self):
"""Returns id of the next player to move, or TERMINAL if game is over."""
if self._game_over:
return pyspiel.PlayerId.TERMINAL
else:
return self._next_player
def _legal_actions(self, player):
"""Returns a list of legal actions, sorted in ascending order."""
assert player >= 0
ret = []
my_titans = self.titans[player]
my_tiles = self.tiles[player]
used_titans = set(my_titans)
used_tiles = set(my_tiles)
if len(my_titans) < self._cur_max_titans():
base_index = len(my_titans)*len(TITAN_IDS)
for titan_index in range(len(TITAN_IDS)):
if titan_index not in used_titans:
ret.append((base_index+titan_index))
return ret
else: # tile index
base_index = MAX_TITANS*len(TITAN_IDS) + len(my_tiles)*NUM_TILES
for tile_index in range(NUM_TILES):
if tile_index not in used_tiles:
ret.append((base_index+tile_index))
return ret
def chance_outcomes(self):
"""Returns the possible chance outcomes and their probabilities."""
assert self.is_chance_node()
assert False, "not implemented"
return 0
# either apply next titan slot, or placement slot
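# Action index layout (mirrors _legal_actions above):
#   [0, MAX_TITANS*len(TITAN_IDS))            -> choose titan (action % len(TITAN_IDS)) for slot action // len(TITAN_IDS)
#   [MAX_TITANS*len(TITAN_IDS), _NUM_ACTIONS) -> place tile ((action - base) % NUM_TILES) for slot (action - base) // NUM_TILES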
def _parse_action(self, action):
next_titan = None
next_tile = None
my_titans = self.titans[self._next_player]
my_tiles = self.tiles[self._next_player]
base_tile_index = MAX_TITANS*len(TITAN_IDS)
if action < base_tile_index: # create titan
assert len(my_titans) < self._cur_max_titans()
titan_slot = action//len(TITAN_IDS)
assert titan_slot == len(my_titans)
next_titan = action % len(TITAN_IDS)
else: # set tile
assert len(my_tiles) < len(my_titans)
tile_slot = (action-base_tile_index)//NUM_TILES
assert tile_slot == len(my_tiles)
next_tile = (action-base_tile_index) % NUM_TILES
return next_titan, next_tile
def _apply_action(self, action):
"""Applies the specified action to the state."""
if self.is_chance_node():
assert False, "Not Implemented"
return
else:
self.actions.append(action)
my_titans = self.titans[self._next_player]
my_tiles = self.tiles[self._next_player]
next_titan, next_tile = self._parse_action(action)
if next_titan is not None:
my_titans.append(next_titan)
else:
my_tiles.append(next_tile)
# the current player's placement for this round is still incomplete
if len(my_titans) < self._cur_max_titans() or len(my_tiles) < len(my_titans):
return
# player 0 done, player 1 turn
if self._next_player == 0:
self._next_player = 1
return
# both done, play a game
is_p0_win = check_server_win(self.titans, self.tiles)
if is_p0_win:
self.score[0] += 1
else:
self.score[1] += 1
# if a round ended
if self.score[0] != 3 and self.score[1] != 3:
self.round += 1
self._next_player = 0
self.last_tiles = self.tiles
self.tiles = [[], []]
return
# if is complete
self._game_over = True
def _action_to_string(self, player, action):
"""Action -> string."""
# TODO: toname and totile functions
next_titan, next_tile = self._parse_action(action)
if next_titan is not None:
cmd = TITAN_ID_TO_NAME[TITAN_IDS[next_titan]]
else:
cmd = next_tile+1
return f"{player}({cmd})"
def is_terminal(self):
"""Returns True if the game is over."""
return self._game_over
def returns(self):
"""Total reward for each player over the course of the game so far."""
points_0 = self.score[0]//3 + self.score[0]*0.01
points_1 = self.score[1]//3 + self.score[1]*0.01
return [points_0-points_1, points_1-points_0]
def __str__(self):
"""String for debug purposes. No particular semantics are required."""
"""Observation of `state` from the PoV of `player`, as a string."""
pieces = []
pieces.append(f"round {self.round}")
pieces.append(f"score {self.score}")
for cur_player in range(2):
titans = self.titans[cur_player]
titans = [f"{TITAN_ID_TO_NAME[TITAN_IDS[tindex]]}({TITAN_IDS[tindex]})" for tindex in titans]
pieces.append(f"private titans p{cur_player} {titans}")
for cur_player in range(2):
pieces.append(f"private tiles p{cur_player} {self.tiles[cur_player]}")
return "\n".join(pieces)
class TTObserver:
"""Observer, conforming to the PyObserver interface (see observation.py)."""
def __init__(self, iig_obs_type, params):
"""Initializes an empty observation tensor."""
if params:
raise ValueError(f"Observation parameters not supported; passed {params}")
# Determine which observation pieces we want to include.
pieces = [("player", 2, (2,)), ("round", 1, (1,))]
if iig_obs_type.private_info == pyspiel.PrivateInfoType.SINGLE_PLAYER:
pieces.append(("private_titans", MAX_TITANS * len(TITAN_IDS), (MAX_TITANS, len(TITAN_IDS))))
pieces.append(("private_tiles", MAX_TITANS * NUM_TILES, (MAX_TITANS, NUM_TILES)))
if iig_obs_type.public_info:
if iig_obs_type.perfect_recall:
pieces.append(("actions", _MAX_GAME_LENGTH*_NUM_ACTIONS, (_MAX_GAME_LENGTH, _NUM_ACTIONS)))
else:
pieces.append(("score", 2, (2,)))
pieces.append(("public_titans", MAX_TITANS * len(TITAN_IDS) * 2, (MAX_TITANS, len(TITAN_IDS), 2)))
pieces.append(("public_tiles", MAX_TITANS * NUM_TILES * 2, (MAX_TITANS, NUM_TILES, 2)))
# Build the single flat tensor.
total_size = sum(size for name, size, shape in pieces)
self.tensor = np.zeros(total_size, np.float32)
# Build the named & reshaped views of the bits of the flat tensor.
self.dict = {}
index = 0
for name, size, shape in pieces:
self.dict[name] = self.tensor[index:index + size].reshape(shape)
index += size
def set_from(self, state: TTState, player):
"""Updates `tensor` and `dict` to reflect `state` from PoV of `player`."""
self.tensor.fill(0)
if "player" in self.dict:
self.dict["player"][player] = 1
if "round" in self.dict:
self.dict["round"][0] = state.round
if "score" in self.dict:
self.dict["score"][0] = state.score[0]
self.dict["score"][1] = state.score[1]
if "private_titans" in self.dict:
for i, titan in enumerate(state.titans[player]):
self.dict["private_titans"][i][titan] = 1
if "private_tiles" in self.dict:
for i, tile in enumerate(state.tiles[player]):
self.dict["private_tiles"][i][tile] = 1
if "public_titans" in self.dict:
for cur_player in range(2):
for i, titan in enumerate(state.titans[cur_player][:len(state.last_tiles[cur_player])]):
self.dict["public_titans"][i][titan][cur_player] = 1
if "public_tiles" in self.dict:
for cur_player in range(2):
for i, tile in enumerate(state.last_tiles[cur_player]):
self.dict["public_tiles"][i][tile][cur_player] = 1
if "actions" in self.dict:
for turn, action in enumerate(state.actions):
self.dict["actions"][turn, action] = 1
def string_from(self, state: TTState, player):
"""Observation of `state` from the PoV of `player`, as a string."""
pieces = []
if "player" in self.dict:
pieces.append(f"p{player}")
if "round" in self.dict:
pieces.append(f"round {state.round}")
if "score" in self.dict:
pieces.append(f"score {state.score}")
if "private_titans" in self.dict:
pieces.append(f"private titans {state.titans[player]}")
if "private_tiles" in self.dict:
pieces.append(f"private tiles {state.tiles[player]}")
if "public_titans" in self.dict:
for cur_player in range(2):
pieces.append(f"public titans p{cur_player} {state.titans[cur_player][:len(state.last_tiles[cur_player])]}")
if "public_tiles" in self.dict:
for cur_player in range(2):
pieces.append(f"private tiles p{cur_player} {state.last_tiles[cur_player]}")
if "actions" in self.dict:
pieces.append(f"action history {self.dict['actions']}")
return " ".join(str(p) for p in pieces)
# Register the game with the OpenSpiel library
pyspiel.register_game(_GAME_TYPE, TTGame)
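# A minimal usage sketch (not part of the original module): load the registered
# game through pyspiel and walk to a terminal state with arbitrary legal actions.
if __name__ == "__main__":
    game = pyspiel.load_game("tt")
    state = game.new_initial_state()
    while not state.is_terminal():
        state.apply_action(state.legal_actions()[0])
    print(state.returns())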
|
the-stack_0_3495 | #!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# collectd implementation of:
# https://github.com/BrightcoveOS/Diamond/blob/master/src/collectors/tcp/tcp.py
import collectd
import os
class Tcp(object):
PROC = ['/proc/net/netstat', '/proc/net/snmp']
GAUGES = ['CurrEstab', 'MaxConn']
def __init__(self):
self.plugin_name = "tcp"
self.allowed_metrics = []
def config(self, obj):
for node in obj.children:
if node.key == 'Metrics':
self.allowed_metrics = node.values
def log(self, t, message):
if t in ('err', 'error'):
collectd.error('%s: %s' %(self.plugin_name, message))
elif t == 'warn':
collectd.warning('%s: %s' %(self.plugin_name, message))
elif t == 'verb':
collectd.info('%s: %s' %(self.plugin_name, message))
else:
collectd.info('%s: %s' %(self.plugin_name, message))
def submit(self, metric_name, value, type):
v = collectd.Values()
v.plugin = self.plugin_name
v.type = type
v.type_instance = metric_name
v.values = [int(value)]
v.dispatch()
def collect(self):
metrics = {}
for filepath in self.PROC:
if not os.access(filepath, os.R_OK):
self.log('error', 'Permission to access %s denied' %filepath)
continue
header = ''
data = ''
# Scan the file for the lines that start with Tcp
file = open(filepath)
if not file:
self.log('error', 'Failed to open %s' %filepath)
continue
while True:
line = file.readline()
# Reached EOF?
if len(line) == 0:
break
# Line has metrics?
if line.startswith("Tcp"):
header = line
data = file.readline()
break
file.close()
# No data from the file?
if header == '' or data == '':
self.log('error', '%s has no lines with Tcp' %filepath)
continue
header = header.split()
data = data.split()
for i in xrange(1, len(header)):
metrics[header[i]] = data[i]
#Send TCP stats to collectd
allowed_metrics = set(self.allowed_metrics).intersection(metrics.keys())
for metric_name in metrics:
if metric_name in allowed_metrics:
value = long(metrics[metric_name])
if metric_name in self.GAUGES:
self.submit(metric_name, value, 'gauge')
else:
self.submit(metric_name, value, 'counter')
tcp = Tcp()
collectd.register_read(tcp.collect)
collectd.register_config(tcp.config)
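# Example collectd.conf snippet for this plugin (module name and path are
# assumptions; adjust them to wherever this file is installed):
#
# <Plugin python>
#     ModulePath "/usr/lib/collectd/python"
#     Import "tcp"
#     <Module tcp>
#         Metrics "CurrEstab" "ActiveOpens" "PassiveOpens" "RetransSegs"
#     </Module>
# </Plugin>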
|
the-stack_0_3496 | # -*- coding: utf-8 -*-
"""Tests for thanks-related code."""
#
# (C) Pywikibot team, 2016-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
from pywikibot.flow import Topic
from tests.aspects import TestCase
from tests import unittest
NO_THANKABLE_POSTS = 'There is no recent post which can be test thanked.'
class TestThankFlowPost(TestCase):
"""Test thanks for Flow posts."""
family = 'test'
code = 'test'
write = True
@classmethod
def setUpClass(cls):
"""Set up class."""
super(TestThankFlowPost, cls).setUpClass()
cls._topic_title = 'Topic:Tvkityksg1ukyrrw'
def test_thank_post(self):
"""Test thanks for Flow posts."""
found_log = False
site = self.get_site()
topic = Topic(site, self._topic_title)
for post in reversed(topic.replies()):
user = post.creator
if site.user() == user.username:
continue
if user.is_thankable:
break
else:
self.skipTest(NO_THANKABLE_POSTS)
before_time = site.getcurrenttimestamp()
post.thank()
log_entries = site.logevents(logtype='thanks', total=5, page=user,
start=before_time, reverse=True)
try:
next(iter(log_entries))
except StopIteration:
found_log = False
else:
found_log = True
self.assertTrue(found_log)
def test_self_thank(self):
"""Test that thanking one's own Flow post causes an error."""
site = self.get_site()
topic = Topic(site, self._topic_title)
my_reply = topic.reply('My attempt to thank myself.')
self.assertAPIError('invalidrecipient', None, my_reply.thank)
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
|
the-stack_0_3497 | # -*- coding: utf-8 -*-
# Copyright 2018 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Module uses ideas from "Basic circuit compilation techniques
# for an ion-trap quantum machine" by Dmitri Maslov (2017) at
# https://iopscience.iop.org/article/10.1088/1367-2630/aa5e47
"""
Registers a decomposition for a CNOT gate in terms of Rxx, Rx and Ry gates.
"""
import math
from projectq.cengines import DecompositionRule
from projectq.meta import get_control_count
from projectq.ops import Ph, Rxx, Ry, Rx, X
def _decompose_cnot2rxx_M(cmd): # pylint: disable=invalid-name
"""Decompose CNOT gate into Rxx gate."""
# Labelled 'M' for 'minus' because decomposition ends with a Ry(-pi/2)
ctrl = cmd.control_qubits
Ry(math.pi / 2) | ctrl[0]
Ph(7 * math.pi / 4) | ctrl[0]
Rx(-math.pi / 2) | ctrl[0]
Rx(-math.pi / 2) | cmd.qubits[0][0]
Rxx(math.pi / 2) | (ctrl[0], cmd.qubits[0][0])
Ry(-1 * math.pi / 2) | ctrl[0]
def _decompose_cnot2rxx_P(cmd): # pylint: disable=invalid-name
"""Decompose CNOT gate into Rxx gate."""
# Labelled 'P' for 'plus' because decomposition ends with a Ry(+pi/2)
ctrl = cmd.control_qubits
Ry(-math.pi / 2) | ctrl[0]
Ph(math.pi / 4) | ctrl[0]
Rx(-math.pi / 2) | ctrl[0]
Rx(math.pi / 2) | cmd.qubits[0][0]
Rxx(math.pi / 2) | (ctrl[0], cmd.qubits[0][0])
Ry(math.pi / 2) | ctrl[0]
def _recognize_cnot2(cmd):
"""Identify that the command is a CNOT gate (control - X gate)"""
return get_control_count(cmd) == 1
#: Decomposition rules
all_defined_decomposition_rules = [
DecompositionRule(X.__class__, _decompose_cnot2rxx_M, _recognize_cnot2),
DecompositionRule(X.__class__, _decompose_cnot2rxx_P, _recognize_cnot2),
]
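# A minimal usage sketch (assumes this module is importable as `cnot2rxx`; a real
# setup typically also adds an InstructionFilter so the AutoReplacer knows CNOT
# must be decomposed for the target backend):
#
# from projectq import MainEngine
# from projectq.cengines import AutoReplacer, DecompositionRuleSet
# from projectq.ops import CNOT
# import cnot2rxx
#
# rule_set = DecompositionRuleSet(modules=[cnot2rxx])
# eng = MainEngine(engine_list=[AutoReplacer(rule_set)])
# ctrl, target = eng.allocate_qubit(), eng.allocate_qubit()
# CNOT | (ctrl, target)
# eng.flush()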
|
the-stack_0_3498 | import os
import utils
def main():
src_file = os.path.join(os.getcwd(), "deploy/roles/default_role.yaml")
dst_file = os.path.join(os.getcwd(), "build/default_role.yaml")
with open(src_file, "r") as src:
with open(dst_file, "w+") as dst:
data = src.read()
print("Deploying {}".format(dst_file))
dst.write(data)
utils.apply(dst_file)
if __name__ == "__main__":
main()
|
the-stack_0_3500 | from flask import render_template,request,redirect,url_for,abort
from . import main
from .forms import UpdateProfile
from ..models import User
from flask_login import login_required,current_user
from .. import db,photos
import markdown2
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Home - Welcome to pomodoro'
content = "WELCOME TO POMODORO APP"
return render_template('index.html', title = title,content = content)
@main.route('/about')
def about():
return render_template('about.html', title = 'About')
@main.route('/pomodoro')
@login_required
def pomodoro():
'''
View function that returns the pomodoro page and its data
'''
title = 'Home - Welcome to pomodoro'
content = "WELCOME TO POMODORO APP"
return render_template('pomodoro.html', title = title,content = content)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/reason/new/<int:id>', methods = ['GET','POST'])
@login_required
def new_reason(id):
form = ReasonForm()
reason = get_reason(id)
if form.validate_on_submit():
title = form.title.data
reason = form.reason.data
# Updated reason instance
new_reason = Reason(reason_id=reason.id,reason_title=title,reason=reason,user=current_user)
# save reason method
new_reason.save_reason()
return redirect(url_for('.reason',id = reason.id ))
title = f'{reason.title} reason'
return render_template('new_reason.html',title = title, reason_form=form, reason=reason) |
the-stack_0_3501 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import copy
import grpc
import six
import threading
from concurrent import futures
from edl.utils import common_pb2
from edl.utils import data_server_pb2
from edl.utils import data_server_pb2_grpc
from edl.utils import error_utils
from edl.utils import exceptions
from edl.utils.log_utils import logger
class PodData(object):
"""
Manage pod's data:
batch_data_ids, file_list, data_server_endpoint
"""
def __init__(self, pod_id, data_server_endpoint):
# batch_data_ids
self._pod_id = pod_id
self._data_server_endpoint = data_server_endpoint
# total ids for filter
self._batch_data_ids = set()
self._queue = collections.deque()
# data_server_pb2.FileListElement
self._file_list_slice = []
self._reach_data_end = False
def append_file_list_element(self, element):
self._file_list_slice.append(element)
@property
def reach_data_end(self):
return self._reach_data_end
@reach_data_end.setter
def reach_data_end(self, r):
self._reach_data_end = r
def get_size(self):
return len(self._queue)
def pop(self, num):
a = []
while len(self._queue) > 0:
if (num > 0 and len(a) < num) or num <= 0:
batch_data_id = self._queue.popleft()
a.append(batch_data_id)
else:
break
logger.debug(
"batch_data_ids:{}, queue:{}".format(
len(self._batch_data_ids), len(self._queue)
)
)
return a
def put(self, data_server_endpoint, batch_data_ids):
self._data_server_endpoint = data_server_endpoint
for batch_data_id in batch_data_ids:
if batch_data_id in self._batch_data_ids:
continue
self._queue.append(batch_data_id)
self._batch_data_ids.add(batch_data_id)
logger.debug(
"batch_data_ids:{}, queue:{}".format(
len(self._batch_data_ids), len(self._queue)
)
)
class PodsData(object):
"""
Reader's pods data
pod_id=>PodData
"""
def __init__(self, reader_name, file_list, pod_ids):
self._reader_name = reader_name
# pod_id => PodData
self._pod_data = {}
# pod_id => BalanceBatchData
self._balanced_batch_data = {}
self._barrier_ids = set()
self._reach_data_end_ids = set()
self._lock = threading.Lock()
# string list
self._file_list = file_list
self._pod_ids = set(pod_ids)
self._init()
self._total = 0
def _init(self):
for pod_id in self._pod_ids:
self._pod_data[pod_id] = PodData(pod_id, None)
self._balanced_batch_data[pod_id] = [] # array of BatchDataMeta
i = 0
while i < len(self._file_list):
for pod_id in self._pod_ids:
m = data_server_pb2.FileListElement()
m.idx = i
m.path = self._file_list[i]
self._pod_data[pod_id].append_file_list_element(m)
i += 1
if i >= len(self._file_list):
break
def get_pod_file_list(self, pod_id):
pod_data = self._pod_data[pod_id]
return pod_data._file_list_slice
def set_data_end(self, pod_id):
with self._lock:
pod_data = self._pod_data[pod_id]
pod_data.reach_data_end = True
self._reach_data_end_ids.add(pod_id)
def _get_batch_data_id_from_others(self, avg_num, need_num):
ret = []
for pod_id in self._pod_ids:
src = self._pod_data[pod_id]
if src.get_size() < avg_num:
continue
dst = data_server_pb2.BatchDataMeta()
dst.reader_name = self._reader_name
dst.producer_pod_id = src._pod_id
dst.data_server_endpoint = src._data_server_endpoint
pop_num = src.get_size() - avg_num
ids = src.pop(pop_num)
if len(ids) <= 0:
continue
dst.batch_data_ids.extend(ids)
ret.append(dst)
need_num -= len(ids)
if need_num <= 0:
break
return ret
def put(self, pod_id, data_server_endpoint, batch_data_ids):
with self._lock:
pod_data = self._pod_data[pod_id]
pod_data.put(data_server_endpoint, batch_data_ids)
total = 0
for _, pod_data in six.iteritems(self._pod_data):
total += pod_data.get_size()
self._barrier_ids.add(pod_id)
if (self._barrier_ids | self._reach_data_end_ids) != self._pod_ids:
logger.debug(
"barrier_ids:{} readch_data_end_ids:{}".format(
len(self._barrier_ids), len(self._reach_data_end_ids)
)
)
return
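# Every pod has now either reported this barrier or reached the end of its data,
# so rebalance: each pod should end up with roughly avg_num batch-data ids.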
avg_num = total / len(self._pod_ids)
logger.debug("total:{} avg_num:{}".format(total, avg_num))
if avg_num < 1:
return
# get batch_data_ids from pods_data to balance_batch_data
for pod_id in self._pod_ids:
src = self._pod_data[pod_id]
dst = data_server_pb2.BatchDataMeta()
dst.reader_name = self._reader_name
dst.producer_pod_id = src._pod_id
dst.data_server_endpoint = src._data_server_endpoint
ids = src.pop(num=avg_num)
if len(ids) >= avg_num:
dst.batch_data_ids.extend(ids)
self._balanced_batch_data[pod_id].append(dst)
logger.debug(
"balance_data_ids:{}".format(
len(self._balanced_batch_data[pod_id])
)
)
else:
need_num = avg_num - len(ids)
ret = self._get_batch_data_id_from_others(avg_num, need_num)
if len(ret) <= 0:
continue
self._balanced_batch_data[pod_id].extend(ret)
logger.debug(
"balance_data_ids:{}".format(
len(self._balanced_batch_data[pod_id])
)
)
self._barrier_ids = set()
def _is_all_reach_data_end(self):
for _, pod_data in six.iteritems(self._pod_data):
if not pod_data.reach_data_end:
return False
return True
# FIXME(gongwb): avoid global lock of all pods
@error_utils.handle_errors_until_timeout
def pop(self, pod_id, ret, timeout=60):
with self._lock:
balanced_data = self._balanced_batch_data[pod_id]
if len(balanced_data) > 0:
for data in balanced_data:
ret.append(copy.copy(data))
return ret
if self._is_all_reach_data_end():
return None
raise exceptions.EdlDataGenerateError("wait to generate more data")
class DataServerServicer(data_server_pb2_grpc.DataServerServicer):
def __init__(self, trainer_env, reader_name, file_list, pod_ids, local_reader):
self._lock = threading.Lock()
self._trainer_env = trainer_env
# string list
self._file_list = file_list
self._pod_ids = pod_ids
self._local_reader = local_reader
self._reader_name = reader_name
# reader_name=>PodData
self._pod_data = PodsData(reader_name, file_list, pod_ids)
def _check_leader(self):
if self._trainer_env.global_rank != 0:
raise exceptions.EdlNotLeaderError(
"This server rank:{} is not Leader".format(
self._trainer_env.global_rank
)
)
# only leader can do this
def ReportBatchDataMeta(self, request, context):
res = common_pb2.EmptyRet()
try:
self._check_leader()
self._check_pod_id(request.pod_id)
self._check_reader_name(request.reader_name)
if len(request.batch_data_ids) > 0:
self._pod_data.put(
request.pod_id, request.data_server_endpoint, request.batch_data_ids
)
except Exception as e:
import traceback
exceptions.serialize(res, e, traceback.format_exc())
return res
def ReachDataEnd(self, request, context):
res = common_pb2.EmptyRet()
try:
self._check_leader()
self._check_pod_id(request.pod_id)
self._check_reader_name(request.reader_name)
self._pod_data.set_data_end(request.pod_id)
except Exception as e:
import traceback
exceptions.serialize(res, e, traceback.format_exc())
return res
# only leader can do this
def GetBatchDataMeta(self, request, context):
res = data_server_pb2.BatchDataMetaResponse()
try:
self._check_leader()
self._check_pod_id(request.pod_id)
self._check_reader_name(request.reader_name)
self._pod_data.pop(request.pod_id, res.data, timeout=60)
except Exception as e:
import traceback
exceptions.serialize(res, e, traceback.format_exc())
return res
def GetBatchData(self, request, context):
res = data_server_pb2.BatchDataResponse()
try:
datas = self._local_reader.get_local_batch_data(request)
for data in datas:
b = copy.copy(data)
res.datas.append(b)
except Exception as e:
import traceback
exceptions.serialize(res, e, traceback.format_exc())
return res
def _check_file_list(self, file_list):
for i, ele in enumerate(file_list):
if self._file_list[i] != ele.path:
raise exceptions.EdlFileListNotMatchError(
"client:{} server:{}".format(file_list, self._file_list)
)
def _check_pod_id(self, pod_id):
if pod_id not in self._pod_ids:
raise exceptions.EdlPodIDNotExistError(
"pod_id:{} not exist in {}".format(pod_id, self._pod_ids)
)
def _check_reader_name(self, reader_name):
if reader_name != self._reader_name:
raise exceptions.EdlReaderNameError(
"{} not equal {}".format(reader_name, self._reader_name)
)
# only leader can do this
def GetFileList(self, request, context):
"""
Get slice of file list for a pod by pod_id
Need not lock because these are read-only
"""
res = data_server_pb2.FileListResponse()
try:
self._check_leader()
self._check_file_list(request.file_list)
self._check_pod_id(request.pod_id)
self._check_reader_name(request.reader_name)
file_list = self._pod_data.get_pod_file_list(request.pod_id)
for m in file_list:
res.file_list.append(m)
return res
except exceptions.EdlException as e:
exceptions.serialize(res, e)
return res
class DataServer(object):
def __init__(self, trainer_env, reader_name, file_list, local_reader):
self._server = None
self._addr = None
self._port = None
self._endpoint = None
self._trainer_env = trainer_env
self._reader_name = reader_name
self._file_list = file_list
self._local_reader = local_reader
def start(self, addr, cache_capcity=1000, max_workers=100, concurrency=20):
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=max_workers),
options=[
("grpc.max_send_message_length", 1024 * 1024 * 1024),
("grpc.max_receive_message_length", 1024 * 1024 * 1024),
],
maximum_concurrent_rpcs=concurrency,
)
data_server_pb2_grpc.add_DataServerServicer_to_server(
DataServerServicer(
trainer_env=self._trainer_env,
reader_name=self._reader_name,
file_list=self._file_list,
pod_ids=self._trainer_env.pod_ids,
local_reader=self._local_reader,
),
server,
)
self._addr = addr
self._port = server.add_insecure_port("{}:0".format(addr))
assert (
self._port > 0
), "data server start on addr:{} error, selected port is {}".format(
addr, self._port
)
self._endpoint = "{}:{}".format(self._addr, self._port)
server.start()
self._server = server
print("start data_server:", self._endpoint)
@property
def endpoint(self):
return self._endpoint
def wait(self, timeout=None):
if timeout is not None:
self._server.stop(timeout)
return
self._server.wait_for_termination(timeout)
def shutdown(self):
pass
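# A minimal start-up sketch (trainer_env, reader name, file_list and local_reader
# are placeholders; in real use they come from the EDL launcher):
#
# server = DataServer(trainer_env, "train_reader", file_list, local_reader)
# server.start(addr="127.0.0.1")
# print("serving on", server.endpoint)
# server.wait()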
|
the-stack_0_3505 | import re
from dataclasses import dataclass, field
from typing import Optional, Sequence, Union
from snuba_sdk.column import Column
from snuba_sdk.expressions import (
Expression,
InvalidExpression,
ScalarLiteralType,
ScalarType,
is_literal,
is_scalar,
)
class InvalidFunction(InvalidExpression):
pass
alias_re = re.compile(r"^[a-zA-Z](\w|\.)+$")
# In theory the function matcher should be the same as the column one.
# However, the legacy API sends curried functions as raw strings, and it
# wasn't worth importing an entire parsing grammar into the SDK
# just to accommodate that one case. Instead, allow it for now and
# once that use case is eliminated we can remove this.
function_name_re = re.compile(r"^[a-zA-Z](\w|[().,+]| |\[|\])+$")
@dataclass(frozen=True)
class CurriedFunction(Expression):
function: str
initializers: Optional[Sequence[Union[ScalarLiteralType, Column]]] = None
parameters: Optional[
Sequence[Union[ScalarType, Column, "CurriedFunction", "Function"]]
] = None
alias: Optional[str] = None
def validate(self) -> None:
if not isinstance(self.function, str):
raise InvalidFunction(f"function '{self.function}' must be a string")
if self.function == "":
# TODO: Have a whitelist of valid functions to check, maybe even with more
# specific parameter type checking
raise InvalidFunction("function cannot be empty")
if not function_name_re.match(self.function):
raise InvalidFunction(
f"function '{self.function}' contains invalid characters"
)
if self.initializers is not None:
if not isinstance(self.initializers, Sequence):
raise InvalidFunction(
f"initializers of function {self.function} must be a Sequence"
)
elif not all(
isinstance(param, Column) or is_literal(param)
for param in self.initializers
):
raise InvalidFunction(
f"initializers to function {self.function} must be a scalar or column"
)
if self.alias is not None:
if not isinstance(self.alias, str) or self.alias == "":
raise InvalidFunction(
f"alias '{self.alias}' of function {self.function} must be None or a non-empty string"
)
if not alias_re.match(self.alias):
raise InvalidFunction(
f"alias '{self.alias}' of function {self.function} contains invalid characters"
)
if self.parameters is not None:
if not isinstance(self.parameters, Sequence):
raise InvalidFunction(
f"parameters of function {self.function} must be a Sequence"
)
for param in self.parameters:
if not isinstance(
param, (Column, CurriedFunction, Function)
) and not is_scalar(param):
assert not isinstance(param, bytes) # mypy
raise InvalidFunction(
f"parameter '{param}' of function {self.function} is an invalid type"
)
def __eq__(self, other: object) -> bool:
# Don't use the alias to compare equality
if not isinstance(other, CurriedFunction):
return False
return (
self.function == other.function
and self.initializers == other.initializers
and self.parameters == other.parameters
)
@dataclass(frozen=True)
class Function(CurriedFunction):
initializers: Optional[Sequence[Union[ScalarLiteralType, Column]]] = field(
init=False, default=None
)
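# Example construction (column and alias names are illustrative only):
#   Function("uniq", [Column("user_id")], alias="unique_users")
#   CurriedFunction("quantile", [0.95], [Column("duration")], alias="p95_duration")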
|
the-stack_0_3506 | import numpy as np
import glob
import os
import fridge.Material.Element as Element
import fridge.utilities.utilities as utilities
AVOGADROS_NUMBER = 0.6022140857
cur_dir = os.path.dirname(__file__)
material_dir = os.path.join(cur_dir, '../data/materials/')
class Material(object):
"""Creates a material consisting of elements based on the Material database."""
def __init__(self):
self.atomDensity = 0.0
self.density = 0.0
self.linearCoeffExpansion = 0.0
self.name = ''
self.materialName = ''
self.atomPercent = {}
self.enrichmentDict = {}
self.weightPercent = {}
self.elementDict = {}
self.elements = []
self.zaids = []
self.weightFraction = []
self.enrichmentZaids = []
self.enrichmentIsotopes = []
self.enrichmentVector = []
self.isotopicAtomPercents = []
def set_material(self, material):
self.name = material
self.read_material_data(self.name)
self.create_material_data()
def read_material_data(self, material):
"""Read in the material data from the material database."""
material_yaml_file = glob.glob(os.path.join(material_dir, material + '.yaml'))
inputs = utilities.yaml_reader(material_yaml_file, material_dir, material)
self.name = inputs['Name']
self.materialName = material
self.elements = inputs['Elements']
self.zaids = inputs['Elemental ZAIDs']
self.weightFraction = inputs['Elemental Weight Fractions'] if 'Elemental Weight Fractions' in inputs else []
self.enrichmentZaids = inputs['Elemental Adjustment ZAIDs'] if 'Elemental Adjustment ZAIDs' in inputs else []
self.enrichmentIsotopes = inputs['Isotopic Adjustment ZAIDs'] if 'Isotopic Adjustment ZAIDs' in inputs else []
self.enrichmentVector = inputs['Isotopic Weight Percents'] if 'Isotopic Weight Percents' in inputs else []
self.isotopicAtomPercents = inputs['Isotopic Atom Percents'] if 'Isotopic Atom Percents' in inputs else []
self.density = inputs['Density']
self.linearCoeffExpansion = inputs['Linear Coefficient of Expansion']
def create_material_data(self):
"""Create a material based on the data from the material database."""
for num, zaid in enumerate(self.enrichmentZaids):
enriched_isotope_dict = {}
for isoNum, isotopes in enumerate(self.enrichmentIsotopes[num]):
enriched_isotope_dict[isotopes] = self.enrichmentVector[num][isoNum]
self.enrichmentDict[zaid] = enriched_isotope_dict
for num, element in enumerate(self.elements):
self.elementDict[self.zaids[num]] = Element.Element(element)
if self.isotopicAtomPercents:
self.atomDensity = self.density
self.set_atom_fractions()
else:
self.set_elemental_enrichment()
self.set_weight_percent()
self.atomDensity, self.atomPercent = set_atom_percent(self.weightPercent, self.density,
self.elementDict)
def set_elemental_enrichment(self):
"""Adjust the element's natural abundance to compensate for enrichment."""
for elementEnrichement, zaidVector in self.enrichmentDict.items():
for zaid, enrichmentPercent in zaidVector.items():
self.elementDict[elementEnrichement].weightPercentDict[zaid] = enrichmentPercent
def set_weight_percent(self, void_percent=1.0):
"""Calculates the weight percent of a material."""
weight_total = 0.0
for zaidNum, zaid in enumerate(self.zaids):
for isotope, isotopeFraction in self.elementDict[zaid].weightPercentDict.items():
if isotopeFraction != 0.0:
self.weightPercent[isotope] = isotopeFraction * self.weightFraction[zaidNum] * void_percent
weight_total += self.weightPercent[isotope]
try:
assert np.allclose(weight_total, 1.0 * void_percent)
except AssertionError:
print("Weight percent does not sum to 1.0 for {}. Check the material file.".format(self.name))
def set_void(self, void_percent):
"""Adjust the atom density/atom percent of a material to account for voiding."""
self.set_weight_percent(void_percent)
self.atomDensity, self.atomPercent = set_atom_percent(self.weightPercent, self.density, self.elementDict)
def set_atom_fractions(self, void_percent=1.0):
"""Calculates the atom density of a material given a material with atom densities defined."""
for zaidNum, zaid in enumerate(self.zaids):
for isotope, isotopeFraction in self.elementDict[zaid].atomPercentDict.items():
if zaid in self.isotopicAtomPercents:
print(self.elementDict[zaid].weightPercentDict[isotope])
self.atomPercent[isotope] = self.elementDict[zaid].atomPercentDict[isotope] * \
self.isotopicAtomPercents[zaid] * void_percent
elif isotope in self.isotopicAtomPercents:
self.atomPercent[isotope] = self.isotopicAtomPercents[isotope] * void_percent
assert np.allclose(sum(self.atomPercent.values()), self.density, 3)
def set_atom_percent(weight_percents, density, element_dict):
"""Converts the weight percent of a material to the atom percent and atom density."""
atom_densities = {}
atom_percent = {}
for zaid, weight in weight_percents.items():
element = str(zaid)
if len(element) < 5:
current_element = int(element[:1] + '000')
else:
current_element = int(element[:2] + '000')
atom_densities[zaid] = weight*density*AVOGADROS_NUMBER / element_dict[current_element].molecularMassDict[zaid]
atom_density = sum(atom_densities.values())
for zaid, atomicDensity in atom_densities.items():
atom_percent[zaid] = atomicDensity / atom_density
return atom_density, atom_percent
def get_smeared_material(materials, void_material='', void_percent=1.0):
"""Create the material data card for a smeared material."""
smear_material = {}
for material, materialWeightPercent in materials.items():
void_multiplier = 1.0
if material == 'Void':
pass
else:
base_material = Material()
base_material.set_material(material)
if base_material.materialName == void_material:
void_multiplier = void_percent
for isotope, isotopeWeightPercent in base_material.weightPercent.items():
element = str(isotope)
if len(element) < 5:
current_element = element[:1] + '000'
else:
current_element = element[:2] + '000'
current_element = int(current_element)
try:
smear_material[isotope] += isotopeWeightPercent * materialWeightPercent * base_material.density \
* AVOGADROS_NUMBER * void_multiplier / \
base_material.elementDict[current_element].molecularMassDict[isotope]
except KeyError:
smear_material[isotope] = isotopeWeightPercent * materialWeightPercent * base_material.density \
* AVOGADROS_NUMBER * void_multiplier / \
base_material.elementDict[current_element].molecularMassDict[isotope]
smeared_material = Material()
smeared_material.name = "{}".format([val for val in materials])
smeared_material.atomDensity = sum(smear_material.values())
smeared_atom_percent = {}
for k, v in smear_material.items():
smeared_atom_percent[k] = v / smeared_material.atomDensity
smeared_material.atomPercent = smeared_atom_percent
return smeared_material
def smear_coolant_wirewrap(info):
"""Returns a smeared material for the coolant and wire wrap."""
height = info[0]
fuel_radius = info[1] / 2
wirewrap_radius = info[2] / 2
wire_wrap_axial_pitch = info[3]
fuel_pitch = info[4]
coolant_material = info[5]
clad_material = info[6]
fuel_volume = utilities.get_cylinder_volume(fuel_radius, height)
wire_wrap_volume = utilities.get_toroidal_volume(fuel_radius, wirewrap_radius, wire_wrap_axial_pitch, height)
pin_hexagonal_universe_volume = utilities.get_hexagonal_prism_volume(fuel_pitch, height)
coolant_volume = pin_hexagonal_universe_volume - fuel_volume - wire_wrap_volume
total_coolant_wire_wrap_volume = coolant_volume + wire_wrap_volume
wire_wrap_volume_percent = wire_wrap_volume / total_coolant_wire_wrap_volume
coolant_volume_percent = coolant_volume / total_coolant_wire_wrap_volume
smeared_material_dict = {clad_material: wire_wrap_volume_percent, coolant_material: coolant_volume_percent}
return smeared_material_dict
|
the-stack_0_3508 | """
My 5th bot.
I don't actually like those films, but that was an order. And, frankly, a very interesting one!
"""
from bs4 import BeautifulSoup
from datetime import datetime
import requests
import telebot
import json
def search_link(name):
"""Find a link for a film"""
with open("database.json", "r", encoding="utf-8") as f:
database = json.load(f)
try:
return "https://doramalive.ru" + database[name]
# If there is no such film:
except KeyError:
return "error"
def parse_dorama_page(link):
"""Parse the film webpage"""
res = requests.get(link)
soup = BeautifulSoup(res.text, 'html.parser')
dorama = {}
# Put the information into the dictionary
dorama["link"] = link
dorama["name"] = " ".join(soup.find("h1").string.split()[1::])
dorama["rating"] = soup.find("div", class_="vote-detail").get_text()
dorama["description"] = soup.find("div", class_="detail-more").get_text()
parametrs = soup.find_all("dl", class_="dl-horizontal")
for parametr in parametrs:
par = parametr.find_all("dd")
dorama["made_in"] = par[1].get_text()
dorama["made_date"] = par[2].get_text()
dorama["genres"] = []
genres = soup.find_all("span", "label label-default genre")
for genre in genres:
dorama["genres"].append(" ".join(genre.find("a").get("title").split()[2::]).title())
return dorama
# BOT STARTS HERE ###
bot = telebot.TeleBot("2133317357:AAEAEsYGXuZqD0psX-GapGh1YjCrFcNkToU")
print("Bot is active!")
@bot.message_handler(commands=["start"])
def command_start(message):
"""Handler of the first command /start"""
bot.send_message(message.chat.id, "✨")
bot.send_message(message.chat.id, "Привет! Я помогу вам найти информацию о дорамах. "
"Просто напишите мне название, а всё вам о ней расскажу!")
@bot.message_handler(content_types=['text'])
def reply(message):
"""Handler of any text message. It is supposed to be the name of a film"""
print(f"Human: {not (message.from_user.is_bot)} || Name: {message.from_user.first_name} "
f"{message.from_user.last_name} || Id: {message.from_user.id} || Time: {datetime.now().strftime('%H:%M')};")
link = search_link(message.text.lower())
# If there is no such film:
if link == "error":
bot.send_message(message.chat.id, "К сожаленю такой дорамы нет. Или вы неверно "
"ввели название ☹️ Попробуйте, пожалуйста, ещё раз.")
# If there is
else:
dorama = parse_dorama_page(link)
n = round(float(dorama["rating"].split()[0]))
stars = ["⭐" for i in range(n)]
msg = f"<b>Название:</b> {dorama['name']}\n<b>Производство:</b> {dorama['made_in']}\n<b>Дата премьеры:" \
f"</b> {dorama['made_date']}\n<b>Рейтинг: {''.join(stars)}</b> {dorama['rating']}\n<b>Жанры: ▫️</b> " \
f"{'▫️'.join(dorama['genres'])}\n<b>Описание:</b> {dorama['description']}\n<b>Ссылка:</b> " \
f"{dorama['link']}"
bot.send_message(message.chat.id, msg, parse_mode="html")
bot.polling(none_stop=True, interval=0) |
the-stack_0_3511 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''Training set and test set loader'''
import os
import pickle
import numpy as np
class TrainDataset:
'''Training data loader'''
def __init__(self,
sliding_dir,
train_pkl_path,
valid_pkl_path,
visual_dim,
sentence_embed_dim,
IoU=0.5,
nIoU=0.15,
context_num=1,
context_size=128
):
self.sliding_dir = sliding_dir
self.train_pkl_path = train_pkl_path
self.valid_pkl_path = valid_pkl_path
self.visual_dim = visual_dim
self.sentence_embed_dim = sentence_embed_dim
self.IoU = IoU
self.nIoU = nIoU
self.context_num = context_num
self.context_size = context_size
self.load_data()
def load_data(self):
'''load_data'''
train_csv = pickle.load(open(self.train_pkl_path, 'rb'), encoding='iso-8859-1')
self.clip_sentence_pairs = []
for l in train_csv:
clip_name = l[0]
sent_vecs = l[1]
for sent_vec in sent_vecs:
self.clip_sentence_pairs.append((clip_name, sent_vec))
movie_names_set = set()
self.movie_clip_names = {}
for k in range(len(self.clip_sentence_pairs)):
clip_name = self.clip_sentence_pairs[k][0]
movie_name = clip_name.split("_")[0]
if not movie_name in movie_names_set:
movie_names_set.add(movie_name)
self.movie_clip_names[movie_name] = []
self.movie_clip_names[movie_name].append(k)
self.movie_names = list(movie_names_set)
self.num_samples = len(self.clip_sentence_pairs)
# read sliding windows, and match them with the groundtruths to make training samples
sliding_clips_tmp = os.listdir(self.sliding_dir)
sliding_clips_tmp.sort()
self.clip_sentence_pairs_iou = []
movie_names = set()
for clip_name in sliding_clips_tmp:
if clip_name.split(".")[2] == "npy":
movie_name = clip_name.split("_")[0]
movie_names.add(movie_name)
movie_names = list(movie_names)
movie_names.sort()
for movie_name in self.movie_names:
start_ends = []
clip_names = []
for clip_name in sliding_clips_tmp:
if clip_name.split(".")[2] == "npy":
if clip_name.split("_")[0] == movie_name:
start = int(clip_name.split("_")[1])
end = int(clip_name.split("_")[2].split(".")[0])
start_ends.append((start, end))
clip_names.append(clip_name)
table = {}
for clip_sentence in self.clip_sentence_pairs:
o_start_ends = []
original_clip_name = clip_sentence[0]
original_movie_name = original_clip_name.split("_")[0]
if original_movie_name == movie_name:
o_start = int(original_clip_name.split("_")[1])
o_end = int(original_clip_name.split("_")[2].split(".")[0])
if (o_start, o_end) in table.keys():
match_indexs = table[(o_start, o_end)]
for j in match_indexs:
start, end = start_ends[j]
clip_name = clip_names[j]
start_offset = o_start - start
end_offset = o_end - end
self.clip_sentence_pairs_iou.append(
(clip_sentence[0], clip_sentence[1], clip_name, start_offset, end_offset))
else:
o_start_ends.append((o_start, o_end))
start_ends = np.array(start_ends)
o_start_ends = np.array(list(set(o_start_ends)))
if o_start_ends.shape[0] == 0:
continue
ious = self.calc_IoU(start_ends, o_start_ends)
nIoLs = self.calc_nIoL(o_start_ends, start_ends)
match_indexs = (nIoLs < self.nIoU)[0] & (ious > self.IoU)[:, 0]
match_indexs = np.where(match_indexs)[0]
table[(o_start, o_end)] = match_indexs
for k in match_indexs:
start, end = start_ends[k]
clip_name = clip_names[k]
start_offset = o_start - start
end_offset = o_end - end
self.clip_sentence_pairs_iou.append(
(clip_sentence[0], clip_sentence[1], clip_name, start_offset, end_offset))
self.num_samples_iou = len(self.clip_sentence_pairs_iou)
def calc_nIoL(self, base, sliding_clip):
'''Calculate the nIoL of two fragments'''
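# nIoL computed here is 1 - intersection / sliding-window length, i.e. the
# fraction of each sliding window that falls outside the ground-truth clip.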
A = base.shape[0]
inter = self.calc_inter(base, sliding_clip)
sliding_clip = np.expand_dims(sliding_clip, 0).repeat(A, axis=0)
length = sliding_clip[:, :, 1] - sliding_clip[:, :, 0]
nIoL = 1 - inter / length
return nIoL
def calc_IoU(self, clips_a, clips_b):
'''Calculate the IoU of two fragments'''
inter = self.calc_inter(clips_a, clips_b)
union = self.calc_union(clips_a, clips_b)
return inter / union
def calc_inter(self, clips_a, clips_b):
'''Calculate the intersection of two fragments'''
A = clips_a.shape[0]
B = clips_b.shape[0]
clips_a = np.expand_dims(clips_a, 1).repeat(B, axis=1)
clips_b = np.expand_dims(clips_b, 0).repeat(A, axis=0)
max_min = np.maximum(clips_a[:, :, 0], clips_b[:, :, 0])
min_max = np.minimum(clips_a[:, :, 1], clips_b[:, :, 1])
return np.maximum(min_max - max_min, 0)
def calc_union(self, clips_a, clips_b):
'''Calculate the union of two fragments'''
A = clips_a.shape[0]
B = clips_b.shape[0]
clips_a = np.expand_dims(clips_a, 1).repeat(B, axis=1)
clips_b = np.expand_dims(clips_b, 0).repeat(A, axis=0)
min_min = np.minimum(clips_a[:, :, 0], clips_b[:, :, 0])
max_max = np.maximum(clips_a[:, :, 1], clips_b[:, :, 1])
return max_max - min_min
def get_context_window(self, clip_name):
'''Get the context window of the fragment'''
movie_name = clip_name.split("_")[0]
start = int(clip_name.split("_")[1])
end = int(clip_name.split("_")[2].split(".")[0])
self.context_size = end - start
left_context_feats = np.zeros([self.context_num, self.visual_dim // 3], dtype=np.float32)
right_context_feats = np.zeros([self.context_num, self.visual_dim // 3], dtype=np.float32)
last_left_feat = np.load(os.path.join(self.sliding_dir, clip_name))
last_right_feat = np.load(os.path.join(self.sliding_dir, clip_name))
for k in range(self.context_num):
left_context_start = start - self.context_size * (k + 1)
left_context_end = start - self.context_size * k
right_context_start = end + self.context_size * k
right_context_end = end + self.context_size * (k + 1)
left_context_name = movie_name + "_" + str(left_context_start) + "_" + str(left_context_end) + ".npy"
right_context_name = movie_name + "_" + str(right_context_start) + "_" + str(right_context_end) + ".npy"
left_context_path = os.path.join(self.sliding_dir, left_context_name)
if os.path.exists(left_context_path):
left_context_feat = np.load(left_context_path)
last_left_feat = left_context_feat
else:
left_context_feat = last_left_feat
right_context_path = os.path.join(self.sliding_dir, right_context_name)
if os.path.exists(right_context_path):
right_context_feat = np.load(right_context_path)
last_right_feat = right_context_feat
else:
right_context_feat = last_right_feat
left_context_feats[k] = left_context_feat
right_context_feats[k] = right_context_feat
return np.mean(left_context_feats, axis=0), np.mean(right_context_feats, axis=0)
def __getitem__(self, index):
'''Return a data'''
left_context_feat, right_context_feat = self.get_context_window(self.clip_sentence_pairs_iou[index][2])
feat_path = os.path.join(self.sliding_dir, self.clip_sentence_pairs_iou[index][2])
featmap = np.load(feat_path)
vis = np.hstack((left_context_feat, featmap, right_context_feat))
sent = self.clip_sentence_pairs_iou[index][1][:self.sentence_embed_dim]
p_offset = self.clip_sentence_pairs_iou[index][3]
l_offset = self.clip_sentence_pairs_iou[index][4]
offset = np.array([p_offset, l_offset], dtype=np.float32)
return np.concatenate((vis, sent)), offset
def __len__(self):
'''Return the length of the data set'''
return self.num_samples_iou
class TestingDataSet:
'''TestingDataSet'''
def __init__(self, img_dir, csv_path, batch_size):
self.batch_size = batch_size
self.image_dir = img_dir
self.semantic_size = 4800
csv = pickle.load(open(csv_path, 'rb'), encoding='iso-8859-1')
self.clip_sentence_pairs = []
for l in csv:
clip_name = l[0]
sent_vecs = l[1]
for sent_vec in sent_vecs:
self.clip_sentence_pairs.append((clip_name, sent_vec))
movie_names_set = set()
self.movie_clip_names = {}
for k in range(len(self.clip_sentence_pairs)):
clip_name = self.clip_sentence_pairs[k][0]
movie_name = clip_name.split("_")[0]
if not movie_name in movie_names_set:
movie_names_set.add(movie_name)
self.movie_clip_names[movie_name] = []
self.movie_clip_names[movie_name].append(k)
self.movie_names = list(movie_names_set)
self.movie_names.sort()
self.clip_num_per_movie_max = 0
for movie_name in self.movie_clip_names:
if len(self.movie_clip_names[movie_name]) > self.clip_num_per_movie_max:
self.clip_num_per_movie_max = len(self.movie_clip_names[movie_name])
self.sliding_clip_path = img_dir
sliding_clips_tmp = os.listdir(self.sliding_clip_path)
self.sliding_clip_names = []
for clip_name in sliding_clips_tmp:
if clip_name.split(".")[2] == "npy":
movie_name = clip_name.split("_")[0]
if movie_name in self.movie_clip_names:
self.sliding_clip_names.append(clip_name.split(".")[0]+"."+clip_name.split(".")[1])
self.num_samples = len(self.clip_sentence_pairs)
assert self.batch_size <= self.num_samples
def get_clip_sample(self, sample_num, movie_name, clip_name):
'''Get a clip'''
length = len(os.listdir(self.image_dir+movie_name+"/"+clip_name))
sample_step = 1.0*length/sample_num
sample_pos = np.floor(sample_step*np.array(range(sample_num)))
sample_pos_str = []
img_names = os.listdir(self.image_dir+movie_name+"/"+clip_name)
# sort is very important! to get a correct sequence order
img_names.sort()
for pos in sample_pos:
sample_pos_str.append(self.image_dir+movie_name+"/"+clip_name+"/"+img_names[int(pos)])
return sample_pos_str
def get_context_window(self, clip_name, win_length):
'''Get the context window of the fragment'''
movie_name = clip_name.split("_")[0]
start = int(clip_name.split("_")[1])
end = int(clip_name.split("_")[2].split(".")[0])
clip_length = 128#end-start
left_context_feats = np.zeros([win_length, 4096], dtype=np.float32)
right_context_feats = np.zeros([win_length, 4096], dtype=np.float32)
last_left_feat = np.load(self.sliding_clip_path+clip_name)
last_right_feat = np.load(self.sliding_clip_path+clip_name)
for k in range(win_length):
left_context_start = start-clip_length*(k+1)
left_context_end = start-clip_length*k
right_context_start = end+clip_length*k
right_context_end = end+clip_length*(k+1)
left_context_name = movie_name+"_"+str(left_context_start)+"_"+str(left_context_end)+".npy"
right_context_name = movie_name+"_"+str(right_context_start)+"_"+str(right_context_end)+".npy"
if os.path.exists(self.sliding_clip_path+left_context_name):
left_context_feat = np.load(self.sliding_clip_path+left_context_name)
last_left_feat = left_context_feat
else:
left_context_feat = last_left_feat
if os.path.exists(self.sliding_clip_path+right_context_name):
right_context_feat = np.load(self.sliding_clip_path+right_context_name)
last_right_feat = right_context_feat
else:
right_context_feat = last_right_feat
left_context_feats[k] = left_context_feat
right_context_feats[k] = right_context_feat
return np.mean(left_context_feats, axis=0), np.mean(right_context_feats, axis=0)
def load_movie_byclip(self, movie_name, sample_num):
'''Read visual features through clip'''
movie_clip_sentences = []
movie_clip_featmap = []
clip_set = set()
for k in range(len(self.clip_sentence_pairs)):
if movie_name in self.clip_sentence_pairs[k][0]:
movie_clip_sentences.append(
(self.clip_sentence_pairs[k][0], self.clip_sentence_pairs[k][1][:self.semantic_size]))
if not self.clip_sentence_pairs[k][0] in clip_set:
clip_set.add(self.clip_sentence_pairs[k][0])
visual_feature_path = self.image_dir+self.clip_sentence_pairs[k][0]+".npy"
feature_data = np.load(visual_feature_path)
movie_clip_featmap.append((self.clip_sentence_pairs[k][0], feature_data))
return movie_clip_featmap, movie_clip_sentences
def load_movie_slidingclip(self, movie_name, sample_num):
'''Read visual features through slidingclip'''
movie_clip_sentences = []
movie_clip_featmap = []
for k in range(len(self.clip_sentence_pairs)):
if movie_name in self.clip_sentence_pairs[k][0]:
movie_clip_sentences.append(
(self.clip_sentence_pairs[k][0], self.clip_sentence_pairs[k][1][:self.semantic_size]))
for k in range(len(self.sliding_clip_names)):
if movie_name in self.sliding_clip_names[k]:
visual_feature_path = self.sliding_clip_path+self.sliding_clip_names[k]+".npy"
left_context_feat, right_context_feat = self.get_context_window(self.sliding_clip_names[k]+".npy", 1)
feature_data = np.load(visual_feature_path)
comb_feat = np.hstack((left_context_feat, feature_data, right_context_feat))
movie_clip_featmap.append((self.sliding_clip_names[k], comb_feat))
return movie_clip_featmap, movie_clip_sentences
|
the-stack_0_3514 | from datetime import datetime
from flask import Flask
from flask import request, Response, render_template, redirect, url_for, flash, request
from flask_sqlalchemy import SQLAlchemy
from flask_login import current_user, login_required, login_user, LoginManager, logout_user, UserMixin
from werkzeug.security import check_password_hash, generate_password_hash
from forms import SignUpForm, LoginForm
app = Flask(__name__)
#helps with debugging errors while flask app is running
app.config["DEBUG"] = True
#SECRET_KEY generated using python interpreter:
# $ python
# >>> import secrets
# >>> secrets.token_hex(16)
# >>> a65643b9b52d637a11b3182e923e5703
app.config["SECRET_KEY"]= 'a65643b9b52d637a11b3182e923e5703'
login_manager = LoginManager()
login_manager.init_app(app)
#Using SQLite for development
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///hackmerced.db'
db = SQLAlchemy(app)
###***** Users Table ******###
class Users(UserMixin, db.Model):
__tablename__ = "Users"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(89))
fullname = db.Column(db.String(89))
email = db.Column(db.String(89))
ucmId = db.Column(db.String(89))
pwd = db.Column(db.String(128))
bio = db.Column(db.String(500))
major = db.Column(db.String(89))
gradDate = db.Column(db.String(89))
def check_password(self, userinputPwd):
return check_password_hash(self.pwd, userinputPwd)
def get_id(self):
return self.email
###***** Users Table ******###
###***** Tracks Table ******###
class Threads(db.Model):
__tablename__ = "Threads"
id = db.Column(db.Integer, primary_key=True)
views = db.Column(db.Integer, default=0)
title = db.Column(db.String(89))
url = db.Column(db.String(89))
addedTimeStamp = db.Column(db.DateTime, default=datetime.now)
#we might need a different database type to hold comments (can be very long)
description = db.Column(db.String(3000))
'''{"owner": INT , "comment": String},{},{},{}'''
replies = db.Column(db.String(3000), default=" ")
upvotes = db.Column(db.Integer, default=0)
downupvotes = db.Column(db.Integer, default=0)
usersUpvoted = db.Column(db.String(3000), default=" ")
userDownvoted = db.Column(db.String(3000), default=" ")
owner_id = db.Column(db.Integer, db.ForeignKey('Users.id'), nullable=True)
owner = db.relationship('Users', foreign_keys=owner_id)
###***** Tracks Table ******###
@login_manager.user_loader
def load_user(userInputEmail):
return Users.query.filter_by(email=userInputEmail).first()
@app.route("/signout")
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route("/dashboard")
@login_required
def dashboard_home():
return render_template('dashboard.html')
@app.route('/signup', methods= ['GET', 'POST'])
def register():
form = SignUpForm()
if request.method == "POST":
if not form.validate_on_submit():
flash('Please enter valid credentials!', 'danger')
return redirect(url_for('register'))
#Check if username already exists
#Make password atleast 8 charlong
#Take to "finish making profile" one time page
if not Users.query.filter_by(username=request.form['username']).first() and not Users.query.filter_by(email=request.form['email']).first():
print('Query responded with None.')
#create a row in DataBases
newUser = Users(username=request.form['username'],
fullname=request.form['username'],
email=request.form['email'],
pwd= generate_password_hash(str(request.form['password'])))
db.session.add(newUser)
db.session.commit()
flash('Thanks for signing up, you will now be able to login!', 'success')
return redirect(url_for('login'))
if Users.query.filter_by(username=request.form['username']).first():
flash(f'That username is taken! Select another.', 'danger')
return redirect(url_for('register'))
if Users.query.filter_by(email=request.form['email']).first():
flash('That email cannot be used.', 'danger')
return redirect(url_for('register'))
return redirect(url_for('register'))
if request.method == "GET":
return render_template('signup.html', form=form)
@app.route('/login', methods= ['GET', 'POST'])
def login():
form = LoginForm()
if request.method == "POST":
if not Users.query.filter_by(email=request.form['email']).first():
flash('No user with that email!', 'danger')
return redirect(url_for('login'))
user = load_user(str(request.form['email']))
if not user.check_password(request.form['password']):
flash('Wrong password!', 'danger')
return redirect(url_for('login'))
print(type(user))
login_user(user)
return redirect(url_for('dashboard_home'))
return render_template('login.html', form=form)
@app.route("/thread", methods=['GET','POST'])
@login_required
def make_thread():
if request.method == "POST":
        if request.form['title'] and request.form['description']:
newThread = Threads(title=request.form['title'],
url = request.form['title'].replace(" ", "-"),
description=request.form['description'],
owner=current_user)
db.session.add(newThread)
db.session.commit()
else:
return render_template("createpost.html")
return render_template('dashboard.html')
@app.route("/<threadTitle>", methods=['GET','POST'])
@login_required
def show_thread(threadTitle):
query = Threads.query.filter_by(url = threadTitle).first()
if query is None:
return redirect(url_for("dashboard_home"))
else:
views = query.views
threadDict = {"title": query.title, "description": query.description, "replies": query.replies, "views": views}
query.views = query.views + 1
db.session.commit()
return render_template('post.html', threadDict = threadDict)
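# ---------------------------------------------------------------------------
# Illustrative note (not part of the original app): the SQLite tables defined
# above must exist before the first request. Assuming this module can be
# imported, they can be created once from a Python shell, e.g.:
#
#   >>> from <this_module> import db
#   >>> db.create_all()
# ---------------------------------------------------------------------------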
if __name__ == '__main__':
app.run(host="0.0.0.0", port="8081")
|
the-stack_0_3515 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands for interacting with dogbin(https://del.dog)"""
from requests import get, post, exceptions
import os
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
DOGBIN_URL = "https://del.dog/"
NEKOBIN_URL = "https://nekobin.com/"
@register(outgoing=True, pattern=r"^.paste(?: |$)([\s\S]*)")
async def paste(pstl):
""" For .paste command, pastes the text directly to dogbin. """
dogbin_final_url = ""
match = pstl.pattern_match.group(1).strip()
reply_id = pstl.reply_to_msg_id
if not (match or reply_id):
await pstl.edit("`Elon Musk said I cannot paste void.`")
return
if match:
message = match
elif reply_id:
message = await pstl.get_reply_message()
if message.media:
downloaded_file_name = await pstl.client.download_media(
message, TEMP_DOWNLOAD_DIRECTORY,
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8") + "\r"
os.remove(downloaded_file_name)
else:
message = message.message
# Dogbin
await pstl.edit("`Pasting text . . .`")
resp = post(DOGBIN_URL + "documents", data=message.encode("utf-8"))
if resp.status_code == 200:
response = resp.json()
key = response["key"]
dogbin_final_url = DOGBIN_URL + key
if response["isUrl"]:
reply_text = (
"`Pasted successfully!`\n\n"
f"`Shortened URL:` {dogbin_final_url}\n\n"
"`Original(non-shortened) URLs`\n"
f"`Dogbin URL`: {DOGBIN_URL}v/{key}\n"
)
else:
reply_text = (
"`Pasted successfully!`\n\n" f"`Dogbin URL`: {dogbin_final_url}"
)
else:
reply_text = "`Failed to reach Dogbin`"
await pstl.edit(reply_text)
if BOTLOG:
await pstl.client.send_message(
BOTLOG_CHATID, "Paste query was executed successfully",
)
@register(outgoing=True, pattern="^.getpaste(?: |$)(.*)")
async def get_dogbin_content(dog_url):
""" For .getpaste command, fetches the content of a dogbin URL. """
textx = await dog_url.get_reply_message()
message = dog_url.pattern_match.group(1)
await dog_url.edit("`Getting dogbin content...`")
if textx:
message = str(textx.message)
format_normal = f"{DOGBIN_URL}"
format_view = f"{DOGBIN_URL}v/"
if message.startswith(format_view):
message = message[len(format_view) :]
elif message.startswith(format_normal):
message = message[len(format_normal) :]
elif message.startswith("del.dog/"):
message = message[len("del.dog/") :]
else:
await dog_url.edit("`Is that even a dogbin url?`")
return
resp = get(f"{DOGBIN_URL}raw/{message}")
try:
resp.raise_for_status()
except exceptions.HTTPError as HTTPErr:
await dog_url.edit(
"Request returned an unsuccessful status code.\n\n" + str(HTTPErr)
)
return
except exceptions.Timeout as TimeoutErr:
await dog_url.edit("Request timed out." + str(TimeoutErr))
return
except exceptions.TooManyRedirects as RedirectsErr:
await dog_url.edit(
"Request exceeded the configured number of maximum redirections."
+ str(RedirectsErr)
)
return
reply_text = "`Fetched dogbin URL content successfully!`\n\n`Content:` " + resp.text
await dog_url.edit(reply_text)
if BOTLOG:
await dog_url.client.send_message(
BOTLOG_CHATID, "Get dogbin content query was executed successfully",
)
@register(outgoing=True, pattern=r"^\.neko(?: |$)([\s\S]*)")
async def neko(nekobin):
"""For .paste command, pastes the text directly to dogbin."""
nekobin_final_url = ""
match = nekobin.pattern_match.group(1).strip()
reply_id = nekobin.reply_to_msg_id
if not match and not reply_id:
        return await nekobin.edit("`Cannot paste text.`")
if match:
message = match
elif reply_id:
message = await nekobin.get_reply_message()
if message.media:
downloaded_file_name = await nekobin.client.download_media(
message,
TEMP_DOWNLOAD_DIRECTORY,
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8")
os.remove(downloaded_file_name)
else:
message = message.text
# Nekobin
await nekobin.edit("`Pasting text . . .`")
resp = post(NEKOBIN_URL + "api/documents", json={"content": message})
if resp.status_code == 201:
response = resp.json()
key = response["result"]["key"]
nekobin_final_url = NEKOBIN_URL + key
reply_text = (
"`Pasted successfully!`\n\n"
f"[Nekobin URL]({nekobin_final_url})\n"
f"[View RAW]({NEKOBIN_URL}raw/{key})"
)
else:
reply_text = "`Failed to reach Nekobin`"
await nekobin.edit(reply_text)
if BOTLOG:
await nekobin.client.send_message(
BOTLOG_CHATID,
"Paste query was executed successfully",
)
CMD_HELP.update(
{
"dogbin": ".paste <text/reply>\
\nUsage: Create a paste or a shortened url using dogbin (https://del.dog/)\
\n\n.getpaste\
\nUsage: Gets the content of a paste or shortened url from dogbin (https://del.dog/)\
\n\n.neko <text/reply>\
\nUsage: Create a paste or a shortened url using nekobin (https://nekobin.com/)"
}
)
|
the-stack_0_3516 | # -*- coding:utf-8 -*-
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, and_, or_, func
from sqlalchemy.orm import sessionmaker
# 创建对象基类
Base = declarative_base()
class User(Base):
"""
1.指定表名
2.指定表结构
"""
__tablename__ = 'user'
def __init__(self, name=None, age=None, address=None):
self.user_name = name
self.user_age = age
self.user_address = address
id = Column(Integer, primary_key=True, autoincrement=True)
user_name = Column('userName', String(255))
user_age = Column('userAge', Integer)
user_address = Column('userAddress', String(255))
def __str__(self):
return self.user_name
def __repr__(self):
        return "<User(name=%r, age=%r)>" % (self.user_name, self.user_age)
# 数据库连接 echo=True 打印sql
engine = create_engine("mysql+pymysql://root:123456@localhost:3306/java?charset=utf8", echo=True)
# 创建表结构
# Base.metadata.create_all(engine)
# 删除表
# Base.metadata.drop_all(engine)
# session
Session = sessionmaker(bind=engine)
session = Session()
if __name__ == '__main__':
# 增
u = User('user', 10, 'address')
u1 = User()
u1.user_name = 'user1'
u1.user_age = 11
u1.user_address = 'address'
session.add(u)
session.add_all([u1])
session.commit()
# 删
session.query(User).filter(User.id > 10).delete()
session.query(User).filter_by(user_name='user').delete()
session.commit()
# 改
session.query(User).filter(User.id == 1).update({User.user_name: 'user_name'})
session.query(User).filter_by(user_name='user_name').update({'user_name': 'test_name'})
session.commit()
# 查
user = session.query(User).first()
# and
users = session.query(User).filter(User.id.in_([1, 2, 3])
, User.user_name == 'test_name').all()
users1 = session.query(User).filter(and_(User.id == 1, User.user_name == 'test_name')).all()
# or
users2 = session.query(User).filter(or_(User.id > 1, User.user_name == 'test_name')).all()
# like
users3 = session.query(User).filter(User.user_name.like('name%')).all()
# limit
users4 = session.query(User)[0:1]
# sort
users5 = session.query(User).order_by(User.id.desc()).all()
# group
users6 = session.query(User).group_by(User.id).all()
# func
max_id = session.query(func.max(User.id)).one()
sum_age = session.query(func.sum(User.user_age)).one()
|
the-stack_0_3517 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
from HTMLTestRunner import HTMLTestRunner
import unittest
from db_fixture import test_data
sys.path.append('./interface')
sys.path.append('./db_fixture')
# 指定测试用例为当前文件夹下的 interface 目录
test_dir = './interface'
discover = unittest.defaultTestLoader.discover(test_dir, pattern='*_test.py')
if __name__ == "__main__":
# 初始化接口测试数据
test_data.init_data()
now = time.strftime("%Y-%m-%d %H_%M_%S")
filename = './report/' + now + '_result.html'
fp = open(filename, 'wb')
runner = HTMLTestRunner(stream=fp,
title='Guest Manage System Interface Test Report',
description='Implementation Example with: ')
runner.run(discover)
fp.close()
|
the-stack_0_3518 | from collections import defaultdict
from mesa.time import RandomActivation
class RandomActivationByBreed(RandomActivation):
"""
A scheduler which activates each type of agent once per step, in random
order, with the order reshuffled every step.
This is equivalent to the NetLogo 'ask breed...' and is generally the
default behavior for an ABM.
Assumes that all agents have a step() method.
"""
def __init__(self, model):
super().__init__(model)
self.agents_by_breed = defaultdict(dict)
self.seen_ids = set()
def add(self, agent):
"""
Add an Agent object to the schedule
Args:
agent: An Agent to be added to the schedule.
"""
self._agents[agent.unique_id] = agent
agent_class = type(agent)
self.agents_by_breed[agent_class][agent.unique_id] = agent
def remove(self, agent):
"""
Remove all instances of a given agent from the schedule.
"""
del self._agents[agent.unique_id]
agent_class = type(agent)
del self.agents_by_breed[agent_class][agent.unique_id]
def step(self, by_breed=True):
"""
Executes the step of each agent breed, one at a time, in random order.
Args:
by_breed: If True, run all agents of a single breed before running
the next one.
"""
if by_breed:
for agent_class in self.agents_by_breed:
self.step_breed(agent_class)
self.steps += 1
self.time += 1
else:
super().step()
def step_breed(self, breed):
"""
Shuffle order and run all agents of a given breed.
Args:
breed: Class object of the breed to run.
"""
agent_keys = list(self.agents_by_breed[breed].keys())
self.model.random.shuffle(agent_keys)
for agent_key in agent_keys:
self.agents_by_breed[breed][agent_key].step()
def get_breed_count(self, breed_class):
"""
Returns the current number of agents of certain breed in the queue.
"""
return len(self.agents_by_breed[breed_class].values())
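# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative; not part of the original module).
# `Sheep` and `Wolf` are hypothetical mesa Agent subclasses with a step()
# method; breeds are keyed by type(), so any agent classes work.
# ---------------------------------------------------------------------------
#   schedule = RandomActivationByBreed(model)
#   schedule.add(Sheep(unique_id=1, model=model))
#   schedule.add(Wolf(unique_id=2, model=model))
#   schedule.step(by_breed=True)      # all sheep step, then all wolves
#   n_sheep = schedule.get_breed_count(Sheep)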
|
the-stack_0_3519 | "Miscellaneous utilities"
###########################################################################
# Copyright (C) 2008 William Stein <[email protected]> #
# Distributed under the terms of the GNU General Public License (GPL) #
# http://www.gnu.org/licenses/ #
###########################################################################
from sage.structure.sequence import Sequence
from sage.categories.fields import Fields
_Fields = Fields()
def composite_field(K, L):
"""
Return a canonical field that contains both $K$ and $L$, if possible.
Otherwise, raise a ValueError.
INPUT:
K -- field
L -- field
OUTPUT:
field
EXAMPLES:
sage: composite_field(QQ,QQbar)
doctest:...: DeprecationWarning: The function composite_field() is deprecated. Use get_coercion_model().common_parent() instead
See http://trac.sagemath.org/19415 for details.
Algebraic Field
sage: composite_field(QQ,QQ[sqrt(2)])
Number Field in sqrt2 with defining polynomial x^2 - 2
sage: composite_field(QQ,QQ)
Rational Field
sage: composite_field(QQ,GF(7))
Traceback (most recent call last):
...
ValueError: unable to find a common field
"""
from sage.misc.superseded import deprecation
deprecation(19415, "The function composite_field() is deprecated. Use get_coercion_model().common_parent() instead")
C = Sequence([K(0), L(0)]).universe()
if C not in _Fields:
raise ValueError("unable to find a common field")
return C
|
the-stack_0_3523 | import sys
from colour.utilities.deprecation import ModuleAPI, build_API_changes
from colour.utilities.documentation import is_documentation_building
from colour.hints import Any
from .primitives import MAPPING_PLANE_TO_AXIS, primitive_grid, primitive_cube
from .primitives import PRIMITIVE_METHODS, primitive
from .section import hull_section
from .vertices import (
primitive_vertices_quad_mpl,
primitive_vertices_grid_mpl,
primitive_vertices_cube_mpl,
primitive_vertices_sphere,
)
from .vertices import PRIMITIVE_VERTICES_METHODS, primitive_vertices
__all__ = [
"MAPPING_PLANE_TO_AXIS",
"primitive_grid",
"primitive_cube",
]
__all__ += [
"hull_section",
]
__all__ += [
"PRIMITIVE_METHODS",
"primitive",
]
__all__ += [
"primitive_vertices_quad_mpl",
"primitive_vertices_grid_mpl",
"primitive_vertices_cube_mpl",
"primitive_vertices_sphere",
]
__all__ += [
"PRIMITIVE_VERTICES_METHODS",
"primitive_vertices",
]
# ----------------------------------------------------------------------------#
# --- API Changes and Deprecation Management ---#
# ----------------------------------------------------------------------------#
class geometry(ModuleAPI):
"""Define a class acting like the *geometry* module."""
def __getattr__(self, attribute) -> Any:
"""Return the value from the attribute with given name."""
return super().__getattr__(attribute)
# v0.4.0
API_CHANGES = {
"ObjectRenamed": [
[
"colour.geometry.PLANE_TO_AXIS_MAPPING",
"colour.geometry.MAPPING_PLANE_TO_AXIS",
],
]
}
"""Defines the *colour.geometry* sub-package API changes."""
if not is_documentation_building():
sys.modules["colour.geometry"] = geometry( # type:ignore[assignment]
sys.modules["colour.geometry"], build_API_changes(API_CHANGES)
)
del ModuleAPI, is_documentation_building, build_API_changes, sys
|
the-stack_0_3524 | # -*- coding: utf-8 -*-
import hashlib
from flask import (render_template, g, session,
jsonify, request,redirect, flash)
from web.app import app
from web.model import (User, UserInfo,UserSetting,BasicUser,
AdvancedUser,FeedSite, Feed, Sub, ReadFeed)
@app.route("/api/pop-feedsite/sub", methods=["POST"])
def sub_pop_feedsite():
feedsiteid = request.form.get("feedsiteid")
feedsite = FeedSite.get_feedsite_by_id(feedsiteid)
    if feedsite is None:
        flash("add feedsite %s failed" % feedsiteid)
        return jsonify(dict(rcode=404))
g.user.sub_feedsite(feedsite)
flash("add %s sucessfully"%feedsite.title)
return jsonify(dict(rcode=200))
@app.route("/api/pop-feedsite/<feedsiteid>/", methods=["GET","POST"])
def pop_feeds(feedsiteid=None):
if feedsiteid is None:
return jsonify(dict(rcode=404))
feeds = [feed.to_dict() for feed in Feed.objects(feedsite=feedsiteid).order_by("-create_date")[:15]]
return jsonify(dict(rcode=200, feeds=feeds))
|
the-stack_0_3526 | """
Test address breakpoints set with shared library of SBAddress work correctly.
"""
import lldb
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class AddressBreakpointTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def test_address_breakpoints(self):
"""Test address breakpoints set with shared library of SBAddress work correctly."""
self.build()
self.address_breakpoints()
def address_breakpoints(self):
"""Test address breakpoints set with shared library of SBAddress work correctly."""
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
        # Now create a breakpoint in main.c using a source regex.
breakpoint = target.BreakpointCreateBySourceRegex(
"Set a breakpoint here", lldb.SBFileSpec("main.c"))
self.assertTrue(breakpoint and
breakpoint.GetNumLocations() >= 1,
VALID_BREAKPOINT)
# Get the breakpoint location from breakpoint after we verified that,
# indeed, it has one location.
location = breakpoint.GetLocationAtIndex(0)
self.assertTrue(location and
location.IsEnabled(),
VALID_BREAKPOINT_LOCATION)
# Next get the address from the location, and create an address breakpoint using
# that address:
address = location.GetAddress()
target.BreakpointDelete(breakpoint.GetID())
breakpoint = target.BreakpointCreateBySBAddress(address)
        # Leave ASLR enabled by clearing the disable-ASLR launch flag. This lets us
        # actually test (on platforms that support this flag) that the breakpoint
        # was able to track the module as its load address changes.
launch_info = lldb.SBLaunchInfo(None)
flags = launch_info.GetLaunchFlags()
flags &= ~lldb.eLaunchFlagDisableASLR
launch_info.SetLaunchFlags(flags)
error = lldb.SBError()
process = target.Launch(launch_info, error)
self.assertTrue(process, PROCESS_IS_VALID)
# Did we hit our breakpoint?
from lldbsuite.test.lldbutil import get_threads_stopped_at_breakpoint
threads = get_threads_stopped_at_breakpoint(process, breakpoint)
self.assertTrue(
len(threads) == 1,
"There should be a thread stopped at our breakpoint")
# The hit count for the breakpoint should be 1.
self.assertEquals(breakpoint.GetHitCount(), 1)
process.Kill()
# Now re-launch and see that we hit the breakpoint again:
launch_info.Clear()
launch_info.SetLaunchFlags(flags)
process = target.Launch(launch_info, error)
self.assertTrue(process, PROCESS_IS_VALID)
        threads = get_threads_stopped_at_breakpoint(process, breakpoint)
self.assertTrue(
len(threads) == 1,
"There should be a thread stopped at our breakpoint")
# The hit count for the breakpoint should now be 2.
self.assertEquals(breakpoint.GetHitCount(), 2)
|
the-stack_0_3529 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Policy Settings
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# Third-party modules
import six
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import (
StringField,
ReferenceField,
ListField,
EmbeddedDocumentField,
BooleanField,
)
# NOC modules
from .validationpolicy import ValidationPolicy
@six.python_2_unicode_compatible
class ValidationPolicyItem(EmbeddedDocument):
policy = ReferenceField(ValidationPolicy)
is_active = BooleanField(default=True)
def __str__(self):
return self.policy.name
@six.python_2_unicode_compatible
class ValidationPolicySettings(Document):
meta = {
"collection": "noc.validationpolicysettings",
"strict": False,
"auto_create_index": False,
"indexes": [("model_id", "object_id")],
}
model_id = StringField()
object_id = StringField()
policies = ListField(EmbeddedDocumentField(ValidationPolicyItem))
def __str__(self):
return "%s: %s" % (self.model_id, self.object_id)
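# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative; not part of the original module): the
# settings attached to a single object can be looked up through the
# (model_id, object_id) index. "sa.ManagedObject" and `obj` below are
# assumptions used only for illustration.
# ---------------------------------------------------------------------------
#   settings = ValidationPolicySettings.objects.filter(
#       model_id="sa.ManagedObject", object_id=str(obj.id)).first()
#   active_policies = [i.policy for i in settings.policies if i.is_active]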
|
the-stack_0_3531 | #!/usr/bin/env python3
all_brivla = [ ("tavla", ["speaker", "listener", "subject", "language"])
, ("dunda", ["donor", "gift", "recipient"])
, ("ctuca", ["instructor", "audience/student(s)", "ideas/methods", "subject", "teaching method"])
, ("citka", ["consumer", "aliment"])
, ("ciska", ["writer", "text/symbols", "display/storage medium", "writing implement"])
, ("klama", ["traveler", "destination", "origin", "route", "means/vehicle"])
, ("bridi", ["predicate relationship", "relation", "arguments"])
, ("djuno", ["knower", "facts", "subject", "epistemology"])
, ("nupre", ["promisor", "promise", "beneficiary/victim"])
, ("cusku", ["expresser", "message", "audience", "expressive medium"])
, ("cizra", ["strange thing", "viewpoint holder", "property"])
, ("cmene", ["name/title", "name posessor", "name-giver/name-user"])
, ("cusku", ["agent", "expressed idea", "audience", "expressive medium"])
, ("djica", ["desirer", "event/state", "purpose"])
, ("gleki", ["happy entity", "event/state"])
, ("jimpe", ["understander", "fact/truth", "subject"])
, ("klama", ["traveler", "destination", "origin", "route", "means/vehicle"])
, ("mutce", ["much/extreme thing", "property", "extreme/direction"])
, ("nelci", ["liker", "object/state"])
, ("pilno", ["user", "instrument", "purpose"])
, ("sipna", ["asleep entity"])
, ("xamgu", ["good object/event", "beneficiary", "standard"])
, ("zgana", ["observer", "observed", "senses/means", "conditions"])
, ("bangu", ["language/dialect", "language user", "communicated idea"])
, ("cliva", ["agent", "point of departure", "route"])
, ("finti", ["inventor/composer", "invention", "purpose", "existing elements/ideas"])
, ("gunka", ["worker", "activity", "goal"])
, ("jundi", ["attentive entity", "object/affair"])
, ("kakne", ["capable entity", "capability", "conditions"])
, ("tcidu", ["reader", "text", "reading material"])
, ("valsi", ["word", "meaning", "language"])
, ("zvati", ["atendee/event", "location"])
, ("cinri", ["interesting abstraction", "interested entity"])
, ("drata", ["entity #1", "entity #2", "standard"])
, ("simsa", ["entity #1", "entity #2", "property/quantity"])
, ("klaku", ["crier", "tears", "reason"])
, ("melbi", ["beautiful entity", "viewpoint holder", "aspect", "aesthetic standard"])
, ("smuni", ["meaning/interpretation", "expression", "opinion holder"])
, ("vecnu", ["seller", "goods/service", "buyer", "price"])
, ("plise", ["apple", "species/strain"])
, ("prenu", ["person"])
, ("cilre", ["learner", "facts", "subject", "source", "method"])
, ("cnino", ["new entity", "observer", "feature", "standard"])
, ("drani", ["correct thing", "property", "situation", "standard"])
, ("fanva", ["translator", "text/utterance", "target language", "source language", "translation result"])
, ("gasnu", ["agent", "event"])
, ("kelci", ["player", "toy"])
, ("milxe", ["mild thing", "property"])
, ("mlatu", ["cat", "species/breed"])
, ("nitcu", ["needing entity", "necessity", "purpose"])
, ("pendo", ["friendly entity", "friendliness experiencer"])
, ("pensi", ["thinking entity", "subject/concept"])
, ("skami", ["computer", "purpose"])
, ("slabu", ["familiar/old thing", "observer", "feature", "standard"])
, ("troci", ["trier", "attempted event/state/property", "actions/method"])
, ("zdani", ["house", "owner/user"])
]
def encode_text(text):
if "#" in text:
return "\"%s\"" % text
else:
return text
for brivla in all_brivla:
word = brivla[0]
places = brivla[1]
x1, x2, x3, x4, x5 = [None, None, None, None, None]
try:
x1 = encode_text(places[0])
x2 = encode_text(places[1])
x3 = encode_text(places[2])
x4 = encode_text(places[3])
x5 = encode_text(places[4])
    except IndexError:
        # fewer than five places are defined for this brivla
        pass
print("%s:" % word)
if x1:
print(" x1: %s" % x1)
if x2:
print(" x2: %s" % x2)
if x3:
print(" x3: %s" % x3)
if x4:
print(" x4: %s" % x4)
if x5:
print(" x5: %s" % x5)
print()
|
the-stack_0_3534 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Taken and modified for fairscale from:
# https://github.com/facebookresearch/fairscale/blob/main/fairscale/nn/data_parallel/sharded_ddp.py
#Commit: 8acbec718f3c70a6b9785470bb9e05cd84fc3f8e
import os
import contextlib
import logging
import time
import functools
import numpy as np
from itertools import chain
from functools import reduce
from collections import deque
from types import MethodType
import paddle
from paddle import nn
import paddle.distributed as dist
from paddle.distributed.collective import _get_global_group
from ...utils.internal_storage import GradStorage
from ...meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2
from .sharding_utils import Taskflow, Type
def _trainable(param):
return param.trainable
class ShardingStage2(nn.Layer):
"""
A wrapper for Sharding Stage2 Layer in Dygraph.
.. warning: ShardingStage2 encapsulates the layer strategy and integrates it into the nn.Layer.
.. ZeRO: https://arxiv.org/pdf/1910.02054.pdf.
"""
# TODO (Baibaifan)
# Feature Notes::
# 1. Unified memory for param and param.grad to InternalStorage.
# 2. Divide param.grad according to rank to centrally apply for and release GPU memory.
    # 3. Dynamically adjust training parameters and models.
# 4. Support offload function.
# 5. Support the establishment of independent communication groups.
def __init__(
self,
layer,
sharding_optimizer,
group=None,
sync_buffers=False,
pertrain_sync_models=True,
buffer_max_size=2**23, #8MB
auto_refresh_trainable=True,
device="gpu",
use_grad_storage=True,
accumulate_grads=False):
super().__init__()
# training options
self._layer = layer
self._sharding_optimizers = [sharding_optimizer] if not isinstance(
sharding_optimizer, list) else sharding_optimizer
assert all(
list(
map(lambda opt: isinstance(opt, ShardingOptimizerStage2),
self._sharding_optimizers))
), "Please use ShardingOptimizerStage2 optimizer"
self._sync_buffers = sync_buffers
self._auto_refresh_trainable = auto_refresh_trainable
# Gradient accumulation, Gradient flip
self._accumulate_grads = accumulate_grads
# Communication related attributes
self._group = group
group = _get_global_group() if group is None else group
self._world_size_scaling = 1.0 / group.nranks
assert group.nranks > 1, "Training must be distributed, ranks must be greater than 1"
self._rank = group.rank
self._global_root_rank = 0 # picking rank 0 as the reference
self._default_device = device
# Global statistical parameters
self._all_params = list(
chain(*[optim.local_params for optim in self._sharding_optimizers]))
self._trainable_params = []
self._grad_reduced = []
self._trainable_param2rank = {}
self._trainable_param2align = {}
self._trainable_mask = list(map(_trainable, self._all_params))
self._param_grads = []
# Set grad storage size & Display param sizes and model sizes
model_size = sum(
[np.prod(p.shape) for p in self._layer.parameters()]).item()
self._buffer_max_size = self._rank_buffer_size(buffer_max_size,
model_size)
self._use_grad_storage = use_grad_storage
self._grad_storages = {} # {dtype: {rank: GradStorage}}
self._has_grad_storage = []
self._grad_storage_list = []
# Offload
# TODO(haohongxiang): Now it's not be supported for multi-optimizers using Offload strategy
self._offload_optims = list(
filter(lambda optim: optim.offload, self._sharding_optimizers))
if len(self._offload_optims) > 0:
assert len(
self._sharding_optimizers
) == 1, "Only support offload strategy for single optimizer"
self._offload = self._sharding_optimizers[0].offload
self._offload_device = "cpu"
# Set backward pass hooks
self._bw_hooks = []
# Synchronous all ranks models
if pertrain_sync_models:
self._sync_params_and_buffers()
# Set tasks flow
self._tasks_flow = deque()
# Define optimizer step and clear_grad
if self._accumulate_grads:
self._redefine_opt_step()
self._redefine_opt_clear()
def forward(self, *inputs, **kwargs):
"""
A wrapper for Sharding Stage2 layer.
- Fresh trainable params or rebuild grad storage
- Sync layer's buffer params
- Clear all flags states
- Forward for origin layers
"""
# Whether to need to reset trainable parameters
needs_fresh = len(self._bw_hooks) == 0 and self.training
if self._auto_refresh_trainable:
needs_fresh |= self._detect_train_change()
# Front hook
self._init_internal_storage(needs_fresh)
# Sync layer's buffers state
if self._sync_buffers:
self.__sync_buffers()
# Normal FW on the base model
fw = self._layer(*inputs, **kwargs)
return fw
def _clear_gradients(self):
"""
Set zero to the gradient of the optimizer's current rank trainable parameters.
"""
# Release grad storages
for dtype in self._grad_storages.keys():
if self._rank in self._grad_storages[dtype].keys():
if not self._offload:
self._grad_storages[dtype][self._rank].buffer.zero_()
# Release params
for param in self._trainable_params:
if param.name in self._param_grads and param.grad is not None:
param.clear_gradient()
def _grad_scale(self):
"""
Before the gradient accumulation, scale the gradient.
"""
if self._offload:
for param in self._trainable_params:
if param.name in self._sharding_optimizers[
0]._master_params.keys():
self._sharding_optimizers[0]._master_params[
param.name].grad.scale_(scale=self._world_size_scaling)
else:
# Scale grad storages
for dtype in self._grad_storages.keys():
if self._rank in self._grad_storages[dtype].keys():
self._grad_storages[dtype][self._rank].buffer.scale_(
scale=self._world_size_scaling)
# Scale params
for param in self._trainable_params:
if param.name in self._param_grads and param.grad is not None:
param.grad.scale_(scale=self._world_size_scaling)
param._reset_grad_inplace_version(True)
def _init_internal_storage(self, needs_fresh):
"""
Judge Fresh trainable params or rebuild grad storage.
"""
if needs_fresh:
self._fresh_trainable()
else:
self._build_grad_storages()
# Clear all flags state
self._clear_counters()
def to(self, device=None, dtype=None, blocking=True):
"""
Synchronously or asynchronously convert the data type of the layer, the device is not supported now.
"""
assert isinstance(device, str), "Device must be type str"
assert device == self._default_device, "New devices are not supported, because of the optimizer state is not sync"
self._layer.to(device=device, dtype=dtype, blocking=blocking)
# Re-build the buckets, hooks, etc..
self._fresh_trainable()
def _fresh_trainable(self):
""" Whether to update training parameters. """
# Make sure that this is not done while gradients are waiting to be reduced (if no_sync context for instance)
if reduce(lambda x, y: x or y, self._grad_reduced, False):
logging.warning("Grads waiting to be reduced.")
self._trainable_params = list(
filter(lambda x: x.trainable, self._all_params))
self._trainable_params.sort(key=lambda x: np.prod(x.shape))
self._trainable_param2rank = {}
for optim in self._sharding_optimizers:
# Need to be wrappered for Sharding Stage2 Optimizer
if len(optim.param_storages.keys()) == 0:
optim.update_opt_status()
# Get the parameters split by the optimizer according to rank
for per_rank_params in optim.dtype_rank_params.values(
): # all the params from all ranks
for params in per_rank_params:
for param in filter(lambda x: x.trainable, params):
self._trainable_param2rank[
param.name] = optim.param2rank[param.name]
self._trainable_param2align[
param.name] = optim._param2align[param.name]
self._setup_use_grad_storage()
# wait next func hook support
self._setup_backward_hooks()
@paddle.no_grad()
def __sync_buffers(self):
"""
Sync all the param buffers from all ranks (exp: batch norm statistics).
"""
for buffer in self._layer.buffers(include_sublayers=True):
dist.broadcast(
buffer,
self._global_root_rank,
self._group,
use_calc_stream=True)
# Multi stream operation will be supported later
dist.wait(tensor=buffer, group=self._group, use_calc_stream=True)
def __getattr__(self, name):
"""Forward missing attributes to wrapped layer."""
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self._layer, name)
@paddle.no_grad()
def _clear_counters(self):
"""Reset all the grad reduce and call counters."""
if self.training:
self._grad_reduced = [True for _ in self._trainable_params]
if self._use_grad_storage:
for grad_storage in self._grad_storage_list:
grad_storage.reset_checked_in()
def _get_reduce_fn(self, index, param, dst_rank):
"""
There are two ways to reduce gradient.
- 1. Do not use use_grad_storage or exceeded buffer_max_size will be reduced separately.
- 2. Use grad_storage Reduce the storage to get the full gradient from different ranks.
"""
if not self._use_grad_storage or not self._has_grad_storage[index]:
# Direct reduction
@paddle.no_grad()
def reduce(*_):
# Skip gradient reduction, do not change status information
if self._grad_reduced[index]:
assert param.grad is not None, "Parameter gradient cannot be None"
# Change reduce information
self._grad_reduced[index] = False
if not self._accumulate_grads:
param.grad.scale_(scale=self._world_size_scaling)
param._reset_grad_inplace_version(True)
# Clear the gradient that does not belong to the current rank through the callback function
def cleanup():
if dst_rank != self._rank:
param.clear_gradient(False)
elif self._offload:
self._sharding_optimizers[0]._master_params[
param.name]._copy_gradient_from(param.grad.cpu(
).cast(dtype=Type.fp32.value))
param.clear_gradient(False)
# Synchronize the reduce parameter gradient
self._tasks_flow.append(
Taskflow(
task=dist.reduce(
tensor=param.grad,
dst=dst_rank,
group=self._group,
use_calc_stream=True),
callback=cleanup))
# Multi stream operation will be supported later
dist.wait(
tensor=param.grad,
group=self._group,
use_calc_stream=True)
# Clear the task flow and trigger callback to clear the redundant gradient
self._clear_task_flow()
else:
# Buffer reduction
@paddle.no_grad()
def reduce(*_):
# Skip gradient reduction, do not change status information
if self._grad_reduced[index]:
assert param.grad is not None, "Parameter gradient cannot be None"
# Change reduce information
self._grad_reduced[index] = False
grad_storage = self._grad_storages[param.dtype][dst_rank]
grad_storage.params_checked_in += 1
if grad_storage.all_checked_in:
assert grad_storage.buffer is not None
# Normalize all ranks grad_storage
if not self._accumulate_grads:
grad_storage.buffer.scale_(
scale=self._world_size_scaling)
# Clearing up the grad_storage buffer
def cleanup():
if dst_rank != self._rank:
for p in grad_storage._params:
p.clear_gradient(False)
p._gradient_set_empty(False)
grad_storage.buffer.value().get_tensor()._clear(
)
elif self._offload:
grad_storage.to(device=self._offload_device)
for param in grad_storage._params:
self._sharding_optimizers[0]._master_params[
param.name]._copy_gradient_from(
param.grad.cast(
dtype=Type.fp32.value))
grad_storage.buffer.value().get_tensor()._clear(
)
# Reduce the bucket
grad_storage.sent = True
self._tasks_flow.append(
Taskflow(
task=dist.reduce(
tensor=grad_storage.buffer,
dst=grad_storage.destination,
group=self._group,
use_calc_stream=True),
callback=cleanup))
# Multi stream operation will be supported later
dist.wait(
tensor=grad_storage.buffer,
group=self._group,
use_calc_stream=True)
# Clear the task flow and trigger callback to clear the redundant gradient
self._clear_task_flow()
return reduce
def _setup_backward_hooks(self):
"""
Set the backward hook to synchronize the gradients of all rank by reduce group ranks.
"""
# Remove previous backward hooks
while len(self._bw_hooks) > 0:
self._bw_hooks.pop().remove()
# Go through the parameters, attach the hook
if not self.training:
return
for index, param in enumerate(self._trainable_params):
dst_rank = self._trainable_param2rank[param.name]
reduce_function = self._get_reduce_fn(index, param, dst_rank)
self._bw_hooks.append(
param._register_backward_hook(reduce_function))
@paddle.no_grad()
def _sync_params_and_buffers(self):
"""
Sync all model states for all ranks
"""
for t in self._layer.parameters():
dist.broadcast(
t,
src=self._global_root_rank,
group=self._group,
use_calc_stream=True)
# Multi stream operation will be supported later
dist.wait(tensor=t, group=self._group, use_calc_stream=True)
def _setup_use_grad_storage(self):
"""
Integrate the parameters gradient into a continuous memory according to rank, and support the update of training parameters.
"""
if not self._use_grad_storage:
return
# According to parameters's numel sort, allocate memory of parameter gradient to continuous memory according to rank
self._grad_storages = {}
self._has_grad_storage = [False for _ in self._trainable_params]
for index, param in enumerate(self._trainable_params):
dst_rank = self._trainable_param2rank[param.name]
if param.dtype not in self._grad_storages.keys():
self._grad_storages[param.dtype] = {}
if dst_rank not in self._grad_storages[param.dtype].keys():
self._grad_storages[param.dtype][dst_rank] = GradStorage(
self._buffer_max_size[param.dtype],
dtype=param.dtype,
device=self._default_device,
destination=dst_rank,
parm2align=self._trainable_param2align)
# Criteria to decide whether this parameter is to be put in GradStorage
if self._grad_storages[param.dtype][dst_rank].can_add_grad_view(
param, self._trainable_param2align[param.name]):
self._grad_storages[param.dtype][dst_rank].add_grad(
param, self._trainable_param2align[param.name])
self._has_grad_storage[index] = True
else:
self._param_grads.append(param.name)
print(
"Can not add param: {}, param's shape: {}, param align: {}, grad_storages fill: {}, ".
format(param.name, param.shape, self._trainable_param2align[
param.name], self._grad_storages[param.dtype][dst_rank]
._fill))
self._grad_storage_list = list(
chain(*[
self._grad_storages[dtype].values()
for dtype in self._grad_storages.keys()
]))
def _clear_task_flow(self):
"""Try to consume the previous tasks."""
while len(self._tasks_flow) > 0:
task = self._tasks_flow.popleft()
if task.callback is not None:
task.callback()
def _detect_train_change(self):
# Current trainable parameters
trainable_mask = list(map(_trainable, self._all_params))
# Whether parameters trainability changed
trainability_changed = trainable_mask != self._trainable_mask
if trainability_changed:
logging.warning(
"Trainable params changed, because of eval/train mode or parameter freezing/unfreeze."
)
self._trainable_mask = trainable_mask
return trainability_changed
def _build_grad_storages(self):
"""
Rebuild grad storages.
"""
# Rebuild fp16/fp32 grad storages
for dtype in self._grad_storages.keys():
for dst_rank, grad_storage in self._grad_storages[dtype].items():
if self._offload or dst_rank != self._rank:
grad_storage.manumal_relase()
grad_storage.rebuild()
def _rank_buffer_size(self, buffer_max_size, model_size):
"""
Generate the minimum buffer size for each rank & Display param sizes and model sizes.
"""
# Initialize buffer size
rank_buffer_size = {}
for shard_opt in self._sharding_optimizers:
if shard_opt.rank_buffer_size:
for dtype in shard_opt.rank_buffer_size.keys():
sizes = max(shard_opt.rank_buffer_size[dtype].values())
rank_buffer_size[dtype] = min(sizes, buffer_max_size)
if Type.fp16.value in rank_buffer_size.keys():
# FP16 GradStorage and model size
print(
"====== FP16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======".
format(rank_buffer_size[Type.fp16.value] / 2**19, model_size / 2
**19))
if Type.fp32.value in rank_buffer_size.keys():
# FP32 GradStorage and model size
print(
"====== FP32 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======".
format(rank_buffer_size[Type.fp32.value] / 2**18, model_size / 2
**18))
return rank_buffer_size
def _redefine_opt_step(self):
if not self._accumulate_grads:
return
grad_func = self._grad_scale
for opt in self._sharding_optimizers:
opt_step = opt.step
def _opt_step(self):
grad_func()
opt_step()
opt.step = MethodType(_opt_step, opt)
def _redefine_opt_clear(self):
clear_func = self._clear_gradients
def _opt_clear(self):
clear_func()
for opt in self._sharding_optimizers:
opt.clear_grad = MethodType(_opt_clear, opt)
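# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative; not part of the original module). It
# assumes `layer` is a paddle.nn.Layer and `sharding_optimizer` is an already
# constructed ShardingOptimizerStage2 instance (its constructor signature is
# not shown in this file, so it is omitted here).
# ---------------------------------------------------------------------------
#   model = ShardingStage2(layer, sharding_optimizer, buffer_max_size=2**23)
#   for batch in data_loader():
#       loss = paddle.mean(model(batch))
#       loss.backward()               # grads are reduced rank-by-rank via hooks
#       sharding_optimizer.step()
#       sharding_optimizer.clear_grad()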
|
the-stack_0_3535 | import os
import numpy
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import ChemicalFeatures
from rdkit import RDConfig
from chainer_chemistry.config import WEAVE_DEFAULT_NUM_MAX_ATOMS
from chainer_chemistry.dataset.preprocessors.common \
import construct_atomic_number_array
from chainer_chemistry.dataset.preprocessors.common \
import MolFeatureExtractionError
from chainer_chemistry.dataset.preprocessors.common import type_check_num_atoms
from chainer_chemistry.dataset.preprocessors.mol_preprocessor \
import MolPreprocessor
ATOM = ['H', 'C', 'N', 'O', 'S', 'Cl', 'Br', 'F', 'P', 'I']
MAX_DISTANCE = 2 # 7
# --- Atom feature extraction ---
def construct_atom_type_vec(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS,
atom_list=None, include_unknown_atom=False):
atom_list = atom_list or ATOM
if include_unknown_atom:
# all atom not in `atom_list` as considered as "unknown atom"
# and its index is `len(atom_list)`
n_atom_type = len(atom_list) + 1
else:
n_atom_type = len(atom_list)
n_atom = mol.GetNumAtoms()
atom_type_vec = numpy.zeros((num_max_atoms, n_atom_type),
dtype=numpy.float32)
for i in range(n_atom):
a = mol.GetAtomWithIdx(i)
try:
atom_idx = atom_list.index(a.GetSymbol())
except ValueError as e:
if include_unknown_atom:
atom_idx = len(atom_list)
else:
raise MolFeatureExtractionError(e)
atom_type_vec[i, atom_idx] = 1.0
return atom_type_vec
def construct_formal_charge_vec(mol,
num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
n_atom = mol.GetNumAtoms()
formal_charge_vec = numpy.zeros((num_max_atoms, 1), dtype=numpy.float32)
for i in range(n_atom):
a = mol.GetAtomWithIdx(i)
formal_charge_vec[i, 0] = a.GetFormalCharge()
return formal_charge_vec
def construct_hybridization_vec(mol,
num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
# TODO(Oono)
# Can we enhance preprocessing speed by making factory once
# prior to calling this function many times?
n_atom = mol.GetNumAtoms()
hybridization_vec = numpy.zeros((num_max_atoms, 3), dtype=numpy.float32)
for i in range(n_atom):
a = mol.GetAtomWithIdx(i)
if a.GetHybridization() is None:
continue
hybridization_type = str(a.GetHybridization())
        if hybridization_type == 'SP':  # RDKit stringifies sp hybridization as 'SP'
            hybridization_vec[i, 0] = 1.0
elif hybridization_type == 'SP2':
hybridization_vec[i, 1] = 1.0
elif hybridization_type == 'SP3':
hybridization_vec[i, 2] = 1.0
return hybridization_vec
def construct_partial_charge_vec(
mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
AllChem.ComputeGasteigerCharges(mol)
n = mol.GetNumAtoms()
partial_charge_vec = numpy.zeros((num_max_atoms, 1), dtype=numpy.float32)
for i in range(n):
a = mol.GetAtomWithIdx(i)
partial_charge_vec[i, 0] = a.GetProp("_GasteigerCharge")
return partial_charge_vec
def construct_atom_ring_vec(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
nAtom = mol.GetNumAtoms()
sssr = Chem.GetSymmSSSR(mol)
ring_feature = numpy.zeros((num_max_atoms, 6,), dtype=numpy.float32)
for ring in sssr:
ring = list(ring)
for i in range(nAtom):
if i in ring:
ring_size = len(ring)
if ring_size >= 3 and ring_size <= 8:
ring_feature[i, ring_size - 3] = 1.0
return ring_feature
def construct_hydrogen_bonding(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
fdefName = os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
factory = ChemicalFeatures.BuildFeatureFactory(fdefName)
feats = factory.GetFeaturesForMol(mol)
hydrogen_bonding_vec = numpy.zeros((num_max_atoms, 2), dtype=numpy.float32)
for f in feats:
if f.GetFamily() == 'Donor':
idx = f.GetAtomIds()[0]
hydrogen_bonding_vec[idx, 0] = 1.0
if f.GetFamily() == 'Acceptor':
idx = f.GetAtomIds()[0]
hydrogen_bonding_vec[idx, 1] = 1.0
return hydrogen_bonding_vec
def construct_num_hydrogens_vec(mol,
num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
n_hydrogen_vec = numpy.zeros((num_max_atoms, 1), dtype=numpy.float32)
n_atom = mol.GetNumAtoms()
for i in range(n_atom):
n = 0
for j in range(n_atom):
if i == j:
continue
a = mol.GetAtomWithIdx(j)
if a.GetSymbol() != 'H':
continue
k = mol.GetBondBetweenAtoms(i, j)
if k is not None:
n += 1
n_hydrogen_vec[i, 0] = n
return n_hydrogen_vec
def construct_aromaticity_vec(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
aromaticity_vec = numpy.zeros((num_max_atoms, 1), dtype=numpy.float32)
aromatix_atoms = mol.GetAromaticAtoms()
for a in aromatix_atoms:
aromaticity_vec[a.GetIdx()] = 1.0
return aromaticity_vec
def construct_atom_feature(mol, add_Hs,
num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS,
atom_list=None, include_unknown_atom=False):
"""construct atom feature
Args:
mol (Mol): mol instance
add_Hs (bool): if the `mol` instance was added Hs, set True.
num_max_atoms (int): number of max atoms
atom_list (list): list of atoms to extract feature. If None, default
`ATOM` is used as `atom_list`
include_unknown_atom (bool): If False, when the `mol` includes atom
which is not in `atom_list`, it will raise
`MolFeatureExtractionError`.
If True, even the atom is not in `atom_list`, `atom_type` is set
as "unknown" atom.
Returns (numpy.ndarray): 2 dimensional array. First axis size is
`num_max_atoms`, representing each atom index.
Second axis for feature.
"""
atom_type_vec = construct_atom_type_vec(
mol, num_max_atoms, atom_list=atom_list,
include_unknown_atom=include_unknown_atom)
    # TODO(nakago): Chirality
formal_charge_vec = construct_formal_charge_vec(
mol, num_max_atoms=num_max_atoms)
partial_charge_vec = construct_partial_charge_vec(
mol, num_max_atoms=num_max_atoms)
atom_ring_vec = construct_atom_ring_vec(
mol, num_max_atoms=num_max_atoms)
hybridization_vec = construct_hybridization_vec(
mol, num_max_atoms=num_max_atoms)
hydrogen_bonding = construct_hydrogen_bonding(
mol, num_max_atoms=num_max_atoms)
aromaticity_vec = construct_aromaticity_vec(
mol, num_max_atoms=num_max_atoms)
if add_Hs:
num_hydrogens_vec = construct_num_hydrogens_vec(
mol, num_max_atoms=num_max_atoms)
feature = numpy.hstack((atom_type_vec, formal_charge_vec,
partial_charge_vec, atom_ring_vec,
hybridization_vec, hydrogen_bonding,
aromaticity_vec, num_hydrogens_vec))
else:
feature = numpy.hstack((atom_type_vec, formal_charge_vec,
partial_charge_vec, atom_ring_vec,
hybridization_vec, hydrogen_bonding,
aromaticity_vec))
return feature
# --- Pair feature extraction ---
def construct_bond_vec(mol, i, j):
bond_feature_vec = numpy.zeros((4, ), dtype=numpy.float32)
k = mol.GetBondBetweenAtoms(i, j)
if k is not None:
bond_type = str(k.GetBondType())
if bond_type == 'SINGLE':
bond_feature_vec[0] = 1.0
elif bond_type == 'DOUBLE':
bond_feature_vec[1] = 1.0
elif bond_type == 'TRIPLE':
bond_feature_vec[2] = 1.0
elif bond_type == 'AROMATIC':
bond_feature_vec[3] = 1.0
else:
raise ValueError("Unknown bond type {}".format(bond_type))
return bond_feature_vec
def construct_distance_vec(distance_matrix, i, j):
distance = min(MAX_DISTANCE, int(distance_matrix[i][j]))
distance_feature = numpy.zeros((MAX_DISTANCE, ), dtype=numpy.float32)
distance_feature[:distance] = 1.0
return distance_feature
def construct_ring_feature_vec(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
n_atom = mol.GetNumAtoms()
sssr = Chem.GetSymmSSSR(mol)
ring_feature_vec = numpy.zeros(
(num_max_atoms ** 2, 1,), dtype=numpy.float32)
for ring in sssr:
ring = list(ring)
n_atom_in_ring = len(ring)
for i in range(n_atom_in_ring):
for j in range(n_atom_in_ring):
a0 = ring[i]
a1 = ring[j]
ring_feature_vec[a0 * n_atom + a1] = 1
return ring_feature_vec
def construct_pair_feature(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
"""construct pair feature
Args:
mol (Mol): mol instance
num_max_atoms (int): number of max atoms
Returns (numpy.ndarray): 2 dimensional array. First axis size is
`num_max_atoms` ** 2, representing index of each atom pair.
Second axis for feature.
"""
n_atom = mol.GetNumAtoms()
distance_matrix = Chem.GetDistanceMatrix(mol)
distance_feature = numpy.zeros((num_max_atoms ** 2, MAX_DISTANCE,),
dtype=numpy.float32)
for i in range(n_atom):
for j in range(n_atom):
distance_feature[i * n_atom + j] = construct_distance_vec(
distance_matrix, i, j)
bond_feature = numpy.zeros((num_max_atoms ** 2, 4,), dtype=numpy.float32)
for i in range(n_atom):
for j in range(n_atom):
bond_feature[i * n_atom + j] = construct_bond_vec(mol, i, j)
ring_feature = construct_ring_feature_vec(mol, num_max_atoms=num_max_atoms)
feature = numpy.hstack((distance_feature, bond_feature, ring_feature))
return feature
class WeaveNetPreprocessor(MolPreprocessor):
"""WeaveNetPreprocessor
WeaveNet must have fixed-size atom list for now, zero_padding option
is always set to True.
Args:
max_atoms (int): Max number of atoms for each molecule, if the
number of atoms is more than this value, this data is simply
ignored.
Setting negative value indicates no limit for max atoms.
add_Hs (bool): If True, implicit Hs are added.
use_fixed_atom_feature (bool):
If True, atom feature is extracted used in original paper.
If it is False, atomic number is used instead.
atom_list (list): list of atoms to extract feature. If None, default
`ATOM` is used as `atom_list`
include_unknown_atom (bool): If False, when the `mol` includes atom
which is not in `atom_list`, it will raise
`MolFeatureExtractionError`.
If True, even the atom is not in `atom_list`, `atom_type` is set
as "unknown" atom.
kekulize (bool): If True, Kekulizes the molecule.
"""
def __init__(self, max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS, add_Hs=True,
use_fixed_atom_feature=False, atom_list=None,
include_unknown_atom=False, kekulize=False):
super(WeaveNetPreprocessor, self).__init__(
add_Hs=add_Hs, kekulize=kekulize)
zero_padding = True
if zero_padding and max_atoms <= 0:
raise ValueError('max_atoms must be set to positive value when '
'zero_padding is True')
self.max_atoms = max_atoms
self.add_Hs = add_Hs
self.zero_padding = zero_padding
self.use_fixed_atom_feature = use_fixed_atom_feature
self.atom_list = atom_list
self.include_unknown_atom = include_unknown_atom
def get_input_features(self, mol):
"""get input features for WeaveNet
WeaveNetPreprocessor automatically add `H` to `mol`
Args:
mol (Mol):
"""
type_check_num_atoms(mol, self.max_atoms)
if self.use_fixed_atom_feature:
# original paper feature extraction
atom_array = construct_atom_feature(mol, self.add_Hs,
self.max_atoms, self.atom_list,
self.include_unknown_atom)
else:
# embed id of atomic numbers
atom_array = construct_atomic_number_array(mol, self.max_atoms)
pair_feature = construct_pair_feature(mol,
num_max_atoms=self.max_atoms)
return atom_array, pair_feature
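# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative; not part of the original module).
# ---------------------------------------------------------------------------
#   from rdkit import Chem
#   preprocessor = WeaveNetPreprocessor(max_atoms=20, add_Hs=True,
#                                       use_fixed_atom_feature=True)
#   mol = Chem.AddHs(Chem.MolFromSmiles('CCO'))
#   atom_array, pair_feature = preprocessor.get_input_features(mol)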
|
the-stack_0_3536 | """
ApiGateway for CloudWedge
Provides implementation details for apigateway service. It follows contract
outlined in cloudwedge.models.AWSService
"""
from os import environ
import boto3
import jmespath
from typing import List, Any, Dict, Optional
from cloudwedge.utils.logger import get_logger
from cloudwedge.utils.tags import TagsApi
from cloudwedge.models import AWSService, AWSResource
REGION = environ.get('REGION')
LOGGER = get_logger("cloudwedge.apigateway")
# Model for Service, extending AWSResource
class ApiGatewayResource(AWSResource):
pass
# Class for Service
class ApiGatewayService(AWSService):
# Name of the service, must be unique
name = "apigateway"
# Cloudwatch alarm service specific values
cloudwatch_namespace = "AWS/ApiGateway"
cloudwatch_dashboard_section_title = "Api Gateway"
    cloudwatch_dimension = "ApiName"
# Default metric to be used when metrics are not explicit in tags
default_metrics = ["Latency",
"IntegrationLatency", "5XXError", "4XXError"]
# Alarm defaults for the service, applied if metric default doesnt exist
default_alarm_props = {
'Statistic': "Sum"
}
# List of supported metrics and default configurations
supported_metrics = {
        'Latency': {},
        'IntegrationLatency': {},
        '5XXError': {},
        '4XXError': {}
}
# There are dashboard additions that can be added at the metric level
override_dashboard_metric_properties = {}
@staticmethod
def build_dashboard_widgets(resources: List[ApiGatewayResource]) -> List[Any]:
"""
Build dashboard widgets for the resources
"""
# Get widgets with base method (like calling super)
return AWSService.build_dashboard_widgets(ApiGatewayService, resources)
    @staticmethod
def get_resources(session: boto3.session.Session) -> List[ApiGatewayResource]:
"""
Return all AWS ApiGateway resources within scope, based on the tags
"""
try:
# Get things in a neat apigateway resource object
cleaned_resources: List[ApiGatewayResource] = []
# Get paginator for service
paginator = session.client('apigateway').get_paginator(
'get_rest_apis').paginate()
# Collect all resources
for page_resources in paginator:
for rest_api in page_resources['items']:
rest_api_tags = rest_api.get('tags', {})
# Api gateway returns tag as key value dict, convert it to standard format
# e.g. {'STAGE': 'prod', 'cloudwedge:active': 'true'}
converted_tags = TagsApi.convert_dict_to_tags(rest_api_tags)
# If the active monitoring tag is on the instance, include in resource collection
# Stripping key so no whitespace mismatch
if any((tag['Key'].strip() == AWSService.TAG_ACTIVE and tag['Value'] == 'true') for tag in converted_tags):
# This resource has opted in to cloudwedge
# Get values from tags if they exist
owner_from_tag = TagsApi.get_owner_from_tags(converted_tags)
name_from_tag = TagsApi.get_name_from_tags(converted_tags)
rest_api_name = rest_api['name']
# Setup ApiGateway values
service = ApiGatewayService.name
resource_name = name_from_tag or rest_api_name
resource_id = rest_api_name
resource_owner = owner_from_tag
tags = converted_tags
# Create ApiGateway
clean_resource = ApiGatewayResource(
service=service,
name=resource_name,
uniqueId=resource_id,
cloudwatchDimensionId=resource_id,
owner=resource_owner,
tags=tags
)
# Add to collection
cleaned_resources.append(clean_resource)
return cleaned_resources
except Exception as err:
LOGGER.info(
f"Failed to get resources information with error: {err}")
raise err
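# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative; not part of the original module):
# resources tagged for cloudwedge are collected from a boto3 session and then
# turned into dashboard widgets.
# ---------------------------------------------------------------------------
#   session = boto3.session.Session(region_name=REGION)
#   resources = ApiGatewayService.get_resources(session)
#   widgets = ApiGatewayService.build_dashboard_widgets(resources)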
|
the-stack_0_3538 | import sys
import os
import numpy as np
inDir = sys.argv[1]
print(inDir)
ratesInWins = {}
for fileName in os.listdir(inDir):
if fileName.endswith(".txt"):
print(fileName)
with open(inDir + "/" + fileName, "rt") as f:
sys.stderr.write("reading {}/{}\n".format(inDir, fileName))
first = True
for line in f:
if first:
first = False
else:
chrom, winMid, recRate = line.strip().split()[:3]
if chrom not in ratesInWins:
ratesInWins[chrom] = []
winMid = int(winMid)
recRate = float(recRate)
ratesInWins[chrom].append((winMid, recRate))
def getWinLenForChrom(ratesInWinsForChrom):
prevWin = ratesInWinsForChrom[0][0]
winLens = {}
for win, recRates in ratesInWinsForChrom[1:]:
winLen = win-prevWin
if winLen in winLens:
winLens[winLen] += 1
else:
winLens[winLen] = 1
prevWin = win
if len(winLens) != 1:
sys.stderr.write("window lengths not consistent within chrom arms!! ARRGHHHH!\n")
winLens = sorted(winLens.keys(), key=lambda x: winLens[x])
return winLens[-1]
def getWinLens(ratesInWins):
winLens = {}
for chrom in ratesInWins:
winLens[chrom] = getWinLenForChrom(ratesInWins[chrom])
return winLens
winLens = getWinLens(ratesInWins)
allRates = []
for chrom in ratesInWins:
for win, recRate in ratesInWins[chrom]:
allRates.append(recRate)
allRates.sort()
lenCutoff = 1/np.mean(allRates) * 1e6
rateCutoff = allRates[int(len(allRates)*0.05)]
sys.stderr.write("rate cutoff: {}; length cutoff: {}\n".format(rateCutoff, lenCutoff))
for chrom in ratesInWins:
halfWinLen = int(winLens[chrom]/2)
mode = 0
runLen = 0
runStart = 1
for winMid, recRate in ratesInWins[chrom]:
winStart = winMid - halfWinLen
winEnd = winMid + halfWinLen
if mode == 1:
if recRate <= rateCutoff:
mode = 0
runLen = 1
runStart = winStart
else:
pass
elif mode == 0:
if recRate <= rateCutoff:
runLen += 1
else:
if winStart-runStart >= lenCutoff:
print(chrom, runStart, winStart, winStart-runStart, runLen)
mode = 1
if mode == 0:
if winEnd-runStart >= lenCutoff:
print(chrom, runStart, winEnd, winEnd-runStart, runLen)
|
the-stack_0_3539 | import json
import random
from hashlib import md5
import pytz
from django.conf import settings
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models, transaction
from django.db.models import Q
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.utils.translation import get_language, gettext_lazy as _, override
from rest_framework.authtoken.models import Token
from pretalx.common.urls import build_absolute_uri
class UserManager(BaseUserManager):
"""The user manager class."""
def create_user(self, password: str = None, **kwargs):
user = self.model(**kwargs)
user.set_password(password)
user.save()
return user
def create_superuser(self, password: str, **kwargs):
user = self.create_user(password=password, **kwargs)
user.is_staff = True
user.is_administrator = True
user.is_superuser = False
user.save(update_fields=['is_staff', 'is_administrator', 'is_superuser'])
return user
def assign_code(obj, length=6):
# This omits some character pairs completely because they are hard to read even on screens (1/I and O/0)
# and includes only one of two characters for some pairs because they are sometimes hard to distinguish in
# handwriting (2/Z, 4/A, 5/S, 6/G).
while True:
code = get_random_string(length=length, allowed_chars=User.CODE_CHARSET)
if not User.objects.filter(code__iexact=code).exists():
obj.code = code
return code
class User(PermissionsMixin, AbstractBaseUser):
"""The pretalx user model.
Users describe all kinds of persons who interact with pretalx: Organisers, reviewers, submitters, speakers.
:param code: A user's alphanumeric code is autogenerated, may not be
changed, and is the unique identifier of that user.
:param name: A name fit for public display. Will be used in the user
interface and for public display for all speakers in all of their
events.
:param password: The password is stored using Django's PasswordField. Use
the ``set_password`` and ``check_password`` methods to interact with it.
:param nick: The nickname field has been deprecated and is scheduled to be
deleted. Use the email field instead.
:param groups: Django internals, not used in pretalx.
:param user_permissions: Django internals, not used in pretalx.
"""
EMAIL_FIELD = 'email'
USERNAME_FIELD = 'email'
CODE_CHARSET = list('ABCDEFGHJKLMNPQRSTUVWXYZ3789')
objects = UserManager()
code = models.CharField(max_length=16, unique=True, null=True)
nick = models.CharField(max_length=60, null=True, blank=True)
name = models.CharField(
max_length=120,
verbose_name=_('Name'),
help_text=_('Please enter the name you wish to be displayed publicly. This name will be used for all events you are participating in on this server.'),
)
email = models.EmailField(
unique=True,
verbose_name=_('E-Mail'),
help_text=_(
'Your email address will be used for password resets and notification about your event/submissions.'
),
)
is_active = models.BooleanField(default=True, help_text='Inactive users are not allowed to log in.')
is_staff = models.BooleanField(default=False, help_text='A default Django flag. Not in use in pretalx.')
is_administrator = models.BooleanField(default=False, help_text='Should only be ``True`` for people with administrative access to the server pretalx runs on.')
is_superuser = models.BooleanField(default=False, help_text='Never set this flag to ``True``, since it short-circuits all authorization mechanisms.')
locale = models.CharField(
max_length=32,
default=settings.LANGUAGE_CODE,
choices=settings.LANGUAGES,
verbose_name=_('Preferred language'),
)
timezone = models.CharField(
choices=[(tz, tz) for tz in pytz.common_timezones], max_length=30, default='UTC'
)
avatar = models.ImageField(
null=True,
blank=True,
verbose_name=_('Profile picture'),
        help_text=_('If possible, upload an image that is at least 120 pixels wide.'),
)
get_gravatar = models.BooleanField(
default=False,
verbose_name=_('Retrieve profile picture via gravatar'),
help_text=_(
'If you have registered with an email address that has a gravatar account, we can retrieve your profile picture from there.'
),
)
pw_reset_token = models.CharField(null=True, max_length=160, verbose_name='Password reset token')
pw_reset_time = models.DateTimeField(null=True, verbose_name='Password reset time')
def __str__(self) -> str:
"""For public consumption as it is used for Select widgets, e.g. on the feedback form."""
return self.name or str(_('Unnamed user'))
def get_display_name(self) -> str:
"""Returns a user's name or 'Unnamed user'."""
return self.name if self.name else str(_('Unnamed user'))
def save(self, *args, **kwargs):
self.email = self.email.lower().strip()
if not self.code:
assign_code(self)
        return super().save(*args, **kwargs)
def event_profile(self, event):
"""Retrieve (and/or create) the event.
:class:`~pretalx.person.models.profile.SpeakerProfile` for this user.
:type event: :class:`pretalx.event.models.event.Event`
:retval: :class:`pretalx.person.models.profile.EventProfile`
"""
from pretalx.person.models.profile import SpeakerProfile
profile = self.profiles.select_related('event').filter(event=event).first()
if profile:
return profile
profile = SpeakerProfile(event=event, user=self)
if self.pk:
profile.save()
return profile
def log_action(self, action: str, data: dict=None, person=None, orga: bool=False):
"""Create a log entry for this user.
:param action: The log action that took place.
        :param data: Additional data to be saved.
:param person: The person modifying this user. Defaults to this user.
:type person: :class:`~pretalx.person.models.user.User`
:param orga: Was this action initiated by a privileged user?
"""
from pretalx.common.models import ActivityLog
if data:
data = json.dumps(data)
ActivityLog.objects.create(
person=person or self,
content_object=self,
action_type=action,
data=data,
is_orga_action=orga,
)
def logged_actions(self):
"""Returns all log entries that were made about this user."""
from pretalx.common.models import ActivityLog
return ActivityLog.objects.filter(
content_type=ContentType.objects.get_for_model(type(self)),
object_id=self.pk,
)
def own_actions(self):
"""Returns all log entries that were made by this user."""
from pretalx.common.models import ActivityLog
return ActivityLog.objects.filter(person=self)
def deactivate(self):
"""Delete the user by unsetting all of their information."""
from pretalx.submission.models import Answer
self.email = f'deleted_user_{random.randint(0, 999)}@localhost'
while self.__class__.objects.filter(email__iexact=self.email).exists():
            self.email = f'deleted_user_{random.randint(0, 999)}@localhost'
self.name = 'Deleted User'
self.is_active = False
self.is_superuser = False
self.is_administrator = False
self.locale = 'en'
self.timezone = 'UTC'
self.pw_reset_token = None
self.pw_reset_time = None
self.save()
self.profiles.all().update(biography='')
Answer.objects.filter(
person=self, question__contains_personal_data=True
).delete()
for team in self.teams.all():
team.members.remove(self)
@cached_property
def gravatar_parameter(self) -> str:
return md5(self.email.strip().encode()).hexdigest()
@cached_property
def has_avatar(self) -> bool:
return self.get_gravatar or self.has_local_avatar
@cached_property
def has_local_avatar(self) -> bool:
return self.avatar and self.avatar != 'False'
def get_events_with_any_permission(self):
"""Returns a queryset of events for which this user has any type of
permission."""
from pretalx.event.models import Event
if self.is_administrator:
return Event.objects.all()
return Event.objects.filter(
Q(
organiser_id__in=self.teams.filter(all_events=True).values_list(
'organiser', flat=True
)
)
| Q(id__in=self.teams.values_list('limit_events__id', flat=True))
)
def get_events_for_permission(self, **kwargs):
"""Returns a queryset of events for which this user as all of the given
permissions.
Permissions are given as named arguments, e.g.
``get_events_for_permission(is_reviewer=True)``.
"""
from pretalx.event.models import Event
if self.is_administrator:
return Event.objects.all()
orga_teams = self.teams.filter(**kwargs)
absolute = orga_teams.filter(all_events=True).values_list(
'organiser', flat=True
)
relative = orga_teams.filter(all_events=False).values_list(
'limit_events', flat=True
)
return Event.objects.filter(
models.Q(organiser__in=absolute) | models.Q(pk__in=relative)
).distinct()
def get_permissions_for_event(self, event) -> set:
"""Returns a set of all permission a user has for the given event.
:type event: :class:`~pretalx.event.models.event.Event`
"""
if self.is_administrator:
return {
'can_create_events',
'can_change_teams',
'can_change_organiser_settings',
'can_change_event_settings',
'can_change_submissions',
'is_reviewer',
}
teams = event.teams.filter(members__in=[self])
if not teams:
return set()
return set().union(*[team.permission_set for team in teams])
def remaining_override_votes(self, event) -> int:
"""Returns the amount of override votes a user may still give in
reviews in the given event.
:type event: :class:`~pretalx.event.models.event.Event`
"""
allowed = max(
event.teams.filter(members__in=[self], is_reviewer=True).values_list(
'review_override_votes', flat=True
)
or [0]
)
overridden = self.reviews.filter(
submission__event=event, override_vote__isnull=False
).count()
return max(allowed - overridden, 0)
def regenerate_token(self) -> Token:
"""Generates a new API access token, deleting the old one."""
self.log_action(action='pretalx.user.token.reset')
Token.objects.filter(user=self).delete()
return Token.objects.create(user=self)
@transaction.atomic
def reset_password(self, event, user=None):
from pretalx.mail.models import QueuedMail
self.pw_reset_token = get_random_string(32)
self.pw_reset_time = now()
self.save()
context = {
'name': self.name or '',
'url': build_absolute_uri(
'orga:auth.recover', kwargs={'token': self.pw_reset_token}
),
}
mail_text = _(
'''Hi {name},
you have requested a new password for your pretalx account.
To reset your password, click on the following link:
{url}
If this wasn\'t you, you can just ignore this email.
All the best,
the pretalx robot'''
)
with override(get_language()):
mail = QueuedMail.objects.create(
subject=_('Password recovery'),
text=str(mail_text).format(**context),
)
mail.to_users.add(self)
mail.send()
self.log_action(
action='pretalx.user.password.reset', person=user, orga=bool(user)
)
|
the-stack_0_3540 | # -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage, (C) 2016 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
# my-objectname are dummy values, please replace them with original values.
import time
from datetime import datetime
from minio import Minio, CopyConditions
from minio.error import ResponseError
client = Minio('s3.amazonaws.com',
access_key='YOUR-ACCESSKEY',
secret_key='YOUR-SECRETKEY')
# client.trace_on(sys.stderr)
copy_conditions = CopyConditions()
# Set modified condition, copy object modified since 2014 April.
t = (2014, 4, 0, 0, 0, 0, 0, 0, 0)
mod_since = datetime.utcfromtimestamp(time.mktime(t))
copy_conditions.set_modified_since(mod_since)
# Set unmodified condition, copy object unmodified since 2014 April.
# copy_conditions.set_unmodified_since(mod_since)
# Set matching ETag condition, copy object which matches the following ETag.
# copy_conditions.set_match_etag("31624deb84149d2f8ef9c385918b653a")
# Set matching ETag except condition, copy object which does not match the
# following ETag.
# copy_conditions.set_match_etag_except("31624deb84149d2f8ef9c385918b653a")
try:
copy_result = client.copy_object("my-bucket", "my-object",
"/my-sourcebucket/my-sourceobject",
copy_conditions)
print(copy_result)
except ResponseError as err:
print(err)
|
the-stack_0_3541 | """
Space : O(1)
Time : O(log n)
"""
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return an integer
# def isBadVersion(version):
class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
low = 1
high = n
while(low <= high):
mid = (low + high) // 2
if not isBadVersion(mid):
if low != mid:
low = mid
else:
low = mid + 1
elif isBadVersion(mid):
if not isBadVersion(mid-1) and isBadVersion(mid):
return mid
else:
high = mid
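# --- Illustrative local test harness (editor addition; not part of the solution) ---
# On the judge, isBadVersion is provided by the platform; the stub below is an
# assumed stand-in so the binary search above can be exercised locally.
if __name__ == "__main__":
    FIRST_BAD = 4  # hypothetical first bad version
    def isBadVersion(version):
        return version >= FIRST_BAD
    print(Solution().firstBadVersion(10))  # expected output: 4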
|
the-stack_0_3542 | """PIL/Tkinter based simulator for InkyPHAT and InkyWHAT."""
import numpy
from . import inky
from . import inky_uc8159
class InkyMock(inky.Inky):
"""Base simulator class for Inky."""
def __init__(self, colour, h_flip=False, v_flip=False):
"""Initialise an Inky pHAT Display.
:param colour: one of red, black or yellow, default: black
"""
global tkinter, ImageTk, Image
try:
import tkinter
except ImportError:
raise ImportError('Simulation requires tkinter')
try:
from PIL import ImageTk, Image
except ImportError:
raise ImportError('Simulation requires PIL ImageTk and Image')
resolution = (self.WIDTH, self.HEIGHT)
if resolution not in inky._RESOLUTION.keys():
raise ValueError('Resolution {}x{} not supported!'.format(*resolution))
self.resolution = resolution
self.width, self.height = resolution
self.cols, self.rows, self.rotation = inky._RESOLUTION[resolution]
self.buf = numpy.zeros((self.height, self.width), dtype=numpy.uint8)
if colour not in ('red', 'black', 'yellow', 'multi'):
raise ValueError('Colour {} is not supported!'.format(colour))
self.colour = colour
self.h_flip = h_flip
self.v_flip = v_flip
impression_palette = [57, 48, 57, # black
255, 255, 255, # white
58, 91, 70, # green
61, 59, 94, # blue
156, 72, 75, # red
208, 190, 71, # yellow
177, 106, 73, # orange
255, 255, 255] # clear
bw_inky_palette = [255, 255, 255, # 0 = white
0, 0, 0] # 1 = black
red_inky_palette = [255, 255, 255, # 0 = white
0, 0, 0, # 1 = black
255, 0, 0] # index 2 is red
ylw_inky_palette = [255, 255, 255, # 0 = white
0, 0, 0, # 1 = black
223, 204, 16] # index 2 is yellow
# yellow color value: screen capture from
# https://www.thoughtsmakethings.com/Pimoroni-Inky-pHAT
self.c_palette = {'black': bw_inky_palette,
'red': red_inky_palette,
'yellow': ylw_inky_palette,
'multi': impression_palette}
self._tk_done = False
self.tk_root = tkinter.Tk()
self.tk_root.title('Inky Preview')
self.tk_root.geometry('{}x{}'.format(self.WIDTH, self.HEIGHT))
self.tk_root.aspect(self.WIDTH, self.HEIGHT, self.WIDTH, self.HEIGHT)
self.tk_root.protocol('WM_DELETE_WINDOW', self._close_window)
self.cv = None
self.cvh = self.HEIGHT
self.cvw = self.WIDTH
def wait_for_window_close(self):
"""Wait until the Tkinter window has closed."""
while not self._tk_done:
self.tk_root.update_idletasks()
self.tk_root.update()
def _close_window(self):
self._tk_done = True
self.tk_root.destroy()
def resize(self, event):
"""Resize background image to window size."""
# adapted from:
# https://stackoverflow.com/questions/24061099/tkinter-resize-background-image-to-window-size
# https://stackoverflow.com/questions/19838972/how-to-update-an-image-on-a-canvas
self.cvw = event.width
self.cvh = event.height
self.cv.config(width=self.cvw, height=self.cvh)
image = self.disp_img_copy.resize([self.cvw, self.cvh])
self.photo = ImageTk.PhotoImage(image)
self.cv.itemconfig(self.cvhandle, image=self.photo, anchor='nw')
self.tk_root.update()
def _send_command(self, command, data=None):
pass
def _simulate(self, region):
pass
def _display(self, region):
im = Image.fromarray(region, 'P')
im.putpalette(self.c_palette[self.colour])
self.disp_img_copy = im.copy() # can be changed due to window resizing, so copy
image = self.disp_img_copy.resize([self.cvw, self.cvh])
self.photo = ImageTk.PhotoImage(image)
if self.cv is None:
self.cv = tkinter.Canvas(self.tk_root, width=self.WIDTH, height=self.HEIGHT)
self.cv.pack(side='top', fill='both', expand='yes')
self.cvhandle = self.cv.create_image(0, 0, image=self.photo, anchor='nw')
self.cv.bind('<Configure>', self.resize)
self.tk_root.update()
def show(self, busy_wait=True):
"""Show buffer on display.
:param busy_wait: Ignored. Updates are simulated and instant.
"""
print('>> Simulating {} {}x{}...'.format(self.colour, self.WIDTH, self.HEIGHT))
region = self.buf
if self.v_flip:
region = numpy.fliplr(region)
if self.h_flip:
region = numpy.flipud(region)
if self.rotation:
region = numpy.rot90(region, self.rotation // 90)
self._simulate(region)
class InkyMockPHAT(InkyMock):
"""Inky PHAT (212x104) e-Ink Display Simulator."""
WIDTH = 212
HEIGHT = 104
WHITE = 0
BLACK = 1
RED = 2
YELLOW = 2
def _simulate(self, region):
region = numpy.rot90(region, self.rotation // 90)
region = numpy.flipud(region) # spec: phat rotated -90
region = numpy.fliplr(region) # spec: phat rotated -90
self._display(region)
class InkyMockPHATSSD1608(InkyMock):
"""Inky PHAT SSD1608 (250x122) e-Ink Display Simulator."""
WIDTH = 250
HEIGHT = 122
WHITE = 0
BLACK = 1
RED = 2
YELLOW = 2
def _simulate(self, region):
region = numpy.rot90(region, self.rotation // 90)
region = numpy.flipud(region) # spec: phat rotated -90
region = numpy.fliplr(region) # spec: phat rotated -90
self._display(region)
class InkyMockWHAT(InkyMock):
"""Inky wHAT e-Ink Display Simulator."""
WIDTH = 400
HEIGHT = 300
WHITE = 0
BLACK = 1
RED = 2
YELLOW = 2
def _simulate(self, region):
region = numpy.rot90(region, self.rotation // 90)
region = region.reshape(300, 400) # for display
self._display(region)
class InkyMockImpression(InkyMock):
"""Inky Impression e-Ink Display Simulator."""
BLACK = 0
WHITE = 1
GREEN = 2
BLUE = 3
RED = 4
YELLOW = 5
ORANGE = 6
CLEAN = 7
WIDTH = 600
HEIGHT = 448
def __init__(self):
"""Initialize a new mock Inky Impression."""
InkyMock.__init__(self, 'multi')
def _simulate(self, region):
self._display(region)
def set_pixel(self, x, y, v):
"""Set a single pixel on the display."""
self.buf[y][x] = v & 0xf
def set_image(self, image, saturation=0.5):
"""Copy an image to the display.
:param image: PIL image to copy, must be 600x448
:param saturation: Saturation for quantization palette - higher value results in a more saturated image
"""
if not image.size == (self.width, self.height):
raise ValueError("Image must be ({}x{}) pixels!".format(self.width, self.height))
if not image.mode == "P":
if Image is None:
raise RuntimeError("PIL is required for converting images: sudo apt install python-pil python3-pil")
palette = inky_uc8159.Inky._palette_blend(self, saturation)
# Image size doesn't matter since it's just the palette we're using
palette_image = Image.new("P", (1, 1))
# Set our 7 colour palette (+ clear) and zero out the other 247 colours
palette_image.putpalette(palette + [0, 0, 0] * 248)
# Force source image data to be loaded for `.im` to work
image.load()
image = image.im.convert("P", True, palette_image.im)
self.buf = numpy.array(image, dtype=numpy.uint8).reshape((self.rows, self.cols))
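# --- Illustrative usage sketch (editor addition; not part of the library) ---
# A small local demo of the simulator classes above. It assumes the base Inky
# class provides set_pixel(x, y, v), as in the real inky library, and that a
# Tk display is available on the machine running it.
if __name__ == "__main__":
    display = InkyMockPHAT("red")
    for i in range(display.HEIGHT):
        display.set_pixel(i, i, display.RED)  # draw a diagonal line
    display.show()
    display.wait_for_window_close()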
|
the-stack_0_3543 | # Handler for ulno-iot devkit1
import config
if config.display:
import ulno_iot_display as dp
if config.devel:
import ulno_iot_devel as dv
if config.ht:
import ulno_iot_ht as ht
import gc
gc.collect()
import wifi
import machine
from machine import Pin
import time
import ubinascii
from umqtt.simple import MQTTClient
gc.collect()
# make unique
config.mqtt_client_id += b"_" + ubinascii.hexlify(machine.unique_id())
blue_state_topic = config.mqtt_topic + b"/blue"
blue_command_topic = blue_state_topic + b"/set"
if config.devel:
red_state_topic = config.mqtt_topic + b"/red"
red_command_topic = red_state_topic + b"/set"
yellow_state_topic = config.mqtt_topic + b"/yellow"
yellow_command_topic = yellow_state_topic + b"/set"
left_state_topic = config.mqtt_topic + b"/left"
right_state_topic = config.mqtt_topic + b"/right"
bottom_state_topic = config.mqtt_topic + b"/bottom"
if config.ht:
temperature_state_topic = config.mqtt_topic + b"/temperature"
humidity_state_topic = config.mqtt_topic + b"/humidity"
if config.display:
text_command_topic = config.mqtt_topic + b"/text"
OVERFLOW = 1000
onoff = [b'off', b'on']
def publish_status():
global client
try:
client.publish(blue_state_topic, onoff[dv.blue() ^ 1])
if config.devel:
client.publish(red_state_topic, onoff[dv.red()])
client.publish(yellow_state_topic, onoff[dv.yellow()])
client.publish(left_state_topic, onoff[dv.left_button() ^ 1])
client.publish(right_state_topic, onoff[dv.right_button() ^ 1])
client.publish(bottom_state_topic, onoff[dv.lower_button() ^ 1])
if config.ht:
client.publish(temperature_state_topic, str(ht.temperature()).encode())
client.publish(humidity_state_topic, str(ht.humidity()).encode())
print('Published status.')
except:
print('Trouble publishing.')
init_client()
def callback(topic, msg):
if config.devel and topic == red_command_topic:
print("Received red in callback:", msg)
msg = msg.decode().lower()
if msg.startswith('on'):
dv.red.high()
elif msg.startswith('off'):
dv.red.low()
elif config.devel and topic == yellow_command_topic:
print("Received yellow in callback:", msg)
msg = msg.decode().lower()
if msg.startswith('on'):
dv.yellow.high()
elif msg.startswith('off'):
dv.yellow.low()
elif config.devel and topic == blue_command_topic:
msg = msg.decode().lower()
if msg.startswith('on'):
dv.blue.low()
elif msg.startswith('off'):
dv.blue.high()
elif config.display and topic == text_command_topic:
print("Received text in callback:", msg)
try:
msg = msg.decode()
if msg == "&&clear":
dp.clear()
else:
dp.println(msg)
except:
pass
def init_client():
global client
print("Trying to connect to mqtt broker.")
wifi.connect()
try:
client = MQTTClient(config.mqtt_client_id, config.mqtt_broker, user=config.mqtt_user,
password=config.mqtt_password)
client.set_callback(callback)
client.connect()
print("Connected to {}".format(config.mqtt_broker))
t = config.mqtt_topic + b"/#"
client.subscribe(t)
print("Subscribed to %s topic" % t)
except:
print("Trouble to init mqtt.")
def receive_sub():
global client
try:
client.check_msg() # non blocking
except:
print("Trouble to receive from mqtt.")
def run():
init_client()
counter = 0
last_left = dv.left_button()
last_right = dv.right_button()
last_lower = dv.lower_button()
while True:
if counter % 10 == 0: # every 10th of second
receive_sub()
if last_left != dv.left_button() \
or last_right != dv.right_button() \
or last_lower != dv.lower_button():
last_left = dv.left_button()
last_right = dv.right_button()
last_lower = dv.lower_button()
publish_status()
if counter % 500 == 0: # every 5s
publish_status()
time.sleep(0.01)
counter += 1
if counter >= OVERFLOW:
counter = 0
|
the-stack_0_3544 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds useful functions for working with dictionaries representing policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import numpy as np
from typing import Dict, List, Tuple, Text
def get_best_response_actions_as_string(
best_response_actions: Dict[bytes, int]) -> Text:
"""Turns a dict<bytes, int> into a bytestring compatible with C++.
i.e. the bytestring can be copy-pasted as the brace initialization for a
{std::unordered_,std::,absl::flat_hash_}map<std::string, int>.
Args:
best_response_actions: A dict mapping bytes to ints.
Returns:
A bytestring that can be copy-pasted to brace-initialize a C++
std::map<std::string, T>.
"""
best_response_keys = sorted(best_response_actions.keys())
best_response_strings = [
"%s: %i" % (k, best_response_actions[k]) for k in best_response_keys
]
return "{%s}" % (", ".join(best_response_strings))
def tabular_policy_to_cpp_map(
policy: Dict[bytes, List[Tuple[int, np.float64]]]) -> Text:
"""Turns a policy into a C++ compatible bytestring for brace-initializing.
Args:
policy: A dict representing a tabular policy. The keys are infostate
bytestrings.
Returns:
A bytestring that can be copy-pasted to brace-initialize a C++
std::map<std::string, open_spiel::ActionsAndProbs>.
"""
cpp_entries = []
policy_keys = sorted(policy.keys())
for key in policy_keys:
tuple_strs = ["{%i, %s}" % (p[0], p[1].astype(str)) for p in policy[key]]
value = "{" + ", ".join(tuple_strs) + "}"
cpp_entries.append('{"%s", %s}' % (key, value))
return "{%s}" % (",\n".join(cpp_entries))
|
the-stack_0_3545 | #database.py creates a .db file for performing umls searches.
import sqlite3
import os
import sys
import atexit
features_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if features_dir not in sys.path:
sys.path.append(features_dir)
# find where umls tables are located
from read_config import enabled_modules
enabled = enabled_modules()
umls_tables = enabled['UMLS']
# set to True when create_db() is successful
success = False
db_path = None
conn = None
MRSTY_TABLE_FILE = None
MRCON_TABLE_FILE = None
MRREL_TABLE_FILE = None
LRABR_TABLE_FILE = None
# this ensures files are closed properly and umls.db is removed if not successful
@atexit.register
def umls_db_cleanup():
global success
global conn
global db_path
global MRSTY_TABLE_FILE
global MRCON_TABLE_FILE
global MRREL_TABLE_FILE
global LRABR_TABLE_FILE
if conn is not None:
conn.close()
if MRSTY_TABLE_FILE is not None:
MRSTY_TABLE_FILE.close()
if MRCON_TABLE_FILE is not None:
MRCON_TABLE_FILE.close()
if MRREL_TABLE_FILE is not None:
MRREL_TABLE_FILE.close()
if LRABR_TABLE_FILE is not None:
LRABR_TABLE_FILE.close()
if success is False:
# remove umls.db, it is junk now
if db_path is not None:
os.remove(db_path)
        print('\n\tError: umls.db was not created successfully.\n', file=sys.stderr)
def create_db():
global success
global conn
global db_path
global MRSTY_TABLE_FILE
global MRCON_TABLE_FILE
global MRREL_TABLE_FILE
global LRABR_TABLE_FILE
print ("\ncreating umls.db")
#connect to the .db file we are creating.
db_path = os.path.join(umls_tables, 'umls.db')
conn = sqlite3.connect( db_path )
conn.text_factory = str
print ("opening files")
#load data in files.
try:
mrsty_path = os.path.join(umls_tables, 'MRSTY.RRF')
MRSTY_TABLE_FILE = open( mrsty_path, "r" )
except IOError:
print ("\nNo file to use for creating MRSTY.RRF table\n")
sys.exit()
try:
mrcon_path = os.path.join(umls_tables, 'MRCONSO.RRF')
MRCON_TABLE_FILE = open( mrcon_path , "r" )
except IOError:
print ("\nNo file to use for creating MRCONSO.RRF table\n")
sys.exit()
try:
mrrel_path = os.path.join(umls_tables, 'MRREL.RRF')
MRREL_TABLE_FILE = open( mrrel_path , "r" )
except IOError:
print ("\nNo file to use for creating MRREL.RRF table\n")
sys.exit()
try:
lrabr_path = os.path.join(umls_tables, 'LRABR')
LRABR_TABLE_FILE = open( lrabr_path , "r" )
except IOError:
print ("\nNo file to use for creating LRABR table\n")
sys.exit()
print ("creating tables")
c = conn.cursor()
#create tables.
c.execute( "CREATE TABLE MRSTY( CUI, TUI, STN, STY, ATUI, CVF ) ;" )
c.execute( "CREATE TABLE MRCON( CUI, LAT, TS, LUI, STT, SUI, ISPREF, AUI, SAUI, SCUI, SDUI, SAB, TTY, CODE, STR, SRL, SUPPRESS, CVF ) ;" )
c.execute( "CREATE TABLE MRREL( CUI1, AUI1, STYPE1, REL, CUI2, AUI2, STYPE2, RELA, RUI, SRUI, SAB, SL, RG, DIR, SUPPRESS, CVF );")
c.execute( "CREATE TABLE LRABR( EUI1, ABR, TYPE, EUI2, STR);")
print ("inserting data into MRSTY table")
for line in MRSTY_TABLE_FILE:
line = line.strip('\n')
        assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 6
c.execute( "INSERT INTO MRSTY( CUI, TUI, STN, STY, ATUI, CVF ) values( ?, ?, ?, ?, ?, ?)" , tuple(line))
print ("inserting data into MRCON table")
for line in MRCON_TABLE_FILE:
line = line.strip('\n')
        assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 18
c.execute( "INSERT INTO MRCON( CUI, LAT, TS, LUI, STT, SUI, ISPREF, AUI, SAUI, SCUI, SDUI, SAB, TTY, CODE, STR, SRL, SUPPRESS, CVF ) values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);", tuple(line))
print ("inserting data into MRREL table")
for line in MRREL_TABLE_FILE:
line = line.strip('\n')
        assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 16
c.execute( "INSERT INTO MRREL( CUI1, AUI1, STYPE1, REL, CUI2, AUI2, STYPE2, RELA, RUI, SRUI, SAB, SL, RG, DIR, SUPPRESS, CVF ) values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )" , tuple(line))
print ( "inserting into LRABR table")
for line in LRABR_TABLE_FILE:
line = line.strip('\n')
        assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])
line = line.split('|')
line.pop()
assert len(line) == 5
c.execute( "INSERT INTO LRABR( EUI1, ABR, TYPE, EUI2, STR) values( ?, ?, ?, ?,?)" , tuple(line) )
print ( "creating indices")
#create indices for faster queries
c.execute( "CREATE INDEX mrsty_cui_map ON MRSTY(CUI)")
c.execute( "CREATE INDEX mrcon_str_map ON MRCON(STR)")
c.execute( "CREATE INDEX mrcon_cui_map ON MRCON(CUI)")
c.execute( "CREATE INDEX mrrel_cui2_map ON MRREL( CUI2 )" )
c.execute( "CREATE INDEX mrrel_cui1_map on MRREL( CUI1 ) " )
c.execute( "CREATE INDEX mrrel_rel_map on MRREL( REL )" )
c.execute( "CREATE INDEX lrabr_abr_map on LRABR(ABR)")
c.execute( "CREATE INDEX lrabr_str_map on LRABR(STR)")
#save changes to .db
conn.commit()
success = True
print ( "\nsqlite database created")
if __name__ == "__main__":
create_db()
|
the-stack_0_3547 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from jinja2 import Template
from flexget.plugins.parsers.parser_guessit import ParserGuessit
from flexget.plugins.parsers.parser_internal import ParserInternal
from flexget.utils.qualities import Quality
class TestQualityModule(object):
def test_get(self):
assert not Quality(), 'unknown quality is not false'
assert Quality('foobar') == Quality(), 'unknown not returned'
def test_common_name(self):
for test_val in ('720p', '1280x720'):
got_val = Quality(test_val).name
assert got_val == '720p', got_val
class TestQualityParser(object):
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'], autouse=True)
def parser(self, request):
if request.param == 'internal':
return ParserInternal
if request.param == 'guessit':
return ParserGuessit
@pytest.mark.parametrize("test_quality", [
('Test.File 1080p.web.vp9', '1080p webdl vp9', False),
('Test.File 1080p.web', '1080p webdl'),
('Test.File.2160p.web', '2160p webdl'),
('Test.File.1080.web-random', '1080p webdl'),
('Test.File.1080.webrandom', '1080p'),
('Test.File 1080p.web-dl', '1080p webdl'),
('Test.File.web-dl.1080p', '1080p webdl'),
('Test.File.WebHD.720p', '720p webdl'),
('Test.File.720p.bluray', '720p bluray'),
('Test.File.720hd.bluray', '720p bluray'),
('Test.File.1080p.bluray', '1080p bluray'),
('Test.File.2160p.bluray', '2160p bluray'),
('Test.File.1080p.cam', '1080p cam'),
('A Movie 2011 TS 576P XviD-DTRG', '576p ts xvid'),
('Test.File.720p.bluray.r5', '720p r5'),
('Test.File.1080p.bluray.rc', '1080p r5'),
# 10bit
('Test.File.480p.10bit', '480p 10bit'),
('Test.File.720p.10bit', '720p 10bit'),
('Test.File.720p.bluray.10bit', '720p bluray 10bit'),
('Test.File.1080p.10bit', '1080p 10bit'),
('Test.File.1080p.bluray.10bit', '1080p bluray 10bit'),
('Test.File.720p.web', '720p webdl'),
('Test.File.720p.webdl', '720p webdl'),
('Test.File.1280x720_web dl', '720p webdl'),
('Test.File.720p.h264.web.dl', '720p webdl h264'),
('Test.File.1080p.webhd.x264', '1080p webdl h264'),
('Test.File.480.hdtv.x265', '480p hdtv h265'),
('Test.File.web', 'webdl'),
('Test.File.web-dl', 'webdl'),
('Test.File.720P', '720p'),
('Test.File.1920x1080', '1080p'),
('Test.File.3840x2160', '2160p'),
('Test.File.1080i', '1080i'),
('Test File blurayrip', 'bluray'),
('Test.File.br-rip', 'bluray'),
('Test.File.720px', '720p'),
('Test.File.720p50', '720p'),
('Test.File.720p60', '720p'),
('Test.File.dvd.rip', 'dvdrip'),
('Test.File.dvd.rip.r5', 'r5'),
('Test.File.[576p][00112233].mkv', '576p'),
('Test.TS.FooBar', 'ts'),
('Test.File.360p.avi', '360p'),
('Test.File.[360p].mkv', '360p'),
('Test.File.368.avi', '368p'),
('Test.File.720p.hdtv.avi', '720p hdtv'),
('Test.File.1080p.hdtv.avi', '1080p hdtv'),
('Test.File.720p.preair.avi', '720p preair'),
        # ('Test.File.ts.dvdrip.avi', 'ts'), This should not exist: having Telesync and DVDRip together makes no sense.
('Test.File.HDTS.blah', 'ts'),
        # ('Test.File.HDCAM.bluray.lie', 'cam'), This should not exist: having Cam and Bluray together makes no sense.
# Test qualities as part of words. #1593
('Tsar.File.720p', '720p'),
('Camera.1080p', '1080p'),
# Some audio formats
('Test.File.DTSHDMA', 'dtshd'),
('Test.File.DTSHD.MA', 'dtshd'),
('Test.File.DTS.HDMA', 'dtshd'),
('Test.File.dts.hd.ma', 'dtshd'),
('Test.File.DTS.HD', 'dtshd'),
('Test.File.DTSHD', 'dtshd'),
('Test.File.DTS', 'dts'),
('Test.File.truehd', 'truehd'),
('Test.File.DTSHDMA', 'dtshd'),
('Test.File.DD2.0', 'dd5.1'),
('Test.File.AC35.1', 'ac3')
])
def test_quality_failures(self, parser, test_quality):
# Kind of a hack to get around the awful limitations of Guessit without creating extra tests
guessit = test_quality[2] if len(test_quality) > 2 else False
if not guessit and parser.__name__ == 'ParserGuessit':
return
quality = parser().parse_movie(test_quality[0]).quality
assert str(quality) == test_quality[1], ('`%s` quality should be `%s` not `%s`' % (
test_quality[0], test_quality[1], quality
))
class TestQualityInternalParser(object):
@pytest.mark.parametrize("test_quality", [
('Test.File.DD+5.1', 'dd+5.1'),
('Test.File.DDP5.1', 'dd+5.1'),
('Test.File.DDP7.1', 'dd+5.1'),
('Test.File.DD5.1', 'dd5.1'),
('Test.File.DD4.0', 'dd5.1'),
('Test.File.DD2.1', 'dd5.1'),
('Test.File.FLAC1.0', 'flac'),
])
def test_quality_failures(self, test_quality):
quality = ParserInternal().parse_movie(test_quality[0]).quality
assert str(quality) == test_quality[1], ('`%s` quality should be `%s` not `%s`' % (
test_quality[0], test_quality[1], quality
))
class TestFilterQuality(object):
_config = """
templates:
global:
parsing:
series: {{parser}}
movie: {{parser}}
mock:
- {title: 'Smoke.1280x720'}
- {title: 'Smoke.HDTV'}
- {title: 'Smoke.cam'}
- {title: 'Smoke.HR'}
accept_all: yes
tasks:
qual:
quality:
- hdtv
- 720p
min:
quality: HR+
max:
quality: "<=cam <HR"
min_max:
quality: HR-720i
"""
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'])
def config(self, request):
"""Override and parametrize default config fixture."""
return Template(self._config).render({'parser': request.param})
def test_quality(self, execute_task):
task = execute_task('qual')
entry = task.find_entry('rejected', title='Smoke.cam')
assert entry, 'Smoke.cam should have been rejected'
entry = task.find_entry(title='Smoke.1280x720')
assert entry, 'entry not found?'
assert entry in task.accepted, '720p should be accepted'
assert len(task.rejected) == 2, 'wrong number of entries rejected'
assert len(task.accepted) == 2, 'wrong number of entries accepted'
def test_min(self, execute_task):
task = execute_task('min')
entry = task.find_entry('rejected', title='Smoke.HDTV')
assert entry, 'Smoke.HDTV should have been rejected'
entry = task.find_entry(title='Smoke.1280x720')
assert entry, 'entry not found?'
assert entry in task.accepted, '720p should be accepted'
assert len(task.rejected) == 2, 'wrong number of entries rejected'
assert len(task.accepted) == 2, 'wrong number of entries accepted'
def test_max(self, execute_task):
task = execute_task('max')
entry = task.find_entry('rejected', title='Smoke.1280x720')
assert entry, 'Smoke.1280x720 should have been rejected'
entry = task.find_entry(title='Smoke.cam')
assert entry, 'entry not found?'
assert entry in task.accepted, 'cam should be accepted'
assert len(task.rejected) == 3, 'wrong number of entries rejected'
assert len(task.accepted) == 1, 'wrong number of entries accepted'
def test_min_max(self, execute_task):
task = execute_task('min_max')
entry = task.find_entry('rejected', title='Smoke.1280x720')
assert entry, 'Smoke.1280x720 should have been rejected'
entry = task.find_entry(title='Smoke.HR')
assert entry, 'entry not found?'
assert entry in task.accepted, 'HR should be accepted'
assert len(task.rejected) == 3, 'wrong number of entries rejected'
assert len(task.accepted) == 1, 'wrong number of entries accepted'
class TestQualityAudio(object):
config = """
tasks:
test_dd_audio_channels:
quality: "dd+5.1"
mock:
- {title: 'My Show S01E05 720p HDTV DD+7.1'}
- {title: 'My Show S01E05 720p HDTV DD+5.0'}
test_dd_audio_min:
quality: ">dd5.1"
mock:
- {title: 'My Show S01E05 720p HDTV DD5.1'}
- {title: 'My Show S01E05 720p HDTV DD+2.0'}
test_dd_audio_max:
quality: "<=dd5.1"
mock:
- {title: 'My Show S01E05 720p HDTV DD5.1'}
- {title: 'My Show S01E05 720p HDTV DD+5.1'}
- {title: 'My Show S01E05 720p HDTV DD+7.1'}
"""
def test_dd_audio_channels(self, execute_task):
task = execute_task('test_dd_audio_channels')
entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD+7.1')
assert entry, 'Entry "My Show S01E05 720p HDTV DD+7.1" should not have been rejected'
assert entry['quality'].audio == 'dd+5.1', 'audio "dd+7.1" should have been parsed as dd+5.1'
entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD+5.0')
assert entry['quality'].audio == 'dd+5.1', 'audio "dd+5.0" should have been parsed as dd+5.1'
def test_dd_audio_min(self, execute_task):
task = execute_task('test_dd_audio_min')
assert len(task.rejected) == 1, 'should have rejected one'
entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD+2.0')
assert entry, 'Entry "My Show S01E05 720p HDTV DD+2.0" should not have been rejected'
assert entry['quality'].audio == 'dd+5.1', 'audio should have been parsed as dd+5.1'
def test_dd_audio_max(self, execute_task):
task = execute_task('test_dd_audio_max')
assert len(task.rejected) == 2, 'should have rejected two'
entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD5.1')
assert entry, 'Entry "My Show S01E05 720p HDTV DD5.1" should not have been rejected'
assert entry['quality'].audio == 'dd5.1', 'audio should have been parsed as dd5.1'
|
the-stack_0_3548 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import torchtext.data as data
from ..common.torchtext_test_case import TorchtextTestCase
class TestDataset(TorchtextTestCase):
def test_tabular_simple_data(self):
for data_format in ["csv", "tsv", "json"]:
self.write_test_ppid_dataset(data_format=data_format)
if data_format == "json":
question_field = data.Field(sequential=True)
label_field = data.Field(sequential=False)
fields = {"question1": ("q1", question_field),
"question2": ("q2", question_field),
"label": ("label", label_field)}
else:
question_field = data.Field(sequential=True)
label_field = data.Field(sequential=False)
fields = [("id", None), ("q1", question_field),
("q2", question_field), ("label", label_field)]
dataset = data.TabularDataset(
path=self.test_ppid_dataset_path, format=data_format, fields=fields)
assert len(dataset) == 3
expected_examples = [
(["When", "do", "you", "use", "シ", "instead", "of", "し?"],
["When", "do", "you", "use", "\"&\"",
"instead", "of", "\"and\"?"], "0"),
(["Where", "was", "Lincoln", "born?"],
["Which", "location", "was", "Abraham", "Lincoln", "born?"], "1"),
(["What", "is", "2+2"], ["2+2=?"], "1")]
# Ensure examples have correct contents / test __getitem__
for i in range(len(dataset)):
self.assertEqual(dataset[i].q1, expected_examples[i][0])
self.assertEqual(dataset[i].q2, expected_examples[i][1])
self.assertEqual(dataset[i].label, expected_examples[i][2])
# Test __getattr__
for i, (q1, q2, label) in enumerate(zip(dataset.q1, dataset.q2,
dataset.label)):
self.assertEqual(q1, expected_examples[i][0])
self.assertEqual(q2, expected_examples[i][1])
self.assertEqual(label, expected_examples[i][2])
# Test __iter__
for i, example in enumerate(dataset):
self.assertEqual(example.q1, expected_examples[i][0])
self.assertEqual(example.q2, expected_examples[i][1])
self.assertEqual(example.label, expected_examples[i][2])
def test_json_dataset_one_key_multiple_fields(self):
self.write_test_ppid_dataset(data_format="json")
question_field = data.Field(sequential=True)
spacy_tok_question_field = data.Field(sequential=True, tokenize="spacy")
label_field = data.Field(sequential=False)
fields = {"question1": [("q1", question_field),
("q1_spacy", spacy_tok_question_field)],
"question2": [("q2", question_field),
("q2_spacy", spacy_tok_question_field)],
"label": ("label", label_field)}
dataset = data.TabularDataset(
path=self.test_ppid_dataset_path, format="json", fields=fields)
expected_examples = [
(["When", "do", "you", "use", "シ", "instead", "of", "し?"],
["When", "do", "you", "use", "シ", "instead", "of", "し", "?"],
["When", "do", "you", "use", "\"&\"",
"instead", "of", "\"and\"?"],
["When", "do", "you", "use", "\"", "&", "\"",
"instead", "of", "\"", "and", "\"", "?"], "0"),
(["Where", "was", "Lincoln", "born?"],
["Where", "was", "Lincoln", "born", "?"],
["Which", "location", "was", "Abraham", "Lincoln", "born?"],
["Which", "location", "was", "Abraham", "Lincoln", "born", "?"],
"1"),
(["What", "is", "2+2"], ["What", "is", "2", "+", "2"],
["2+2=?"], ["2", "+", "2=", "?"], "1")]
for i, example in enumerate(dataset):
self.assertEqual(example.q1, expected_examples[i][0])
self.assertEqual(example.q1_spacy, expected_examples[i][1])
self.assertEqual(example.q2, expected_examples[i][2])
self.assertEqual(example.q2_spacy, expected_examples[i][3])
self.assertEqual(example.label, expected_examples[i][4])
def test_errors(self):
# Ensure that trying to retrieve a key not in JSON data errors
self.write_test_ppid_dataset(data_format="json")
question_field = data.Field(sequential=True)
label_field = data.Field(sequential=False)
fields = {"qeustion1": ("q1", question_field),
"question2": ("q2", question_field),
"label": ("label", label_field)}
with self.assertRaises(ValueError):
data.TabularDataset(
path=self.test_ppid_dataset_path, format="json", fields=fields)
def test_input_with_newlines_in_text(self):
# Smoke test for ensuring that TabularDataset works with files with newlines
example_with_newlines = [("\"hello \n world\"", "1"),
("\"there is a \n newline\"", "0"),
("\"there is no newline\"", "1")]
fields = [("text", data.Field(lower=True)),
("label", data.Field(sequential=False))]
for delim in [",", "\t"]:
with open(self.test_newline_dataset_path, "wt") as f:
for line in example_with_newlines:
f.write("{}\n".format(delim.join(line)))
format_ = "csv" if delim == "," else "tsv"
dataset = data.TabularDataset(
path=self.test_newline_dataset_path, format=format_, fields=fields)
# if the newline is not parsed correctly, this should raise an error
for example in dataset:
self.assert_(hasattr(example, "text"))
self.assert_(hasattr(example, "label"))
def test_csv_file_with_header(self):
example_with_header = [("text", "label"),
("HELLO WORLD", "0"),
("goodbye world", "1")]
TEXT = data.Field(lower=True, tokenize=lambda x: x.split())
fields = {
"label": ("label", data.Field(use_vocab=False,
sequential=False)),
"text": ("text", TEXT)
}
for format_, delim in zip(["csv", "tsv"], [",", "\t"]):
with open(self.test_has_header_dataset_path, "wt") as f:
for line in example_with_header:
f.write("{}\n".format(delim.join(line)))
# check that an error is raised here if a non-existent field is specified
with self.assertRaises(ValueError):
data.TabularDataset(
path=self.test_has_header_dataset_path, format=format_,
fields={"non_existent": ("label", data.Field())})
dataset = data.TabularDataset(
path=self.test_has_header_dataset_path, format=format_,
skip_header=False, fields=fields)
TEXT.build_vocab(dataset)
for i, example in enumerate(dataset):
self.assertEqual(example.text,
example_with_header[i + 1][0].lower().split())
self.assertEqual(example.label, example_with_header[i + 1][1])
# check that the vocabulary is built correctly (#225)
expected_freqs = {"hello": 1, "world": 2, "goodbye": 1, "text": 0}
for k, v in expected_freqs.items():
self.assertEqual(TEXT.vocab.freqs[k], v)
data_iter = data.Iterator(dataset, device=-1, batch_size=1,
sort_within_batch=False, repeat=False)
next(data_iter.__iter__())
def test_csv_file_no_header_one_col_multiple_fields(self):
self.write_test_ppid_dataset(data_format="csv")
question_field = data.Field(sequential=True)
spacy_tok_question_field = data.Field(sequential=True, tokenize="spacy")
label_field = data.Field(sequential=False)
# Field name/value as nested tuples
fields = [("ids", None),
(("q1", "q1_spacy"), (question_field, spacy_tok_question_field)),
(("q2", "q2_spacy"), (question_field, spacy_tok_question_field)),
("label", label_field)]
dataset = data.TabularDataset(
path=self.test_ppid_dataset_path, format="csv", fields=fields)
expected_examples = [
(["When", "do", "you", "use", "シ", "instead", "of", "し?"],
["When", "do", "you", "use", "シ", "instead", "of", "し", "?"],
["When", "do", "you", "use", "\"&\"",
"instead", "of", "\"and\"?"],
["When", "do", "you", "use", "\"", "&", "\"",
"instead", "of", "\"", "and", "\"", "?"], "0"),
(["Where", "was", "Lincoln", "born?"],
["Where", "was", "Lincoln", "born", "?"],
["Which", "location", "was", "Abraham", "Lincoln", "born?"],
["Which", "location", "was", "Abraham", "Lincoln", "born", "?"],
"1"),
(["What", "is", "2+2"], ["What", "is", "2", "+", "2"],
["2+2=?"], ["2", "+", "2=", "?"], "1")]
for i, example in enumerate(dataset):
self.assertEqual(example.q1, expected_examples[i][0])
self.assertEqual(example.q1_spacy, expected_examples[i][1])
self.assertEqual(example.q2, expected_examples[i][2])
self.assertEqual(example.q2_spacy, expected_examples[i][3])
self.assertEqual(example.label, expected_examples[i][4])
# 6 Fields including None for ids
assert len(dataset.fields) == 6
|
the-stack_0_3549 | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The PIVX developers
# Copyright (c) 2020 The Supernode Coin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# -*- coding: utf-8 -*-
from io import BytesIO
from time import sleep
from test_framework.messages import CTransaction, CTxIn, CTxOut, COIN, COutPoint
from test_framework.mininode import network_thread_start
from test_framework.supernodecoin_node import SupernodeCoinTestNode
from test_framework.script import CScript, OP_CHECKSIG
from test_framework.test_framework import SupernodeCoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
p2p_port,
bytes_to_hex_str,
set_node_times,
sync_blocks,
sync_mempools,
)
# filter utxos based on first 5 bytes of scriptPubKey
def getDelegatedUtxos(utxos):
return [x for x in utxos if x["scriptPubKey"][:10] == '76a97b63d1']
class SupernodeCoin_ColdStakingTest(SupernodeCoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [[]] * self.num_nodes
self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')
def setup_chain(self):
# Start with PoW cache: 200 blocks
self._initialize_chain()
self.enable_mocktime()
def init_test(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description)
self.DEFAULT_FEE = 0.05
# Setup the p2p connections and start up the network thread.
self.test_nodes = []
for i in range(self.num_nodes):
self.test_nodes.append(SupernodeCoinTestNode())
self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))
network_thread_start() # Start up network handling in another thread
# Let the test nodes get in sync
for i in range(self.num_nodes):
self.test_nodes[i].wait_for_verack()
def setColdStakingEnforcement(self, fEnable=True):
sporkName = "SPORK_17_COLDSTAKING_ENFORCEMENT"
# update spork 17 with node[0]
if fEnable:
self.log.info("Enabling cold staking with SPORK 17...")
res = self.activate_spork(0, sporkName)
else:
self.log.info("Disabling cold staking with SPORK 17...")
res = self.deactivate_spork(0, sporkName)
assert_equal(res, "success")
sleep(1)
# check that node[1] receives it
assert_equal(fEnable, self.is_spork_active(1, sporkName))
self.log.info("done")
def isColdStakingEnforced(self):
# verify from node[1]
return self.is_spork_active(1, "SPORK_17_COLDSTAKING_ENFORCEMENT")
def run_test(self):
self.description = "Performs tests on the Cold Staking P2CS implementation"
self.init_test()
NUM_OF_INPUTS = 20
INPUT_VALUE = 249
# nodes[0] - coin-owner
# nodes[1] - cold-staker
# 1) nodes[0] and nodes[2] mine 25 blocks each
# --------------------------------------------
print("*** 1 ***")
self.log.info("Mining 50 Blocks...")
for peer in [0, 2]:
for j in range(25):
self.mocktime = self.generate_pow(peer, self.mocktime)
sync_blocks(self.nodes)
# 2) node[1] sends his entire balance (50 mature rewards) to node[2]
# - node[2] stakes a block - node[1] locks the change
print("*** 2 ***")
self.log.info("Emptying node1 balance")
assert_equal(self.nodes[1].getbalance(), 50 * 250)
txid = self.nodes[1].sendtoaddress(self.nodes[2].getnewaddress(), (50 * 250 - 0.01))
assert (txid is not None)
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# lock the change output (so it's not used as stake input in generate_pos)
for x in self.nodes[1].listunspent():
assert (self.nodes[1].lockunspent(False, [{"txid": x['txid'], "vout": x['vout']}]))
# check that it cannot stake
sleep(1)
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
# 3) nodes[0] generates a owner address
# nodes[1] generates a cold-staking address.
# ---------------------------------------------
print("*** 3 ***")
owner_address = self.nodes[0].getnewaddress()
self.log.info("Owner Address: %s" % owner_address)
staker_address = self.nodes[1].getnewstakingaddress()
staker_privkey = self.nodes[1].dumpprivkey(staker_address)
self.log.info("Staking Address: %s" % staker_address)
# 4) Check enforcement.
# ---------------------
print("*** 4 ***")
# Check that SPORK 17 is disabled
assert (not self.isColdStakingEnforced())
self.log.info("Creating a stake-delegation tx before cold staking enforcement...")
assert_raises_rpc_error(-4, "Failed to accept tx in the memory pool (reason: cold-stake-inactive (code 16))\nTransaction canceled.",
self.nodes[0].delegatestake, staker_address, INPUT_VALUE, owner_address, False, False, True)
self.log.info("Good. Cold Staking NOT ACTIVE yet.")
# Enable SPORK
self.setColdStakingEnforcement()
# double check
assert (self.isColdStakingEnforced())
# 5) nodes[0] delegates a number of inputs for nodes[1] to stake em.
# ------------------------------------------------------------------
print("*** 5 ***")
self.log.info("First check warning when using external addresses...")
assert_raises_rpc_error(-5, "Only the owner of the key to owneraddress will be allowed to spend these coins",
self.nodes[0].delegatestake, staker_address, INPUT_VALUE, "yCgCXC8N5VThhfiaVuKaNLkNnrWduzVnoT")
self.log.info("Good. Warning triggered.")
self.log.info("Now force the use of external address creating (but not sending) the delegation...")
res = self.nodes[0].rawdelegatestake(staker_address, INPUT_VALUE, "yCgCXC8N5VThhfiaVuKaNLkNnrWduzVnoT", True)
assert(res is not None and res != "")
self.log.info("Good. Warning NOT triggered.")
self.log.info("Now delegate with internal owner address..")
self.log.info("Try first with a value (0.99) below the threshold")
assert_raises_rpc_error(-8, "Invalid amount",
self.nodes[0].delegatestake, staker_address, 0.99, owner_address)
self.log.info("Nice. it was not possible.")
self.log.info("Then try (creating but not sending) with the threshold value (1.00)")
res = self.nodes[0].rawdelegatestake(staker_address, 1.00, owner_address)
assert(res is not None and res != "")
self.log.info("Good. Warning NOT triggered.")
self.log.info("Now creating %d real stake-delegation txes..." % NUM_OF_INPUTS)
for i in range(NUM_OF_INPUTS):
res = self.nodes[0].delegatestake(staker_address, INPUT_VALUE, owner_address)
assert(res != None and res["txid"] != None and res["txid"] != "")
assert_equal(res["owner_address"], owner_address)
assert_equal(res["staker_address"], staker_address)
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
self.log.info("%d Txes created." % NUM_OF_INPUTS)
# check balances:
self.expected_balance = NUM_OF_INPUTS * INPUT_VALUE
self.expected_immature_balance = 0
self.checkBalances()
# 6) check that the owner (nodes[0]) can spend the coins.
# -------------------------------------------------------
print("*** 6 ***")
self.log.info("Spending back one of the delegated UTXOs...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
assert_equal(NUM_OF_INPUTS, len(delegated_utxos))
assert_equal(len(delegated_utxos), len(self.nodes[0].listcoldutxos()))
u = delegated_utxos[0]
txhash = self.spendUTXOwithNode(u, 0)
assert(txhash != None)
self.log.info("Good. Owner was able to spend - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
# check balances after spend.
self.expected_balance -= float(u["amount"])
self.checkBalances()
self.log.info("Balances check out after spend")
assert_equal(NUM_OF_INPUTS-1, len(self.nodes[0].listcoldutxos()))
# 7) check that the staker CANNOT use the coins to stake yet.
# He needs to whitelist the owner first.
# -----------------------------------------------------------
print("*** 7 ***")
self.log.info("Trying to generate a cold-stake block before whitelisting the owner...")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
self.log.info("Nice. Cold staker was NOT able to create the block yet.")
self.log.info("Whitelisting the owner...")
ret = self.nodes[1].delegatoradd(owner_address)
assert(ret)
self.log.info("Delegator address %s whitelisted" % owner_address)
# 8) check that the staker CANNOT spend the coins.
# ------------------------------------------------
print("*** 8 ***")
self.log.info("Trying to spend one of the delegated UTXOs with the cold-staking key...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
assert_greater_than(len(delegated_utxos), 0)
u = delegated_utxos[0]
assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed (Script failed an OP_CHECKCOLDSTAKEVERIFY operation",
self.spendUTXOwithNode, u, 1)
self.log.info("Good. Cold staker was NOT able to spend (failed OP_CHECKCOLDSTAKEVERIFY)")
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# 9) check that the staker can use the coins to stake a block with internal miner.
# --------------------------------------------------------------------------------
print("*** 9 ***")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], NUM_OF_INPUTS-1)
self.log.info("Generating one valid cold-stake block...")
self.mocktime = self.generate_pos(1, self.mocktime)
self.log.info("New block created by cold-staking. Trying to submit...")
newblockhash = self.nodes[1].getbestblockhash()
self.log.info("Block %s submitted" % newblockhash)
# Verify that nodes[0] accepts it
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())
assert_equal(newblockhash, self.nodes[0].getbestblockhash())
self.log.info("Great. Cold-staked block was accepted!")
# check balances after staked block.
self.expected_balance -= INPUT_VALUE
self.expected_immature_balance += (INPUT_VALUE + 250)  # staked input plus the stake reward (250)
self.checkBalances()
self.log.info("Balances check out after staked block")
# 10) check that the staker can use the coins to stake a block with a rawtransaction.
# ----------------------------------------------------------------------------------
print("*** 10 ***")
self.log.info("Generating another valid cold-stake block...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, staker_privkey)
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
self.log.info("Block %s submitted." % new_block.hash)
assert(ret is None)
# Verify that nodes[0] accepts it
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())
assert_equal(new_block.hash, self.nodes[0].getbestblockhash())
self.log.info("Great. Cold-staked block was accepted!")
self.mocktime += 60
set_node_times(self.nodes, self.mocktime)
# check balances after staked block.
self.expected_balance -= INPUT_VALUE
self.expected_immature_balance += (INPUT_VALUE + 250)
self.checkBalances()
self.log.info("Balances check out after staked block")
# 11) check that the staker cannot stake a block changing the coinstake scriptPubkey.
# ----------------------------------------------------------------------------------
print("*** 11 ***")
self.log.info("Generating one invalid cold-stake block (changing first coinstake output)...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block (with dummy key)
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, "")
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
self.log.info("Block %s submitted." % new_block.hash)
assert("rejected" in ret)
# Verify that nodes[0] rejects it
sync_blocks(self.nodes)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, new_block.hash)
self.log.info("Great. Malicious cold-staked block was NOT accepted!")
self.checkBalances()
self.log.info("Balances check out after (non) staked block")
# 12) neither adding different outputs to the coinstake.
# ------------------------------------------------------
print("*** 12 ***")
self.log.info("Generating another invalid cold-stake block (adding coinstake output)...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, staker_privkey)
# Add output (dummy key address) to coinstake (taking 100 SUNO from the pot)
self.add_output_to_coinstake(new_block, 100)
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
self.log.info("Block %s submitted." % new_block.hash)
assert_equal(ret, "bad-p2cs-outs")
# Verify that nodes[0] rejects it
sync_blocks(self.nodes)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, new_block.hash)
self.log.info("Great. Malicious cold-staked block was NOT accepted!")
self.checkBalances()
self.log.info("Balances check out after (non) staked block")
# 13) Now node[0] gets mad and spends all the delegated coins, voiding the P2CS contracts.
# ----------------------------------------------------------------------------------------
self.log.info("Let's void the contracts.")
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
print("*** 13 ***")
self.log.info("Cancel the stake delegation spending the delegated utxos...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
# remove one utxo to spend later
final_spend = delegated_utxos.pop()
txhash = self.spendUTXOsWithNode(delegated_utxos, 0)
assert(txhash is not None)
self.log.info("Good. Owner was able to void the stake delegations - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# deactivate SPORK 17 and check that the owner can still spend the last utxo
self.setColdStakingEnforcement(False)
assert (not self.isColdStakingEnforced())
txhash = self.spendUTXOsWithNode([final_spend], 0)
assert(txhash is not None)
self.log.info("Good. Owner was able to void a stake delegation (with SPORK 17 disabled) - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
# check balances after big spend.
self.expected_balance = 0
self.checkBalances()
self.log.info("Balances check out after the delegations have been voided.")
# re-activate SPORK17
self.setColdStakingEnforcement()
assert (self.isColdStakingEnforced())
# 14) check that coinstaker is empty and can no longer stake.
# -----------------------------------------------------------
print("*** 14 ***")
self.log.info("Trying to generate one cold-stake block again...")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
self.log.info("Cigar. Cold staker was NOT able to create any more blocks.")
# 15) check balances when mature.
# -----------------------------------------------------------
print("*** 15 ***")
self.log.info("Staking 100 blocks to mature the cold stakes...")
for i in range(2):
for peer in [0, 2]:
for j in range(25):
self.mocktime = self.generate_pos(peer, self.mocktime)
sync_blocks(self.nodes)
self.expected_balance = self.expected_immature_balance
self.expected_immature_balance = 0
self.checkBalances()
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
txhash = self.spendUTXOsWithNode(delegated_utxos, 0)
assert (txhash is not None)
self.log.info("Good. Owner was able to spend the cold staked coins - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
self.expected_balance = 0
self.checkBalances()
def checkBalances(self):
w_info = self.nodes[0].getwalletinfo()
self.log.info("OWNER - Delegated %f / Cold %f [%f / %f]" % (
float(w_info["delegated_balance"]), w_info["cold_staking_balance"],
float(w_info["immature_delegated_balance"]), w_info["immature_cold_staking_balance"]))
assert_equal(float(w_info["delegated_balance"]), self.expected_balance)
assert_equal(float(w_info["immature_delegated_balance"]), self.expected_immature_balance)
assert_equal(float(w_info["cold_staking_balance"]), 0)
w_info = self.nodes[1].getwalletinfo()
self.log.info("STAKER - Delegated %f / Cold %f [%f / %f]" % (
float(w_info["delegated_balance"]), w_info["cold_staking_balance"],
float(w_info["immature_delegated_balance"]), w_info["immature_cold_staking_balance"]))
assert_equal(float(w_info["delegated_balance"]), 0)
assert_equal(float(w_info["cold_staking_balance"]), self.expected_balance)
assert_equal(float(w_info["immature_cold_staking_balance"]), self.expected_immature_balance)
def spendUTXOwithNode(self, utxo, node_n):
new_addy = self.nodes[node_n].getnewaddress()
inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
out_amount = (float(utxo["amount"]) - self.DEFAULT_FEE)
outputs = {}
outputs[new_addy] = out_amount
spendingTx = self.nodes[node_n].createrawtransaction(inputs, outputs)
spendingTx_signed = self.nodes[node_n].signrawtransaction(spendingTx)
return self.nodes[node_n].sendrawtransaction(spendingTx_signed["hex"])
def spendUTXOsWithNode(self, utxos, node_n):
new_addy = self.nodes[node_n].getnewaddress()
inputs = []
outputs = {}
outputs[new_addy] = 0
for utxo in utxos:
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
outputs[new_addy] += float(utxo["amount"])
outputs[new_addy] -= self.DEFAULT_FEE
spendingTx = self.nodes[node_n].createrawtransaction(inputs, outputs)
spendingTx_signed = self.nodes[node_n].signrawtransaction(spendingTx)
return self.nodes[node_n].sendrawtransaction(spendingTx_signed["hex"])
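# The helper below tampers with a staked block's coinstake transaction: it appends
# an extra pay-to-pubkey output, deducts the same amount from the staker's output,
# then re-signs the coinstake and the block. Blocks built this way are expected to
# be rejected by the P2CS output checks exercised in step 12 above ("bad-p2cs-outs").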
def add_output_to_coinstake(self, block, value, peer=1):
coinstake = block.vtx[1]
if not hasattr(self, 'DUMMY_KEY'):
self.init_dummy_key()
coinstake.vout.append(
CTxOut(value * COIN, CScript([self.DUMMY_KEY.get_pubkey(), OP_CHECKSIG])))
coinstake.vout[1].nValue -= value * COIN
# re-sign coinstake
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(block.prevoutStake))
coinstake.vin[0] = CTxIn(prevout)
stake_tx_signed_raw_hex = self.nodes[peer].signrawtransaction(
bytes_to_hex_str(coinstake.serialize()))['hex']
block.vtx[1] = CTransaction()
block.vtx[1].from_hex(stake_tx_signed_raw_hex)
# re-sign block
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.re_sign_block()
if __name__ == '__main__':
SupernodeCoin_ColdStakingTest().main()
|
the-stack_0_3554 | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
----------
Compute upper limit on word perplexity for kenlm ngram models
Command : python3 compute_upper_ppl_kenlm.py --vocab_file [...] --kenlm_preds [...]
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import numpy
from utils import transform_asg
def compute_upper_limit_ppl_for_kenlm(known_words_file, kenlm_file):
with open(known_words_file, "r") as f:
known_words = set(map(transform_asg, f.readline().strip().split(" ")))
with open(kenlm_file, "r") as f:
sum_logp = 0
sum_logp_unk = 0
n_words = 0
n_words_unk = 0
n_letters = 0
for line in f:
if "Total" not in line:
continue
line = line.strip().split("\t")
word = ""
word_logp = 0
for token in line:
token_val = token.split("=")[0]
logp = float(token.split(" ")[-1])
if token_val == "|":
if word in known_words:
sum_logp += word_logp + numpy.log(numpy.power(10, logp))
n_words += 1
else:
sum_logp_unk += word_logp + numpy.log(numpy.power(10, logp))
n_words_unk += 1
word = ""
word_logp = 0
elif token_val == "</s>":
sum_logp += numpy.log(numpy.power(10, logp))
n_words += 1
else:
word += token_val
word_logp += numpy.log(numpy.power(10, logp))
n_letters += 1
if token_val == "</s>":
break
loss_letter = -(sum_logp + sum_logp_unk) / n_letters
ppl_word_no_unk = numpy.exp(-sum_logp / n_words)
ppl_word_unk = numpy.exp(-sum_logp_unk / n_words_unk)
ppl_word = numpy.exp(-(sum_logp + sum_logp_unk) / (n_words + n_words_unk))
print(
"Letter loss: {}, letter perplexity: {}".format(
loss_letter, numpy.exp(loss_letter)
)
)
print("Upper word perplexity for all words: {}".format(ppl_word))
print("Upper word perplexity for unknown words: {}".format(ppl_word_unk))
print(
"(Reported in the paper) "
"Upper word perplexity for known words: {}".format(ppl_word_no_unk)
)
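# Worked example (hypothetical numbers): if the known words accumulate
# sum_logp = -20.0 (natural log) over n_words = 10, the reported known-word
# perplexity is exp(-(-20.0) / 10) = exp(2.0) ≈ 7.39.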
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Upper limit on word perplexity for kenlm predictions"
)
parser.add_argument(
"--vocab_file",
help="vocabulary of known words, use file "
"from --limit_vocab_file during word kenLM training.",
)
parser.add_argument(
"--kenlm_preds", help="file with kenlm predictions after query run"
)
args = parser.parse_args()
print("Evaluate file {}".format(args.kenlm_preds))
compute_upper_limit_ppl_for_kenlm(args.vocab_file, args.kenlm_preds)
|
the-stack_0_3556 | from functools import wraps
from typing import Optional
def _embed_ipython_shell(ns: Optional[dict] = None):
if ns is None:
ns = {}
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.terminal.ipapp import load_default_config
@wraps(_embed_ipython_shell)
def wrapper(namespace=ns, banner=''):
config = load_default_config()
InteractiveShellEmbed.clear_instance()
shell = InteractiveShellEmbed.instance(
banner1=banner, user_ns=namespace, config=config
)
shell()
return wrapper
def start_python_console(namespace: Optional[dict] = None, banner: str = ''):
if namespace is None:
namespace = {}
try:
shell = _embed_ipython_shell()
shell(namespace, banner)
except SystemExit: # raised when invoking exit() hence safe to ignore
pass
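# Illustrative usage (names are arbitrary; IPython must be installed):
# start_python_console({'answer': 42}, banner='debug shell')
# drops into an interactive IPython shell where `answer` is available and
# returns once the user exits the shell.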
|
the-stack_0_3557 | import argparse
import os
import random
import string
import time
from abc import ABC, abstractmethod
from diffimg import diff
from selenium import webdriver
from selenium.webdriver import ChromeOptions, FirefoxOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
class UITester(ABC):
# This image diff threshold is set to an upper bound of 10% for now. We should try our best
# to at least maintain this upper bound.
_SCREENSHOT_DIFF_THRESHOLD_PERCENT = 10
_BASE_PATH = os.path.dirname(os.path.abspath(__file__))
_DEFAULT_USERNAME = os.getenv('CODALAB_USERNAME', 'codalab')
_DEFAULT_PASSWORD = os.getenv('CODALAB_PASSWORD', 'codalab')
def __init__(self, test_name, base_url='http://localhost'):
self._test_name = test_name
self._base_url = base_url
@abstractmethod
def test(self):
pass
def run(self):
def add_headless(browser_options):
if args.headless:
browser_options.add_argument('--headless')
# Test Chrome
options = ChromeOptions()
add_headless(options)
self.browser = webdriver.Chrome(chrome_options=options)
self.test()
self.browser.close()
# Test Firefox
options = FirefoxOptions()
add_headless(options)
self.browser = webdriver.Firefox(log_path='', firefox_options=options)
self.test()
self.browser.close()
def login(self, username='codalab', password='codalab'):
self.browser.get(self.get_url('/home'))
self.click(By.LINK_TEXT, 'LOGIN')
self.fill_field(By.ID, 'id_login', username)
self.fill_field(By.ID, 'id_password', password, press_enter=True)
def add_run_to_worksheet(self, command, use_keyboard_shortcut=False):
if use_keyboard_shortcut:
# ar = Add a new run
self.send_keyboard_shortcut('ar')
else:
self.click(By.CSS_SELECTOR, '[aria-label="Add New Run"]')
self.pause()
self.scroll_to_bottom('worksheet_container')
active_textbox = self.browser.switch_to.active_element
active_textbox.send_keys(command)
self.pause()
if use_keyboard_shortcut:
self.save_edit_keyboard_shortcut(active_textbox)
else:
self.click(By.XPATH, "//span[.='Confirm']")
self.longer_pause()
def rerun_last_bundle(self, use_keyboard_shortcut=False):
if use_keyboard_shortcut:
# Shift + g = Jump to the last bundle
self.send_keyboard_shortcut(Keys.SHIFT + 'g')
# Enter = Expand bundle
self.send_keyboard_shortcut(Keys.ENTER)
# an = Edit and add a rerun
# This keyboard shortcut only works if the bundle is expanded.
self.send_keyboard_shortcut('an')
else:
self.expand_last_bundle()
self.scroll_to_bottom('worksheet_container')
self.click(By.XPATH, "//span[.='Edit and Rerun']")
self.pause()
active_textbox = self.browser.switch_to.active_element
active_textbox.send_keys(' rerunning bundle...')
if use_keyboard_shortcut:
self.save_edit_keyboard_shortcut(active_textbox)
else:
self.scroll_to_bottom('worksheet_container')
self.click(By.XPATH, "//span[.='Confirm']")
self.longer_pause()
def edit_last_bundle_metadata(self, name, description, permission):
def edit_field(field, text):
field.click()
self.browser.switch_to.active_element.send_keys(text)
self.browser.switch_to.active_element.send_keys(Keys.ENTER)
# Edit name and description
self.expand_last_bundle()
editable_fields = self.browser.find_elements(By.CLASS_NAME, 'editable-field')
edit_field(editable_fields[-2], name)
edit_field(editable_fields[-1], description)
# Edit bundle permission
self.scroll_to_bottom('worksheet_container')
self.browser.find_elements_by_tag_name('svg')[-1].click()
select_boxes = self.browser.find_elements_by_tag_name('select')
self.select_option(select_boxes[-1], permission)
self.longer_pause()
def toggle_web_terminal(self, use_keyboard_shortcut=False):
if use_keyboard_shortcut:
# Shift + c = Show/hide web terminal
self.send_keyboard_shortcut(Keys.SHIFT + 'c')
else:
self.browser.find_element_by_id('terminal-button').click()
self.pause()
def edit_source(self, text, use_keyboard_shortcut=False):
if use_keyboard_shortcut:
# Shift + e = Edit source mode
self.send_keyboard_shortcut(Keys.SHIFT + 'e')
else:
self.click(By.CSS_SELECTOR, '[aria-label="Edit Source"]')
source_field = self.browser.switch_to.active_element
source_field.send_keys(Keys.ENTER + Keys.ENTER)
source_field.send_keys(text)
if use_keyboard_shortcut:
self.pause()
self.save_edit_keyboard_shortcut(source_field)
else:
self.click(By.CSS_SELECTOR, '[aria-label="Save Edit"]')
self.longer_pause()
def expand_last_bundle(self):
self.scroll_to_bottom('worksheet_container')
self.browser.find_elements_by_tag_name('button')[-1].click()
self.pause()
def add_text_to_worksheet(self, text, use_keyboard_shortcut=False):
if use_keyboard_shortcut:
# at = Add text
self.send_keyboard_shortcut('at')
else:
self.click(By.CSS_SELECTOR, '[aria-label="Add Text"]')
self.pause()
self.scroll_to_bottom('worksheet_container')
last_text_box = self.browser.find_elements_by_tag_name('textarea')[-1]
self.focus_and_send_keys(last_text_box, text)
if use_keyboard_shortcut:
self.save_edit_keyboard_shortcut(last_text_box)
else:
self.click(By.XPATH, "//span[.='Save']")
self.pause()
def save_edit_keyboard_shortcut(self, element):
# Control + Enter = Save current edit
webdriver.ActionChains(self.browser).move_to_element(element).key_down(
Keys.CONTROL
).key_down(Keys.ENTER).key_up(Keys.ENTER).key_up(Keys.CONTROL).perform()
def refresh_worksheet(self):
# Shift + r = Refresh worksheet
self.send_keyboard_shortcut(Keys.SHIFT + 'r')
def pause(self):
time.sleep(1)
def longer_pause(self):
time.sleep(3)
def set_browser_size(self, width=1500, height=1200):
self.browser.set_window_position(0, 0)
self.browser.set_window_size(width, height)
def click(self, by, selector):
self.browser.find_element(by, selector).click()
def focus_and_send_keys(self, element, keys):
webdriver.ActionChains(self.browser).move_to_element(element).send_keys(keys).perform()
def send_keyboard_shortcut(self, keys):
self.browser.find_element(By.TAG_NAME, 'html').send_keys(keys)
def fill_field(self, by, selector, text, press_enter=False):
textbox = self.browser.find_element(by, selector)
textbox.send_keys(text)
if press_enter:
textbox.send_keys(Keys.ENTER)
def wait_until_worksheet_content_loads(self):
self.wait_until_page_loads('ws-item')
# Wait until placeholder items have been resolved.
by = By.CLASS_NAME
selector = "codalab-item-placeholder"
timeout_message = 'Timed out while waiting for {}: {} to be hidden.'.format(by, selector)
WebDriverWait(self.browser, 300).until(
EC.invisibility_of_element_located((by, selector)), message=timeout_message
)
def wait_until_page_loads(self, selector, by=By.CLASS_NAME):
timeout_message = 'Timed out while waiting for {}: {}.'.format(by, selector)
return WebDriverWait(self.browser, 15).until(
EC.presence_of_element_located((by, selector)), message=timeout_message
)
def switch_to_new_tab(self):
# Just give enough time for the new tab to get opened
self.pause()
self.browser.switch_to.window(
self.browser.window_handles[len(self.browser.window_handles) - 1]
)
def select_option(self, element, to_select):
for option in element.find_elements_by_tag_name('option'):
if option.text in to_select:
option.click()
break
def constructPartialSelector(self, by, partial_selector):
return '//*[contains(@{}, "{}")]'.format(by, partial_selector)
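# For example, constructPartialSelector('class', 'worksheet') yields the XPath
# '//*[contains(@class, "worksheet")]', matching any element whose class
# attribute contains the substring "worksheet".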
def output_images(self, selector, num_of_screenshots=10):
output_dir = self._get_output_dir('out')
element = "document.getElementById('{}')".format(selector)
scroll_height = float(self.browser.execute_script('return {}.scrollHeight'.format(element)))
for i in range(num_of_screenshots):
y = (i / num_of_screenshots) * scroll_height
self.browser.execute_script('{}.scrollTo(0, {})'.format(element, y))
path = os.path.join(output_dir, '{}{}.png'.format(self._test_name, i + 1))
self.browser.save_screenshot(path)
def compare_to_baselines(self, num_of_screenshots=10):
out_dir = self._get_output_dir('out')
baselines_dir = self._get_output_dir('baselines')
diff_dir = self._get_output_dir('diff')
has_failed = False
for i in range(num_of_screenshots):
screenshot_filename = '{}{}.png'.format(self._test_name, i + 1)
out_img = os.path.join(out_dir, screenshot_filename)
baseline_img = os.path.join(baselines_dir, screenshot_filename)
diff_img = os.path.join(diff_dir, screenshot_filename)
diff_percent = (
diff(baseline_img, out_img, delete_diff_file=True, ignore_alpha=True) * 100
)
print(
'{}% difference in {} for {}'.format(
diff_percent, self._get_browser_name(), screenshot_filename
)
)
if diff_percent > UITester._SCREENSHOT_DIFF_THRESHOLD_PERCENT:
# If an image comparison has failed, generate diff and print an error message in red
has_failed = True
diff(
out_img,
baseline_img,
delete_diff_file=False,
diff_img_file=diff_img,
ignore_alpha=True,
)
print(
'\033[91mScreenshot comparison failed in {} for {} by {}%\033[0m'.format(
self._get_browser_name(), screenshot_filename, diff_percent
)
)
assert not has_failed
def get_url(self, path):
return '{}/{}'.format(self._base_url, path)
def make_name_unique(self, name):
# Appends some unique identifier to the string input
random_id = ''.join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(16)
)
return name + random_id
def scroll_to_bottom(self, selector):
element = "document.getElementById('{}')".format(selector)
scroll_height = float(self.browser.execute_script('return {}.scrollHeight'.format(element)))
self.browser.execute_script('{}.scrollTo(0, {})'.format(element, scroll_height))
def _get_partial_matched_elements(self, by, selector):
return self.browser.find_elements(By.XPATH, self.constructPartialSelector(by, selector))
def _get_output_dir(self, folder_name):
def create_path(path):
if not os.path.isdir(path):
os.mkdir(path)
output_dir = os.path.join(UITester._BASE_PATH, folder_name)
create_path(output_dir)
output_dir = os.path.join(output_dir, self._test_name)
create_path(output_dir)
output_dir = os.path.join(output_dir, self._get_browser_name())
create_path(output_dir)
return output_dir
def _get_browser_name(self):
return self.browser.capabilities['browserName']
class WorksheetTest(UITester):
def __init__(self):
super().__init__('worksheet')
def test(self):
self.login()
self.wait_until_worksheet_content_loads()
# Wait for the small worksheet to be resolved from its placeholder item
by = By.LINK_TEXT
selector = "Small Worksheet [cl_small_worksheet]"
timeout_message = 'Timed out while waiting for {}: {}.'.format(by, selector)
WebDriverWait(self.browser, 10).until(
EC.presence_of_element_located((by, selector)), message=timeout_message
)
self.click(By.LINK_TEXT, 'Small Worksheet [cl_small_worksheet]')
self.switch_to_new_tab()
self.wait_until_worksheet_content_loads()
self.output_images('worksheet_container')
self.compare_to_baselines()
class EditWorksheetTest(UITester):
def __init__(self):
super().__init__('edit_worksheet')
def test(self):
self.set_browser_size()
self.login()
self.wait_until_worksheet_content_loads()
# Create a new worksheet
self.click(By.XPATH, '//*[@title="New Worksheet"]')
self.fill_field(By.ID, 'name', self.make_name_unique('test-worksheet'))
self.browser.find_element(By.XPATH, "//span[.='Confirm']").find_element(
By.XPATH, './..'
).click()
self.longer_pause()
# Add a title to the worksheet
self.click(By.CLASS_NAME, 'editable-field')
self.browser.switch_to.active_element.send_keys(
'Some Random Title for the UI Test Edit Worksheet in CodaLab'
)
self.browser.switch_to.active_element.send_keys(Keys.ENTER)
# Add text to the new worksheet
self.add_text_to_worksheet('This is some text. ' * 25)
# Add a bundle and rerun it
self.add_run_to_worksheet('echo hello')
self.rerun_last_bundle()
# Edit metadata of the last bundle
self.edit_last_bundle_metadata(
'New Name Given to this Bundle', 'New Description given to this bundle. ' * 5, 'none'
)
# Test keyboard shortcuts
self.add_run_to_worksheet('echo goodbye', use_keyboard_shortcut=True)
self.rerun_last_bundle(use_keyboard_shortcut=True)
# Select the last two bundles and delete them
# shift + g = Jump to the last bundle
self.send_keyboard_shortcut(Keys.SHIFT + 'g')
# x = Select the bundle row
self.send_keyboard_shortcut('x')
self.send_keyboard_shortcut(Keys.ARROW_UP)
self.send_keyboard_shortcut('x')
# Backspace = Attempt to delete the selected bundles
self.send_keyboard_shortcut(Keys.BACKSPACE)
self.browser.find_elements_by_tag_name('button')[-1].click()
# Wait for bundles to be deleted before proceeding
self.longer_pause()
# Add some more text via keyboard shortcuts
self.add_text_to_worksheet('Some more text. ' * 25, use_keyboard_shortcut=True)
# Edit source
self.edit_source('The End.', use_keyboard_shortcut=True)
# Refresh the page to ensure that new changes are persisted
self.browser.refresh()
self.wait_until_worksheet_content_loads()
self.toggle_web_terminal(use_keyboard_shortcut=True)
self.refresh_worksheet()
# Take screenshots and compare to the existing baseline images
num_of_screenshots = 1
self.output_images('worksheet_container', num_of_screenshots)
self.compare_to_baselines(num_of_screenshots)
def main():
# Add UI tests to the list to run them
all_tests = [
WorksheetTest(),
# TODO: this test is failing intermittently in GHA. Disabling for now.
# EditWorksheetTest()
]
start_time = time.time()
for test in all_tests:
test.run()
duration_seconds = time.time() - start_time
print('Success.')
print('\n--- Completion Time: {} minutes---'.format(duration_seconds / 60))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run frontend automation tests for the CodaLab UI')
parser.add_argument(
'--headless', action='store_true', help='Whether to test using headless browsers'
)
args = parser.parse_args()
main()
|
the-stack_0_3559 | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Cedicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_no_witness_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitVersion1SignatureHash,
SignatureHash,
hash160,
)
from test_framework.test_framework import CedicoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
softfork_active,
hex_str_to_bytes,
assert_raises_rpc_error,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
SEGWIT_HEIGHT = 120
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
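# Note: SegwitVersion1SignatureHash computes the BIP143 digest (which commits to
# the amount being spent), and the sighash type byte is appended to the signature,
# as required for segwit v0 spends.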
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize())
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
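# Illustrative arithmetic (made-up sizes): base_size = 1000 and total_size = 1300
# give weight = 3 * 1000 + 1300 = 4300, so the rounded-up virtual size is
# (4300 + 3) // 4 = 1075.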
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_message(msg_witness_tx(tx) if with_witness else msg_tx(tx))
p2p.sync_with_ping()
assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_message(msg_block(block) if with_witness else msg_no_witness_block(block))
p2p.sync_with_ping()
assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
if success:
self.wait_for_getdata(timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(CedicoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT)],
["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-segwitheight=-1"]
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
self.log.info("Starting tests before segwit activation")
self.segwit_active = False
self.test_non_witness_transaction()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.test_getblocktemplate_before_lockin()
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_standardness_v0()
self.log.info("Advancing to segwit activation")
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
self.test_superfluous_witness()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit active = {})".format(func.__name__, self.segwit_active))
# Assert segwit status is as expected
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
self.sync_blocks()
# Assert segwit status is as expected at end of subtest
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
return func_wrapper
@subtest
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=1)
block.solve()
self.test_node.send_message(msg_no_witness_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert tx.hash in self.nodes[0].getrawmempool()
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
self.nodes[0].generate(1)
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert tx.sha256 != tx.calc_sha256(with_witness=True)
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_no_witness_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block(version=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not self.segwit_active:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(), wit_block.serialize())
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert len(block.vtx[0].wit.vtxinwit) == 1
assert len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(), block.serialize())
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize()))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize())
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert block4.sha256 not in self.old_node.getdataset
@subtest
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest
def test_getblocktemplate_before_lockin(self):
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
if node == self.nodes[2]:
# If this is a non-segwit node, we should not get a witness
# commitment.
assert 'default_witness_commitment' not in gbt_results
else:
# For segwit-aware nodes, check the witness
# commitment is correct.
assert 'default_witness_commitment' in gbt_results
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, script.hex())
# Clear out the mempool
self.nodes[0].generate(1)
self.sync_blocks()
@subtest
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert self.old_node.last_message["getdata"].inv[0].type == 1
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
# Delivering this transaction with witness should fail (no matter who
# it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, but not before."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
self.sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if not self.segwit_active:
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
assert not softfork_active(self.nodes[0], 'segwit')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(SEGWIT_HEIGHT - height - 2)
assert not softfork_active(self.nodes[0], 'segwit')
self.nodes[0].generate(1)
assert softfork_active(self.nodes[0], 'segwit')
self.segwit_active = True
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
self.sync_blocks()
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older cedicoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert msg_block(block).serialize() != msg_no_witness_block(block).serialize()
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert len(block_3.vtx[0].vout) == 4 # 3 OP_returns
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() != block.hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() == block.hash
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert len(self.utxo) > 0
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert parent_tx.vout[0].nValue > 0
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
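# weight = 3 * base size + total size, so each extra witness byte adds about
# 1/4 of a virtual byte; scale the remaining vsize headroom by 4 to find how
# much witness padding pushes the block just over the limit.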
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert len(block.serialize()) > 2 * 1024 * 1024
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() != block.hash
# Now redo commitment with the standard nonce, but let cedicoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(block_2.serialize().hex())
# Tip should not advance!
assert self.nodes[0].getbestblockhash() != block_2.hash
@subtest
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE])
assert len(long_witness_program) == MAX_PROGRAM_LENGTH + 1
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE])
assert len(witness_program) == MAX_PROGRAM_LENGTH
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert tx.vout[0].nValue >= 0
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex())
assert vsize != raw_tx["size"]
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit versions are non-standard to spend, but valid in blocks.
Sending to future segwit versions is always allowed.
Can run this before and after segwit activation."""
NUM_SEGWIT_VERSIONS = 17  # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
self.sync_blocks()
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
self.sync_blocks()
assert len(self.nodes[0].getrawmempool()) == 0
# Finally, verify that version 0 -> version 1 transactions
# are standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
# Gets accepted to both policy-enforcing nodes and others.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
self.sync_blocks()
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
self.sync_blocks()
@subtest
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = ECKey()
key.generate(False)
pubkey = key.get_pubkey().get_bytes()
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert len(temp_utxos) > num_inputs
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
self.sync_blocks()
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
# Restart with the new binary
self.stop_node(2)
self.start_node(2, extra_args=["-segwitheight={}".format(SEGWIT_HEIGHT)])
connect_nodes(self.nodes[0], 2)
self.sync_blocks()
# Make sure that this peer thinks segwit has activated.
assert softfork_active(self.nodes[2], 'segwit')
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
while height >= 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
height -= 1
@subtest
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert extra_sigops_available < 100 # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
self.sync_blocks()
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_superfluous_witness(self):
# Serialization of a tx that always sets the witness flag byte to 3
def serialize_with_bogus_witness(tx):
flags = 3
r = b""
r += struct.pack("<i", tx.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(tx.vin)
r += ser_vector(tx.vout)
if flags & 1:
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
return r
class msg_bogus_tx(msg_tx):
def serialize(self):
return serialize_with_bogus_witness(self.tx)
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
self.nodes[0].generate(1)
unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('bcrt'))
raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Superfluous witness record']):
self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
self.nodes[0].p2p.sync_with_ping()
raw = self.nodes[0].signrawtransactionwithwallet(raw)
assert raw['complete']
raw = raw['hex']
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
self.nodes[0].p2p.sync_with_ping()
if __name__ == '__main__':
SegWitTest().main()
|
the-stack_0_3560 | from string import punctuation, digits
import numpy as np
import random
# Part I
#pragma: coderesponse template
def get_order(n_samples):
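    """Return the iteration order over n_samples points: the comma-separated
    indices stored in '<n_samples>.txt' if that file exists, otherwise a
    reproducible shuffle of range(n_samples) seeded with 1."""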
try:
with open(str(n_samples) + '.txt') as fp:
line = fp.readline()
return list(map(int, line.split(',')))
except FileNotFoundError:
random.seed(1)
indices = list(range(n_samples))
random.shuffle(indices)
return indices
#pragma: coderesponse end
#pragma: coderesponse template
def hinge_loss_single(feature_vector, label, theta, theta_0):
"""
Finds the hinge loss on a single data point given specific classification
parameters.
Args:
feature_vector - A numpy array describing the given data point.
label - A real valued number, the correct classification of the data
point.
theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A real number representing the hinge loss associated with the
given data point and parameters.
"""
# Your code here
""" My solution:
return np.maximum(0, 1 - label * (np.dot(feature_vector, theta) + theta_0))
"""
# Instructor's solution: (same)
y = theta @ feature_vector + theta_0
return max(0, 1 - y * label)
#pragma: coderesponse end
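# Minimal sanity check, added for illustration (not part of the original
# assignment): with theta = [1, 1] and theta_0 = 0, the point [1, 1] labeled
# +1 has margin 2, so its hinge loss is 0; labeled -1 its margin is -2,
# giving a loss of 1 - (-2) = 3.
def _demo_hinge_loss_single():
    theta = np.array([1.0, 1.0])
    x = np.array([1.0, 1.0])
    assert hinge_loss_single(x, 1, theta, 0.0) == 0.0
    assert hinge_loss_single(x, -1, theta, 0.0) == 3.0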
#pragma: coderesponse template
def hinge_loss_full(feature_matrix, labels, theta, theta_0):
"""
Finds the total hinge loss on a set of data given specific classification
parameters.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A real number representing the hinge loss associated with the
given dataset and parameters. This number should be the average hinge
loss across all of the points in the feature matrix.
"""
# Your code here
""" My solution:
k = len(feature_matrix)
total = 0
for i in range(k):
total += hinge_loss_single(feature_matrix[i], labels[i], theta, theta_0)
return total / k
"""
# Instructor's solution: (same, though much cleaner)
ys = feature_matrix @ theta + theta_0
loss = np.maximum(1 - ys * labels, np.zeros(len(labels)))
return np.mean(loss)
#pragma: coderesponse end
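# Illustrative check, added here (not in the original file): the full hinge
# loss is the mean of the per-point losses, so one well-classified point and
# one misclassified point should average their individual losses.
def _demo_hinge_loss_full():
    X = np.array([[1.0, 1.0], [-1.0, 0.0]])
    y = np.array([1, 1])
    theta, theta_0 = np.array([1.0, 1.0]), 0.0
    singles = [hinge_loss_single(X[i], y[i], theta, theta_0) for i in range(2)]
    assert hinge_loss_full(X, y, theta, theta_0) == np.mean(singles)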
#pragma: coderesponse template
def perceptron_single_step_update(
feature_vector,
label,
current_theta,
current_theta_0):
"""
Properly updates the classification parameter, theta and theta_0, on a
single step of the perceptron algorithm.
Args:
feature_vector - A numpy array describing a single data point.
label - The correct classification of the feature vector.
current_theta - The current theta being used by the perceptron
algorithm before this update.
current_theta_0 - The current theta_0 being used by the perceptron
algorithm before this update.
Returns: A tuple where the first element is a numpy array with the value of
theta after the current update has completed and the second element is a
real valued number with the value of theta_0 after the current updated has
completed.
"""
# Your code here
""" My solution:
epsilon = 1e-9
if label * (np.dot(current_theta, feature_vector) + current_theta_0) \
<= epsilon:
theta = current_theta + label * feature_vector
theta_0 = current_theta_0 + label
return theta, theta_0
else:
return current_theta, current_theta_0
"""
# Instructor's solution: (same)
if label * (np.dot(current_theta, feature_vector) + current_theta_0) <= 1e-7:
return (current_theta + label * feature_vector, current_theta_0 + label)
return (current_theta, current_theta_0)
#pragma: coderesponse end
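# Illustrative check, added here (not in the original file): a point that is
# misclassified or exactly on the boundary triggers theta += label * x and
# theta_0 += label; a point already classified with positive margin does not.
def _demo_perceptron_single_step_update():
    x = np.array([1.0, 2.0])
    theta, theta_0 = perceptron_single_step_update(x, 1, np.zeros(2), 0.0)
    assert np.array_equal(theta, x) and theta_0 == 1
    theta, theta_0 = perceptron_single_step_update(x, 1, theta, theta_0)
    assert np.array_equal(theta, x) and theta_0 == 1  # already correct, unchanged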
#pragma: coderesponse template
def perceptron(feature_matrix, labels, T):
"""
Runs the full perceptron algorithm on a given set of data. Runs T
iterations through the data set, there is no need to worry about
stopping early.
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
NOTE: Iterate the data matrix by the orders returned by get_order(feature_matrix.shape[0])
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the perceptron algorithm
should iterate through the feature matrix.
Returns: A tuple where the first element is a numpy array with the value of
theta, the linear classification parameter, after T iterations through the
feature matrix and the second element is a real number with the value of
theta_0, the offset classification parameter, after T iterations through
the feature matrix.
"""
# Your code here
""" My solution:
n = len(feature_matrix[0])
theta = np.zeros(n)
theta_0 = 0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
theta, theta_0 = perceptron_single_step_update(feature_matrix[i],
labels[i],
theta, theta_0)
return theta, theta_0
"""
# Instructor's solution: (same)
(nsamples, nfeatures) = feature_matrix.shape
theta = np.zeros(nfeatures)
theta_0 = 0.0
for t in range(T):
for i in get_order(nsamples):
theta, theta_0 = perceptron_single_step_update(
feature_matrix[i], labels[i], theta, theta_0)
return (theta, theta_0)
#pragma: coderesponse end
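# Illustrative usage, added here (assumes no '2.txt' ordering file exists, so
# get_order falls back to its seeded shuffle): on a linearly separable toy set
# the learned parameters should classify every point correctly.
def _demo_perceptron():
    X = np.array([[1.0, 1.0], [-1.0, -1.0]])
    y = np.array([1, -1])
    theta, theta_0 = perceptron(X, y, T=5)
    assert all(y[i] * (X[i] @ theta + theta_0) > 0 for i in range(len(y)))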
#pragma: coderesponse template
def average_perceptron(feature_matrix, labels, T):
"""
Runs the average perceptron algorithm on a given set of data. Runs T
iterations through the data set, there is no need to worry about
stopping early.
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
NOTE: Iterate the data matrix by the orders returned by get_order(feature_matrix.shape[0])
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the perceptron algorithm
should iterate through the feature matrix.
Returns: A tuple where the first element is a numpy array with the value of
the average theta, the linear classification parameter, found after T
iterations through the feature matrix and the second element is a real
number with the value of the average theta_0, the offset classification
parameter, found after T iterations through the feature matrix.
Hint: It is difficult to keep a running average; however, it is simple to
find a sum and divide.
"""
# Your code here
""" My solution:
n = len(feature_matrix[0])
k = len(get_order(feature_matrix.shape[0]))
theta = np.zeros(n)
theta_0 = 0
theta_sum = np.zeros(n)
theta_0_sum = 0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
theta, theta_0 = perceptron_single_step_update(feature_matrix[i],
labels[i],
theta, theta_0)
theta_sum += theta
theta_0_sum += theta_0
return theta_sum/(k*T), theta_0_sum/(k*T)
"""
# Instructor's solution: (same)
(nsamples, nfeatures) = feature_matrix.shape
theta = np.zeros(nfeatures)
theta_sum = np.zeros(nfeatures)
theta_0 = 0.0
theta_0_sum = 0.0
for t in range(T):
for i in get_order(nsamples):
theta, theta_0 = perceptron_single_step_update(
feature_matrix[i], labels[i], theta, theta_0)
theta_sum += theta
theta_0_sum += theta_0
return (theta_sum / (nsamples * T), theta_0_sum / (nsamples * T))
#pragma: coderesponse end
#pragma: coderesponse template
def pegasos_single_step_update(
feature_vector,
label,
L,
eta,
current_theta,
current_theta_0):
"""
Properly updates the classification parameter, theta and theta_0, on a
single step of the Pegasos algorithm
Args:
feature_vector - A numpy array describing a single data point.
label - The correct classification of the feature vector.
        L - The lambda value being used to update the parameters.
eta - Learning rate to update parameters.
current_theta - The current theta being used by the Pegasos
algorithm before this update.
current_theta_0 - The current theta_0 being used by the
Pegasos algorithm before this update.
Returns: A tuple where the first element is a numpy array with the value of
theta after the current update has completed and the second element is a
    real valued number with the value of theta_0 after the current update has
completed.
"""
# Your code here
""" My solution:
epsilon = 1e-7
if label * (np.dot(current_theta, feature_vector) + current_theta_0) - 1 \
<= epsilon:
theta = (1 - eta*L)*current_theta + eta*label*feature_vector
theta_0 = current_theta_0 + eta*label
else:
theta = (1 - eta*L)*current_theta
theta_0 = current_theta_0
return theta, theta_0
"""
# Instructor's solution: (uses 0 instead of epsilon)
mult = 1 - (eta * L)
if label * (np.dot(feature_vector, current_theta) + current_theta_0) <= 1:
return ((mult * current_theta) + (eta * label * feature_vector),
(current_theta_0) + (eta * label))
return (mult * current_theta, current_theta_0)
#pragma: coderesponse end
#pragma: coderesponse template
def pegasos(feature_matrix, labels, T, L):
"""
Runs the Pegasos algorithm on a given set of data. Runs T
    iterations through the data set; there is no need to worry about
stopping early.
For each update, set learning rate = 1/sqrt(t),
where t is a counter for the number of updates performed so far (between 1
and nT inclusive).
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the algorithm
should iterate through the feature matrix.
        L - The lambda value being used to update the Pegasos
algorithm parameters.
Returns: A tuple where the first element is a numpy array with the value of
the theta, the linear classification parameter, found after T
iterations through the feature matrix and the second element is a real
number with the value of the theta_0, the offset classification
parameter, found after T iterations through the feature matrix.
"""
# Your code here
""" My solution:
n = len(feature_matrix[0])
theta = np.zeros(n)
theta_0 = 0
time = 0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
time += 1
eta = 1 / np.sqrt(time)
theta, theta_0 = pegasos_single_step_update(feature_matrix[i],
labels[i], L, eta,
theta, theta_0)
return theta, theta_0
"""
# Instructor's solution: (same)
(nsamples, nfeatures) = feature_matrix.shape
theta = np.zeros(nfeatures)
theta_0 = 0
count = 0
for t in range(T):
for i in get_order(nsamples):
count += 1
eta = 1.0 / np.sqrt(count)
(theta, theta_0) = pegasos_single_step_update(feature_matrix[i],
labels[i], L, eta,
theta, theta_0)
return (theta, theta_0)
#pragma: coderesponse end
# Part II
#pragma: coderesponse template
def classify(feature_matrix, theta, theta_0):
"""
A classification function that uses theta and theta_0 to classify a set of
data points.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A numpy array of 1s and -1s where the kth element of the array is
the predicted classification of the kth row of the feature matrix using the
given theta and theta_0. If a prediction is GREATER THAN zero, it should
be considered a positive classification.
"""
# Your code here
""" My solution:
n = len(feature_matrix)
predictions = np.zeros(n)
for k in range(n):
if (np.dot(theta, feature_matrix[k]) + theta_0) > 0:
predictions[k] = 1
else:
predictions[k] = -1
return predictions
"""
# Instructor's solution: (MUCH cleaner!)
return (feature_matrix @ theta + theta_0 > 1e-7) * 2.0 - 1
#pragma: coderesponse end
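# How the one-liner works (illustrative): the boolean mask from the comparison
# is mapped onto {-1.0, +1.0} via "* 2.0 - 1". For a hypothetical
# theta = [1, -1], theta_0 = 0:
# >>> classify(np.array([[2., 1.], [1., 2.]]), np.array([1., -1.]), 0.)
# array([ 1., -1.])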
#pragma: coderesponse template
def classifier_accuracy(
classifier,
train_feature_matrix,
val_feature_matrix,
train_labels,
val_labels,
**kwargs):
"""
Trains a linear classifier and computes accuracy.
The classifier is trained on the train data. The classifier's
accuracy on the train and validation data is then returned.
Args:
classifier - A classifier function that takes arguments
(feature matrix, labels, **kwargs) and returns (theta, theta_0)
train_feature_matrix - A numpy matrix describing the training
data. Each row represents a single data point.
        val_feature_matrix - A numpy matrix describing the validation
data. Each row represents a single data point.
train_labels - A numpy array where the kth element of the array
is the correct classification of the kth row of the training
feature matrix.
val_labels - A numpy array where the kth element of the array
is the correct classification of the kth row of the validation
feature matrix.
**kwargs - Additional named arguments to pass to the classifier
(e.g. T or L)
Returns: A tuple in which the first element is the (scalar) accuracy of the
trained classifier on the training data and the second element is the
accuracy of the trained classifier on the validation data.
"""
# Your code here
""" My solution:
theta, theta_0 = classifier(train_feature_matrix, train_labels, **kwargs)
train_preds = classify(train_feature_matrix, theta, theta_0)
train_accuracy = accuracy(train_preds, train_labels)
val_preds = classify(val_feature_matrix, theta, theta_0)
val_accuracy = accuracy(val_preds, val_labels)
return train_accuracy, val_accuracy
"""
# Instructor's solution: (same)
theta, theta_0 = classifier(train_feature_matrix, train_labels, **kwargs)
train_predictions = classify(train_feature_matrix, theta, theta_0)
val_predictions = classify(val_feature_matrix, theta, theta_0)
train_accuracy = accuracy(train_predictions, train_labels)
validation_accuracy = accuracy(val_predictions, val_labels)
return (train_accuracy, validation_accuracy)
#pragma: coderesponse end
#pragma: coderesponse template
def extract_words(input_string):
"""
Helper function for bag_of_words()
Inputs a text string
Returns a list of lowercase words in the string.
Punctuation and digits are separated out into their own words.
"""
for c in punctuation + digits:
input_string = input_string.replace(c, ' ' + c + ' ')
return input_string.lower().split()
#pragma: coderesponse end
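# For example (illustrative): extract_words("Loved it!!") returns
# ['loved', 'it', '!', '!'] because every punctuation character and digit is
# padded with spaces before the lowercased string is split.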
#pragma: coderesponse template
def bag_of_words(texts):
"""
Inputs a list of string reviews
Returns a dictionary of unique unigrams occurring over the input
Feel free to change this code as guided by Problem 9
"""
# Your code here
dictionary = {} # maps word to unique index
# stopwords = np.loadtxt("stopwords.txt", dtype="str")
for text in texts:
word_list = extract_words(text)
for word in word_list:
# if word in stopwords:
# continue
if word not in dictionary:
dictionary[word] = len(dictionary)
return dictionary
#pragma: coderesponse end
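# For example (illustrative): bag_of_words(["good movie", "bad movie"]) maps
# 'good' -> 0, 'movie' -> 1, 'bad' -> 2, i.e. indices follow first appearance.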
#pragma: coderesponse template
def extract_bow_feature_vectors(reviews, dictionary):
"""
Inputs a list of string reviews
Inputs the dictionary of words as given by bag_of_words
Returns the bag-of-words feature matrix representation of the data.
The returned matrix is of shape (n, m), where n is the number of reviews
and m the total number of entries in the dictionary.
Feel free to change this code as guided by Problem 9
"""
# Your code here
num_reviews = len(reviews)
feature_matrix = np.zeros([num_reviews, len(dictionary)])
for i, text in enumerate(reviews):
word_list = extract_words(text)
for word in word_list:
if word in dictionary:
feature_matrix[i, dictionary[word]] = 1
# feature_matrix[i, dictionary[word]] += 1
return feature_matrix
#pragma: coderesponse end
#pragma: coderesponse template
def accuracy(preds, targets):
"""
Given length-N vectors containing predicted and target labels,
    returns the fraction of correct predictions.
"""
return (preds == targets).mean()
#pragma: coderesponse end
|
the-stack_0_3561 | from utils import read_data
import numpy as np
def test_mnist_images():
train_images = read_data.get_mnist_data(read_data.MNIST_TRAIN_IMAGES_URL)
assert train_images.shape == (60000, 28, 28)
test_images = read_data.get_mnist_data(read_data.MNIST_TEST_IMAGES_URL)
assert test_images.shape == (10000, 28, 28)
def test_mnist_labels():
train_labels = read_data.get_mnist_data(read_data.MNIST_TRAIN_LABELS_URL)
assert train_labels.shape == (60000, )
test_labels = read_data.get_mnist_data(read_data.MNIST_TEST_LABELS_URL)
assert test_labels.shape == (10000, )
|
the-stack_0_3563 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Reports builder for BIDS-Apps.
Generalizes report generation across BIDS-Apps
"""
from pathlib import Path
import re
from itertools import compress
from collections import defaultdict
from pkg_resources import resource_filename as pkgrf
from bids.layout import BIDSLayout, add_config_paths
import jinja2
from nipype.utils.filemanip import copyfile
# Add a new figures spec
try:
add_config_paths(figures=pkgrf("niworkflows", "data/nipreps.json"))
except ValueError as e:
if "Configuration 'figures' already exists" != str(e):
raise
PLURAL_SUFFIX = defaultdict(str("s").format, [("echo", "es")])
SVG_SNIPPET = [
"""\
<object class="svg-reportlet" type="image/svg+xml" data="./{0}">
Problem loading figure {0}. If the link below works, please try \
reloading the report in your browser.</object>
</div>
<div class="elem-filename">
Get figure file: <a href="./{0}" target="_blank">{0}</a>
</div>
""",
"""\
<img class="svg-reportlet" src="./{0}" style="width: 100%" />
</div>
<div class="elem-filename">
Get figure file: <a href="./{0}" target="_blank">{0}</a>
</div>
""",
]
class Element(object):
"""Just a basic component of a report"""
def __init__(self, name, title=None):
self.name = name
self.title = title
class Reportlet(Element):
"""
A reportlet has title, description and a list of components with either an
HTML fragment or a path to an SVG file, and possibly a caption. This is a
factory class to generate Reportlets reusing the layout from a ``Report``
object.
.. testsetup::
>>> cwd = os.getcwd()
>>> os.chdir(tmpdir)
>>> from pkg_resources import resource_filename
>>> from shutil import copytree
>>> from bids.layout import BIDSLayout
>>> test_data_path = resource_filename('niworkflows', 'data/tests/work')
>>> testdir = Path(tmpdir)
>>> data_dir = copytree(test_data_path, str(testdir / 'work'))
>>> out_figs = testdir / 'out' / 'fmriprep'
>>> bl = BIDSLayout(str(testdir / 'work' / 'reportlets'),
... config='figures', validate=False)
.. doctest::
>>> bl.get(subject='01', desc='reconall') # doctest: +ELLIPSIS
[<BIDSFile filename='.../fmriprep/sub-01/figures/sub-01_desc-reconall_T1w.svg'>]
>>> len(bl.get(subject='01', space='.*', regex_search=True))
2
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title', 'bids': {'datatype': 'figures', 'desc': 'reconall'},
... 'description': 'Some description'})
>>> r.name
'datatype-figures_desc-reconall'
>>> r.components[0][0].startswith('<img')
True
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title', 'bids': {'datatype': 'figures', 'desc': 'reconall'},
... 'description': 'Some description', 'static': False})
>>> r.name
'datatype-figures_desc-reconall'
>>> r.components[0][0].startswith('<object')
True
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title', 'bids': {'datatype': 'figures', 'desc': 'summary'},
... 'description': 'Some description'})
>>> r.components[0][0].startswith('<h3')
True
>>> r.components[0][1] is None
True
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title',
... 'bids': {'datatype': 'figures', 'space': '.*', 'regex_search': True},
... 'caption': 'Some description {space}'})
>>> sorted(r.components)[0][1]
'Some description MNI152NLin2009cAsym'
>>> sorted(r.components)[1][1]
'Some description MNI152NLin6Asym'
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title',
... 'bids': {'datatype': 'fmap', 'space': '.*', 'regex_search': True},
... 'caption': 'Some description {space}'})
>>> r.is_empty()
True
.. testcleanup::
>>> os.chdir(cwd)
"""
def __init__(self, layout, out_dir, config=None):
if not config:
raise RuntimeError("Reportlet must have a config object")
# PY35: Sorted config dict for consistent behavior
self.name = config.get(
"name", "_".join("%s-%s" % i for i in sorted(config["bids"].items()))
)
self.title = config.get("title")
self.subtitle = config.get("subtitle")
self.description = config.get("description")
# Query the BIDS layout of reportlets
files = layout.get(**config["bids"])
self.components = []
for bidsfile in files:
src = Path(bidsfile.path)
ext = "".join(src.suffixes)
desc_text = config.get("caption")
contents = None
if ext == ".html":
contents = src.read_text().strip()
elif ext == ".svg":
entities = dict(bidsfile.entities)
if desc_text:
desc_text = desc_text.format(**entities)
try:
html_anchor = src.relative_to(out_dir)
except ValueError:
html_anchor = src.relative_to(Path(layout.root).parent)
dst = out_dir / html_anchor
dst.parent.mkdir(parents=True, exist_ok=True)
copyfile(src, dst, copy=True, use_hardlink=True)
contents = SVG_SNIPPET[config.get("static", True)].format(html_anchor)
# Our current implementations of dynamic reportlets do this themselves,
# however I'll leave the code here since this is potentially something we
# will want to transfer from every figure generator to this location.
                # The following code misses setting preserveAspectRatio="xMidYMid meet"
# if not is_static:
# # Remove height and width attributes from initial <svg> tag
# svglines = out_file.read_text().splitlines()
# expr = re.compile(r' (height|width)=["\'][0-9]+(\.[0-9]*)?[a-z]*["\']')
# for l, line in enumerate(svglines[:6]):
# if line.strip().startswith('<svg'):
# newline = expr.sub('', line)
# svglines[l] = newline
# out_file.write_text('\n'.join(svglines))
# break
if contents:
self.components.append((contents, desc_text))
def is_empty(self):
return len(self.components) == 0
class SubReport(Element):
"""SubReports are sections within a Report."""
def __init__(self, name, isnested=False, reportlets=None, title=""):
self.name = name
self.title = title
self.reportlets = reportlets or []
self.isnested = isnested
class Report:
"""
The full report object. This object maintains a BIDSLayout to index
all reportlets.
.. testsetup::
>>> cwd = os.getcwd()
>>> os.chdir(tmpdir)
>>> from pkg_resources import resource_filename
>>> from shutil import copytree
>>> from bids.layout import BIDSLayout
>>> test_data_path = resource_filename('niworkflows', 'data/tests/work')
>>> testdir = Path(tmpdir)
>>> data_dir = copytree(test_data_path, str(testdir / 'work'))
>>> out_figs = testdir / 'out' / 'fmriprep'
.. doctest::
>>> robj = Report(testdir / 'out', 'madeoutuuid', subject_id='01', packagename='fmriprep',
... reportlets_dir=testdir / 'work' / 'reportlets')
>>> robj.layout.get(subject='01', desc='reconall') # doctest: +ELLIPSIS
[<BIDSFile filename='.../figures/sub-01_desc-reconall_T1w.svg'>]
>>> robj.generate_report()
0
>>> len((testdir / 'out' / 'fmriprep' / 'sub-01.html').read_text())
36693
.. testcleanup::
>>> os.chdir(cwd)
"""
def __init__(
self,
out_dir,
run_uuid,
config=None,
out_filename="report.html",
packagename=None,
reportlets_dir=None,
subject_id=None,
):
self.root = Path(reportlets_dir or out_dir)
# Initialize structuring elements
self.sections = []
self.errors = []
self.out_dir = Path(out_dir)
self.out_filename = out_filename
self.run_uuid = run_uuid
self.packagename = packagename
self.subject_id = subject_id
if subject_id is not None:
self.subject_id = (
subject_id[4:] if subject_id.startswith("sub-") else subject_id
)
self.out_filename = f"sub-{self.subject_id}.html"
# Default template from niworkflows
self.template_path = Path(pkgrf("niworkflows", "reports/report.tpl"))
self._load_config(Path(config or pkgrf("niworkflows", "reports/default.yml")))
assert self.template_path.exists()
def _load_config(self, config):
from yaml import safe_load as load
settings = load(config.read_text())
self.packagename = self.packagename or settings.get("package", None)
if self.packagename is not None:
self.root = self.root / self.packagename
self.out_dir = self.out_dir / self.packagename
if self.subject_id is not None:
self.root = self.root / "sub-{}".format(self.subject_id)
if "template_path" in settings:
self.template_path = config.parent / settings["template_path"]
self.index(settings["sections"])
def init_layout(self):
self.layout = BIDSLayout(self.root, config="figures", validate=False)
def index(self, config):
"""
Traverse the reports config definition and instantiate reportlets.
This method also places figures in their final location.
"""
# Initialize a BIDS layout
self.init_layout()
for subrep_cfg in config:
# First determine whether we need to split by some ordering
# (ie. sessions / tasks / runs), which are separated by commas.
orderings = [
s for s in subrep_cfg.get("ordering", "").strip().split(",") if s
]
entities, list_combos = self._process_orderings(orderings, self.layout)
if not list_combos: # E.g. this is an anatomical reportlet
reportlets = [
Reportlet(self.layout, self.out_dir, config=cfg)
for cfg in subrep_cfg["reportlets"]
]
else:
# Do not use dictionary for queries, as we need to preserve ordering
# of ordering columns.
reportlets = []
for c in list_combos:
# do not display entities with the value None.
c_filt = list(filter(None, c))
ent_filt = list(compress(entities, c))
# Set a common title for this particular combination c
title = "Reports for: %s." % ", ".join(
[
'%s <span class="bids-entity">%s</span>'
% (ent_filt[i], c_filt[i])
for i in range(len(c_filt))
]
)
for cfg in subrep_cfg["reportlets"]:
cfg["bids"].update({entities[i]: c[i] for i in range(len(c))})
rlet = Reportlet(self.layout, self.out_dir, config=cfg)
if not rlet.is_empty():
rlet.title = title
title = None
reportlets.append(rlet)
# Filter out empty reportlets
reportlets = [r for r in reportlets if not r.is_empty()]
if reportlets:
sub_report = SubReport(
subrep_cfg["name"],
isnested=bool(list_combos),
reportlets=reportlets,
title=subrep_cfg.get("title"),
)
self.sections.append(sub_report)
# Populate errors section
error_dir = (
self.out_dir / "sub-{}".format(self.subject_id) / "log" / self.run_uuid
)
if error_dir.is_dir():
from ..utils.misc import read_crashfile
self.errors = [read_crashfile(str(f)) for f in error_dir.glob("crash*.*")]
def generate_report(self):
"""Once the Report has been indexed, the final HTML can be generated"""
logs_path = self.out_dir / "logs"
boilerplate = []
boiler_idx = 0
if (logs_path / "CITATION.html").exists():
text = (
re.compile("<body>(.*?)</body>", re.DOTALL | re.IGNORECASE)
.findall((logs_path / "CITATION.html").read_text())[0]
.strip()
)
boilerplate.append(
(boiler_idx, "HTML", f'<div class="boiler-html">{text}</div>')
)
boiler_idx += 1
if (logs_path / "CITATION.md").exists():
text = (logs_path / "CITATION.md").read_text()
boilerplate.append((boiler_idx, "Markdown", f"<pre>{text}</pre>\n"))
boiler_idx += 1
if (logs_path / "CITATION.tex").exists():
text = (
re.compile(
r"\\begin{document}(.*?)\\end{document}", re.DOTALL | re.IGNORECASE
)
.findall((logs_path / "CITATION.tex").read_text())[0]
.strip()
)
boilerplate.append(
(
boiler_idx,
"LaTeX",
f"""<pre>{text}</pre>
<h3>Bibliography</h3>
<pre>{Path(pkgrf(self.packagename, 'data/boilerplate.bib')).read_text()}</pre>
""",
)
)
boiler_idx += 1
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath=str(self.template_path.parent)),
trim_blocks=True,
lstrip_blocks=True,
autoescape=False,
)
report_tpl = env.get_template(self.template_path.name)
report_render = report_tpl.render(
sections=self.sections, errors=self.errors, boilerplate=boilerplate
)
# Write out report
self.out_dir.mkdir(parents=True, exist_ok=True)
(self.out_dir / self.out_filename).write_text(report_render, encoding="UTF-8")
return len(self.errors)
@staticmethod
def _process_orderings(orderings, layout):
"""
Generate relevant combinations of orderings with observed values.
Arguments
---------
orderings : :obj:`list` of :obj:`list` of :obj:`str`
Sections prescribing an ordering to select across sessions, acquisitions, runs, etc.
layout : :obj:`bids.layout.BIDSLayout`
The BIDS layout
Returns
-------
entities: :obj:`list` of :obj:`str`
The relevant orderings that had unique values
value_combos: :obj:`list` of :obj:`tuple`
Unique value combinations for the entities
"""
# get a set of all unique entity combinations
all_value_combos = {
tuple(bids_file.get_entities().get(k, None) for k in orderings)
for bids_file in layout.get()
}
# remove the all None member if it exists
none_member = tuple([None for k in orderings])
if none_member in all_value_combos:
all_value_combos.remove(tuple([None for k in orderings]))
# see what values exist for each entity
unique_values = [
{value[idx] for value in all_value_combos} for idx in range(len(orderings))
]
# if all values are None for an entity, we do not want to keep that entity
keep_idx = [
False if (len(val_set) == 1 and None in val_set) or not val_set else True
for val_set in unique_values
]
# the "kept" entities
entities = list(compress(orderings, keep_idx))
# the "kept" value combinations
value_combos = [
tuple(compress(value_combo, keep_idx)) for value_combo in all_value_combos
]
# sort the value combinations alphabetically from the first entity to the last entity
value_combos.sort(
key=lambda entry: tuple(
str(value) if value is not None else "0" for value in entry
)
)
return entities, value_combos
def run_reports(
out_dir,
subject_label,
run_uuid,
config=None,
reportlets_dir=None,
packagename=None,
):
"""
Run the reports.
.. testsetup::
>>> cwd = os.getcwd()
>>> os.chdir(tmpdir)
>>> from pkg_resources import resource_filename
>>> from shutil import copytree
>>> test_data_path = resource_filename('niworkflows', 'data/tests/work')
>>> testdir = Path(tmpdir)
>>> data_dir = copytree(test_data_path, str(testdir / 'work'))
>>> (testdir / 'fmriprep').mkdir(parents=True, exist_ok=True)
.. doctest::
>>> run_reports(testdir / 'out', '01', 'madeoutuuid', packagename='fmriprep',
... reportlets_dir=testdir / 'work' / 'reportlets')
0
.. testcleanup::
>>> os.chdir(cwd)
"""
return Report(
out_dir,
run_uuid,
config=config,
subject_id=subject_label,
packagename=packagename,
reportlets_dir=reportlets_dir,
).generate_report()
def generate_reports(
subject_list, output_dir, run_uuid, config=None, work_dir=None, packagename=None
):
"""Execute run_reports on a list of subjects."""
reportlets_dir = None
if work_dir is not None:
reportlets_dir = Path(work_dir) / "reportlets"
report_errors = [
run_reports(
output_dir,
subject_label,
run_uuid,
config=config,
packagename=packagename,
reportlets_dir=reportlets_dir,
)
for subject_label in subject_list
]
errno = sum(report_errors)
if errno:
import logging
logger = logging.getLogger("cli")
error_list = ", ".join(
"%s (%d)" % (subid, err)
for subid, err in zip(subject_list, report_errors)
if err
)
logger.error(
"Preprocessing did not finish successfully. Errors occurred while processing "
"data from participants: %s. Check the HTML reports for details.",
error_list,
)
return errno
|
the-stack_0_3564 | """Test functions for the sparse.linalg._expm_multiply module
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_, assert_equal
from scipy._lib._numpy_compat import suppress_warnings
from scipy.sparse import SparseEfficiencyWarning
import scipy.linalg
from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max,
_onenormest_matrix_power, expm_multiply, _expm_multiply_simple,
_expm_multiply_interval)
def less_than_or_close(a, b):
return np.allclose(a, b) or (a < b)
class TestExpmActionSimple(object):
"""
These tests do not consider the case of multiple time steps in one call.
"""
def test_theta_monotonicity(self):
pairs = sorted(_theta.items())
for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]):
assert_(theta_a < theta_b)
def test_p_max_default(self):
m_max = 55
expected_p_max = 8
observed_p_max = _compute_p_max(m_max)
assert_equal(observed_p_max, expected_p_max)
def test_p_max_range(self):
for m_max in range(1, 55+1):
p_max = _compute_p_max(m_max)
assert_(p_max*(p_max - 1) <= m_max + 1)
p_too_big = p_max + 1
assert_(p_too_big*(p_too_big - 1) > m_max + 1)
def test_onenormest_matrix_power(self):
np.random.seed(1234)
n = 40
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
for p in range(4):
if not p:
M = np.identity(n)
else:
M = np.dot(M, A)
estimated = _onenormest_matrix_power(A, p)
exact = np.linalg.norm(M, 1)
assert_(less_than_or_close(estimated, exact))
assert_(less_than_or_close(exact, 3*estimated))
def test_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
observed = expm_multiply(A, B)
expected = np.dot(scipy.linalg.expm(A), B)
assert_allclose(observed, expected)
def test_matrix_vector_multiply(self):
np.random.seed(1234)
n = 40
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
v = np.random.randn(n)
observed = expm_multiply(A, v)
expected = np.dot(scipy.linalg.expm(A), v)
assert_allclose(observed, expected)
def test_scaled_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
for t in (0.2, 1.0, 1.5):
with np.errstate(invalid='ignore'):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
observed = _expm_multiply_simple(A, B, t=t)
expected = np.dot(scipy.linalg.expm(t*A), B)
assert_allclose(observed, expected)
def test_scaled_expm_multiply_single_timepoint(self):
np.random.seed(1234)
t = 0.1
n = 5
k = 2
A = np.random.randn(n, n)
B = np.random.randn(n, k)
observed = _expm_multiply_simple(A, B, t=t)
expected = scipy.linalg.expm(t*A).dot(B)
assert_allclose(observed, expected)
def test_sparse_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
A = scipy.sparse.rand(n, n, density=0.05)
B = np.random.randn(n, k)
observed = expm_multiply(A, B)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"splu requires CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve is more efficient when sparse b is in the CSC matrix format")
expected = scipy.linalg.expm(A).dot(B)
assert_allclose(observed, expected)
def test_complex(self):
A = np.array([
[1j, 1j],
[0, 1j]], dtype=complex)
B = np.array([1j, 1j])
observed = expm_multiply(A, B)
expected = np.array([
1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)),
1j * np.exp(1j)], dtype=complex)
assert_allclose(observed, expected)
class TestExpmActionInterval(object):
def test_sparse_expm_multiply_interval(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
n = 40
k = 3
endpoint = True
for num in (14, 13, 2):
A = scipy.sparse.rand(n, n, density=0.05)
B = np.random.randn(n, k)
v = np.random.randn(n)
for target in (B, v):
X = expm_multiply(A, target,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"splu requires CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve is more efficient when sparse b is in the CSC matrix format")
for solution, t in zip(X, samples):
assert_allclose(solution,
scipy.linalg.expm(t*A).dot(target))
def test_expm_multiply_interval_vector(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
endpoint = True
for num in (14, 13, 2):
for n in (1, 2, 5, 20, 40):
A = scipy.linalg.inv(np.random.randn(n, n))
v = np.random.randn(n)
X = expm_multiply(A, v,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(v))
def test_expm_multiply_interval_matrix(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
endpoint = True
for num in (14, 13, 2):
for n in (1, 2, 5, 20, 40):
for k in (1, 2):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
X = expm_multiply(A, B,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(B))
def test_sparse_expm_multiply_interval_dtypes(self):
# Test A & B int
A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
B = np.ones(5, dtype=int)
Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
# Test A complex, B int
A = scipy.sparse.diags(-1j*np.arange(5),format='csr', dtype=complex)
B = np.ones(5, dtype=int)
Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)),format='csr')
assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
# Test A int, B complex
A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
B = 1j*np.ones(5, dtype=complex)
Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
def test_expm_multiply_interval_status_0(self):
self._help_test_specific_expm_interval_status(0)
def test_expm_multiply_interval_status_1(self):
self._help_test_specific_expm_interval_status(1)
def test_expm_multiply_interval_status_2(self):
self._help_test_specific_expm_interval_status(2)
def _help_test_specific_expm_interval_status(self, target_status):
np.random.seed(1234)
start = 0.1
stop = 3.2
num = 13
endpoint = True
n = 5
k = 2
nrepeats = 10
nsuccesses = 0
for num in [14, 13, 2] * nrepeats:
A = np.random.randn(n, n)
B = np.random.randn(n, k)
status = _expm_multiply_interval(A, B,
start=start, stop=stop, num=num, endpoint=endpoint,
status_only=True)
if status == target_status:
X, status = _expm_multiply_interval(A, B,
start=start, stop=stop, num=num, endpoint=endpoint,
status_only=False)
assert_equal(X.shape, (num, n, k))
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(B))
nsuccesses += 1
if not nsuccesses:
msg = 'failed to find a status-' + str(target_status) + ' interval'
raise Exception(msg)
|
the-stack_0_3567 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.document
from enum import IntEnum
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.document import RedlineDisplayType as RedlineDisplayType
if hasattr(RedlineDisplayType, '_constants') and isinstance(RedlineDisplayType._constants, dict):
RedlineDisplayType._constants['__ooo_ns__'] = 'com.sun.star.document'
RedlineDisplayType._constants['__ooo_full_ns__'] = 'com.sun.star.document.RedlineDisplayType'
RedlineDisplayType._constants['__ooo_type_name__'] = 'const'
def build_enum():
global RedlineDisplayTypeEnum
ls = [f for f in dir(RedlineDisplayType) if not callable(getattr(RedlineDisplayType, f)) and not f.startswith('__')]
_dict = {}
for name in ls:
_dict[name] = getattr(RedlineDisplayType, name)
RedlineDisplayTypeEnum = IntEnum('RedlineDisplayTypeEnum', _dict)
build_enum()
else:
from ...lo.document.redline_display_type import RedlineDisplayType as RedlineDisplayType
class RedlineDisplayTypeEnum(IntEnum):
"""
Enum of Const Class RedlineDisplayType
specifies which changes in a document are displayed.
"""
NONE = RedlineDisplayType.NONE
"""
no changes are displayed.
"""
INSERTED = RedlineDisplayType.INSERTED
"""
only inserted parts are displayed and attributed.
"""
INSERTED_AND_REMOVED = RedlineDisplayType.INSERTED_AND_REMOVED
"""
        both inserted and removed parts are displayed and attributed.
"""
REMOVED = RedlineDisplayType.REMOVED
"""
only removed parts are displayed and attributed.
"""
__all__ = ['RedlineDisplayType', 'RedlineDisplayTypeEnum']
|
the-stack_0_3569 | from datetime import datetime
from time import sleep
from redis import StrictRedis
from random import random
from rediscache_decorator import Cache
### Comment this section if you don't have redis instance ###
redis = StrictRedis(decode_responses=True)
cache = Cache(redis)
@cache.ttl(300)
def pseudo_calc():
sleep(1)
print("Computation in progress")
return str(datetime.now())
for i in range(10):
print(pseudo_calc())
@cache.ttl(123)
def another():
return "hello"
# Example: rediscache_decorator dict
@cache.dict(60)
def return_a_dict(*args, **kwargs):
sleep(1)
print("Computation in progress")
return {"now": str(datetime.now())}
for i in range(5):
print(return_a_dict())
# Example: rediscache_decorator float number
@cache.float(60)
def return_a_float(*args, **kwargs):
return random()
for i in range(5):
print(return_a_float())
|
the-stack_0_3570 | import os
# toolchains options
ARCH='sparc-v8'
CPU='bm3803'
CROSS_TOOL='gcc'
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\97981\Downloads\bcc-2.1.1-gcc\bin'
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'sparc-gaisler-elf-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=v8 -nostartfiles'
#DEVICE = ' '
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__'
LFLAGS = DEVICE + ' -Wl,-Map=rtthread-bm3803.map -T bm3803.lds -Ttext=0x40000000'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -Wall'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2 -Wall'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' +\
SIZE + ' $TARGET \n'
|
the-stack_0_3575 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, MetaData
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from .common import app, db
# The `framework.models.main_database` module...
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
name = referred_cls.__name__.lower()
local_table = local_cls.__table__
if name in local_table.columns:
newname = name + "_"
return newname
return name
def name_for_collection_relationship(base, local_cls, referred_cls, constraint):
name = referred_cls.__name__.lower() + '_collection'
for c in referred_cls.__table__.columns:
        if c.name == name:
name += "_"
return name
def classes(model_map):
classes_found = {}
with app.app_context():
engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI']) # app.config['MAIN_DATABASE_URI'])
metadata = MetaData(engine)
session = Session(engine)
metadata.reflect(bind=engine, only=model_map.keys()) #app.config['MAIN_DATABASE_MODEL_MAP'].keys())
Model = declarative_base(metadata=metadata, cls=(db.Model,), bind=engine)
Base = automap_base(metadata=metadata, declarative_base=Model)
Base.prepare(
name_for_scalar_relationship=name_for_scalar_relationship,
name_for_collection_relationship=name_for_collection_relationship
)
for cls in Base.classes:
cls.__table__.info = {'bind_key': 'main'}
if cls.__table__.name in model_map: #app.config['MAIN_DATABASE_MODEL_MAP']:
#globals()[app.config['MAIN_DATABASE_MODEL_MAP'][cls.__table__.name]] = cls
classes_found[model_map[cls.__table__.name]] = cls
return classes_found
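# Illustrative usage (table and class names are hypothetical): reflect two
# tables from the main database and pull out the generated mapped classes.
#
#   models = classes({"users": "User", "orders": "Order"})
#   User, Order = models["User"], models["Order"]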
|
the-stack_0_3577 | from collections import OrderedDict
from django.forms import BoundField, CheckboxInput, CheckboxSelectMultiple, FileInput, RadioSelect
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from django_jinja import library
from bootstrapform_jinja.config import BOOTSTRAP_COLUMN_COUNT
@library.test
def checkbox_field(field):
"""
Jinja test to check if a field is a checkbox
"""
return isinstance(field.field.widget, CheckboxInput)
@library.test
def multiple_checkbox_field(field):
"""
Jinja test to check if a field is a multiple value checkbox
"""
return isinstance(field.field.widget, CheckboxSelectMultiple)
@library.test
def radio_field(field):
"""
Jinja test to check if a field is a radio select
"""
return isinstance(field.field.widget, RadioSelect)
def add_input_classes(field):
"""
Add form-control to class attribute of the widget of the given field.
"""
if not isinstance(field.field.widget, (CheckboxInput, CheckboxSelectMultiple, RadioSelect, FileInput)):
attrs = field.field.widget.attrs
attrs['class'] = attrs.get('class', '') + ' form-control'
@library.filter
def bootstrap(element):
"""
Render field, form or formset with bootstrap styles
"""
return render(element)
@library.filter
def bootstrap_inline(element):
"""
Render field, form or formset with bootstrap styles in single line
"""
return render(element, {'label': 'sr-only'})
@library.filter
def bootstrap_horizontal(element, label_cols=None, max_columns=None):
"""
Render field, form or formset with bootstrap styles in horizontal layout
"""
if not label_cols:
label_cols = ('col-sm-2', 'col-lg-2')
if isinstance(label_cols, str):
label_cols = label_cols.split()
# ensure that label_cols includes only strings and doesn't have duplicates
label_cols = tuple(OrderedDict.fromkeys(map(str, label_cols)).keys())
if not max_columns:
max_columns = BOOTSTRAP_COLUMN_COUNT
cls_value = []
cls_single_value = []
for cl in label_cols:
base, sep, value_nb_cols = cl.rpartition('-')
prefix = base + sep
try:
value_nb_cols = int(value_nb_cols)
except ValueError:
value_nb_cols = max_columns
if value_nb_cols >= max_columns:
split_class = prefix + str(max_columns)
else:
offset_class = prefix + 'offset-' + str(value_nb_cols)
split_class = prefix + str(max_columns - value_nb_cols)
cls_single_value.extend((split_class, offset_class))
cls_value.append(split_class)
classes = {
'label': ' '.join(label_cols),
'value': ' '.join(cls_value),
'single_value': ' '.join(cls_single_value),
}
return render(element, classes)
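# Typical template usage of these filters (illustrative):
#   {{ form|bootstrap }}                         -> stacked form groups
#   {{ form|bootstrap_inline }}                  -> labels hidden with sr-only
#   {{ form|bootstrap_horizontal('col-sm-3') }}  -> label/value split into columns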
def render(element, markup_classes=None):
"""
Internal render function used by boostrap filters
"""
classes = {'label': '', 'value': '', 'single_value': ''}
if markup_classes:
classes.update(markup_classes)
if isinstance(element, BoundField):
# InputField
add_input_classes(element)
template = get_template('bootstrapform/field.jinja')
context = {'field': element, 'form': element.form, 'classes': classes}
elif getattr(element, 'management_form', None):
# FormSet
for form in element.forms:
for field in form.visible_fields():
add_input_classes(field)
template = get_template('bootstrapform/formset.jinja')
context = {'formset': element, 'classes': classes}
else:
# Form
for field in element.visible_fields():
add_input_classes(field)
template = get_template('bootstrapform/form.jinja')
context = {'form': element, 'classes': classes}
return mark_safe(template.render(context))
@library.filter
def bootstrap_classes(field):
"""
Filter that adds form-control to given input field
"""
add_input_classes(field)
return mark_safe(field)
|
the-stack_0_3578 | import json
from pathlib import Path
def save_json(filepath, content, append=False, topcomment=None):
"""
Saves content to a json file
:param filepath: path to a file (must include .json)
:param content: dictionary of stuff to save
"""
fp = Path(filepath)
    if fp.suffix != ".json":
        raise ValueError(f"Filepath {fp} is not valid; it should point to a .json file")
with open(filepath, "w") as json_file:
json.dump(content, json_file, indent=4)
def load_json(filepath):
"""
Load a json file
:param filepath: path to json file
"""
fp = Path(filepath)
if not fp.exists():
raise ValueError("Unrecognized file path: {}".format(filepath))
with open(filepath) as f:
data = json.load(f)
return data
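# Example round trip (the file name is illustrative):
#   save_json("params.json", {"lr": 0.01, "epochs": 10})
#   params = load_json("params.json")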
|
the-stack_0_3579 | # This code is derived from https://github.com/esa/pykep/pull/127
# originally developed by Moritz v. Looz @mlooz .
# It was modified following suggestions from Waldemar Martens @MartensWaldemar_gitlab
# Solar orbiter is quite a challenge for state of the art optimizers, but
# good solutions fulfilling the requirements can be found and an example is
# shown in check_good_solution()
#
# See https://www.esa.int/Science_Exploration/Space_Science/Solar_Orbiter
import math
from math import cos, pi, sin, sqrt
from fcmaes import retry, advretry
from fcmaes.optimizer import logger, de_cma, single_objective, de, Bite_cpp
import matplotlib.pyplot as plt
import pygmo as pg
from pykep import RAD2DEG, AU
from solo_mgar_udp import solo_mgar_udp
logger("solarorbiter.log")
def read_solutions(fname):
ys = []
xs = []
with open(fname) as csvfile:
lines = csvfile.readlines()
for line in lines:
row = line.split(' ')
if len(row) < 12:
continue
ys.append(float(row[0]))
x = []
i = -1
while(True):
xi = row[i]
while not xi[-1].isdigit():
xi = xi[:-1]
if not (xi[0].isdigit() or xi[0] == '-'):
xi = xi[1:]
x.insert(0, float(xi))
break
x.insert(0, float(xi))
i -= 1
xs.append(x)
return ys, xs
def verify(ys, xs):
for i in range(len(ys)):
solo_mgar = solo_mgar_udp([7000, 8000])
y0 = ys[i]
x = xs[i]
if len(x) != 10:
continue
lambert_legs = []
resonances = []
solo_mgar._compute_dvs(x, lambert_legs, resonances)
resos = [reso._resonance for reso in resonances]
# assert resos0 == resos
y = solo_mgar.fitness(x)[0]
print(y0, y, y0-y)
assert abs(y0 - y < 0.23)
def check_good_solution(x):
solo_mgar = solo_mgar_udp([7000, 8000])
prob = pg.problem(solo_mgar)
print (str(prob.fitness(x)))
solo_mgar.pretty(x)
solo_mgar.plot(x)
solo_mgar.plot_distance_and_flybys(x)
def print_good_solutions(xs):
from functools import reduce
for i in range(len(xs)):
solo_mgar = solo_mgar_udp([7000, 8000])
lambert_legs = []
resos = []
x = xs[i]
rvt_outs, rvt_ins, rvt_pls, _, _ = solo_mgar._compute_dvs(x, lambert_legs, resos)
#rvt_outs = [rvt.rotate(solo_mgar._rotation_axis, solo_mgar._theta) for rvt in rvt_outs]
rvt_out = rvt_outs[-1].rotate(solo_mgar._rotation_axis, solo_mgar._theta) # rotate
a, e, incl, _, _, _ = rvt_out.kepler()
# orbit should be as polar as possible, but we do not care about prograde/retrograde
corrected_inclination = abs(abs(incl) % pi - pi / 2) * RAD2DEG
final_perhelion = a * (1 - e) / AU
y = solo_mgar.fitness(x)
resos = [str(resos[i]._resonance) for i in range(len(resos))]
resos = reduce((lambda x, y: x + ',' + y), resos)
print (str(i) + ' ' + str(incl*RAD2DEG) + ' ' + str(final_perhelion) + ' [' + str(y[0]), ', [' + resos + '], ' + str(x) + '],')
def optimize():
solo_mgar = solo_mgar_udp([7000, 8000])
prob = pg.problem(solo_mgar)
fprob = single_objective(prob)
# logger().info('solar orbiter' + ' de -> cmaes c++ smart retry')
# ret = advretry.minimize(fprob.fun, bounds=fprob.bounds, num_retries = 60000,
# logger = logger(), optimizer=de_cma(1500))
logger().info('solar orbiter' + ' BiteOpt parallel retry')
ret = retry.minimize(fprob.fun, bounds=fprob.bounds, num_retries = 32000,
logger = logger(), optimizer=Bite_cpp(120000, M=6))
return ret
def archipelago():
udp = solo_mgar_udp([7000, 8000])
#uda = pg.sga(gen = 6000)
uda = pg.sade(memory=True,variant=1,gen=6000)
# instantiate an unconnected archipelago
for _ in range(1000):
archi = pg.archipelago(t = pg.topologies.unconnected())
for _ in range(32):
alg = pg.algorithm(uda)
#alg.set_verbosity(1)
prob = pg.problem(udp)
pop = pg.population(prob, 20)
isl = pg.island(algo=alg, pop=pop)
archi.push_back(isl)
archi.evolve()
archi.wait_check()
def optimize_pagmo():
solo_mgar = solo_mgar_udp([7000, 8000])
for i in range(6000):
prob = pg.problem(solo_mgar)
pop = pg.population(prob=prob, size=32)
alg = pg.algorithm(pg.sade(memory=True,gen=1))
pop = alg.evolve(pop)
print(i, pop.champion_f, solo_mgar.fitness(pop.champion_x))
if __name__ == '__main__':
#optimize()
#archipelago()
ys, xs = read_solutions('data/solo_results.txt')
#print_good_solutions(xs)
#verify(ys, xs)
check_good_solution(xs[0])
plt.show()
pass
|
the-stack_0_3582 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import copy
import functools
import itertools
import multiprocessing.pool
import os
import re
import sys
import time
import weakref
from absl.testing import parameterized
import numpy
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.lang import directives
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function as tf_function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.layers import convolutional
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_sendrecv_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.structured import structured_tensor
from tensorflow.python.platform import test
from tensorflow.python.saved_model.load import load
from tensorflow.python.saved_model.save import save
from tensorflow.python.training import training_ops
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
def total_function_cache(defined):
return defined._list_all_concrete_functions() # pylint: disable=protected-access
def _example_indexed_slices_with_dense_shape():
return indexed_slices.IndexedSlices(
constant_op.constant([1, 2]), constant_op.constant([0, 1]),
constant_op.constant([2]))
def _example_indexed_slices_without_dense_shape():
return indexed_slices.IndexedSlices(
constant_op.constant([1, 2]), constant_op.constant([0, 1]))
def _spec_for_value(value):
"""Returns the (nested) TypeSpec for a value."""
if nest.is_nested(value):
return nest.map_structure(_spec_for_value, value)
elif isinstance(value, (ops.Tensor, composite_tensor.CompositeTensor)):
return type_spec.type_spec_from_value(value)
else:
return value
# This dummy decorator imitates ordinary decorators utilizing tf_decorator.
def dummy_tf_decorator(method):
def wrapper(*args, **kwargs):
return method(*args, **kwargs)
return tf_decorator.make_decorator(method, wrapper)
# TODO(mdan): Organize these tests.
class FunctionTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(FunctionTest, self).setUp()
cpus = config.list_physical_devices('CPU')
# Set 4 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
def testBasic(self):
matmul = def_function.function(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
sq2 = matmul(sq, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
self.assertAllEqual(sq2.numpy().reshape(-1), [52, 76, 74, 108])
def testPythonFunctionNotCallable(self):
with self.assertRaisesRegex(TypeError, 'is not a callable object'):
def_function.function(1)
def testOnExitCallback(self):
values = []
def append_1():
values.append(1)
def append_2():
values.append(2)
def g(x):
old_values = list(values)
ops.add_exit_callback_to_default_func_graph(append_1)
self.assertEqual(old_values, values)
return x + 1
tf_g = def_function.function(g)
def f(x):
old_values = list(values)
ops.add_exit_callback_to_default_func_graph(append_2)
self.assertEqual(old_values, values)
return tf_g(x)
tf_f = def_function.function(f)
self.assertEmpty(values)
tf_f(constant_op.constant(1.0))
self.assertEqual(values, [1, 2]) # Once for g, once for f.
tf_f(constant_op.constant([1.0])) # force a retrace
self.assertEqual(values, [1, 2, 1, 2]) # And again.
def testCannotAddExitCallbackWhenNotInFunctionScope(self):
with self.assertRaisesRegex(RuntimeError, 'when not building a function.'):
ops.add_exit_callback_to_default_func_graph(lambda: None)
def testVariable(self):
v1 = variables.Variable(1.0)
add = def_function.function(lambda x, v: x + v1 + v)
v2 = variables.Variable(1.0)
x = constant_op.constant(1.0)
r = add(x, v2)
self.assertEqual(3.0, self.evaluate(r))
def testVariableOnly(self):
v = variables.Variable(1.0)
add = def_function.function(lambda x: x.assign_add(1.0))
r1 = add(v)
self.assertEqual(2.0, self.evaluate(r1))
c = constant_op.constant(1.0)
with self.assertRaisesRegex(AttributeError, 'no attribute'):
add(c)
@test_util.disable_tfrt('Packed tensor is not supported in tfrt yet.')
def testPackedVariable(self):
with ops.device('/cpu:0'):
v0_0 = resource_variable_ops.ResourceVariable(1.0)
with ops.device('/cpu:1'):
v0_1 = resource_variable_ops.ResourceVariable(2.0)
v1_0 = resource_variable_ops.ResourceVariable(3.0)
with ops.device('/cpu:2'):
v1_1 = resource_variable_ops.ResourceVariable(4.0)
packed_var_0 = ops.pack_eager_tensors([v0_0.handle, v0_1.handle])
packed_var_1 = ops.pack_eager_tensors([v1_0.handle, v1_1.handle])
# TODO(b/145922293): use ResourceVariable.assign_add and
# ResourceVariable.read_value directly once we support packing multiple
# ResourceVariable into one ResourceVariable.
@def_function.function
def read_var():
resource_variable_ops.assign_add_variable_op(
packed_var_0, constant_op.constant(5.0))
resource_variable_ops.assign_add_variable_op(
packed_var_1, constant_op.constant(6.0))
with ops.device('/cpu:0'):
read0 = resource_variable_ops.read_variable_op(
packed_var_0, dtype=dtypes.float32)
with ops.device('/cpu:1'):
read1 = resource_variable_ops.read_variable_op(
packed_var_0, dtype=dtypes.float32)
read2 = resource_variable_ops.read_variable_op(
packed_var_1, dtype=dtypes.float32)
with ops.device('/cpu:2'):
read3 = resource_variable_ops.read_variable_op(
packed_var_1, dtype=dtypes.float32)
return read0, read1, read2, read3
arg_attrs = read_var.get_concrete_function().function_def.arg_attr
self.assertLen(arg_attrs, 2)
self.assertEqual(arg_attrs[0].attr['_composite_device'].s,
compat.as_bytes(packed_var_0.device))
self.assertEqual(arg_attrs[1].attr['_composite_device'].s,
compat.as_bytes(packed_var_1.device))
self.assertAllEqual(read_var(), (1 + 5, 2 + 5, 3 + 6, 4 + 6))
def testImplementsAttributeBasic(self):
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
with context.graph_mode(), self.cached_session():
a = array_ops.placeholder(dtypes.float32, ())
b = array_ops.placeholder(dtypes.float32, ())
v(a, b)
gradients_impl.gradients(v(a, b), [a, b])
fdefs = ops.get_default_graph().as_graph_def().library.function
self.assertLen(fdefs, 3)
not_present = 0
present = 0
for f in fdefs:
name = f.signature.name
if 'forward' in name or 'backward' in name:
not_present += 1
self.assertNotIn(function.IMPLEMENTS_ATTRIBUTE_NAME, f.attr, f)
else:
present += 1
self.assertEqual(f.attr[function.IMPLEMENTS_ATTRIBUTE_NAME].s,
'func'.encode('ascii'), f)
self.assertEqual(not_present, 2, fdefs)
self.assertEqual(present, 1, fdefs)
def testImplementsAttributeAssertsOnSideInput(self):
with context.graph_mode(), self.cached_session():
z = array_ops.zeros(0)
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y + z)
a = array_ops.ones((1,))
b = array_ops.ones((1,))
with self.assertRaisesRegex(AssertionError,
'variables are always captured'):
v(a, b)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertEmpty(functions)
def testImplementsAttributeWorksWithGradientTape(self):
add = lambda x, y: x + y ** 2
add = def_function.function(experimental_implements='MyFunc')(add)
x = variables.Variable(3.0)
y = variables.Variable(2.0)
with backprop.GradientTape() as tape:
g = add(x, y)
dg_dy, dg_dx = tape.gradient(g, [y, x])
self.assertEqual(dg_dy.numpy(), 4.0)
self.assertEqual(dg_dx.numpy(), 1.0)
def testImplementsAttributeWorksOnVariables(self):
with context.graph_mode(), self.cached_session():
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable((1.0,))
b = variables.Variable((1.0,))
r1 = v(a, b)
_ = v(a, a)
functions = ops.get_default_graph().as_graph_def().library.function
# Verify that we created only one function
self.assertLen(functions, 1)
# Verify that eval() reads the current values.
a.initializer.run()
b.initializer.run()
self.assertEqual(r1.eval(), 2)
a.assign_add([1]).eval()
self.assertEqual(r1.eval(), 3)
def testImplementsAttributeWorksOnConstants(self):
with context.graph_mode(), self.cached_session():
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable(1.0)
r1 = v(a, 2.)
r2 = v(2., a)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertLen(functions, 1)
self.assertLen(functions[0].signature.input_arg, 2)
# Verify that eval() reads the current values.
a.initializer.run()
self.assertEqual(r1.eval(), 3)
self.assertEqual(r2.eval(), 3)
def testImplementsAttributeSpecializes(self):
with context.graph_mode(), self.cached_session():
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable(1.0)
r1 = v(a, [2.])
r2 = v([2., 2], a)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertLen(functions, 2)
# Ensure that all parameters are still there and haven't been inlined!
self.assertLen(functions[0].signature.input_arg, 2)
self.assertLen(functions[1].signature.input_arg, 2)
# Verify that eval() reads the current values.
a.initializer.run()
numpy.testing.assert_equal(r1.eval(), [3.])
numpy.testing.assert_equal(r2.eval(), [3., 3.])
def testImplementsWorksWithTensorSpec(self):
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
v = v.get_concrete_function(
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32))
x = v(1., 2.)
self.assertEqual(x.numpy(), 3.)
def testImplementsAttributeAsNameAttrList(self):
implements_attr = (
'name: "embedding_matmul" attr { key: "key1" value { i: 2 } '
'} attr { key: "key2" value { b: false } }')
v = def_function.function(
experimental_implements=implements_attr)(lambda x, y: x + y)
with context.graph_mode(), self.cached_session():
a = array_ops.placeholder(dtypes.float32, ())
b = array_ops.placeholder(dtypes.float32, ())
v(a, b)
gradients_impl.gradients(v(a, b), [a, b])
fdefs = ops.get_default_graph().as_graph_def().library.function
self.assertLen(fdefs, 3)
not_present = 0
present = 0
for f in fdefs:
name = f.signature.name
if 'forward' in name or 'backward' in name:
not_present += 1
self.assertNotIn(function.IMPLEMENTS_ATTRIBUTE_NAME, f.attr, f)
else:
present += 1
attr_value = f.attr[function.IMPLEMENTS_ATTRIBUTE_NAME]
self.assertIsNotNone(attr_value.func, f)
self.assertEqual(attr_value.func.name, 'embedding_matmul')
name_attrs = attr_value.func.attr
self.assertLen(name_attrs, 2)
self.assertEqual(not_present, 2, fdefs)
self.assertEqual(present, 1, fdefs)
def testExternalControlDependency(self):
with ops.Graph().as_default(), self.test_session():
v = variables.Variable(1.0)
v.initializer.run()
op = v.assign_add(1.0)
@function.defun
def f():
with ops.control_dependencies([op]):
return 1.0
self.evaluate(f())
self.assertAllEqual(self.evaluate(v), 2.0)
def testInputShapeFunctionRelaxation(self):
unknown_dim = [False]
@function.defun(experimental_relax_shapes=True)
def func(a):
if a._shape_tuple()[0] is None:
unknown_dim[0] = True
return a + 1
func(constant_op.constant([]))
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 1)
func(constant_op.constant([1.0]))
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
func(constant_op.constant([1.0, 2.0]))
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
def testInputShapeRelaxationOnInstanceMethod(self):
# Test that experimental_relax_shapes is passed during
# instance method bounding.
unknown_dim = [False]
class Foo(object):
@def_function.function(experimental_relax_shapes=True)
def func(self, a):
if a._shape_tuple()[0] is None:
unknown_dim[0] = True
return a + 1
foo = Foo()
foo.func(constant_op.constant([]))
self.assertFalse(unknown_dim[0])
foo.func(constant_op.constant([1.0]))
self.assertFalse(unknown_dim[0])
foo.func(constant_op.constant([1.0, 2.0]))
self.assertTrue(unknown_dim[0])
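  # The relaxation tests below record the TypeSpec seen by the traced function;
  # in check_trace, an expected_trace of None means the call reused an existing
  # (possibly relaxed) concrete function instead of retracing.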
def testInputShapeFunctionRelaxationWithRaggedTensors(self):
traced_type_spec = [None]
@def_function.function(experimental_relax_shapes=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
check_trace( # Initial call gets traced.
ragged_factory_ops.constant([[1], [2, 3, 4]]),
ragged_tensor.RaggedTensorSpec([2, None], dtypes.int32))
check_trace( # Input TypeSpec is the same -> no retrace.
ragged_factory_ops.constant([[1, 2], [3, 4]]), None)
check_trace( # Even if component tensor shapes change -> no retrace.
ragged_factory_ops.constant([[1, 2], [3, 4, 5, 6]]), None)
check_trace( # Different TypeSpec shape (nrows): retrace
ragged_factory_ops.constant([[1], [2], [3]]),
ragged_tensor.RaggedTensorSpec([3, None], dtypes.int32))
check_trace( # Different nrows again: relax & retrace
ragged_factory_ops.constant([[1], [2], [3], [4]]),
ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32))
    check_trace(  # Different nrows yet again: no retrace
ragged_factory_ops.constant([[1]]), None)
check_trace( # Different ragged_rank: retrace
ragged_factory_ops.constant([[[1]]]),
ragged_tensor.RaggedTensorSpec([1, None, None], dtypes.int32))
check_trace( # Different ragged_rank again: retrace & relax
ragged_factory_ops.constant([[[1]], [[2]]]),
ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32))
def testInputShapeFunctionRelaxationWithStructuredTensors(self):
traced_type_spec = [None]
@def_function.function(experimental_relax_shapes=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
# If we have TypeSpecs that differ in ways other than just their shape,
# then retrace each time.
check_trace(
structured_tensor.StructuredTensor.from_pyval({'a': [1]}),
structured_tensor.StructuredTensorSpec(
[], {'a': tensor_spec.TensorSpec((1,), dtypes.int32)}))
check_trace(
structured_tensor.StructuredTensor.from_pyval({'b': [1]}),
structured_tensor.StructuredTensorSpec(
[], {'b': tensor_spec.TensorSpec((1,), dtypes.int32)}))
check_trace(
structured_tensor.StructuredTensor.from_pyval({'c': [1]}),
structured_tensor.StructuredTensorSpec(
[], {'c': tensor_spec.TensorSpec((1,), dtypes.int32)}))
    # But if we call again with only the shape different, then relax:
check_trace( # retrace
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2]}),
structured_tensor.StructuredTensorSpec(
[], {'a': tensor_spec.TensorSpec((2,), dtypes.int32)}))
check_trace( # relax & retrace
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2, 3]}),
structured_tensor.StructuredTensorSpec(
[], {'a': tensor_spec.TensorSpec((None,), dtypes.int32)}))
check_trace( # use relaxed graph
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2, 3, 4]}),
None)
def testInputShapeFunctionRelaxationWithDatasetIterators(self):
# For dataset iterators, the TypeSpec includes type information that's
# not derivable from the component tensors. Make sure that the TypeSpec
# shapes get relaxed as appropriate.
traced_type_spec = [None]
@def_function.function(experimental_relax_shapes=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
ds_1_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([1, 2]))
ds_2_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([2, 2]))
ds_3_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([3, 2]))
ds_4_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([4, 2]))
ds_2_1 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([2, 1]))
check_trace( # shape=[1, 2]: retrace
dataset_ops.make_one_shot_iterator(ds_1_2),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([1, 2], dtypes.float32)))
check_trace( # shape=[1, 2]: no retrace (use the [1, 2] graph)
dataset_ops.make_one_shot_iterator(ds_1_2), None)
check_trace( # shape=[2, 2]: retrace
dataset_ops.make_one_shot_iterator(ds_2_2),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([2, 2], dtypes.float32)))
check_trace( # shape=[3, 2]: relax to [None, 2] and retrace
dataset_ops.make_one_shot_iterator(ds_3_2),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([None, 2], dtypes.float32)))
check_trace( # shape=[4, 2]: no retrace (use the [None, 2] graph)
dataset_ops.make_one_shot_iterator(ds_4_2), None)
check_trace( # shape=[2, 1]: relax to [None, None] and retrace
dataset_ops.make_one_shot_iterator(ds_2_1),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([None, None], dtypes.float32)))
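  # A concrete function should report every variable it captures, with
  # trainable_variables limited to the trainable subset, and both lists should
  # match the ones recorded on its graph.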
def testCapturesVariables(self):
a = variables.Variable(1.0, trainable=False)
b = variables.Variable(1.0)
cc = [None]
@def_function.function
def f():
c = cc[0]
if c is None:
c = cc[0] = variables.Variable(1.)
return a + b + c + 1
cf = f.get_concrete_function()
c = cc[0]
captured_variables = {v.ref() for v in (a, b, c)}
trainable_variables = {v.ref() for v in (b, c)}
self.assertEqual({v.ref() for v in cf.variables}, captured_variables)
self.assertEqual({v.ref() for v in cf.trainable_variables},
trainable_variables)
self.assertEqual(cf.variables, cf.graph.variables)
self.assertEqual(cf.trainable_variables, cf.graph.trainable_variables)
def testNestedInputShapeFunctionRelaxation(self):
unknown_dim = [False]
@function.defun(experimental_relax_shapes=True)
def func(a_, b_=None):
del a_ # Only used to check which cache is used.
self.assertEqual(b_[0]._shape_tuple(), ())
if b_[1]._shape_tuple()[0] is None:
unknown_dim[0] = True
return b_[0] + 1
a = 'hi'
b0 = constant_op.constant(1.0)
func(a, b_=[b0, constant_op.constant([])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 1)
func(a, b_=[b0, constant_op.constant([1.0])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
func(a, b_=[b0, constant_op.constant([1.0, 1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
unknown_dim[0] = False
# Now do the same except with a new a which is not a tensor; this should
# change the cache key.
a = 'bye'
func(a, b_=[b0, constant_op.constant([])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 3)
# Since we already marked a cache miss for a function with the same
# non-input signatures, here we will immediately start relaxing shapes.
func(a, b_=[b0, constant_op.constant([1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 3)
def testNestedShapeFunctionRelaxation(self):
got_shape = [None]
# The inner function will go through shape relaxation because the shapes it
# receives will be [1], [2], [3], ...
@def_function.function(experimental_relax_shapes=True)
def bar(x_shape):
got_shape[0] = x_shape._shape_tuple()
return x_shape
# The outer function will not go through shape relaxation because the shapes
# it receives will be [1], [[1]], [[[1]]], ...
@def_function.function(experimental_relax_shapes=True)
def foo(ones):
return bar(array_ops.shape(ones))
for rank in range(1, 6):
x_shape = self.evaluate(foo(array_ops.ones([1] * rank)))
self.assertAllEqual(x_shape, [1] * rank)
if rank < 3:
self.assertEqual(got_shape[0], (rank,))
else:
self.assertEqual(got_shape[0], (None,))
def testNoHash(self):
@def_function.function()
def f(_):
return 1.0
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r'could not be represented through the generic tracing'):
f(set([]))
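  # defun_with_attributes can override the generated function name via the
  # 'func_name' attribute; without it the Python function's own name is used.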
def testFuncName(self):
@function.defun_with_attributes(attributes={'func_name': 'multiply'})
def add(x, y):
_ = x * y
return x + y
@function.defun
def add_2(x, y):
_ = x * y
return x + y
self.assertEqual(add._name, 'multiply')
self.assertEqual(add_2._name, 'add_2')
def testBasicGraphMode(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = sq(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedInputsGraphMode(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = a_times_b(pair({'a': t}, {'b': t}))
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputsGraphMode(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function()
def pairs_mul(pair_a, pair_b):
return pair(matmul(pair_a.a, pair_b.a), matmul(pair_a.b, pair_b.b))
a = constant_op.constant([[1.0, 2.0], [1.0, 2.0]])
b = constant_op.constant([[3.0, 4.0], [3.0, 4.0]])
out = pairs_mul(pair(a, b), pair(b, a))
expected = pair(math_ops.matmul(a, b).numpy(),
math_ops.matmul(b, a).numpy())
self.assertAllClose(out, expected)
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
def testNestedFunctionGraphNotOutOfDate(self, function_decorator):
@function_decorator
def f():
return constant_op.constant(1.)
class _Model(object):
@function_decorator
def g(self):
self.f = f.get_concrete_function()
model = _Model()
model.g()
concrete = model.f
weak_g_graph = weakref.ref(model.g.get_concrete_function().graph)
self.assertIs(weak_g_graph(), concrete.graph.outer_graph)
weak_g = weakref.ref(model.g)
del model
self.assertIsNone(weak_g())
self.assertIsNone(weak_g_graph())
self.assertIsNotNone(concrete.graph.outer_graph)
self.assertIs(ops.get_default_graph(), concrete.graph.outer_graph)
def testGraphEagerIsolation(self):
@function.defun
def f():
self.v = variables.Variable(1.0)
return self.v.read_value()
self.assertAllEqual(f(), 1.0)
with ops.Graph().as_default():
self.assertEqual(f().shape, ())
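  # get_concrete_function traces once and returns a ConcreteFunction whose
  # output_shapes reflect the traced computation; it should also be safe to
  # call concurrently from multiple threads.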
def testBasicGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testGetConcreteFunctionThreadSafety(self):
@def_function.function
def sq():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
return math_ops.matmul(t, t)
concrete_functions = []
def thread_func(_):
cf = sq.get_concrete_function()
concrete_functions.append(cf)
num_threads = 100
pool = multiprocessing.pool.ThreadPool(num_threads)
_ = pool.map(thread_func, list(range(num_threads)))
self.assertLen(set(concrete_functions), 1)
def testGetConcreteFunctionThreadSafetyWithArgs(self):
@def_function.function
def add_100(*args):
return math_ops.add_n(args)
p = multiprocessing.pool.ThreadPool(2)
args = (constant_op.constant(1.),) * 100
f1, f2 = p.map(add_100.get_concrete_function, [args] * 2)
# I see about len(args) + max(0, len(args) - 3) arguments expected.
f1(*args)
del f2
def testInputSpecGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
sq_op = sq.get_concrete_function(
tensor_spec.TensorSpec((None, None), dtypes.float32))
self.assertEqual([None, None], sq_op.output_shapes.as_list())
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out1 = sq_op(t1)
self.assertAllEqual(out1, math_ops.matmul(t1, t1).numpy())
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out2 = sq_op(t2)
self.assertAllEqual(out2, math_ops.matmul(t2, t2).numpy())
def testNestedInputSpecGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(mats):
((a, b),) = mats
return matmul(a, b)
sq_op_autonamed = sq.get_concrete_function(
[(tensor_spec.TensorSpec((None, None), dtypes.float32),
tensor_spec.TensorSpec((None, None), dtypes.float32))])
self.assertEqual([None, None], sq_op_autonamed.output_shapes.as_list())
sq_op = sq.get_concrete_function(
[(tensor_spec.TensorSpec((None, None), dtypes.float32,
name='first_mat'),
tensor_spec.TensorSpec((None, None), dtypes.float32,
name='second_mat'))])
self.assertEqual([None, None], sq_op.output_shapes.as_list())
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.4, 2.4], [3.4, 4.4]])
out = sq_op(first_mat=t1, second_mat=t2)
self.assertAllEqual(out, math_ops.matmul(t1, t2).numpy())
self.assertAllEqual(sq_op_autonamed(t1, t2),
math_ops.matmul(t1, t2).numpy())
def testExecutingStatelessDefunConcurrently(self):
@def_function.function
def stateless(x):
return math_ops.multiply(2.0, x)
pool = multiprocessing.pool.ThreadPool()
inputs = [constant_op.constant(1.0 * x) for x in range(100)]
outputs = [float(out) for out in pool.map(stateless, inputs)]
expected = [float(2.0 * x) for x in inputs]
self.assertSequenceEqual(outputs, expected)
def testExecutingManyStatelessDefunsConcurrently(self):
@def_function.function
def stateless(x):
del x
return math_ops.multiply(2.0, 2.0)
pool = multiprocessing.pool.ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
objects = [object() for _ in range(100)]
outputs = [float(out) for out in pool.map(stateless, objects)]
expected = [4.0] * 100
self.assertSequenceEqual(outputs, expected)
@test_util.disable_tfrt('b/169431085: This test is flaky on tfrt')
def testExecutingStatefulDefunConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def stateful(x):
v.assign(x)
pool = multiprocessing.pool.ThreadPool()
inputs = [constant_op.constant(0.0)] * 100
pool.map(stateful, inputs)
self.assertEqual(float(v.read_value()), 0.0)
def testExecutingManyStatefulDefunsConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def stateful(x):
del x
return v.assign(0.0)
pool = multiprocessing.pool.ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
pool.map(stateful, [object() for _ in range(100)])
self.assertEqual(float(v.read_value()), 0.0)
def testShareRendezvous(self):
    # Disable grappler from inlining the functions. Note we run the send & recv
    # in graph mode, since in eager mode the functions would be inlined
    # automatically.
context.context().set_optimizer_experimental_options(
{'disable_meta_optimizer': True})
cpu = '/device:CPU:0'
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
@def_function.function
def send():
x = constant_op.constant(1)
gen_sendrecv_ops.send(x, 'x', cpu, 0, cpu)
return x
send._shared_rendezvous = True # pylint: disable=protected-access
@def_function.function(input_signature=signature)
def send_body(n):
send()
return n - 1
@def_function.function
def recv():
return gen_sendrecv_ops.recv(dtypes.int32, 'x', cpu, 0, cpu)
recv._shared_rendezvous = True # pylint: disable=protected-access
@def_function.function(input_signature=signature)
def recv_body(n):
recv()
return n - 1
@def_function.function(input_signature=signature)
def cond(n):
return n > 0
# Instead of calling the send & recv functions directly we want to call them
# through a functional while to ensure the rendezvous is shared across the
# while boundary.
@def_function.function
def fn(n):
functional_ops.While([n], cond.get_concrete_function(),
send_body.get_concrete_function())
return functional_ops.While([n], cond.get_concrete_function(),
recv_body.get_concrete_function())
# Use a graph context since functions will not be automatically inlined
with context.graph_mode(), self.cached_session():
self.evaluate(fn(2))
def disabled_testRandomSeed(self):
@def_function.function
def f():
return random_ops.random_normal(())
random_seed.set_random_seed(1)
x = f()
self.assertNotEqual(x, f())
random_seed.set_random_seed(1)
self.assertAllEqual(f(), x)
def testNestedInputsGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = a_times_b.get_concrete_function(
pair(dict(a=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'a')),
dict(b=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'b'))))
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(a=t, b=t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return (matmul(a, a), {'b': constant_op.constant(1.0)})
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes,
(tensor_shape.TensorShape([2, 2]),
{'b': tensor_shape.TensorShape([])}))
self.assertEqual(sq_op.output_dtypes,
(dtypes.float32, {'b': dtypes.float32}))
(a, b) = sq_op(t)
self.assertAllEqual(a, math_ops.matmul(t, t).numpy())
self.assertAllEqual(b['b'].numpy(), 1.0)
def testGraphFunctionNoneOutput(self):
@def_function.function
def fn(unused_a, unused_b):
return None
x = constant_op.constant(1)
fn_op = fn.get_concrete_function(x, x)
self.assertEqual(fn_op.output_dtypes, None)
self.assertEqual(fn_op.output_shapes, None)
self.assertAllEqual(fn_op(x, x), None)
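  # NumPy inputs are converted to tensors before reaching the traced function,
  # and retracing is keyed on shape/dtype rather than on array values.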
def testDefunNumpyArraysConvertedToTensors(self):
def f(x):
self.assertIsInstance(x, ops.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
defined = function.defun(f)
defined(x)
self.assertLen(total_function_cache(defined), 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertLen(total_function_cache(defined), 1)
np_ones = numpy.ones([], numpy.float32)
np_zeros = numpy.zeros([], numpy.float32)
tf_ones = array_ops.ones([])
tf_zeros = array_ops.zeros([])
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1., defined(np_ones).numpy())
self.assertLen(total_function_cache(defined), 2)
self.assertEqual(0., defined(np_zeros).numpy())
self.assertEqual(1., defined(tf_ones).numpy())
self.assertEqual(0., defined(tf_zeros).numpy())
self.assertLen(total_function_cache(defined), 2)
# Test that mutable inputs are supported.
mutable = numpy.ones([], numpy.float32)
self.assertEqual(1., defined(mutable).numpy())
mutable.fill(0)
self.assertEqual(0., defined(mutable).numpy())
class MyNdarray(numpy.ndarray):
pass
# Test that the subclasses of ndarray are converted too.
self.assertEqual(1., defined(np_ones.view(MyNdarray)).numpy())
self.assertEqual(0., defined(np_zeros.view(MyNdarray)).numpy())
# We should not have triggered any re-tracing of the python function.
self.assertLen(total_function_cache(defined), 2)
def testNumpyDtypeInputSupported(self):
@function.defun
def f(x, dtype):
return constant_op.constant(dtype(x))
self.assertEqual(f(1, numpy.float32).numpy(), numpy.float32(1))
self.assertEqual(f(2, numpy.float32).numpy(), numpy.float32(2))
self.assertEqual(f(1, numpy.int32).numpy(), numpy.int32(1))
self.assertEqual(f(2, numpy.int32).numpy(), numpy.int32(2))
def testDefunNumpyArraysConvertedToTensorsInKwargs(self):
def f(**kwargs):
x = kwargs.pop('x')
self.assertIsInstance(x, ops.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
defined = function.defun(f)
defined(x=x)
self.assertLen(total_function_cache(defined), 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x=x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertLen(total_function_cache(defined), 1)
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1., defined(x=numpy.ones([])).numpy())
self.assertEqual(0., defined(x=numpy.zeros([])).numpy())
self.assertEqual(1., defined(x=array_ops.ones([])).numpy())
self.assertEqual(0., defined(x=array_ops.zeros([])).numpy())
def testDefunCapturedInt32(self):
x = constant_op.constant(1, dtype=dtypes.int32)
@def_function.function
def add_int32s():
return x + x
self.assertEqual(2, int(add_int32s()))
def testDefunReadVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def f():
return v.read_value()
self.assertEqual(1.0, float(f()))
def testDefunAssignAddVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
x = constant_op.constant(2.0)
@def_function.function
def test_assign_add():
v.assign_add(x)
return v.read_value()
self.assertEqual(3.0, float(test_assign_add()))
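  # The next three tests cover variable initialization inside tf.function:
  # initializing from a plain tensor cannot be lifted out of the function
  # graph, whereas a callable initializer or an init_scope-created constant
  # works.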
@test_util.run_in_graph_and_eager_modes
def testTensorInitializationInFunctionRaisesError(self):
@def_function.function
def tensor_init():
with self.assertRaisesRegex(ValueError, 'could not be lifted out'):
resource_variable_ops.ResourceVariable(constant_op.constant(2.0))
tensor_init()
@test_util.run_in_graph_and_eager_modes
def testCallableTensorInitializationInFunction(self):
@def_function.function
def tensor_init():
self.v = resource_variable_ops.ResourceVariable(
lambda: constant_op.constant(2.0))
return self.v.read_value()
value = tensor_init()
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(value), 2.0)
@test_util.also_run_as_tf_function
def testInitScopeTensorInitializationInFunction(self):
@def_function.function
def tensor_init():
with ops.init_scope():
const = constant_op.constant(2.0)
      # Note: this variable sidesteps tf.function's variable-creation
      # requirements because using ResourceVariable directly bypasses
      # variable_creator_scope (unlike Variable).
self.v = resource_variable_ops.ResourceVariable(const)
return self.v.read_value()
value = tensor_init()
self.assertAllEqual(value, 2.0)
@test_util.run_in_graph_and_eager_modes
def testGetConcreteFunctionCreatesVariables(self):
v_holder = []
@def_function.function
def tensor_init():
if not v_holder:
v_holder.append(variables.Variable(5.))
return v_holder[0].read_value()
concrete = tensor_init.get_concrete_function()
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(5., self.evaluate(concrete()))
self.assertAllEqual(5., self.evaluate(tensor_init()))
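  # With capture_by_value=True the variable's value is copied into the graph
  # at trace time, so later assignments do not change the function's result.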
def testFuncGraphCaptureByValue(self):
v = variables.Variable(1.0)
def trivial_function():
return v.read_value()
graph_function = function.Function(
trivial_function, 'test', capture_by_value=True)
self.assertAllEqual(graph_function(), 1.0)
v.assign(2.0)
self.assertAllEqual(graph_function(), 1.0)
def testFuncGraphCaptureByValueNested(self):
v = variables.Variable(1.0)
def trivial_function():
return control_flow_ops.cond(
array_ops.placeholder_with_default(True, ()),
v.read_value, v.read_value)
graph_function = function.Function(
trivial_function, 'test', capture_by_value=True)
self.assertAllEqual(graph_function(), 1.0)
v.assign(2.0)
self.assertAllEqual(graph_function(), 1.0)
def testDefunShapeInferenceWithCapturedResourceVariable(self):
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# We do not return v directly since the tensor conversion function of
# ResourceVariable returns the read value and not the resource itself.
return v._handle
compiled = def_function.function(f)
var_handle = compiled()
self.assertEqual(var_handle.dtype, dtypes.resource)
self.assertEqual(var_handle.shape, tensor_shape.TensorShape([]))
var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)
self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))
def testShapeInferenceForMoreSpecificInput(self):
def f(a):
return array_ops.reshape(a, [-1, 3])
signature = [tensor_spec.TensorSpec(None, dtypes.float32)]
compiled = def_function.function(f, input_signature=signature)
@def_function.function
def use_f():
inputs = array_ops.zeros([10, 10, 3])
self.assertAllEqual(f(inputs).shape, compiled(inputs).shape)
use_f()
def testFuncListAttr(self):
@function.defun
def test_function(val):
def fn1():
return array_ops.ones([10])
fn2 = lambda: array_ops.ones([10]) * 2
def fn3(x=3):
return array_ops.ones([10]) * x
fn4 = functools.partial(fn3, x=4)
fn5 = functools.partial(fn3, 5)
return gen_functional_ops.case(val, [], [dtypes.float32],
[function.defun(f).get_concrete_function()
for f in (fn1, fn2, fn3, fn4, fn5)])
ones = array_ops.ones([10])
self.assertAllEqual([ones], test_function(0))
self.assertAllEqual([ones * 2], test_function(1))
self.assertAllEqual([ones * 3], test_function(2))
self.assertAllEqual([ones * 4], test_function(3))
self.assertAllEqual([ones * 5], test_function(4))
self.assertAllEqual([ones * 5], test_function(22)) # default branch
@test_util.enable_control_flow_v2
def testVariableInLoopInFunction(self):
@function.defun
def test_function():
def loop_test(_):
return False
def loop_body(_):
return variable_scope.get_variable('a', shape=())
return control_flow_ops.while_loop(loop_test, loop_body, [0.0])
self.assertEqual(test_function().shape, [])
def testDefunShapeInferenceWithCapturedResourceVariableInGraphMode(self):
with context.graph_mode():
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# We do not return v directly since the tensor conversion function of
# ResourceVariable returns the read value and not the resource itself.
return v._handle
compiled = def_function.function(f)
var_handle = compiled()
self.assertEqual(var_handle.dtype, dtypes.resource)
self.assertEqual(var_handle.shape, tensor_shape.TensorShape([]))
var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)
self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))
def testDefunShapeInferenceWithCapturedVariableInGraphMode(self):
with context.graph_mode():
v = variables.Variable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# Check that shape inference works while creating the defun
compiled = def_function.function(f)
compiled()
def testDefunShapeInferenceWithCapturedTensorListInGraphMode(self):
with context.graph_mode():
tensor_list = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=ops.convert_to_tensor([], dtype=dtypes.int32))
tensor_list = list_ops.tensor_list_push_back(tensor_list,
constant_op.constant(1.0))
tensor_list = list_ops.tensor_list_push_back(tensor_list,
constant_op.constant(2.0))
def f():
tl, value = list_ops.tensor_list_pop_back(
tensor_list, element_dtype=dtypes.float32)
self.assertEqual(value.shape, tensor_shape.TensorShape([]))
return tl
compiled = def_function.function(f)
output_tensor_list = compiled()
_, value = list_ops.tensor_list_pop_back(
output_tensor_list, element_dtype=dtypes.float32)
self.assertEqual(value.shape, tensor_shape.TensorShape([]))
@test_util.run_in_graph_and_eager_modes
def testDefunForcesResourceVariables(self):
def variable_creator():
self.v = variables.Variable(0.0)
return self.v.read_value()
self.v = None
defined = function.defun(variable_creator)
defined() # Create the variable.
self.assertIsInstance(
self.v, resource_variable_ops.ResourceVariable)
def testRunMetadata(self):
@def_function.function
def f(x):
return x * x
with ops.device('cpu:0'):
context.enable_run_metadata()
f(constant_op.constant(1.0))
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
self.assertLen(run_metadata.partition_graphs, 1)
def testGraphModeCaptureVariable(self):
with context.graph_mode(), self.cached_session():
class HasAVar(object):
def __init__(self):
self.v = resource_variable_ops.ResourceVariable(1.0)
def call(self):
return self.v * 2
o = HasAVar()
self.evaluate(variables.global_variables_initializer())
call = def_function.function(o.call)
op = call()
self.assertAllEqual(self.evaluate(op), 2.0)
def testGraphModeManyFunctions(self):
with ops.Graph().as_default(), self.cached_session():
@def_function.function
def f(x):
return x * x
@def_function.function
def g(x):
return f(x) + 1
self.assertAllEqual(g(constant_op.constant(2.0)), 5.0)
def testDict(self):
@def_function.function
def f(x):
return {'name': x + 1}
self.assertAllEqual(f(constant_op.constant(1.0))['name'], 2.0)
def testWeakrefInputsRejected(self):
@def_function.function
def f(x):
return x
class Dummy:
pass
o = Dummy()
wr = weakref.ref(o)
with self.assertRaisesRegex(ValueError, 'weakref'):
f(wr)
def testTensorConversionWithDefun(self):
@def_function.function
def f(x):
return math_ops.add(x, constant_op.constant(3))
self.assertAllEqual(5, f(constant_op.constant(2)))
def testTensorConversionCall(self):
@def_function.function
def f(x):
return math_ops.add(x, constant_op.constant(3))
@def_function.function
def g(x):
return f(f(x))
self.assertAllEqual(8, g(constant_op.constant(2)))
def testCallShape(self):
@def_function.function
def f(x):
return x + 1
@def_function.function
def g(x):
x = f(x)
self.assertEqual(x.shape.as_list(), [])
return None
g(constant_op.constant(1.0))
def testNestedDefunWithNoOutputAndTapedInput(self):
three = resource_variable_ops.ResourceVariable(3.0, name='v')
@def_function.function
def f(x):
# This function intentionally takes a taped variable as input,
# but does not return any values
math_ops.add(x, three)
@def_function.function
def g(x):
y = math_ops.add(x, three)
f(y)
g(three)
def testGatherResourceWithDefun(self):
with ops.device('cpu:0'):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
defined = def_function.function(sum_gather)
self.assertAllEqual(sum_gather(), defined())
@parameterized.named_parameters([
('IndexedSlicesWithDenseShape',
_example_indexed_slices_with_dense_shape,),
('IndexedSlicesWithoutDenseShape',
_example_indexed_slices_without_dense_shape,),
('RaggedTensorRaggedRank1', ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}),
('RaggedTensorRaggedRank2',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}),
('SparseTensor', sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}),
]) # pyformat: disable
def testReturnCompositeTensorWithDefun(self,
factory_fn,
factory_kwargs={},
input_signature=None):
input_ct = factory_fn(**factory_kwargs)
@def_function.function(input_signature=input_signature)
def f():
return input_ct
output_ct = f()
self.assertIsInstance(output_ct, type(input_ct))
nest.assert_same_structure(input_ct, output_ct, expand_composites=True)
input_flat = nest.flatten(input_ct, expand_composites=True)
output_flat = nest.flatten(output_ct, expand_composites=True)
for (input_component, output_component) in zip(input_flat, output_flat):
self.assertAllEqual(input_component, output_component)
@parameterized.named_parameters([
('IndexedSlicesWithDenseShape',
_example_indexed_slices_with_dense_shape,),
('IndexedSlicesWithoutDenseShape',
_example_indexed_slices_without_dense_shape,),
('RaggedTensorRaggedRank1',
ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}),
('RaggedTensorRaggedRank2',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}),
('SparseTensor',
sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}),
('RaggedTensorRaggedRank1WithSignature',
ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]},
[ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)]),
('RaggedTensorRaggedRank2WithSignature',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]},
[ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32)]),
('SparseTensorWithSignature',
sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]},
[sparse_tensor.SparseTensorSpec([None], dtypes.int32)]),
]) # pyformat: disable
def testCompositeAsArgumentTensorWithDefun(self,
factory_fn,
factory_kwargs={},
input_signature=None):
input_ct = factory_fn(**factory_kwargs)
@def_function.function(input_signature=input_signature)
def f(x):
return x
output_ct = f(input_ct)
self.assertIsInstance(output_ct, type(input_ct))
nest.assert_same_structure(input_ct, output_ct, expand_composites=True)
input_flat = nest.flatten(input_ct, expand_composites=True)
output_flat = nest.flatten(output_ct, expand_composites=True)
for (input_component, output_component) in zip(input_flat, output_flat):
self.assertAllEqual(input_component, output_component)
def testTracedCompositeDiscardsShapeInfo(self):
# SparseTensorSpec intentionally excludes info about the number of elements
# that are in a sparse tensor (which is recorded as st.indices.shape[0] and
# st.values.shape[0]). Similarly, RaggedTensorSpec intentionally excludes
# info about the total number of values in a RaggedTensor (stored as
# rt.values.shape[0]). This test checks that the placeholders created by
# tf.function() properly mask this shape info.
@def_function.function
def f(rt, st):
self.assertEqual(st.indices.shape.as_list()[:1], [None])
self.assertEqual(st.values.shape.as_list(), [None])
return (rt, st)
rt = ragged_factory_ops.constant([[1, 2], [3]])
st = sparse_tensor.SparseTensor([[0]], [0], [10])
f(rt, st)
@test_util.run_gpu_only
def testFunctionOnDevice(self):
x = constant_op.constant([1.]).gpu()
f = def_function.function(math_ops.add)
y = f(x, x).cpu()
self.assertAllEqual(y, [2.])
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testFunctionWithResourcesOnDifferentDevices(self):
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, gpu_result
defined = function.defun(sum_gather)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
expected = self.evaluate(sum_gather())
self.assertAllEqual(expected, self.evaluate(defined()))
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testOpInFunctionWithConflictingResourceInputs(self):
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='cpu')
v_also_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='also_cpu')
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='gpu')
@def_function.function
def resource_apply_adam():
training_ops.resource_apply_adam(
v_cpu.handle,
v_gpu.handle,
v_also_cpu.handle,
1.0, # beta1_power
1.0, # beta2_power
1.0, # learning_rate
1.0, # beta1
1.0, # beta2
1.0, # epsilon,
[1.0, 1.0, 1.0], # grad
False) # use_locking
return None
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Cannot place the graph because a reference or resource edge connects '
'colocation groups with incompatible assigned devices'):
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(resource_apply_adam())
@test_util.run_gpu_only
def testFunctionHandlesInputsOnDifferentDevices(self):
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = def_function.function(array_ops.reshape)
value = constant_op.constant([1., 2.]).gpu()
shape = constant_op.constant([2, 1])
reshaped = reshape(value, shape).cpu()
self.assertAllEqual(reshaped, [[1], [2]])
@test_util.run_gpu_only
def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self):
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = def_function.function(array_ops.reshape)
value = constant_op.constant([1., 2.])
shape = constant_op.constant([2, 1]).gpu()
reshape(value, shape) # No error is raised
def testNoneOutput(self):
@def_function.function
def my_function(_):
return None
self.assertAllEqual(my_function(1), None)
def testNestedFunctions(self):
# TensorFlow function (which is what would be used in TensorFlow graph
# construction).
@tf_function.Defun(dtypes.int32, dtypes.int32)
def add(a, b):
return math_ops.add(a, b)
@def_function.function
def add_one(x):
return add(x, 1)
self.assertAllEqual(3, add_one(constant_op.constant(2)))
def testVariableCaptureInNestedFunctions(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32)
@def_function.function
def inner_read():
return v.read_value()
@def_function.function
def outer():
return inner_read()
self.assertEqual(1, int(outer()))
def testReturnCapturedEagerTensor(self):
t = constant_op.constant(1)
@def_function.function
def read():
return t
self.assertEqual(1, int(read()))
def testReturnCapturedGraphTensor(self):
with context.graph_mode(), self.cached_session():
t = constant_op.constant(1)
@def_function.function
def read():
return t
self.assertEqual(1, int(self.evaluate(read())))
def testSequenceInputs(self):
clip_by_global_norm = def_function.function(clip_ops.clip_by_global_norm)
t_list = [constant_op.constant(1.0), constant_op.constant(2.0)]
clipped_list, global_norm = clip_by_global_norm(t_list,
constant_op.constant(.2))
for t in clipped_list:
self.assertIsInstance(t, ops.Tensor)
self.assertIsInstance(global_norm, ops.Tensor)
def testNestedSequenceInputs(self):
def my_op(inputs):
a, b, c = inputs
e, f = b
g, h = e
return [a + a, [tuple([f + f, g + g]), h + h], c + c], a + f + g + h + c
my_eager_op = def_function.function(my_op)
ret = my_eager_op([
constant_op.constant(1), [(constant_op.constant(2),
constant_op.constant(3)),
constant_op.constant(4)],
constant_op.constant(5)
])
self.assertLen(ret, 2)
self.assertAllEqual(ret[0][0], 2)
self.assertAllEqual(ret[0][1][0][0], 8)
self.assertAllEqual(ret[0][1][0][1], 4)
self.assertIsInstance(ret[0][1][0], tuple)
self.assertAllEqual(ret[0][1][1], 6)
self.assertAllEqual(ret[0][2], 10)
self.assertAllEqual(ret[1], 15)
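  # Name scopes entered inside a traced function should still prefix the names
  # of variables created there, in both eager and graph mode.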
def testVariableNamesRespectNameScopesWithDefun(self):
@def_function.function
def create_variable():
with ops.name_scope('foo', skip_on_eager=False):
v = resource_variable_ops.ResourceVariable(0.0, name='bar')
self.assertEqual(v.name, 'foo/bar:0')
create_variable()
def testVariableNamesRespectNameScopesWithDefunInGraph(self):
with context.graph_mode():
@def_function.function
def create_variable():
with ops.name_scope('foo', skip_on_eager=False):
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name='bar')
self.assertEqual(v.name, 'foo/bar:0')
with ops.get_default_graph().as_default():
create_variable()
@test_util.assert_no_new_pyobjects_executing_eagerly
def testCallOptionsMemory(self):
@function.defun
def model(x):
return x + constant_op.constant(1.)
# This happens with a lot of option toggles, e.g. soft device placement
context.context().function_call_options = None
model(constant_op.constant(2.))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testLayerInDefun(self):
conv = convolutional.Conv2D(
filters=1,
kernel_size=2,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.zeros_initializer())
@function.defun
def model(x):
return conv(x)
x = array_ops.ones([1, 2, 2, 1])
y = model(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[[[4.0]]]], self.evaluate(y))
# Variable lifting is somewhat different between defun/tf.function, so testing
# device placement on both makes sense.
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
@test_util.run_in_graph_and_eager_modes
def testVariablesPlacedOnOutsideDevice(self, function_decorator):
class _Obj(object):
def __init__(self):
self.v = None
@function_decorator
def f(self):
if self.v is None:
self.v = variables.Variable(1.)
return self.v + 1.
has_device = _Obj()
with ops.device('cpu:0'):
has_device.f()
self.assertIn('CPU', has_device.v.device)
@test_util.run_in_graph_and_eager_modes
def testMultipleDeviceCheck(self):
def f():
with ops.device('cpu'):
return test_ops.device_placement_op()
func = function.defun(f)
with ops.device('cpu:0'):
output = self.evaluate(func())
self.assertIn(compat.as_bytes('CPU:0'), output)
@test_util.run_in_graph_and_eager_modes
def testDeviceAnnotationsRespected(self):
def multi_device_fn():
with ops.device('/cpu:0'):
s0 = test_ops.device_placement_op()
with ops.device('/cpu:1'):
s1 = test_ops.device_placement_op()
with ops.device('/cpu:2'):
s2 = test_ops.device_placement_op()
s3 = test_ops.device_placement_op()
return s0, s1, s2, s3
defined = function.defun(multi_device_fn)
outputs = self.evaluate(defined())
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
with ops.device('/cpu:3'):
outputs = self.evaluate(defined())
# All function definitions are agnostic to call site devices.
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
self.assertIn(compat.as_bytes('CPU:3'), outputs[3])
with ops.device('/cpu:0'):
outputs = self.evaluate(defined())
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
self.assertIn(compat.as_bytes('CPU:0'), outputs[3])
@test_util.run_in_graph_and_eager_modes
def testCallingGraphFunctionOnDifferentDevice(self):
def func():
return constant_op.constant(0)
defined = def_function.function(func)
with ops.device('cpu:0'):
cpu_graph_function = defined.get_concrete_function()
with ops.device('cpu:0'):
self.assertEqual(
self.evaluate(cpu_graph_function()), self.evaluate(func()))
with ops.device('cpu:1'):
self.assertEqual(0., self.evaluate(cpu_graph_function()))
with ops.device(None):
self.assertEqual(0., self.evaluate(cpu_graph_function()))
default_graph_function = defined.get_concrete_function()
self.assertEqual(
self.evaluate(default_graph_function()), self.evaluate(func()))
with ops.device('cpu:1'):
self.assertEqual(0., self.evaluate(default_graph_function()))
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testColocateWithRespected(self):
# TODO(b/113291792): Use multiple CPUs instead of a GPU.
with ops.device('cpu:0'):
x = array_ops.identity(1.0)
with ops.device('gpu:0'):
y = array_ops.identity(1.0)
@def_function.function
def foo():
return test_ops.device_placement_op()
with ops.colocate_with(x):
self.assertIn(compat.as_bytes('CPU:0'), self.evaluate(foo()))
with ops.colocate_with(y):
self.assertIn(compat.as_bytes('GPU:0'), self.evaluate(foo()))
def testVariablesAreTracked(self):
v = resource_variable_ops.ResourceVariable(1.0)
def foo(x):
return v * x
defined = def_function.function(foo)
x = constant_op.constant([1.0])
self.assertEqual(1., self.evaluate(defined(x)))
v.assign(2.)
x = constant_op.constant([1.0, 2.0])
self.assertAllEqual([2., 4.], self.evaluate(defined(x)))
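  # The cache tests below count traced concrete functions via
  # total_function_cache: distinct objects, dtypes, or shapes must map to
  # distinct cache entries even when their hashes or other cache keys collide.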
def testCacheObjectHashCollisions(self):
class Foo(object):
def __hash__(self):
return 42
def func(foo):
return constant_op.constant([id(foo)])
defined = function.defun(func)
foo_1 = Foo()
defined(foo_1)
self.assertLen(total_function_cache(defined), 1)
foo_2 = Foo()
defined(foo_2)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorDtypeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([[1.0]], dtype=dtypes.complex128)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorShapeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([1.0], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorShapeDtypeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([1.0], dtype=dtypes.complex128)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorUnknownShapesCollisionRelaxedShapes(self):
def func(t):
return t + t
with context.graph_mode(), self.cached_session():
defined = function.defun(func, experimental_relax_shapes=True)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[])
defined(p)
self.assertLen(total_function_cache(defined), 1)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
defined(p)
self.assertLen(total_function_cache(defined), 2)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[2])
defined(p)
      # Gradual shape relaxation is performed; the common shape between
      # [1] and [2] is one containing unknown dimensions.
self.assertLen(total_function_cache(defined), 2)
# pylint: disable=protected-access
self.assertLen(defined._function_cache.arg_relaxed_specs, 1)
relaxed_specs = (
list(defined._function_cache.arg_relaxed_specs.values())[0])
self.assertLen(relaxed_specs, 1)
relaxed_shape = relaxed_specs[0].shape
# pylint: enable=protected-access
self.assertEqual(relaxed_shape.rank, 1)
self.assertEqual(tensor_shape.dimension_value(relaxed_shape[0]), None)
t = constant_op.constant([1.0, 1.0, 1.0], dtype=dtypes.float32)
defined(t)
# Shape (3,) matches the relaxed shape TensorShape([None])
self.assertLen(total_function_cache(defined), 2)
def testPythonFunctionWithDefaultArgs(self):
def func(foo, bar=1, baz=2):
del foo
del bar
del baz
return
defined = function.defun(func)
defined(0, baz=20)
self.assertLen(total_function_cache(defined), 1)
defined(1) # bar=1, baz=2
self.assertLen(total_function_cache(defined), 2)
# This matches the previous call.
defined(foo=1)
self.assertLen(total_function_cache(defined), 2)
defined(1, 2, 3)
self.assertLen(total_function_cache(defined), 3)
# This matches the previous call.
defined(1, bar=2, baz=3)
self.assertLen(total_function_cache(defined), 3)
# This matches the previous call.
defined(1, baz=3, bar=2)
self.assertLen(total_function_cache(defined), 3)
def testDatasetIteratorCaching(self):
def func(it1, it2):
next(it1)
next(it2)
return 0
defined = function.defun(func)
d = dataset_ops.DatasetV2.from_tensor_slices([1, 2, 3])
it1 = iter(d)
it2 = iter(d)
_ = defined(it1, it2) # The two iterators are different
self.assertLen(total_function_cache(defined), 1)
it3 = iter(d)
it4 = iter(d)
_ = defined(it3, it4) # The two iterators are different, should not retrace
self.assertLen(total_function_cache(defined), 1)
it5 = iter(d)
_ = defined(it5, it5) # The two iterators are the same, should retrace
self.assertLen(total_function_cache(defined), 2)
it6 = iter(d)
_ = defined(it6, it6) # The two iterators are the same, should not retrace
self.assertLen(total_function_cache(defined), 2)
def testFunctoolsPartialUnwrappedCorrectly(self):
def full_function(a, b, c=3):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
a, b, c = partial(2)
defined = function.defun(partial)
func_a, func_b, func_c = defined(2)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
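  # The remaining tests exercise input_signature: compatible inputs (including
  # unknown dimensions) should reuse a single trace, while mismatched shapes,
  # structures, or extra keyword arguments should raise.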
def testInputSignatureWithMatchingInputs(self):
def foo(a):
self.assertEqual(a.shape, (2,))
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([2])
self.assertAllEqual(a, defined(a))
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(a, defined.get_concrete_function()(a))
self.assertAllEqual(a, defined.get_concrete_function(a)(a))
self.assertAllEqual(a, defined.get_concrete_function(
tensor_spec.TensorSpec((2,), dtype=dtypes.float32))(a))
self.assertLen(total_function_cache(defined), 1)
def bar(a):
self.assertEqual(a._shape_tuple(), (2, None))
return a
signature = [tensor_spec.TensorSpec((2, None), dtypes.float32)]
defined = function.defun(bar, input_signature=signature)
a = array_ops.ones([2, 1])
out = defined(a)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out, a)
# Changing the second dimension shouldn't create a new function.
b = array_ops.ones([2, 3])
out = defined(b)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out, b)
def testInputSignatureWithDictInPositionalArgs(self):
@function.defun
def f(*_args, **_kwargs):
return None
f(1, x=2)
self.assertLen(total_function_cache(f), 1)
f(1, x=2)
self.assertLen(total_function_cache(f), 1)
f(1, {'x': 2})
self.assertLen(total_function_cache(f), 2)
def testInputSignatureWithCompatibleInputs(self):
rank2_spec = tensor_spec.TensorSpec(shape=(None, None),
dtype=dtypes.float32)
@function.defun(input_signature=[rank2_spec])
def func(a):
self.assertEqual([None, None], a.shape.as_list())
return array_ops.shape(a)
self.assertAllEqual([3, 1], func([[0], [1.0], [1]]))
self.assertAllEqual([2, 2], func(numpy.array([[1, 1], [2, 2]])))
with self.assertRaisesRegex(ValueError, 'incompatible'):
func([0.0, 1.0, 2.0]) # Wrong shape.
with self.assertRaisesRegex(ValueError, 'incompatible'):
func([['wrong dtype']])
def testNestedInputSignatures(self):
def expected_foo(a, b):
return [a, b]
@function.defun(input_signature=[
[tensor_spec.TensorSpec((2, None), dtypes.float32)] * 2,
tensor_spec.TensorSpec((1,), dtypes.float32),
])
def foo(a, b):
self.assertEqual(a[0]._shape_tuple(), (2, None))
self.assertEqual(a[1]._shape_tuple(), (2, None))
self.assertEqual(b._shape_tuple(), (1,))
return [a, b]
a = array_ops.ones([2, 1])
b = array_ops.ones([1])
expected = expected_foo([a, a], b)
out = foo([a, a], b)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], a)
self.assertAllEqual(out[1], b)
# Changing the unspecified dimensions shouldn't create a new function.
a = array_ops.ones([2, 3])
b = array_ops.ones([2, 5])
c = array_ops.ones([1])
expected = expected_foo([a, b], c)
out = foo([a, b], c)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
# Passing compatible inputs should work.
a = a.numpy().tolist()
b = b.numpy().tolist()
c = c.numpy().tolist()
out = foo([a, b], c)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
def testNestedInputSignaturesWithDict(self):
def expected_bar(a):
return a
@function.defun(input_signature=[{
'a': tensor_spec.TensorSpec((2, None), dtypes.float32),
'b': tensor_spec.TensorSpec((2, None), dtypes.float32),
'c': tensor_spec.TensorSpec((1,), dtypes.float32)}])
def bar(a):
self.assertEqual(a['a']._shape_tuple(), (2, None))
self.assertEqual(a['b']._shape_tuple(), (2, None))
self.assertEqual(a['c']._shape_tuple(), (1,))
return a
a = array_ops.ones([2, 3])
b = array_ops.ones([1])
inputs = {'a': a, 'b': a, 'c': b}
expected = expected_bar(inputs)
out = bar(inputs)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out['a'], expected['a'])
self.assertAllEqual(out['b'], expected['b'])
self.assertAllEqual(out['c'], expected['c'])
# Passing compatible inputs should work.
a = a.numpy().tolist()
b = b.numpy().tolist()
inputs = {'a': a, 'b': a, 'c': b}
out = bar(inputs)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out['a'], expected['a'])
self.assertAllEqual(out['b'], expected['b'])
self.assertAllEqual(out['c'], expected['c'])
def testInputSignatureMustBeSequenceOfTensorSpecs(self):
def foo(a, b):
del a
del b
# Signatures must consist exclusively of `TensorSpec` objects.
signature = [(2, 3), tensor_spec.TensorSpec([2, 3], dtypes.float32)]
with self.assertRaisesRegex(TypeError, 'input_signature.*nested sequence'):
def_function.function(foo, input_signature=signature)
# Signatures must be either lists or tuples on their outermost levels.
signature = {'t1': tensor_spec.TensorSpec([], dtypes.float32)}
with self.assertRaisesRegex(
TypeError, 'input_signature must be either a '
'tuple or a list.*'):
function.defun(foo, input_signature=signature)
@test_util.run_in_graph_and_eager_modes
def testInputsIncompatibleWithSignatureRaisesError(self):
def foo(a):
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = def_function.function(foo, input_signature=signature)
# Invalid shapes.
with self.assertRaisesRegex(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([3]))
with self.assertRaisesRegex(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([2, 1]))
# Wrong number of arguments.
with self.assertRaisesRegex(TypeError, 'specifies 1 .* got 2'):
defined(array_ops.ones([2]), array_ops.ones([2]))
with self.assertRaisesRegex(ValueError,
'Structure of Python function inputs.*'):
defined()
with self.assertRaisesRegex(ValueError,
'inputs incompatible with input_signature'):
defined.get_concrete_function(
tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.float32))
def testMismatchedConcreteSignatureRaisesError(self):
@def_function.function
def run_test():
@def_function.function
def f(x):
return x
with self.assertRaisesRegex(
TypeError, 'ConcreteFunction .* was constructed .* but was called'):
f.get_concrete_function(1)(constant_op.constant(1))
with self.assertRaisesRegex(TypeError, r'f\(x\) expected .* but got .*'):
f.get_concrete_function(constant_op.constant(1))(1)
with self.assertRaisesRegex(
TypeError, 'ConcreteFunction .* was constructed .* but was called'):
f.get_concrete_function(1)(2)
run_test()
def testInputsIncompatibleWithNestedSignatureRaisesError(self):
def foo(a, b):
return [a, b]
signature = [[tensor_spec.TensorSpec((1,), dtypes.float32)] * 2,
[tensor_spec.TensorSpec((1,), dtypes.float32)] * 2]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([1])
with self.assertRaisesRegex(ValueError,
'Structure of Python function inputs.*'):
defined([a, a, a], [a])
with self.assertRaisesRegex(ValueError,
'Structure of Python function inputs.*'):
defined([a], [a, a, a])
defined([a, a], [a, a])
def testUnderspecifiedInputSignature(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
])
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
x = constant_op.constant(1.0)
with self.assertRaisesRegex(
TypeError, 'got keyword argument `training` '
'that was not included in input_signature'):
foo(x, training=True)
with self.assertRaisesRegex(
TypeError, 'got keyword argument `training` '
'that was not included in input_signature'):
foo(x, training=False)
self.assertAllEqual(x.numpy(), foo(x).numpy())
def testInputSignatureWithPartialFunction(self):
def full_function(a, b, c=3.0):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
a, b, c = partial(2.0)
signature = [tensor_spec.TensorSpec([], dtypes.float32)]
defined = function.defun(partial, input_signature=signature)
x = constant_op.constant(2.0)
func_a, func_b, func_c = defined(x)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
def testInputSignatureConversionWithDefaultArg(self):
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
signature = [
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.bool),
]
defined = def_function.function(foo, input_signature=signature)
a = constant_op.constant(1.0)
self.assertAllEqual(a.numpy(), defined(a))
self.assertAllEqual(a.numpy(), defined(a, training=True))
self.assertAllEqual(-a.numpy(), defined(a, training=False))
def testInputSignatureWithKeywordPositionalArgs(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int64)
])
def foo(flt, integer):
return flt, integer
flt = constant_op.constant(1.0)
integer = constant_op.constant(2, dtypes.int64)
out1, out2 = foo(flt, integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt=flt, integer=integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(integer=integer, flt=flt)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt, integer=integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
def testInputSignatureWithKeywordArgs(self):
def foo(a, b, **kwargs):
del kwargs
return a, b
x = function.defun(
foo,
input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int32)
]).get_concrete_function()
result = x(constant_op.constant(5.0), constant_op.constant(5))
self.assertAllEqual(result, [5.0, 5])
def testInputSignatureWithCompositeTensors(self):
def f(rt):
self.assertEqual(rt.values.shape.as_list(), [None])
self.assertEqual(rt.row_splits.shape.as_list(), [4])
return rt
signature = [ragged_tensor.RaggedTensorSpec(
shape=[3, None], dtype=dtypes.int32)]
defined = function.defun(f, input_signature=signature)
rt1 = ragged_factory_ops.constant([[1], [], [2, 3, 4]])
out1 = defined(rt1)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out1.values, rt1.values)
self.assertAllEqual(out1.row_splits, rt1.row_splits)
# Changing the row lengths shouldn't create a new function.
rt2 = ragged_factory_ops.constant([[1, 2], [3, 4], [5]])
out2 = defined(rt2)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out2.values, rt2.values)
self.assertAllEqual(out2.row_splits, rt2.row_splits)
# Different number of rows
rt3 = ragged_factory_ops.constant([[1, 2], [3, 4], [5], [6]])
with self.assertRaisesRegex(ValueError, 'incompatible'):
defined(rt3)
# Different dtype
rt4 = ragged_factory_ops.constant([[1.0, 2.0], [], [3.0]])
with self.assertRaisesRegex(ValueError, 'Structure .* does not match'):
defined(rt4)
# Different rank
rt5 = ragged_factory_ops.constant([[[1]], [[2]], [[3]]])
with self.assertRaisesRegex(ValueError, 'does not match'):
defined(rt5)
def testInputSignatureWithVariableArgs(self):
def f(v):
v.assign_add(1)
signature = [
resource_variable_ops.VariableSpec(shape=[], dtype=dtypes.int32)
]
defined = function.defun(f, input_signature=signature)
v1 = variables.Variable(0)
v2 = variables.Variable(0)
defined(v1)
self.assertEqual(v1.numpy(), 1)
self.assertEqual(v2.numpy(), 0)
defined(v=v2)
self.assertEqual(v1.numpy(), 1)
self.assertEqual(v2.numpy(), 1)
def testInputSignatureWithKeywordOnlyArgs(self):
def f(a, b, c=3, *, d=4):
self.assertIsInstance(a, ops.Tensor)
self.assertIsInstance(b, ops.Tensor)
self.assertIsInstance(c, int)
self.assertIsInstance(d, (int, ops.Tensor))
return a + b + c + d
signature = [
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32),
]
defined = function.defun(f, input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 10)
defined = function.defun(
functools.partial(f, c=4), input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 11)
defined = function.defun(
functools.partial(f, d=5), input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 11)
defined = function.defun(
functools.partial(f, d=array_ops.constant(5)),
input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 11)
mod = module.Module()
save(mod, '/tmp/kwonlyf', defined.get_concrete_function(*signature))
loaded = load('/tmp/kwonlyf')
result = loaded.signatures['serving_default'](
a=array_ops.constant(1), b=array_ops.constant(2))
self.assertEqual(result['output_0'].numpy(), 11)
def testInputSignatureWithKeywordOnlyArgsNoDefaults(self):
signature = [
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32),
]
def test_func(a, *, b):
return a + b
with self.assertRaisesRegex(
ValueError, "keyword-only arguments must have default values.*'b'"):
function.defun(test_func, input_signature=signature)
test_func_lambda = lambda a, *, b: a + b
with self.assertRaisesRegex(
ValueError, "keyword-only arguments must have default values.*'b'"):
function.defun(test_func_lambda, input_signature=signature)
def testTensorKeywordArguments(self):
def foo(a, b):
del a
return b
defined = function.defun(foo)
a = constant_op.constant(2.0)
b = constant_op.constant([1.0, 2.0])
one = defined(a, b)
self.assertLen(total_function_cache(defined), 1)
two = defined(a=a, b=b)
self.assertLen(total_function_cache(defined), 1)
three = defined(b=b, a=a)
self.assertLen(total_function_cache(defined), 1)
four = defined(a, b=b)
self.assertLen(total_function_cache(defined), 1)
# The next call corresponds to a new input signature, hence
# we expect another function to be defined.
five = defined(b, a)
self.assertLen(total_function_cache(defined), 2)
six = defined(a=b, b=a)
self.assertLen(total_function_cache(defined), 2)
seven = defined(b=a, a=b)
self.assertLen(total_function_cache(defined), 2)
self.assertAllEqual(one, [1.0, 2.0])
self.assertAllEqual(two, [1.0, 2.0])
self.assertAllEqual(three, [1.0, 2.0])
self.assertAllEqual(four, [1.0, 2.0])
self.assertAllEqual(five, 2.0)
self.assertAllEqual(six, 2.0)
self.assertAllEqual(seven, 2.0)
def testDefuningInstanceMethod(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
def one(self, tensor):
return tensor
@def_function.function
def two(self, tensor, other=integer):
return self.one(tensor), other
foo = Foo()
t = constant_op.constant(1.0)
one, two = foo.two(t)
self.assertEqual(one.numpy(), 1.0)
self.assertEqual(two.numpy(), 2)
def testDefuningInstanceMethodWithDefaultArgument(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
@def_function.function
def func(self, other=integer):
return other
foo = Foo()
self.assertEqual(foo.func().numpy(), int(integer))
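# Python side effects only run while tracing: re-invoking the traced
# function does not append again, but calling .python_function() directly does.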
def testPythonCallWithSideEffects(self):
state = []
@def_function.function
def side_effecting_function():
state.append(0)
side_effecting_function()
self.assertAllEqual(state, [0])
# The second invocation should call the graph function, which shouldn't
# trigger the list append.
side_effecting_function()
self.assertAllEqual(state, [0])
# Whereas calling the python function directly should create a side-effect.
side_effecting_function.python_function()
self.assertAllEqual(state, [0, 0])
def testFunctionWithNestedFunctionCallAndSideEffects(self):
v1 = variables.Variable(1.0)
v2 = variables.Variable(1.0)
@def_function.function
def add_one(a):
a.assign_add(1.0)
# Grappler will inline calls to `add_one` into the function body; we check
# that all side effects were executed.
@def_function.function
def side_effecting_function(a, b):
add_one(a)
add_one(b)
return a + b
result = side_effecting_function(v1, v2)
self.assertEqual(result.numpy(), 4.0)
def testFunctionWithExtraAttributes(self):
@function.defun_with_attributes(attributes={'experimental_1': 'value1',
'experimental_2': 2})
def matmul(x, y):
return math_ops.matmul(x, y)
def add(x, y):
return math_ops.add(x, y)
defun_add = function.defun_with_attributes(
add, attributes={'experimental_3': True, 'experimental_4': 1.0})
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t)
double = defun_add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 2)
functions = list(graph._functions.values())
self.assertRegex(functions[0].definition.signature.name, '.*matmul.*')
attrs = functions[0].definition.attr
self.assertLen(attrs, 2)
self.assertEqual(attrs['experimental_1'].s, b'value1')
self.assertEqual(attrs['experimental_2'].i, 2)
self.assertRegex(functions[1].definition.signature.name, '.*add.*')
attrs = functions[1].definition.attr
self.assertLen(attrs, 2)
self.assertEqual(attrs['experimental_3'].b, True)
self.assertEqual(attrs['experimental_4'].f, 1.0)
# pylint: enable=protected-access
def testFunctionWithInvalidAttribute(self):
@function.defun_with_attributes(attributes={'experimental_1': ['value1']})
def add(x, y):
return math_ops.add(x, y)
with self.assertRaisesRegex(ValueError,
'Attribute experimental_1 must be .* Got .*'):
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
add(t, t)
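# function.register eagerly adds the inference, forward and backward
# FunctionDefs for a defun to the default graph, so later calls reuse them.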
def testRegisterFunction(self):
@function.defun
def add(x, y):
return math_ops.add(x, y)
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
function.register(add, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 6)
# two sets of functions, each of which is (inference, forward, backward)
functions = list(graph._functions.values())
captured_function_names = [
f.definition.signature.name for f in functions
]
expected_func_name_regex = [
'.*inference.*matmul.*',
'.*forward.*matmul.*',
'.*inference.*backward.*matmul.*',
'.*inference.*add.*',
'.*forward.*add.*',
'.*inference.*backward.*add.*',
]
for i in range(len(functions)):
self.assertRegex(captured_function_names[i],
expected_func_name_regex[i])
# Check the forward and backward function has the correct attributes.
self.assertEqual(
functions[1].definition.attr['backward_function_name'].s,
functions[2].name)
self.assertEqual(
functions[2].definition.attr['forward_function_name'].s,
functions[1].name)
self.assertEqual(
functions[4].definition.attr['backward_function_name'].s,
functions[5].name)
self.assertEqual(
functions[5].definition.attr['forward_function_name'].s,
functions[4].name)
sq = defun_matmul(t, t)
double = add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
# Make sure the pre-registered function is used, and no other function
# is added.
self.assertLen(graph._functions, 6)
functions = list(graph._functions.values())
for i in range(len(functions)):
self.assertEqual(captured_function_names[i],
functions[i].definition.signature.name)
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
def testRegisterConcreteFunction(self, function_decorator):
@function_decorator
def py_add(x, y):
return math_ops.add(x, y)
py_add(array_ops.ones([]), array_ops.ones([]))
add = py_add.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
@function_decorator
def py_composite(x, y):
return x, add(x, y)
py_composite(array_ops.ones([]), array_ops.ones([]))
composite = py_composite.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
composite.add_to_graph()
composite.add_gradient_functions_to_graph()
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 6)
# two sets of functions, each of which is (inference, forward, backward)
functions = list(graph._functions.values())
captured_function_names = [
f.definition.signature.name for f in functions
]
expected_func_name_regex = [
'.*inference.*py_composite.*',
'.*inference.*py_add.*',
'.*forward.*py_composite.*',
'.*forward.*py_add.*',
'.*inference.*backward.*py_composite.*',
'.*inference.*backward.*py_add.*',
]
for expected, found in zip(
expected_func_name_regex,
captured_function_names):
self.assertRegex(found, expected)
composite_t, composite_double = composite(t, t)
double = add(t, t)
self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(double))
self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(composite_double))
self.assertAllEqual([[1, 2], [3, 4]], self.evaluate(composite_t))
# Make sure the pre-registered function is used, and no other function
# is added.
self.assertLen(graph._functions, 6)
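# Eager tensors at or below _EAGER_CONST_THRESHOLD elements are captured as
# Const nodes, while larger tensors and variables are captured as Placeholders.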
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
def testEagerCaptures(self, function_decorator):
with context.eager_mode():
large_tensor = array_ops.ones(shape=(256,))
self.assertGreater(256, func_graph._EAGER_CONST_THRESHOLD)
small_tensor = array_ops.ones(shape=(4,))
self.assertLessEqual(4, func_graph._EAGER_CONST_THRESHOLD)
v = resource_variable_ops.ResourceVariable(0.0)
for captured, op_type in [(large_tensor, 'Placeholder'),
(small_tensor, 'Const'), (v, 'Placeholder')]:
@function_decorator
def test_fn():
return captured + 1 # pylint: disable=cell-var-from-loop
g = test_fn.get_concrete_function().graph
internal_captures = g.internal_captures
self.assertLen(internal_captures, 1)
self.assertEqual(internal_captures[0].op.type, op_type)
def testRegisterFunctionWithInputSignature(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(
matmul,
input_signature=[
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
])
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 3)
# Test registering the function with the cache; note that the inputs are ignored.
function.register(defun_matmul)
graph = ops.get_default_graph()
self.assertLen(graph._functions, 3)
def testRegisterFunctionWithCache(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[2.0, 3.0], [4.0, 5.0]])
function.register(defun_matmul, t, t)
function.register(defun_matmul, t2, t2)
graph = ops.get_default_graph()
# Only one function is registered since the input params are of the same type
# pylint: disable=protected-access
self.assertLen(graph._functions, 3)
def testCallingFunctionWithDifferentVariables(self):
@function.defun
def foo(v):
v.assign_add(1.0)
return v.read_value()
v = resource_variable_ops.ResourceVariable(0.0)
graph_function = foo.get_concrete_function(v)
self.assertLen(graph_function.inputs, 1)
self.assertEmpty(graph_function.captured_inputs)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(v)), 2.0)
w = resource_variable_ops.ResourceVariable(0.0)
@function.defun
def bar(v):
del v
return constant_op.constant(1.0)
graph_function = bar.get_concrete_function(v)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(w)), 1.0)
def testCallingFunctionWithNonTensorsFails(self):
@function.defun
def foo(x):
return x
graph_function = foo.get_concrete_function(constant_op.constant(1.0))
with self.assertRaises((TypeError, ValueError)):
graph_function('Not a Tensor.')
def testSwapImplementationWithGrapplerPlugin(self):
# Set the min_graph_nodes to -1 since the graph in this test is too small,
# and would be ignored by grappler if we didn't set this.
rewrites = rewriter_config_pb2.RewriterConfig()
rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
rewrites.min_graph_nodes = -1
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrites, build_cost_model=1)
config_proto = config_pb2.ConfigProto(graph_options=graph_options)
with context.graph_mode(), self.cached_session(
config=config_proto, graph=ops.Graph(), use_gpu=True):
@function.defun_with_attributes(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'CPU'
})
def cpu_boost(x):
return math_ops.add(x, 2.0)
@function.defun_with_attributes(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'GPU'
})
def gpu_boost(x):
return math_ops.add(x, 4.0)
x = constant_op.constant(1.0)
function.register(cpu_boost, x)
y = gpu_boost(x)
y_value = self.evaluate(y)
if test.is_gpu_available():
self.assertEqual(y_value, 5.0)
else:
# Grappler falls back to the CPU impl even when the GPU function is called.
self.assertEqual(y_value, 3.0)
@test_util.disable_tfrt('b/174712583: TFRT doesn\'t support behavior '
'equivalent to implementation_selector for function')
def testSwapImplementationInEager(self):
if not context.executing_eagerly():
self.skipTest('eager only')
# testSharedRendezvous sets the disable_meta_optimizer flag to True.
# If that subtest runs before this one, having the flag set to True
# would cause this subtest to fail. To avoid that scenario, explicitly
# set the disable_meta_optimizer flag to False here.
context.context().set_optimizer_experimental_options({
'min_graph_nodes': -1,
'implementation_selector': True,
'disable_meta_optimizer': False
})
@function.defun_with_attributes(
attributes={'api_implements': 'foo',
'api_preferred_device': 'CPU'})
def on_cpu(x):
return x + 2
@function.defun_with_attributes(
attributes={'api_implements': 'foo',
'api_preferred_device': 'GPU'})
def on_gpu(x):
return x + 4
@function.defun
def run_on_cpu(t):
function.register(on_cpu, t)
with ops.device('CPU:0'):
return on_gpu(t)
# Expect to run the on_cpu branch, regardless of whether a GPU is available.
self.assertEqual(run_on_cpu(constant_op.constant(1)).numpy(), 3)
def testDefunFunctionSeparateGraphs(self):
with context.graph_mode():
@function.defun
def add(x):
return x + 5
@function.defun
def maybe_add(x, should_add):
if should_add:
return add(x)
else:
return x
with ops.Graph().as_default():
x = constant_op.constant(11)
maybe_add(x, True)
self.assertLen(total_function_cache(maybe_add), 1)
self.assertLen(total_function_cache(add), 1)
maybe_add(x, False)
self.assertLen(total_function_cache(maybe_add), 2)
self.assertLen(total_function_cache(add), 1)
with ops.Graph().as_default():
x = constant_op.constant(11)
maybe_add(x, True)
self.assertLen(total_function_cache(maybe_add), 3)
self.assertLen(total_function_cache(add), 2)
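# Shapes whose encodings overlap when concatenated (e.g. [12, 1] vs [1, 21])
# must still map to distinct cache keys.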
def testCacheKeyOverlappingShapes(self):
@function.defun
def defined(t):
return t
defined(array_ops.zeros([12, 1]))
self.assertLen(total_function_cache(defined), 1)
defined(array_ops.zeros([1, 21]))
self.assertLen(total_function_cache(defined), 2)
@function.defun
def defined_again(t):
return defined(t)
defined_again.get_concrete_function(array_ops.zeros([12, 1]))
self.assertLen(total_function_cache(defined_again), 1)
defined_again.get_concrete_function(array_ops.zeros([1, 21]))
self.assertLen(total_function_cache(defined_again), 2)
def testCacheTensorSpecIdenticalToTensor(self):
@function.defun
def defined(t):
return t
z = array_ops.zeros([2, 2])
z_spec = tensor_spec.TensorSpec.from_tensor(z)
self.assertIs(
defined.get_concrete_function(z_spec), defined.get_concrete_function(z))
def testCacheKeyNestedLists(self):
@function.defun
def defined(l):
return l
a = constant_op.constant(1.)
b = constant_op.constant(2.)
c = constant_op.constant(3.)
defined([[a], b, c])
self.assertLen(total_function_cache(defined), 1)
defined([[a, b], c])
self.assertLen(total_function_cache(defined), 2)
def testCacheKeyAttrsClass(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class TestClass(object):
a = attr.ib()
b = attr.ib()
@function.defun
def defined(l):
return l
defined(
TestClass(
constant_op.constant(1.),
[constant_op.constant(2.),
constant_op.constant(3.)]))
self.assertLen(total_function_cache(defined), 1)
defined(
TestClass(
constant_op.constant(1.),
[constant_op.constant(2.),
constant_op.constant(3.)]))
self.assertLen(total_function_cache(defined), 1)
defined(
TestClass([constant_op.constant(1.),
constant_op.constant(2.)], constant_op.constant(3.)))
self.assertLen(total_function_cache(defined), 2)
def testDistinctVariablesNoRetracing(self):
@function.defun
def defined(a, b, c):
return a + b + c
x = resource_variable_ops.ResourceVariable(0.0)
y = resource_variable_ops.ResourceVariable(0.0)
z = resource_variable_ops.ResourceVariable(0.0)
# We generate cache keys based on unique combinations of resource ids.
defined(x, y, z)
self.assertLen(total_function_cache(defined), 1)
# Re-arranging arguments should not cause cache miss
# because the three inputs are still distinct
defined(z, y, x)
self.assertLen(total_function_cache(defined), 1)
def testRetracingOnDifferentVariableCombinationPatterns(self):
@function.defun
def defined(a, b, c):
return a + b + c
x = resource_variable_ops.ResourceVariable(0.0)
y = resource_variable_ops.ResourceVariable(0.0)
z = resource_variable_ops.ResourceVariable(0.0)
defined(x, y, z)
self.assertLen(total_function_cache(defined), 1)
# Retracing because the first two arguments are the same
defined(x, x, z)
self.assertLen(total_function_cache(defined), 2)
# Replacing x with y does not cause cache miss
# because the combination stays the same as (x, x, z)
defined(y, y, z)
self.assertLen(total_function_cache(defined), 2)
# A different combination pattern causes cache miss
defined(z, y, y)
self.assertLen(total_function_cache(defined), 3)
defined(z, y, y)
self.assertLen(total_function_cache(defined), 3)
def testDeepcopyVariableNoRetracing(self):
@function.defun
def defined(a, b, c):
return a + b + c
x = resource_variable_ops.ResourceVariable(0.0)
y = resource_variable_ops.ResourceVariable(0.0)
z = resource_variable_ops.ResourceVariable(0.0)
defined(x, y, z)
self.assertLen(total_function_cache(defined), 1)
x_copy = copy.deepcopy(x)
defined(x_copy, y, z)
self.assertLen(total_function_cache(defined), 1)
def _total_function_cache_def_func(self, defined):
return defined._list_all_concrete_functions() # pylint: disable=protected-access
def testVariableRetracingOnDtypeChanges(self):
@def_function.function
def defined(a, b):
return a + b
x1 = resource_variable_ops.ResourceVariable(0.0)
x2 = resource_variable_ops.ResourceVariable(0.0)
defined(x1, x2)
self.assertLen(self._total_function_cache_def_func(defined), 1)
# Should expect retracing for new dtypes
y1 = resource_variable_ops.ResourceVariable(0)
y2 = resource_variable_ops.ResourceVariable(1)
defined(y1, y2)
self.assertLen(self._total_function_cache_def_func(defined), 2)
def testVariableRetracingDtypeShape(self):
@def_function.function
def defined(a, b):
return a + b
x1 = resource_variable_ops.ResourceVariable(0.0)
x2 = resource_variable_ops.ResourceVariable(0.0)
defined(x1, x2)
self.assertLen(self._total_function_cache_def_func(defined), 1)
y1 = resource_variable_ops.ResourceVariable([0.0, 1.0])
y2 = resource_variable_ops.ResourceVariable([0.0, 1.0])
defined(y1, y2)
self.assertLen(self._total_function_cache_def_func(defined), 2)
z1 = resource_variable_ops.ResourceVariable([[0.0, 1.0]])
z2 = resource_variable_ops.ResourceVariable([[0.0, 1.0]])
defined(z1, z2)
self.assertLen(self._total_function_cache_def_func(defined), 3)
def testDecoratedMethodInspect(self):
class DefunnedMiniModel(object):
@function.defun
def call(self, inputs, training=True):
pass
m = DefunnedMiniModel()
fullargspec = tf_inspect.getfullargspec(m.call)
self.assertIn('training', fullargspec.args)
def testFunctionModifiesInputList(self):
# Tests on `list` methods that do in-place modification, except `list.sort`
# since it cannot even be "defunned" in the first place
def get_list():
return [constant_op.constant(0.), constant_op.constant(1.)]
expected_msg = '.*() should not modify'
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def append(l):
l.append(constant_op.constant(0.))
append(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def extend(l):
l.extend([constant_op.constant(0.)])
extend(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def insert(l):
l.insert(0, constant_op.constant(0.))
insert(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def pop(l):
l.pop()
pop(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def reverse(l):
l.reverse()
reverse(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def remove(l):
l.remove(l[0])
remove(get_list())
# `list.clear` is a method that is in Py3 but not Py2
if sys.version.startswith('3'):
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def clear(l):
l.clear()
clear(get_list())
# One last test for keyword arguments
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def kwdappend(**kwargs):
l = kwargs['l']
l.append(constant_op.constant(0.))
kwdappend(l=get_list())
def testFunctionModifiesInputDict(self):
def get_dict():
return {'t1': constant_op.constant(0.), 't2': constant_op.constant(1.)}
expected_msg = '.* should not modify'
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def clear(m):
m.clear()
clear(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def pop(m):
m.pop('t1')
pop(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def popitem(m):
m.popitem()
popitem(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def update(m):
m.update({'t1': constant_op.constant(3.)})
update(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def setdefault(m):
m.setdefault('t3', constant_op.constant(3.))
setdefault(get_dict())
def testFunctionModifiesInputNest(self):
with self.assertRaisesRegex(ValueError, 'modify.* should not modify'):
@def_function.function
def modify(n):
n[0]['t1'].append(constant_op.constant(1.))
nested_input = [{
't1': [constant_op.constant(0.),
constant_op.constant(1.)],
},
constant_op.constant(2.)]
modify(nested_input)
with self.assertRaisesRegex(ValueError,
'modify_same_flat.* should not modify'):
# The flat list doesn't change whereas the true structure changes
@def_function.function
def modify_same_flat(n):
n[0].append(n[1].pop(0))
nested_input = [[constant_op.constant(0.)],
[constant_op.constant(1.),
constant_op.constant(2.)]]
modify_same_flat(nested_input)
@test_util.disable_tfrt('b/173429686')
def testExecutorType(self):
@function.defun
def add_five(x):
return x + 5
self.assertEqual(
5,
add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())
with self.assertRaisesRegex(errors.NotFoundError, 'NON_EXISTENT_EXECUTOR'):
with context.function_executor_type('NON_EXISTENT_EXECUTOR'):
add_five(constant_op.constant(0, dtype=dtypes.int32))
for executor_type in ('', 'DEFAULT', None):
with context.function_executor_type(executor_type):
self.assertAllEqual(
5,
add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())
@test_util.assert_no_garbage_created
def testReferenceCycles(self):
fn = function.defun(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
def testFunctionStackInErrorMessage(self):
if context.executing_eagerly():
# TODO(b/122736651): Remove this skipTest once fixed.
self.skipTest('Error interpolation is not working when function is '
'invoked without PartitionedCallOp.')
@def_function.function()
def fn3(x):
return x + 2
@def_function.function()
def fn2(x):
check_ops.assert_equal(fn3(x), 3)
return 2
@def_function.function()
def fn(x):
return fn2(x)
with self.assertRaises(errors.InvalidArgumentError) as cm:
fn(2)
e = cm.exception
self.assertIn('fn -> fn2', e.message)
self.assertIn('node assert_equal/Assert/Assert (defined at', e.message)
self.assertNotIn('fn3', e.message)
@test_util.run_gpu_only
def testFunctionIsNotPinned(self):
"""Tests that functions aren't pinned to the CPU by the eager runtime."""
seed1, seed2 = 79, 25
shape = constant_op.constant([4, 7])
dtype = dtypes.float32
@def_function.function
def func():
with ops.device('GPU:0'):
return gen_random_ops.random_standard_normal(
shape, dtype=dtype, seed=seed1, seed2=seed2)
with ops.device('GPU:0'):
x = func()
self.assertRegex(x.device, 'GPU')
@test_util.run_in_graph_and_eager_modes
def testShapeCaching(self):
@function.defun
def func(x):
return array_ops.shape(x)
@function.defun(
input_signature=[tensor_spec.TensorSpec([None, None], dtypes.float32)])
def calls_func(x):
return func(x)
self.assertAllEqual([1, 1], self.evaluate(func(array_ops.zeros([1, 1]))))
self.assertAllEqual([2, 2], self.evaluate(func(array_ops.zeros([2, 2]))))
self.assertAllEqual(
[3, 3],
self.evaluate(calls_func(array_ops.zeros([3, 3]))))
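# Shape relaxation should keep the number of traces bounded even when the
# function is repeatedly called with many different input shapes.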
def testLimitedRetracing(self):
trace_count = [0]
@function.defun
def func(x):
trace_count[0] += 1
return x
for _ in range(50):
func(constant_op.constant(3.))
func(constant_op.constant(4.))
func(constant_op.constant([[1., 2.]]))
func(constant_op.constant([[]]))
func(constant_op.constant([[3., 4.], [5., 6.]]))
func(constant_op.constant([[3., 4.], [5., 6.], [7., 8.]]))
# Tracing more than twice per input doesn't make sense.
self.assertLess(trace_count[0], 13)
def testLimitedRetracingWithCompositeTensors(self):
trace_count = [0]
@def_function.function
def f(x):
trace_count[0] += 1
return x
for i in range(10):
f(ragged_factory_ops.constant([[1, 2], [i]]))
f(ragged_factory_ops.constant([[1, 2], [], [3, 4, 5]]))
f(ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]]))
self.assertEqual(trace_count[0], 3)
def test_concrete_function_shape_mismatch(self):
@def_function.function
def f(argument_name):
return argument_name + 1.
f_concrete = f.get_concrete_function(constant_op.constant([1.]))
# Calling a function from eager doesn't do any shape checking above what
# kernels do while executing.
self.assertAllEqual(
[2., 3.],
f_concrete(constant_op.constant([1., 2.])).numpy())
@def_function.function
def g():
f_concrete(constant_op.constant([1., 2.]))
with self.assertRaisesRegex(ValueError, 'argument_name'):
g()
@test_util.run_in_graph_and_eager_modes
def test_shape_inference_with_symbolic_shapes(self):
@def_function.function
def _uses_symbolic_shapes(w, x, y):
x = array_ops.identity(x, name='name_collision')
x = array_ops.transpose(x, [1, 0, 2])
x_batch = array_ops.shape(x)[0]
y_batch = array_ops.shape(y)[0]
y *= w
n = y_batch // x_batch
return array_ops.reshape(y, [n, x_batch, -1])
conc = _uses_symbolic_shapes.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
@def_function.function
def _call_concrete():
c = constant_op.constant(1.)
array_ops.identity(c, name='name_collision')
output1 = conc(array_ops.ones([2]),
array_ops.ones([5, 4, 2]),
array_ops.ones([20, 2]))
self.assertEqual([5, 4, 2], output1.shape)
output2 = conc(array_ops.ones([3]),
array_ops.ones([5, 4, 3]),
array_ops.ones([40, 3]))
self.assertEqual([10, 4, 3], output2.shape)
return output1, output2
output1, output2 = _call_concrete()
self.assertEqual((5, 4, 2), self.evaluate(output1).shape)
self.assertEqual((10, 4, 3), self.evaluate(output2).shape)
def testAutoGraphContext(self):
@def_function.function
def test_fn():
self.assertEqual(
ag_ctx.control_status_ctx().status, ag_ctx.Status.ENABLED)
prev_status = ag_ctx.control_status_ctx().status
test_fn()
self.assertEqual(ag_ctx.control_status_ctx().status, prev_status)
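# The next three tests exercise CancellationManager: cancelling before or
# during execution raises CancelledError, while cancelling afterwards is a no-op.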
@test_util.disable_tfrt('b/170435618')
def testCancelBeforeFunctionExecution(self):
if not context.executing_eagerly():
self.skipTest('eager only')
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
@def_function.function
def f():
return q.dequeue()
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
c_mgr.start_cancel()
with self.assertRaises(errors.CancelledError):
cancelable_func()
@test_util.disable_tfrt('b/170435618')
def testCancelBlockedFunctionExecution(self):
if not context.executing_eagerly():
self.skipTest('eager only')
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
@def_function.function
def f():
return q.dequeue()
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
def cancel_thread():
time.sleep(0.5)
c_mgr.start_cancel()
t = self.checkedThread(cancel_thread)
t.start()
with self.assertRaises(errors.CancelledError):
cancelable_func()
t.join()
@test_util.disable_tfrt('b/170435618')
def testCancelAfterFunctionExecution(self):
if not context.executing_eagerly():
self.skipTest('eager only')
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
q.enqueue(37)
@def_function.function
def f():
return q.dequeue()
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
self.assertAllEqual(37, cancelable_func().numpy())
# Cancellation after the function executes is a no-op.
c_mgr.start_cancel()
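# Function callbacks fire once per newly traced ConcreteFunction; re-running
# an existing trace does not invoke them again, but a new signature does.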
def testAddFunctionCallback(self):
functions = []
def function_callback(f, name, graph, inputs, outputs):
del name, graph, inputs, outputs
functions.append(f)
@def_function.function
def plus_one(x):
return x + 1
try:
function.add_function_callback(function_callback)
x_float32 = numpy.array(3.0, dtype=numpy.float32)
self.assertAllClose(plus_one(x_float32), 4.0)
self.assertLen(functions, 1)
# Function is already created. Executing it again should not invoke the
# function callback.
self.assertAllClose(plus_one(x_float32), 4.0)
self.assertLen(functions, 1)
# Signature change leads to a new Function being built.
x_float64 = numpy.array(3.0, dtype=numpy.float64)
self.assertAllClose(plus_one(x_float64), 4.0)
self.assertLen(functions, 2)
finally:
function.clear_function_callbacks()
def testFunctionCallbackAddOps(self):
file_name = os.path.join(self.get_temp_dir(), 'test')
def function_callback(f, name, graph, inputs, outputs):
del f, name, inputs
with graph.as_default():
printer = logging_ops.print_v2(
'hello',
output_stream='file://' + file_name
)
outputs[0].op._add_control_input(printer)
@def_function.function
def plus_one(x):
return x + 1
self.addCleanup(function.clear_function_callbacks)
function.add_function_callback(function_callback)
x_float32 = numpy.array(3.0, dtype=numpy.float32)
self.assertAllClose(plus_one(x_float32), 4.0)
with open(file_name, 'r') as f:
self.assertEqual(f.read().strip(), 'hello')
def testRemoveFunctionCallback(self):
functions_1 = []
def function_callback_1(f, name, graph, inputs, outputs):
del name, graph, inputs, outputs
functions_1.append(f)
functions_2 = []
def function_callback_2(f, name, graph, inputs, outputs):
del name, graph, inputs, outputs
functions_2.append(f)
@def_function.function
def plus_one(x):
return x + 1
try:
function.add_function_callback(function_callback_1)
function.add_function_callback(function_callback_2)
self.assertAllClose(plus_one(numpy.array(3.0, dtype=numpy.float32)), 4.0)
self.assertLen(functions_1, 1)
self.assertLen(functions_2, 1)
function.remove_function_callback(function_callback_1)
# The 1st callback should not be invoked after remove_function_callback()
# is called.
self.assertAllClose(plus_one(numpy.array(3.0, dtype=numpy.float64)), 4.0)
self.assertLen(functions_1, 1)
self.assertLen(functions_2, 2)
finally:
function.clear_function_callbacks()
def testClearFunctionCallbacks(self):
function.add_function_callback(lambda f: None)
function.add_function_callback(lambda f: None)
self.assertLen(function._function_callbacks, 2)
function.clear_function_callbacks()
self.assertEmpty(function._function_callbacks) # pylint:disable=protected-access
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNestedTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = constant_op.constant(1000)
b = constant_op.constant(200)
c = constant_op.constant(30)
d = {'a': a, 'b': b}
e = (c, 4)
# Test different argument signatures when constructing the concrete func.
for cf in [
f.get_concrete_function(d, e),
f.get_concrete_function(d, y=e),
f.get_concrete_function(y=e, x=d),
f.get_concrete_function(_spec_for_value(d), _spec_for_value(e)),
f.get_concrete_function(_spec_for_value(d), y=_spec_for_value(e)),
f.get_concrete_function(y=_spec_for_value(e), x=_spec_for_value(d))
]:
# Test different calling conventions when calling the concrete func.
for output in [
cf(d, e), # structured signature
cf(d, y=e), # structured signature w/ kwarg
cf(y=e, x=d), # structured signature w/ 2 kwargs
cf(a, b, c), # flat signature
cf(x=a, x_1=b, y=c) # flat signature w/ kwargs
]:
self.assertIsInstance(output, tuple)
self.assertLen(output, 2)
self.assertAllEqual(output[0], 1200)
self.assertAllEqual(output[1], 34)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNestedNonTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = {'a': constant_op.constant(1000), 'b': constant_op.constant(200)}
b = (50, 3)
for cf in [ # argument y is bound to non-Tensor value (50, 3).
f.get_concrete_function(a, b),
f.get_concrete_function(a, y=b),
f.get_concrete_function(x=a, y=b)
]:
for output in [cf(a), cf(x=a), cf(a, b), cf(x=a, y=b)]:
self.assertAllEqual(output[0] + output[1], 1253)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNonTensorStringInputs(self):
@def_function.function
def f(x, y):
return string_ops.string_join([x, y])
a = constant_op.constant('a')
b = 'b'
cf = f.get_concrete_function(a, b)
for output in [cf(a), cf(x=a), cf(a, b), cf(x=a, y=b)]:
self.assertAllEqual(output, b'ab')
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithBoundNestedNonTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = {'a': 3000, 'b': 200, 'c': 9000}
b = (constant_op.constant(30), 4)
for cf in [ # argument x is bound to non-tensor value `a`
f.get_concrete_function(a, b),
f.get_concrete_function(a, y=b),
f.get_concrete_function(x=a, y=b)
]:
for output in [cf(a, b), cf(a, y=b), cf(y=b), cf(x=a, y=b)]:
self.assertAllEqual(output[0] + output[1], 3234)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithAllBoundNestedNonTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = {'a': 5000, 'b': 500}
b = (50, 5)
cf = f.get_concrete_function(a, b)
for output in [cf(), cf(a), cf(y=b)]:
self.assertAllEqual(output[0] + output[1], 5555)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionMethodWithVarargs(self):
float32_scalar = tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)
class MyModel(module.Module):
@def_function.function(input_signature=[float32_scalar, float32_scalar])
def add(self, *arg):
return math_ops.add(*arg)
m = MyModel()
cf = m.add.get_concrete_function()
cf(-12.0, 3.0)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionStructuredSignatureKeywordOrder(self):
# Check that keyword-only arguments are sorted appropriately, so that they
# feed the right tensor into each input.
@def_function.function
def g(**kwargs):
return string_ops.reduce_join(
string_ops.reduce_join(
ops.convert_to_tensor(sorted(kwargs.items())),
axis=1,
separator='='),
axis=0,
separator=', ')
s = constant_op.constant('s')
g.get_concrete_function(q=s, a=s, p=s, r=s, v=s, m=s, l=s)
self.assertAllEqual(
g(m='a', r='b', v='c', q='d', l='e', a='f', p='g'),
b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
self.assertAllEqual(
g(q='d', a='f', p='g', r='b', v='c', m='a', l='e'),
b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
self.assertAllEqual(
g(a='f', l='e', m='a', p='g', q='d', r='b', v='c'),
b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
dict(
testcase_name='MissingArg',
conc_args=lambda: (1, constant_op.constant(2)),
call_args=lambda: (1,),
error=r'func\(x, y\) missing required arguments: y'),
dict(
testcase_name='MissingVararg',
conc_args=lambda: (1, 2, constant_op.constant(1.0)),
call_args=lambda: (1, 2),
error=r'func\(x, y, <arg3>\) missing required arguments: <arg3>'),
dict(
testcase_name='ExtraPositionalArg',
conc_args=lambda: (1, 2),
call_args=lambda: (1, 2, 3),
error=r'func\(x, y\) takes 2 .* got 3'),
dict(
testcase_name='MissingKeywordOnlyArg',
conc_args=lambda: (1, 2),
conc_kwargs=lambda: {'c': constant_op.constant(1.0)},
call_args=lambda: (1, 2),
error=r'func\(x, y, \*, c\) missing required arguments: c'),
dict(
testcase_name='ExtraKeywordArg',
conc_args=lambda: (1, 2),
call_args=lambda: (1, 2),
call_kwargs=lambda: {'c': constant_op.constant(1.0)},
error=r'func\(x, y\) got unexpected keyword arguments: c'),
dict(
testcase_name='ExpectedRaggedGotNest',
conc_args=lambda: (ragged_factory_ops.constant([[1, 2], [3]]),),
call_args=lambda: ({
'a': constant_op.constant([1, 2, 3])
},),
error=r'func\(x, y\): argument x had incorrect type\n'
r' expected: RaggedTensor\n'
r" got: {'a': (Eager)?Tensor}"),
dict(
testcase_name='WrongRaggedRank',
conc_args=lambda: (ragged_factory_ops.constant([[1, 2], [3]]),),
call_args=lambda: (ragged_factory_ops.constant([[[1]]]),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='WrongRaggedDType',
conc_args=lambda: (ragged_factory_ops.constant([[1]]),),
call_args=lambda: (ragged_factory_ops.constant([[1.0]]),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='ExpectedDictGotTensor',
conc_args=lambda: ({
'a': constant_op.constant(1),
'b': constant_op.constant(1)
},),
call_args=lambda: (constant_op.constant(1),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='ExpectedTupleGotTensor',
conc_args=lambda:
((constant_op.constant(1), constant_op.constant(2)),),
call_args=lambda: (constant_op.constant(1),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='WrongDType',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (constant_op.constant(1.0),),
exception=(ValueError, errors.InvalidArgumentError,
# on xla_gpu, we get InternalError instead.
errors.InternalError)),
dict(
testcase_name='ExpectedTensorGotInt',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (5,),
error=r'func\(x, y\) expected a Tensor in x, but got int value 5'),
dict(
testcase_name='ExpectedIntGotDifferentInt',
conc_args=lambda: (5,),
call_args=lambda: (8,),
error=r'ConcreteFunction func\(x, y\) was constructed with int '
r'value 5 in x, but was called with int value 8'),
dict(
testcase_name='ExpectedIntGotTensor',
conc_args=lambda: (5,),
call_args=lambda: (constant_op.constant(6),),
error=r'ConcreteFunction func\(x, y\) was constructed with int '
'value 5 in x, but was called with (Eager)?Tensor value .*'),
dict(
testcase_name='TwoValuesForArgument',
conc_args=lambda: (1, 2),
call_args=lambda: (1, 2),
call_kwargs=lambda: {'x': 3},
error=r"func\(x, y\) got two values for 'x'"),
])
# pylint: enable=g-long-lambda
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionStructuredSignatureError(self,
conc_args=(),
conc_kwargs=None,
call_args=(),
call_kwargs=None,
error='.*',
exception=TypeError):
"""Tests for errors in the structrued signature.
Args:
conc_args: Positional arguments used for get_concrete_function.
conc_kwargs: Keyword arguments used for get_concrete_function.
call_args: Positional arguments used to call the function.
call_kwargs: Keyword arguments used to call the function.
error: Expected exception message.
exception: Expected exception type.
"""
conc_args = conc_args() if callable(conc_args) else conc_args
conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}
call_args = call_args() if callable(call_args) else call_args
call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}
self.assertIsInstance(conc_args, tuple)
self.assertIsInstance(call_args, tuple)
self.assertIsInstance(conc_kwargs, dict)
self.assertIsInstance(call_kwargs, dict)
@def_function.function
def func(x, y=5, *varargs, **kwargs): # pylint: disable=keyword-arg-before-vararg
del y, varargs, kwargs
return x
conc = func.get_concrete_function(*conc_args, **conc_kwargs)
with self.assertRaisesRegex(exception, error):
self.evaluate(conc(*call_args, **call_kwargs))
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
dict(
testcase_name='MissingArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (constant_op.constant(1),),
error=r'func\(x, y\) missing required arguments: y'),
dict(
testcase_name='TwoValuesForArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (constant_op.constant(1),),
call_kwargs=lambda: {
'x': constant_op.constant(1),
'y': constant_op.constant(1)
},
error=r"func\(x, y\) got two values for 'x'"),
dict(
testcase_name='ExtraPositionalArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (constant_op.constant(1), constant_op.constant(2),
constant_op.constant(3)),
error=r'func\(x, y\) takes 2 .* got 3'),
dict(
testcase_name='UnexpectedKeywordArg',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (constant_op.constant(1),),
call_kwargs=lambda: {'c': constant_op.constant(1)},
error=r'func\(x\) got unexpected keyword arguments: c'),
dict(
testcase_name='MissingVararg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2),
constant_op.constant(3)),
call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
error=r'func\(x, y, varargs_0\) missing required '
r'arguments: varargs_0'),
dict(
testcase_name='MissingKeywordArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
conc_kwargs=lambda: {'c': constant_op.constant(1)},
call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
error=r'func\(x, y, c\) missing required arguments: c'),
dict(
testcase_name='ExpectedTensorGotInt',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (5, constant_op.constant(2)),
error=r'func\(x, y\): expected argument #0\(zero-based\) to be '
r'a Tensor; got int \(5\)'),
dict(
testcase_name='WrongDType',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (constant_op.constant(1.0),),
exception=(ValueError, errors.InvalidArgumentError,
# on xla_gpu, we get InternalError instead.
errors.InternalError)),
dict(
testcase_name='MissingKeywordArgNestPiece',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
conc_kwargs=lambda: {'c': ragged_factory_ops.constant([[1]])},
call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_kwargs=lambda: {'c': constant_op.constant(1)},
error=r'func\(x, y, c, c_1\) missing required arguments: c_1'),
])
# pylint: enable=g-long-lambda
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionFlatSignatureError(self,
conc_args=(),
conc_kwargs=None,
call_args=(),
call_kwargs=None,
error='.*',
exception=TypeError):
"""Tests for errors in the flat signature.
Args:
conc_args: Positional arguments used for get_concrete_function.
conc_kwargs: Keyword arguments used for get_concrete_function.
call_args: Positional arguments used to call the function.
call_kwargs: Keyword arguments used to call the function.
error: Expected exception message.
exception: Expected exception type.
"""
conc_args = conc_args() if callable(conc_args) else conc_args
conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}
call_args = call_args() if callable(call_args) else call_args
call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}
self.assertIsInstance(conc_args, tuple)
self.assertIsInstance(call_args, tuple)
self.assertIsInstance(conc_kwargs, dict)
self.assertIsInstance(call_kwargs, dict)
@def_function.function
def func(x, y=5, *varargs, **kwargs): # pylint: disable=keyword-arg-before-vararg
del y, varargs, kwargs
return x
conc = func.get_concrete_function(*conc_args, **conc_kwargs)
# Remove _function_spec, to disable the structured signature.
conc._set_function_spec(None) # pylint: disable=protected-access
with self.assertRaisesRegex(exception, error):
self.evaluate(conc(*call_args, **call_kwargs))
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionAmbiguousSignature(self):
# When both the flat & structured signatures are applicable, but they
# give different results, we use the structured signature. Note: we expect
# this to be extremely rare.
@def_function.function
def f(x, y):
return x * 10 + y
conc = f.get_concrete_function(
x=tensor_spec.TensorSpec(None, dtypes.int32, name='y'),
y=tensor_spec.TensorSpec(None, dtypes.int32, name='x'))
result = conc(x=constant_op.constant(5), y=constant_op.constant(6))
self.assertAllEqual(result, 56)
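# pretty_printed_signature() renders a one-line summary and, in verbose mode,
# per-argument dtype/shape details for a ConcreteFunction.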
def testPrettyPrintedSignature(self):
@def_function.function
def func(x, kangaroo=None, octopus=7):
del octopus, kangaroo
return x
scalar = constant_op.constant(5)
vector = constant_op.constant([10, 10, 20])
ragged = ragged_factory_ops.constant([[10, 20], [40]])
c1 = func.get_concrete_function(scalar, vector)
c1_summary = r'func\(x, kangaroo, octopus=7\)'
c1_details = (r' Args:\n'
r' x: int32 Tensor, shape=\(\)\n'
r' kangaroo: int32 Tensor, shape=\(3,\)\n'
r' Returns:\n'
r' int32 Tensor, shape=\(\)')
self.assertRegex(c1.pretty_printed_signature(verbose=False), c1_summary)
self.assertRegex(
c1.pretty_printed_signature(verbose=True),
c1_summary + '\n' + c1_details)
self.assertRegex(
repr(c1), r'<ConcreteFunction func\(x, kangaroo, octopus=7\) at .*>')
self.assertRegex(
str(c1), 'ConcreteFunction {}\n{}'.format(c1_summary, c1_details))
c2 = func.get_concrete_function(scalar, ragged, 3)
c2_summary = r'func\(x, kangaroo, octopus=3\)'
c2_details = (r' Args:\n'
r' x: int32 Tensor, shape=\(\)\n'
r' kangaroo: RaggedTensorSpec\(.*\)\n'
r' Returns:\n'
r' int32 Tensor, shape=\(\)')
self.assertRegex(c2.pretty_printed_signature(),
c2_summary + '\n' + c2_details)
c3 = func.get_concrete_function({'a': scalar, 'b': [ragged, ragged]})
c3_summary = r'func\(x, kangaroo=None, octopus=7\)'
c3_details = (r' Args:\n'
r" x: {'a': <1>, 'b': \[<2>, <3>\]}\n"
r' <1>: int32 Tensor, shape=\(\)\n'
r' <2>: RaggedTensorSpec\(.*\)\n'
r' <3>: RaggedTensorSpec\(.*\)\n'
r' Returns:\n'
r" {'a': <1>, 'b': \[<2>, <3>\]}\n"
r' <1>: int32 Tensor, shape=\(\)\n'
r' <2>: RaggedTensorSpec\(.*\)\n'
r' <3>: RaggedTensorSpec\(.*\)')
# python 3.5 does not guarantee deterministic iteration of dict contents,
# which can lead to a mismatch in the pretty_printed_signature output for "Args"
if sys.version_info >= (3, 6):
self.assertRegex(c3.pretty_printed_signature(),
c3_summary + '\n' + c3_details)
# pylint: disable=keyword-arg-before-vararg
@def_function.function
def func2(x, y=3, *args, **kwargs):
return (x, y, args, kwargs)
c4 = func2.get_concrete_function(scalar, 4, 5, a=scalar)
c4_summary = 'func2(x, y=4, <arg3>=5, *, a)'
self.assertEqual(c4.pretty_printed_signature(verbose=False), c4_summary)
c5 = func2.get_concrete_function(8, vector)
c5_summary = 'func2(x=8, y)'
self.assertEqual(c5.pretty_printed_signature(verbose=False), c5_summary)
def testPrettyPrintedExplicitSignatureWithKeywordArg(self): # b/159639913
@def_function.function(input_signature=[tensor_spec.TensorSpec(None)])
def fn(a, b=1):
return a + b
concrete_fn = fn.get_concrete_function()
self.assertEqual(concrete_fn.pretty_printed_signature(False), 'fn(a)')
self.assertEqual(
concrete_fn.pretty_printed_signature(True), 'fn(a)\n'
' Args:\n'
' a: float32 Tensor, shape=<unknown>\n'
' Returns:\n'
' float32 Tensor, shape=<unknown>')
def testPrettyPrintedSignatureLoadedNamedTuple(self):
Point = collections.namedtuple('Point', ['x', 'y'])
@def_function.function
def fn(b, a): # pylint: disable=unused-argument
return 1.
b = Point(
x=constant_op.constant(1., dtype=dtypes.float32),
y=constant_op.constant(1., dtype=dtypes.float32))
a = Point(
x=constant_op.constant(1, dtype=dtypes.int32),
y=constant_op.constant(1, dtype=dtypes.int32))
mod = module.Module()
f = fn.get_concrete_function(b, a)
save(mod, '/tmp/f', signatures=f)
loaded = load('/tmp/f')
printed = loaded.signatures['serving_default'].pretty_printed_signature()
self.assertIn('a: int32 Tensor, shape=()', printed)
self.assertIn('a_1: int32 Tensor, shape=()', printed)
self.assertIn('b: float32 Tensor, shape=()', printed)
self.assertIn('b_1: float32 Tensor, shape=()', printed)
@test_util.run_in_graph_and_eager_modes
def testIndexedSlicesAsGradientsForConcreteFunctions(self):
@def_function.function
def summing_rnn(inputs):
return math_ops.reduce_sum(inputs, axis=1)
@def_function.function
def gradients(inputs):
with backprop.GradientTape() as tape:
tape.watch(inputs)
hidden = summing_rnn(inputs)
hidden = array_ops.gather(hidden, constant_op.constant([0]))
loss = math_ops.reduce_mean(hidden)
return tape.gradient(loss, inputs)
gradients(constant_op.constant([[[1.0], [2.0]]])) # No error is raised
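# With experimental_follow_type_hints=True, Python values passed to
# parameters annotated as ops.Tensor are treated as tensors when computing
# the cache key, so varying plain Python scalars does not retrace.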
def testFollowTypeHintsTraceBasic(self):
trace_count = [0]
def func(x: ops.Tensor):
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
enabled(1) # Initial call gets traced
enabled(2)
enabled(3)
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(1)
disabled(2) # Retrace
disabled(3) # Retrace
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsTraceWithArgs(self):
trace_count = [0]
def func(*args: ops.Tensor):
trace_count[0] += 1
return args
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
args = (
'abc',
'def',
) * 20
args2 = (
'def',
'abc',
) * 20
enabled(args)
enabled(args2)
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(args)
disabled(args2) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithKwargs(self):
trace_count = [0]
def func(t: ops.Tensor, **kwargs: ops.Tensor):
del kwargs
trace_count[0] += 1
return t
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
enabled(1, x=1, y=1.0, z='one')
enabled(2, x=2, y=2.0, z='two')
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(1, x=1, y=1.0, z='one')
disabled(2, x=2, y=2.0, z='two') # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithMultipleInputTypes(self):
trace_count = [0]
def func(t: ops.Tensor, *args: ops.Tensor, **kwargs: ops.Tensor):
del args, kwargs
trace_count[0] += 1
return t
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
enabled(1, constant_op.constant(1), 'str', x=4.0)
enabled(2, constant_op.constant(2), 'str2', x=5.0)
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(1, constant_op.constant(1), 'str', x=4.0)
disabled(2, constant_op.constant(2), 'str2', x=5.0) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithOnlyArgNamed(self):
trace_count = [0]
def func(t: ops.Tensor, i: int = 1, **kwargs): # pylint: disable=bad-whitespace
del i, kwargs
trace_count[0] += 1
return t
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 3, x=4.0, y='str')
enabled(2, 4, x=4.0, y='str') # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithNotAllNamed(self):
trace_count = [0]
def func(x, y: ops.Tensor, z: int):
del y, z
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3)
enabled(1, 20, 3) # No retrace - change in ops.Tensor typed arg
enabled(2, 2, 3) # Retrace - change in untyped arg
enabled(2, 2, 4) # Retrace - change in typed arg
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsTraceWithOnlyArgsNamed(self):
trace_count = [0]
def func(x, y, *args: ops.Tensor):
del y, args
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 20, 3, 4, 5, 6)
enabled(1, 20, 3, 4, 5, 60) # No retrace - change in *args
enabled(1, 30, 7, 8, 9, 10) # Retrace - change in args
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithOnlyKwargsNamed(self):
trace_count = [0]
def func(x, y, *args, **kwargs: ops.Tensor):
del y, args, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, 5, 6, a=1.0, b=2.0, c=3.0)
enabled(
1, 2, 3, 4, 5, 6, a=1.5, b=2.5,
c=3.5) # No retrace - change in **kwargs
enabled(100, 2, 3, 4, 5, 6, a=1.0, b=2.0, c=3.0) # Retrace - change in args
enabled(
1, 2, 3, 4, 5, 100, a=1.0, b=2.0, c=3.0) # Retrace - change in *args
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsTraceWithArgsEquals(self):
trace_count = [0]
def func(
x: ops.Tensor = 0, # pylint:disable=bad-whitespace
y: int = 1, # pylint:disable=bad-whitespace
**kwargs: ops.Tensor):
del y, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(x=1, y=2, z=3)
enabled(x=1, y=3, z=3) # Retrace - change in args
enabled(x=2, y=2, z=4) # No retrace - change in args and **kwargs
enabled(x=2, y=2, z=4, u=5) # Retrace - change in **kwargs
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsWithTensorSpec(self):
def func(x: ops.Tensor, y):
return x + y
v = def_function.function(experimental_follow_type_hints=True)(func)
v = v.get_concrete_function(
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32), 3)
x = v(constant_op.constant(1.), 3)
self.assertEqual(x.numpy(), 4.)
def testFollowTypeHintsTraceWithKwArgsAndNoVarKws(self):
trace_count = [0]
def func(a: int, b: ops.Tensor,
x: ops.Tensor = 0, y: int = 1):
del a, b, y
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(0, 0, x=1, y=2)
enabled(0, 0, x=2, y=2,) # No retrace, since only tensor changed
self.assertEqual(trace_count[0], 1)
# Pass args as keyword args.
enabled(a=0, b=0, x=2, y=2,) # No retrace, args are the same
self.assertEqual(trace_count[0], 1)
enabled(a=1, b=0, x=2, y=2,) # Retrace, since non-tensor arg changed
self.assertEqual(trace_count[0], 2)
enabled(a=1, b=2, x=2, y=2) # No retrace, since only tensor changed
self.assertEqual(trace_count[0], 2)
trace_count[0] = 0
disabled = def_function.function(func, experimental_follow_type_hints=False)
disabled(0, 0, x=1, y=2)
disabled(0, 0, x=2, y=2,) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithArgsEqualsTypedKwargs(self):
trace_count = [0]
def func(x, y, **kwargs: ops.Tensor):
del y, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(x=1, y=2, z=3)
enabled(x=1, y=3, z=3) # Retrace
enabled(x=1, y=2, z=4) # No retrace
enabled(x=2, y=2, z=4) # Retrace
enabled(x=2, y=2, z=4, u=5) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsEqualsTypedArgs(self):
trace_count = [0]
def func(x: ops.Tensor, y: int, **kwargs):
del y, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(x=1, y=2, z=3)
enabled(x=1, y=3, z=3) # Retrace
enabled(x=1, y=2, z=4) # Retrace
enabled(x=2, y=2, z=3) # No retrace
enabled(x=2, y=2, z=4, u=5) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithKwOnlyArgsBasic(self):
trace_count = [0]
def func(*, a: ops.Tensor = None, b=1): # pylint: disable=bad-whitespace
del b
trace_count[0] += 1
return a
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(a=1, b=2)
enabled(a=2, b=2) # No retrace
enabled(a=1, b=1) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedArg(self):
trace_count = [0]
def func(arg: ops.Tensor, *args, kwonly, **kwargs):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1000, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedArgs(self):
trace_count = [0]
def func(arg, *args: ops.Tensor, kwonly, **kwargs):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 200, 300, 400, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedKwOnlyArg(self):
trace_count = [0]
def func(arg, *args, kwonly: ops.Tensor, **kwargs):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 2, 3, 4, kwonly=500, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedKwargs(self):
trace_count = [0]
def func(arg, *args, kwonly, **kwargs: ops.Tensor):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # No retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=600, kwarg2=700) # No retrace
self.assertEqual(trace_count[0], 4)
def testWithExtraWrapper(self):
class Foo(module.Module):
def __init__(self):
super().__init__()
self.var = None
@def_function.function
@dummy_tf_decorator
def add(self, x, y, z=1):
if self.var is None:
return x + y + z
foo = Foo()
self.assertEqual(foo.add(2, 3).numpy(), 6)
@parameterized.parameters([(def_function.function, dummy_tf_decorator),
(dummy_tf_decorator, def_function.function),
(def_function.function, def_function.function)])
def testWithExtraWrapperRedundantArgs(self, decorator1, decorator2):
class Foo(module.Module):
def __init__(self):
super().__init__()
self.var = None
@decorator1
@decorator2
def add1(self, x, y):
if self.var is None:
return x + y
foo = Foo()
with self.assertRaisesRegex(TypeError, 'got two values'):
foo.add1(2, x=3) # pylint: disable=redundant-keyword-arg,no-value-for-parameter
def testWithExtraWrapperMissingArgs(self):
class Foo(module.Module):
def __init__(self):
super().__init__()
self.var = None
@def_function.function
@dummy_tf_decorator
def add1(self, x, y):
if self.var is None:
return x + y
@def_function.function
@dummy_tf_decorator
def add2(self, x, y):
if self.var is None:
return x + y
@def_function.function
@def_function.function
def add3(self, x, y):
if self.var is None:
return x + y
foo = Foo()
with self.assertRaisesRegex(
TypeError, 'missing 1 required positional argument: \'y\''):
foo.add1(2) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(TypeError, 'missing 1 required argument: x'):
foo.add1(y=2) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(
TypeError, 'missing 1 required positional argument: \'y\''):
foo.add2(2) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(TypeError, 'missing 1 required argument: x'):
foo.add2(y=2) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(
TypeError, 'missing 1 required positional argument: \'y\''):
foo.add3(2) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(TypeError, 'missing 1 required argument: x'):
foo.add3(y=2) # pylint: disable=no-value-for-parameter
def testMissingArgsTfFunctionedMethod(self):
class A(object):
def func(self, position_arg1, position_arg2):
return position_arg1, position_arg2
@def_function.function
def decorated_method(self, position_arg1, position_arg2):
return position_arg1, position_arg2
a_instance = A()
tf_method_pos = def_function.function(a_instance.func)
with self.assertRaisesRegex(
TypeError, '.* missing 1 required argument: position_arg1'):
tf_method_pos(position_arg2='foo')
# tf.function-decorated instance methods need to be tested because of
# the __get__ method implementation.
tf_func_decorated_method = def_function.function(
a_instance.decorated_method)
tf_func_decorated_method(position_arg1='foo', position_arg2='bar')
with self.assertRaisesRegex(
TypeError, '.* missing 1 required argument: position_arg1'):
tf_func_decorated_method(position_arg2='bar')
def testMissingArgsTfFunctionedObject(self):
class A(object):
def __call__(self, position_arg1, position_arg2):
return position_arg1, position_arg2
a_instance = A()
# A tf.function-decorated callable object needs to be tested because of
# the special inspect results.
tf_func_obj = def_function.function(a_instance)
tf_func_obj(position_arg1=1, position_arg2=2)
with self.assertRaisesRegex(
TypeError, '.* missing 1 required argument: position_arg1'):
tf_func_obj(position_arg2='bar')
def testMissingArgsTfFunctionedFunctions(self):
def func_pos(position_arg1, position_arg2):
return position_arg1, position_arg2
def func_with_default(position_arg, named_arg=None):
return position_arg, named_arg
def func_pos_3args(position_arg1, position_arg2, position_arg3):
return position_arg1, position_arg2, position_arg3
tf_func_pos = def_function.function(func_pos)
with self.assertRaisesRegex(
TypeError, '.* missing 1 required argument: position_arg1'):
tf_func_pos(position_arg2='foo')
tf_func_with_default = def_function.function(func_with_default)
tf_func_with_default(position_arg='bar')
with self.assertRaisesRegex(TypeError,
'.* missing 1 required argument: position_arg'):
tf_func_with_default(named_arg='foo')
tf_func_pos_3args = def_function.function(func_pos_3args)
with self.assertRaisesRegex(
TypeError,
'.* missing required arguments: position_arg1, position_arg3'):
tf_func_pos_3args(position_arg2='foo')
def testShapeInferencePropagateConstNestedStack(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((None, None), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(x, s):
old_shape = array_ops.shape(x)
new_shape = array_ops.stack([old_shape[0], s], axis=0)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(3, 6), dtype=dtypes.int32)
])
def g(x):
y = f(x, s=5)
assert y.shape.as_list() == [3, 5], y.shape.as_list()
return y
self.assertAllEqual(
g(array_ops.zeros([3, 6], dtype=dtypes.int32)), array_ops.ones([3, 5]))
def testShapeInferencePropagateConstNestedUnstackStack(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((None, None), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(x, s):
s0, _ = array_ops.unstack(array_ops.shape(x), axis=0)
new_shape = array_ops.stack([s0, s], axis=0)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(3, 6), dtype=dtypes.int32)
])
def g(x):
y = f(x, s=5)
assert y.shape.as_list() == [3, 5], y.shape.as_list()
return y
self.assertAllEqual(
g(array_ops.zeros([3, 6], dtype=dtypes.int32)), array_ops.ones([3, 5]))
def testShapeInferencePropagateConstNestedConcat(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(d1, d2, d3):
new_shape = array_ops.concat([[d1], [d2], [d3]], axis=-1)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function()
def g():
y = f(1, 2, 3)
assert y.shape.as_list() == [1, 2, 3], y.shape.as_list()
return y
self.assertAllEqual(g(), array_ops.ones([1, 2, 3]))
def testShapeInferencePropagateConstDoubleNested(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(d1, d2, d3):
new_shape = array_ops.concat([[d1], [d2], [d3]], axis=-1)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function()
def g():
y = def_function.function(f)(1, 2, 3)
assert y.shape.as_list() == [1, 2, 3], y.shape.as_list()
return y
self.assertAllEqual(g(), array_ops.ones([1, 2, 3]))
@test_util.run_v2_only
def testControlDependencyAfterInline(self):
v = variables.Variable(0.)
@def_function.function
def assign():
return v.assign(1.)
@def_function.function
def assign_add():
return v.assign_add(1.)
@def_function.function
def f():
check_ops.assert_equal_v2(assign(), 1.)
check_ops.assert_equal_v2(assign_add(), 2.)
# We don't have a way to inspect the inlined graph in Python, so we run it
# multiple times to have more confidence the dependency is correct.
for _ in range(30):
f()
@test_util.run_v2_only
def testReadInFuncWriteOutside(self):
# Run many times since we are testing for a potential race condition.
for _ in range(30):
# pylint: disable=cell-var-from-loop
v = variables.Variable(1.)
@def_function.function
def add_one():
return v + 1.
@def_function.function
def get_v_plus_one():
v_plus_one = add_one()
v.assign_add(2.0)
return v_plus_one
self.assertAllEqual(get_v_plus_one(), 2.0)
def testOpExpandErrorMessage(self):
@def_function.function
def test_fn():
if array_ops.constant(False):
return array_ops.constant(1)
else:
return script_ops.eager_py_func(
func=lambda: array_ops.constant([2.]), inp=(), Tout=dtypes.int32)
error_pattern = re.compile(r'Graph execution error.*func=lambda', re.DOTALL)
with self.assertRaisesRegex(errors.InvalidArgumentError, error_pattern):
test_fn()
class MultiDeviceTest(test.TestCase, parameterized.TestCase):
@test_util.run_gpu_only
def testMultiDeviceOutput(self):
"""Tests that functions can produce outputs on multiple devices."""
@function.defun
def func(a, b, transpose_a):
with ops.device('/device:CPU:0'):
m1 = math_ops.matmul(a, b, transpose_a=transpose_a)
with ops.device('/device:GPU:0'):
m2 = math_ops.matmul(a, b, transpose_a=transpose_a)
return m1, m2
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
m1, m2 = func(t, t, transpose_a=True)
self.assertAllEqual(m1.numpy(), [[10, 14], [14, 20]])
self.assertRegex(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), [[10, 14], [14, 20]])
self.assertRegex(m2.backing_device, 'GPU')
@test_util.run_gpu_only
def testEmptyBody(self):
@function.defun
def func(a, b):
return b, a
with ops.device('/device:CPU:0'):
a = array_ops.identity(3.0)
with ops.device('/device:GPU:0'):
b = array_ops.identity(5.0)
m1, m2 = func(a, b)
self.assertAllEqual(m1.numpy(), 5.0)
self.assertRegex(m1.backing_device, 'GPU')
self.assertAllEqual(m2.numpy(), 3.0)
self.assertRegex(m2.backing_device, 'CPU')
@test_util.run_gpu_only
def testMultiDeviceInt32(self):
"""Tests that multi-device functions can take and output INT32s.
When an INT32 device tensor is fed into a function, it is copied to CPU
by the eager runtime. The function sees all INT32 inputs on CPU.
We set allocator attribute 'on_host' for INT32 outputs. They can be
partitioned into the GPU component function, but will be allocated on
CPU nevertheless.
There is experimental support for `ints_on_device` in
FunctionLibraryRuntime now. We can try that.
"""
with ops.device('/device:CPU:0'):
int_cpu = constant_op.constant(3, dtype=dtypes.int32)
resource = resource_variable_ops.ResourceVariable(5, dtype=dtypes.int32)
with ops.device('/device:GPU:0'):
int_gpu = constant_op.constant(7, dtype=dtypes.int32)
@function.defun
def func(int_cpu, resource, int_gpu):
with ops.device('/device:CPU:0'):
m1 = int_cpu * resource + int_gpu
with ops.device('/device:GPU:0'):
# This computation will happen on GPU but m2 will be copied to CPU.
m2 = int_gpu * resource + int_cpu + 1
return m1, m2
m1, m2 = func(int_cpu, resource, int_gpu)
self.assertAllEqual(m1.numpy(), 22)
self.assertRegex(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), 39)
self.assertRegex(m2.backing_device, 'CPU')
# flip arguments
m1, m2 = func(int_gpu, resource, int_cpu)
self.assertAllEqual(m1.numpy(), 38)
self.assertRegex(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), 23)
self.assertRegex(m2.backing_device, 'CPU')
@test_util.run_gpu_only
def testMultiDeviceColocateWith(self):
"""Tests that function's outputs respect colocation constraints."""
@function.defun
def func(a, b):
with ops.colocate_with(a):
ra = 2 * a
with ops.colocate_with(b):
rb = 3 * b
return ra, rb
devices = ['/device:CPU:0', '/device:GPU:0']
for dev1, dev2 in itertools.product(devices, devices):
with ops.device(dev1):
a = array_ops.identity(1.0)
with ops.device(dev2):
b = array_ops.identity(10.0)
ra, rb = func(a, b)
self.assertEqual(ra.numpy(), 2.0)
self.assertRegex(ra.backing_device, dev1)
self.assertEqual(rb.numpy(), 30.0)
self.assertRegex(rb.backing_device, dev2)
@test_util.run_gpu_only
def testMultiDeviceResources(self):
with ops.device('/device:CPU:0'):
c1 = resource_variable_ops.ResourceVariable(2.0)
c2 = resource_variable_ops.ResourceVariable(7.0)
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
g2 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def func(resource1, resource2):
with ops.device('/device:CPU:0'):
result1 = resource1 * g2
with ops.device('/device:GPU:0'):
result2 = resource2 * c2
return result1, result2
r1, r2 = func(c1, g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 21.0)
self.assertRegex(r2.backing_device, 'GPU')
    # Call with flipped inputs. Check that we look at the resource's
    # device and reinstantiate the function when the inputs' devices change.
r1, r2 = func(g1, c1)
self.assertEqual(r1.numpy(), 15.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 14.0)
self.assertRegex(r2.backing_device, 'GPU')
@test_util.run_gpu_only
def testOutputResources(self):
with ops.device('/device:CPU:0'):
c1 = resource_variable_ops.ResourceVariable(2.0)
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun
def func(resource1, resource2):
with ops.device('/device:CPU:0'):
result1 = resource1 * 5
with ops.device('/device:GPU:0'):
result2 = resource2 * 7
return result1, resource1.handle, result2, resource2.handle
r1, res1, r2, res2 = func(c1, g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 21.0)
self.assertRegex(r2.backing_device, 'GPU')
def check_handle(handle, expected_value):
self.assertRegex(handle.backing_device, 'CPU')
tensor = gen_resource_variable_ops.read_variable_op(
handle, dtypes.float32)
self.assertEqual(tensor.numpy(), expected_value)
# Check that handles returned from functions are on CPU and an op using
# the resource handle is correctly placed on the device backing the
# resource.
check_handle(res1, 2.0)
check_handle(res2, 3.0)
    # Call with flipped inputs to make sure the function is reinstantiated and
    # the eager runtime does not mess up the device assignment for ops consuming
    # handles returned from defuns.
r1, res1, r2, res2 = func(g1, c1)
self.assertEqual(r1.numpy(), 15.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 14.0)
self.assertRegex(r2.backing_device, 'GPU')
check_handle(res1, 3.0)
check_handle(res2, 2.0)
@test_util.run_gpu_only
def testPassResourceThroughNestedFunctionCall(self):
"""Test passing GPU resource to noinline function call placed on CPU.
PartitionedCallOp must not enforce any particular device assignment for the
    resource output. The inner function is marked `_nospecialize`, so Grappler will
    not prune the unused function output.
"""
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun_with_attributes(attributes={
'_noinline': True,
'_nospecialize': True
})
def inner(resource1):
return resource1 * 2, resource1.handle
@function.defun
def outer(resource1):
with ops.device('/device:CPU:0'):
r1, _ = inner(resource1)
return r1
r1 = outer(g1)
self.assertEqual(r1.numpy(), 6.0)
self.assertRegex(r1.backing_device, 'CPU')
@test_util.run_gpu_only
def testReturnResourceFromNestedFunctionCall(self):
"""Test returning GPU resource from noinline function call placed on CPU.
When inferring output devices for the return value, do not set a device for
returns of DT_RESOURCE data type based on the device assignment of the node
    that produced that resource. For example, a function call placed on CPU can
    return resources on GPU.
"""
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun_with_attributes(attributes={
'_noinline': True
})
def inner(resource1):
resource1.assign_add(2.0)
return resource1 * 2, resource1.handle
@function.defun
def outer(resource1):
with ops.device('/device:CPU:0'):
r1, res1 = inner(resource1)
return r1, res1
r1, res1 = outer(g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegex(r1.backing_device, 'CPU')
def check_handle(handle, expected_value):
self.assertRegex(handle.backing_device, 'CPU')
tensor = gen_resource_variable_ops.read_variable_op(
handle, dtypes.float32)
self.assertEqual(tensor.numpy(), expected_value)
# Check that handles returned from functions are on CPU and an op using
# the resource handle is correctly placed on the device backing the
# resource.
check_handle(res1, 5.0)
@test_util.run_gpu_only
def testComplexInputOutputDevicePattern(self):
"""Tests input/output mapping logic in partitioning."""
with ops.device('/device:CPU:0'):
rc0 = resource_variable_ops.ResourceVariable(2.0)
rc1 = resource_variable_ops.ResourceVariable(3.0)
cc0 = array_ops.identity(5.0)
cc1 = array_ops.identity(7.0)
with ops.device('/device:GPU:0'):
rg0 = resource_variable_ops.ResourceVariable(11.0)
rg1 = resource_variable_ops.ResourceVariable(13.0)
cg0 = array_ops.identity(17.0)
cg1 = array_ops.identity(19.0)
# Make sure tensors are on expected devices.
for tensor in [cc0, cc1]:
self.assertRegex(tensor.backing_device, 'CPU:0')
for tensor in [cg0, cg1]:
self.assertRegex(tensor.backing_device, 'GPU:0')
@function.defun
def func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1):
with ops.device('/device:CPU:0'):
m1 = rc0 * cg0
with ops.device('/device:GPU:0'):
m2 = rg0 * cc0
with ops.device('/device:CPU:0'):
r1 = 1000.0 * m2 + rc1 * cg1
with ops.device('/device:GPU:0'):
r2 = 1000.0 * m1 + rg1 * cc1
return r1, r2, m2, m1
r1, r2, m2, m1 = func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1)
self.assertRegex(m1.backing_device, 'CPU')
self.assertRegex(r1.backing_device, 'CPU')
self.assertRegex(m2.backing_device, 'GPU')
self.assertRegex(r2.backing_device, 'GPU')
self.assertEqual(m1.numpy(), 34.0)
self.assertEqual(r1.numpy(), 55000.0 + 3.0 * 19.0)
self.assertEqual(m2.numpy(), 55.0)
self.assertEqual(r2.numpy(), 34000.0 + 13.0 * 7.0)
@test_util.run_gpu_only
def testArgumentPruning(self):
"""Tests functions taking unnecessary arguments."""
with ops.device('/device:CPU:0'):
c1 = constant_op.constant(5.0)
c2 = constant_op.constant(7.0)
with ops.device('/device:GPU:0'):
g1 = constant_op.constant(11.0)
g2 = constant_op.constant(13.0)
g3 = constant_op.constant(17.0)
@function.defun
def func(g1, g2, c1, g3, c2): # pylint: disable=unused-argument
# arguments g1 and g2 are unused and can be pruned by grappler.
return c1 * g3 * c2
result = func(g1, g2, c1, g3, c2)
self.assertEqual(result.numpy(), 5.0 * 7.0 * 17.0)
def testNestedCallWatchedVariables(self):
v = variables.Variable(4.)
@def_function.function
def f():
return v ** 2.
with backprop.GradientTape() as tape:
f()
self.assertEqual((v,), tape.watched_variables())
@def_function.function
def g():
return f()
with backprop.GradientTape() as tape:
g()
self.assertEqual((v,), tape.watched_variables())
# f() can rely on the variable being read during its trace. g() checks that
# variables from a function which knows about them are recorded on the
# tape. h() tests that functions forward knowledge of variables to callers.
@def_function.function
def h():
return g()
with backprop.GradientTape() as tape:
h()
self.assertEqual((v,), tape.watched_variables())
def testReplaceCaptureWithDeferred(self):
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
z = constant_op.constant(3.0)
@def_function.function
def fn():
a = x + y
b = a + z
return b
concrete_fn = fn.get_concrete_function()
self.assertAllEqual(concrete_fn(), 6.0)
value = constant_op.constant(4.0)
def closure():
return value
concrete_fn.replace_capture_with_deferred_capture(
concrete_fn.captured_inputs[1],
closure,
spec=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
placeholder=concrete_fn.inputs[1])
self.assertAllEqual(concrete_fn(), 8.0)
value = constant_op.constant(5.0)
self.assertAllEqual(concrete_fn(), 9.0)
def testRaiseReplaceCaptureWithDeferredTypeSpecMismatch(self):
bool_captured_tensor = constant_op.constant(True)
float_captured_tensor = constant_op.constant([3.], dtype=dtypes.float32)
value = constant_op.constant([2.], dtype=dtypes.float32)
@def_function.function
def fn():
deferred_tensor = ops.get_default_graph().capture_call_time_value(
lambda: value,
tensor_spec.TensorSpec(shape=(1,), dtype=dtypes.float32))
if bool_captured_tensor:
return deferred_tensor
else:
return deferred_tensor + float_captured_tensor
concrete_fn = fn.get_concrete_function()
self.assertAllEqual(concrete_fn(), [2.])
new_bool_captured_tensor = constant_op.constant(False)
def bool_closure():
return new_bool_captured_tensor
    # Test that an error is raised when replacing a bool capture with a closure
    # whose output type is float32.
new_float_captured_tensor = constant_op.constant([3.], dtype=dtypes.float32)
def float_closure():
return new_float_captured_tensor
with self.assertRaisesRegex(ValueError,
'Attempting to substitute closure with spec*'):
concrete_fn.replace_capture_with_deferred_capture(
bool_captured_tensor,
float_closure,
spec=tensor_spec.TensorSpec(shape=(1,), dtype=dtypes.float32))
# Test replace without a placeholder
concrete_fn.replace_capture_with_deferred_capture(
bool_captured_tensor,
bool_closure,
spec=tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool))
self.assertAllEqual(concrete_fn(), [5.])
def testConcreteFunctionSetExternalCapture(self):
captured_tensor = constant_op.constant([1.])
value = constant_op.constant([2.])
@def_function.function
def fn():
deferred_tensor = ops.get_default_graph().capture_call_time_value(
lambda: value,
tensor_spec.TensorSpec(shape=(1,), dtype=dtypes.float32))
return deferred_tensor + captured_tensor
cf = fn.get_concrete_function()
self.assertLen(cf._captured_inputs, 2)
self.assertEqual(list(map(callable, cf._captured_inputs)), [False, True])
self.assertAllEqual(cf(), [3.])
# Reset capture to a deferred one, reset deferred capture to a capture.
cf.set_external_captures([cf._captured_inputs[1], cf._captured_inputs[0]])
value = constant_op.constant([3.])
self.assertAllEqual(cf(), [4.])
def testGraphReplaceCaptureAndSetExternalCapture(self):
bool_captured_tensor = constant_op.constant(True)
float_captured_tensor = constant_op.constant([3.], dtype=dtypes.float32)
value = constant_op.constant([2.], dtype=dtypes.float32)
@def_function.function
def fn():
deferred_tensor = ops.get_default_graph().capture_call_time_value(
lambda: value,
tensor_spec.TensorSpec(shape=(1,), dtype=dtypes.float32))
if bool_captured_tensor:
return deferred_tensor
else:
return deferred_tensor + float_captured_tensor
concrete_fn = fn.get_concrete_function()
self.assertAllEqual(concrete_fn(), [2.])
new_bool_captured_tensor = constant_op.constant(False)
def closure():
return new_bool_captured_tensor
concrete_fn.graph.replace_capture_with_deferred_capture(
concrete_fn.captured_inputs[0],
closure,
spec=tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool),
placeholder=concrete_fn.inputs[1])
concrete_fn.set_external_captures([
closure, concrete_fn._captured_inputs[1],
concrete_fn._captured_inputs[2]
])
self.assertAllEqual(concrete_fn(), [5.])
def testDeferredCapture(self):
value = 1.0
@def_function.function
def lazy_capture(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(None))
return x + y
self.assertAllEqual(lazy_capture(2.0), 3.0)
# After changing the value of `value` the function call should return a
# different result.
value = 2.0
self.assertAllEqual(lazy_capture(2.0), 4.0)
def testNestedDeferredCapture(self):
value = 1.0
@def_function.function
def inner(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(None))
return x + y
@def_function.function
def outer(x):
return inner(x)
self.assertAllEqual(outer(2.0), 3.0)
# After changing the value of `value` the function call should return a
# different result.
value = 2.0
self.assertAllEqual(outer(2.0), 4.0)
def testNestedDeferredCaptureInTFWhileLoop(self):
value = 1.
@def_function.function
def inner(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(None))
return x + y
@def_function.function
def outer():
dummy = constant_op.constant(True)
sums = constant_op.constant(0.)
while dummy:
directives.set_loop_options(
shape_invariants=[(sums, tensor_shape.TensorShape(None))])
sums += inner(2.)
dummy = constant_op.constant(False)
return sums
self.assertAllEqual(outer(), 3.)
value = constant_op.constant(2.)
self.assertAllEqual(outer(), 4.)
value = constant_op.constant(3.)
self.assertAllEqual(outer(), 5.)
def testDeferredCaptureWithKey(self):
value0 = 1.0
value1 = 2.0
@def_function.function
def lazy_capture(x):
w = ops.get_default_graph().capture_call_time_value(
lambda: value0, tensor_spec.TensorSpec(None), key=0)
y = ops.get_default_graph().capture_call_time_value(
lambda: value1, tensor_spec.TensorSpec(None), key=1)
def bad_closure():
raise ValueError('Should not run')
z = ops.get_default_graph().capture_call_time_value(
bad_closure, tensor_spec.TensorSpec(None), key=1)
return x + y + w + z
self.assertAllEqual(lazy_capture(2.0), 7.0)
value0 = 2.0
value1 = 3.0
self.assertAllEqual(lazy_capture(2.0), 10.0)
def testDeferredCaptureTypeError(self):
value = constant_op.constant(1.0)
@def_function.function
def lazy_capture(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(()))
return x + y
self.assertAllEqual(lazy_capture(2.0), 3.0)
# dtype mismatch
value = constant_op.constant(1)
with self.assertRaisesRegex(ValueError, 'Value .* to a tensor with dtype'):
lazy_capture(2.0)
# shape mismatch
value = constant_op.constant([1.0])
with self.assertRaisesRegex(ValueError, 'Value .* shape'):
lazy_capture(2.0)
def testDeferredCaptureReturnNestWithCompositeTensor(self):
i_s = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64),
constant_op.constant([2]))
r_t = ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]])
s_t = sparse_tensor.SparseTensor(
values=[1, 2, 3], indices=[[0], [8], [10]], dense_shape=[20])
@def_function.function
def lazy_capture():
y = ops.get_default_graph().capture_call_time_value(
lambda: {'i': i_s, 't': (r_t, s_t)},
{'i': indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int32),
't': (ragged_tensor.RaggedTensorSpec([2, None, None], dtypes.int32),
sparse_tensor.SparseTensorSpec([None], dtypes.int32))})
return y['i'], y['t']
i, (r, s) = lazy_capture()
self.assertAllEqual(i_s.values, i.values)
self.assertAllEqual(i_s.indices, i.indices)
self.assertAllEqual(i_s.dense_shape, i.dense_shape)
self.assertAllEqual(r_t, r)
self.assertAllEqual(s_t.indices, s.indices)
self.assertAllEqual(s_t.values, s.values)
self.assertAllEqual(s_t.dense_shape, s.dense_shape)
def testDeferredCaptureCompositeTensorSpecTypeMismatch(self):
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64))
@def_function.function
def lazy_capture():
return ops.get_default_graph().capture_call_time_value(
lambda: value,
indexed_slices.IndexedSlicesSpec(dtype=dtypes.int32))
# Type matches spec.
lazy_capture()
# Extra dense shape component.
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64),
constant_op.constant([2]))
with self.assertRaises(ValueError):
lazy_capture()
# Index dtype mismatch int32 vs. int64.
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1]))
with self.assertRaises(ValueError):
lazy_capture()
def testFunctoolsLruCache(self):
self.skipTest(
"b/194845243: inspect.getfullargspec doesn't unwrap Python decorators.")
@def_function.function
@functools.lru_cache(maxsize=2)
def f(a):
return 2 * a
self.assertAllEqual(f(1), array_ops.constant(2))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
the-stack_0_3583 | import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
import time
import ot
from scipy import linalg
from scipy import sparse
import gromovWassersteinAveraging as gwa
import spectralGW as sgw
from geodesicVisualization import *
from GromovWassersteinGraphToolkit import *
import json
# Import Graph Partitioning Packages
from infomap import Infomap
# Load the S-GWL code
import DataIO as DataIO
import EvaluationMeasure as Eval
import GromovWassersteinGraphToolkit as GwGt
import pickle
import warnings
# Load modules for network partitioning experiments
from networkx.algorithms.community import greedy_modularity_communities
from networkx.algorithms.community.asyn_fluid import asyn_fluidc
from networkx.algorithms.community.quality import performance, coverage, modularity
from sklearn import metrics
from scipy.cluster.hierarchy import dendrogram, linkage, cut_tree
from scipy.signal import find_peaks
"""
Define some helper functions
"""
def graph_partition_gd2(cost_s, p_s, p_t,idx2node, ot_hyperpara, trans0=None):
"""
** May 19, 2020: Gradient descent version of graph_partition
    Achieves a single graph partition by calculating the Gromov-Wasserstein
    discrepancy between the target graph and the proposed one.
Args:
cost_s: (n_s, n_s) adjacency matrix of source graph
p_s: (n_s, 1) the distribution of source nodes
p_t: (n_t, 1) the distribution of target nodes
idx2node: a dictionary {key = idx of row in cost, value = name of node}
ot_hyperpara: a dictionary of hyperparameters
Returns:
sub_costs: a dictionary {key: cluster idx,
value: sub cost matrices}
sub_probs: a dictionary {key: cluster idx,
value: sub distribution of nodes}
sub_idx2nodes: a dictionary {key: cluster idx,
value: a dictionary mapping indices to nodes' names
trans: (n_s, n_t) the optimal transport
"""
cost_t = np.diag(p_t[:, 0])
cost_s = np.asarray(cost_s)
# cost_t = 1 / (1 + cost_t)
trans, log = gwa.gromov_wasserstein_asym_fixed_initialization(cost_s, cost_t, p_s.flatten(), p_t.flatten(), trans0)
d_gw = log['gw_dist']
sub_costs, sub_probs, sub_idx2nodes = node_cluster_assignment(cost_s, trans, p_s, p_t, idx2node)
return sub_costs, sub_probs, sub_idx2nodes, trans, d_gw
def get_partition(coup):
est_idx = np.argmax(coup, axis=1)
num_clusters = np.max(est_idx)
partition = []
for j in range(num_clusters+1):
partition.append(set(np.argwhere(est_idx == j).T[0]))
return partition
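# Illustrative sketch (assumption: toy numbers, not part of the original
# experiment): get_partition assigns each source node to the column with the
# largest coupling mass, so a hypothetical coupling
#   coup = np.array([[0.9, 0.1],
#                    [0.2, 0.8],
#                    [0.7, 0.3]])
# gives est_idx = [0, 1, 0] and the partition [{0, 2}, {1}].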
"""
Main Experiment
"""
num_trials = 10
num_nodes = 1000
clique_size = 150
p_in = 0.5
ps_out = [0.08, 0.10, 0.12, 0.15]
ot_dict = {'loss_type': 'L2', # the key hyperparameters of GW distance
'ot_method': 'proximal',
'beta': 0.15,
'outer_iteration': 2 * num_nodes, # outer, inner iterations and error bound of optimal transport
'iter_bound': 1e-30,
'inner_iteration': 5,
'sk_bound': 1e-30,
'node_prior': 0.0001,
           'max_iter': 1,  # iteration and error bound for calculating barycenter
'cost_bound': 1e-16,
'update_p': False, # optional updates of source distribution
'lr': 0,
'alpha': 0}
# Range to search for optimal number of clusters over
num_clusts = list(range(3,10))
train_times = []
specGW_avg_amis = []
specGW_avg_times = []
GWL_avg_amis = []
GWL_avg_times = []
infoMap_avg_amis = []
infoMap_avg_times = []
for pn in range(len(ps_out)):
print('Starting p_out index = ',pn)
##############################################
# Training specGW
##############################################
G = nx.gaussian_random_partition_graph(n=num_nodes, s=clique_size, v=8,
p_in=p_in, p_out=ps_out[pn], directed=True)
p_s, cost_s, idx2node = DataIO.extract_graph_info(G)
p_s = (p_s + 1) ** 0.01
p_s /= np.sum(p_s)
start = time.time()
t = 10
cost = sgw.directed_heat_kernel(G,t)
modularities = []
for j in num_clusts:
p_t = GwGt.estimate_target_distribution({0: p_s}, dim_t=j)
sub_costs, sub_probs, sub_idx2nodes, coup, d_gw = graph_partition_gd2(cost,
p_s,
p_t,
idx2node,
ot_dict)
partition = get_partition(coup)
modularities.append(modularity(G,partition))
est_num_clust = num_clusts[np.argmax(modularities)]
ts = np.linspace(5,15,10)
modularities = []
for t in ts:
cost = sgw.directed_heat_kernel(G,t)
p_t = GwGt.estimate_target_distribution({0: p_s}, dim_t=est_num_clust)
sub_costs, sub_probs, sub_idx2nodes, coup, d_gw = graph_partition_gd2(cost,
p_s,
p_t,
idx2node,
ot_dict)
partition = get_partition(coup)
modularities.append(modularity(G,partition))
est_t_value = ts[np.argmax(modularities)]
end = time.time()
training_time = end - start
train_times.append(training_time)
print('Time to Train:', training_time)
print('Estimated Clusters:', est_num_clust)
print('Estimated t value:', est_t_value)
##############################################
# Main Experiment
##############################################
gwl_amis = []
gwl_times = []
specGW_amis = []
specGW_times = []
infoMap_amis = []
infoMap_times = []
for j in range(num_trials):
# Create Graph
G = nx.gaussian_random_partition_graph(n=num_nodes, s=clique_size, v=5,
p_in=p_in, p_out=ps_out[pn], directed=True)
gt = np.zeros((num_nodes,))
for i in range(len(G.nodes)):
gt[i] = G.nodes[i]['block']
num_partitions = int(np.max(gt) + 1)
p_s, cost_s, idx2node = DataIO.extract_graph_info(G)
p_s = (p_s + 1) ** 0.01
p_s /= np.sum(p_s)
# Run SpecGW
start = time.time()
cost = sgw.directed_heat_kernel(G,est_t_value)
p_t = GwGt.estimate_target_distribution({0: p_s}, dim_t=est_num_clust)
sub_costs, sub_probs, sub_idx2nodes, coup, d_gw = graph_partition_gd2(cost,
p_s,
p_t,
idx2node,
ot_dict)
est_idx = np.argmax(coup, axis=1)
ami = metrics.adjusted_mutual_info_score(gt, est_idx, average_method='max')
end = time.time()
specGW_amis.append(ami)
specGW_times.append(end - start)
# print('SpecGW AMI:',ami,' Time:',end -start)
# Run GWL
start = time.time()
sub_costs, sub_probs, sub_idx2nodes = GwGt.recursive_graph_partition(cost_s,
p_s,
idx2node,
ot_dict,
max_node_num=300)
est_idx = np.zeros((num_nodes,))
for n_cluster in range(len(sub_idx2nodes)):
for key in sub_idx2nodes[n_cluster].keys():
idx = sub_idx2nodes[n_cluster][key]
est_idx[idx] = n_cluster
ami = metrics.adjusted_mutual_info_score(gt, est_idx, average_method='max')
end = time.time()
gwl_amis.append(ami)
gwl_times.append(end-start)
# print('GWL AMI:',ami,' Time:',end -start)
# Run InfoMap
start = time.time()
im = Infomap()
for edge in G.edges:
im.add_link(edge[0], edge[1])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
if node.is_leaf:
est_idx[node.node_id] = node.module_id
ami = metrics.adjusted_mutual_info_score(gt, est_idx, average_method='max')
end = time.time()
infoMap_amis.append(ami)
infoMap_times.append(end-start)
# print('InfoMap AMI:',ami,' Time:',end -start)
specGW_avg_amis.append(np.mean(specGW_amis))
specGW_avg_times.append(np.mean(specGW_times))
GWL_avg_amis.append(np.mean(gwl_amis))
GWL_avg_times.append(np.mean(gwl_times))
infoMap_avg_amis.append(np.mean(infoMap_amis))
infoMap_avg_times.append(np.mean(infoMap_times))
print('Average AMIs:')
print('p_out','specGW','GWL','Infomap')
for j in range(len(ps_out)):
print(ps_out[j],np.round(specGW_avg_amis,3)[j],np.round(GWL_avg_amis,3)[j],np.round(infoMap_avg_amis,3)[j])
print('Average times:')
print('p_out','specGW','GWL','Infomap')
for j in range(len(ps_out)):
print(ps_out[j],np.round(specGW_avg_times,2)[j],np.round(GWL_avg_times,2)[j],np.round(infoMap_avg_times,2)[j])
## Store results
ami_p_out = []
ami_specGW = []
ami_GWL = []
ami_Infomap = []
times_p_out = []
times_specGW = []
times_GWL = []
times_Infomap = []
for j in range(len(ps_out)):
ami_p_out.append(ps_out[j])
ami_specGW.append(np.round(specGW_avg_amis,3)[j])
ami_GWL.append(np.round(GWL_avg_amis,3)[j])
ami_Infomap.append(np.round(infoMap_avg_amis,3)[j])
times_p_out.append(ps_out[j])
times_specGW.append(np.round(specGW_avg_times,2)[j])
times_GWL.append(np.round(GWL_avg_times,2)[j])
times_Infomap.append(np.round(infoMap_avg_times,2)[j])
res_ami = {}#pd.DataFrame()
res_ami['p_out'] = ami_p_out
res_ami['specGW'] = ami_specGW
res_ami['GWL'] = ami_GWL
res_ami['Infomap'] = ami_Infomap
res_times = {}#pd.DataFrame()
res_times['p_out'] = times_p_out
res_times['specGW'] = times_specGW
res_times['GWL'] = times_GWL
res_times['Infomap'] = times_Infomap
with open('res_randomGraphPartitioning.txt', 'w') as outfile:
json.dump(['Average AMIs',
res_ami,
'Average times',
res_times], outfile,indent=0) |
the-stack_0_3586 | import gzip
import os
import struct
import tempfile
import unittest
from io import BytesIO, StringIO, TextIOWrapper
from unittest import mock
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.move import file_move_safe
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, UploadedFile,
)
try:
from PIL import Image
except ImportError:
Image = None
else:
from django.core.files import images
class FileTests(unittest.TestCase):
def test_unicode_uploadedfile_name(self):
uf = UploadedFile(name='¿Cómo?', content_type='text')
self.assertIs(type(repr(uf)), str)
def test_unicode_file_name(self):
f = File(None, 'djángö')
self.assertIs(type(repr(f)), str)
def test_context_manager(self):
orig_file = tempfile.TemporaryFile()
base_file = File(orig_file)
with base_file as f:
self.assertIs(base_file, f)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
self.assertTrue(orig_file.closed)
def test_open_resets_opened_file_to_start_and_returns_context_manager(self):
file = File(BytesIO(b'content'))
file.read()
with file.open() as f:
self.assertEqual(f.read(), b'content')
def test_open_reopens_closed_file_and_returns_context_manager(self):
temporary_file = tempfile.NamedTemporaryFile(delete=False)
file = File(temporary_file)
try:
file.close()
with file.open() as f:
self.assertFalse(f.closed)
finally:
# remove temporary file
os.unlink(file.name)
def test_namedtemporaryfile_closes(self):
"""
The symbol django.core.files.NamedTemporaryFile is assigned as
a different class on different operating systems. In
any case, the result should minimally mock some of the API of
tempfile.NamedTemporaryFile from the Python standard library.
"""
tempfile = NamedTemporaryFile()
self.assertTrue(hasattr(tempfile, "closed"))
self.assertFalse(tempfile.closed)
tempfile.close()
self.assertTrue(tempfile.closed)
def test_file_mode(self):
# Should not set mode to None if it is not present.
# See #14681, stdlib gzip module crashes if mode is set to None
file = SimpleUploadedFile("mode_test.txt", b"content")
self.assertFalse(hasattr(file, 'mode'))
gzip.GzipFile(fileobj=file)
def test_file_iteration(self):
"""
File objects should yield lines when iterated over.
Refs #22107.
"""
file = File(BytesIO(b'one\ntwo\nthree'))
self.assertEqual(list(file), [b'one\n', b'two\n', b'three'])
def test_file_iteration_windows_newlines(self):
"""
#8149 - File objects with \r\n line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_mac_newlines(self):
"""
#8149 - File objects with \r line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\rtwo\rthree'))
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_mixed_newlines(self):
f = File(BytesIO(b'one\rtwo\nthree\r\nfour'))
self.assertEqual(list(f), [b'one\r', b'two\n', b'three\r\n', b'four'])
def test_file_iteration_with_unix_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\ntwo\nthree'))
# Set chunk size to create a boundary after \n:
# b'one\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\n', b'two\n', b'three'])
def test_file_iteration_with_windows_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
# Set chunk size to create a boundary between \r and \n:
# b'one\r\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_with_mac_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\rtwo\rthree'))
# Set chunk size to create a boundary after \r:
# b'one\r...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_with_text(self):
f = File(StringIO('one\ntwo\nthree'))
self.assertEqual(list(f), ['one\n', 'two\n', 'three'])
def test_readable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.readable())
self.assertFalse(test_file.readable())
def test_writable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.writable())
self.assertFalse(test_file.writable())
with tempfile.TemporaryFile('rb') as temp, File(temp, name='something.txt') as test_file:
self.assertFalse(test_file.writable())
def test_seekable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.seekable())
self.assertFalse(test_file.seekable())
def test_io_wrapper(self):
content = "vive l'été\n"
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
test_file.write(content.encode())
test_file.seek(0)
wrapper = TextIOWrapper(test_file, 'utf-8', newline='\n')
self.assertEqual(wrapper.read(), content)
wrapper.write(content)
wrapper.seek(0)
self.assertEqual(wrapper.read(), content * 2)
test_file = wrapper.detach()
test_file.seek(0)
self.assertEqual(test_file.read(), (content * 2).encode())
class NoNameFileTestCase(unittest.TestCase):
"""
Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
urllib.urlopen()
"""
def test_noname_file_default_name(self):
self.assertIsNone(File(BytesIO(b'A file with no name')).name)
def test_noname_file_get_size(self):
self.assertEqual(File(BytesIO(b'A file with no name')).size, 19)
class ContentFileTestCase(unittest.TestCase):
def test_content_file_default_name(self):
self.assertIsNone(ContentFile(b"content").name)
def test_content_file_custom_name(self):
"""
The constructor of ContentFile accepts 'name' (#16590).
"""
name = "I can have a name too!"
self.assertEqual(ContentFile(b"content", name=name).name, name)
def test_content_file_input_type(self):
"""
ContentFile can accept both bytes and strings and the retrieved content
is of the same type.
"""
self.assertIsInstance(ContentFile(b"content").read(), bytes)
self.assertIsInstance(ContentFile("español").read(), str)
def test_open_resets_file_to_start_and_returns_context_manager(self):
file = ContentFile(b'content')
with file.open() as f:
self.assertEqual(f.read(), b'content')
with file.open() as f:
self.assertEqual(f.read(), b'content')
class InMemoryUploadedFileTests(unittest.TestCase):
def test_open_resets_file_to_start_and_returns_context_manager(self):
uf = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
uf.read()
with uf.open() as f:
self.assertEqual(f.read(), '1')
class DimensionClosingBug(unittest.TestCase):
"""
get_image_dimensions() properly closes files (#8817)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay opened.
"""
empty_io = BytesIO()
try:
images.get_image_dimensions(empty_io)
finally:
self.assertTrue(not empty_io.closed)
@unittest.skipUnless(Image, "Pillow not installed")
def test_closing_of_filenames(self):
"""
        get_image_dimensions() called with a filename should close the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks if the file was closed properly if the function is
        # called with a filename instead of a file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper:
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
images.open = catching_open
try:
images.get_image_dimensions(os.path.join(os.path.dirname(__file__), "test1.png"))
finally:
del images.open
self.assertTrue(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
"""
get_image_dimensions() works properly after various calls
using a file handler (#11158)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_multiple_calls(self):
"""
Multiple calls of get_image_dimensions() should return the same size.
"""
img_path = os.path.join(os.path.dirname(__file__), "test.png")
with open(img_path, 'rb') as fh:
image = images.ImageFile(fh)
image_pil = Image.open(fh)
size_1 = images.get_image_dimensions(image)
size_2 = images.get_image_dimensions(image)
self.assertEqual(image_pil.size, size_1)
self.assertEqual(size_1, size_2)
@unittest.skipUnless(Image, "Pillow not installed")
def test_bug_19457(self):
"""
Regression test for #19457
        get_image_dimensions fails on some PNGs, while Image.size works fine on them.
"""
img_path = os.path.join(os.path.dirname(__file__), "magic.png")
size = images.get_image_dimensions(img_path)
with open(img_path, 'rb') as fh:
self.assertEqual(size, Image.open(fh).size)
@unittest.skipUnless(Image, "Pillow not installed")
class GetImageDimensionsTests(unittest.TestCase):
def test_invalid_image(self):
"""
get_image_dimensions() should return (None, None) for the dimensions of
invalid images (#24441).
brokenimg.png is not a valid image and it has been generated by:
$ echo "123" > brokenimg.png
"""
img_path = os.path.join(os.path.dirname(__file__), "brokenimg.png")
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
def test_valid_image(self):
"""
get_image_dimensions() should catch struct.error while feeding the PIL
Image parser (#24544).
Emulates the Parser feed error. Since the error is raised on every feed
attempt, the resulting image size should be invalid: (None, None).
"""
img_path = os.path.join(os.path.dirname(__file__), "test.png")
with mock.patch('PIL.ImageFile.Parser.feed', side_effect=struct.error):
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
class FileMoveSafeTests(unittest.TestCase):
def test_file_move_overwrite(self):
handle_a, self.file_a = tempfile.mkstemp()
handle_b, self.file_b = tempfile.mkstemp()
# file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False
with self.assertRaises(IOError):
file_move_safe(self.file_a, self.file_b, allow_overwrite=False)
# should allow it and continue on if allow_overwrite is True
self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True))
os.close(handle_a)
os.close(handle_b)
class SpooledTempTests(unittest.TestCase):
def test_in_memory_spooled_temp(self):
with tempfile.SpooledTemporaryFile() as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
def test_written_spooled_temp(self):
with tempfile.SpooledTemporaryFile(max_size=4) as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
|
the-stack_0_3587 | from commands import commands
from socket import socket
import json
# All these "magic" variables come from reverse engineering the HS100 app, Kasa
# We decompiled it and found their encryption function, then wrote this to try
# to connect and manipulate a HS100, which it does! It appears they use no form
# of authentication or fancy crypto algorithms for encryption
# -85 in the Kasa app, but bytes are unsigned,
# so 256 - 85 = 171
STARTING_BYTE = 171
# 4 hard coded null characters pad each string sent and received.
STARTING_PAD = b"\0\0\0\0"
# Revealed via netcat
PORT = 9999
def encrypt(string):
"""Encrypts a string for transferring to an HS100, they use a simple
autokey cipher padded by 4 null characters
Args:
string: a json string the HS100 should understand
Returns:
        bytearray: a bytearray of encrypted bytes using the reverse-engineered
autokey cipher
"""
byte = STARTING_BYTE
encrypted = bytearray(STARTING_PAD)
for char in string:
byte = byte ^ ord(char)
encrypted.append(byte)
return encrypted
def decrypt(bytes):
"""Decrypts a bytes sent from an HS100 response
Args:
bytes: the raw bytes sent back from an HS100 to decrypt
Returns:
        str: should be a JSON string if a valid command was sent prior to
        decryption, but could also be an empty string if there was no response.
        Regardless, it will now be decrypted
"""
    # chop off the beginning with the padded nulls
bytes = bytes[len(STARTING_PAD):]
key = STARTING_BYTE
decrypted = ""
for byte in bytes:
decrypted += chr(key ^ byte)
key = byte
return decrypted
def query(host, command):
"""Simply given a host an a shorthand command alias, runs that command and
returns the response from the HS100
Args:
host: string of the valid hostname that is the location of the HS100
command: string that is a valid command to run, from commands.py
Returns:
        str: the decrypted response string from the HS100; an empty string indicates an error
"""
if command not in commands:
# make sure it is valid json
try:
json.loads(command)
command_string = command
except ValueError:
raise Exception(
"Command {} not known and is not valid JSON".format(command)
)
else:
# the command is a shorthand name, so look it up
command_string = commands[command]
tcp = socket()
tcp.connect((host, PORT))
send = encrypt(command_string)
tcp.send(send)
# 4KB of data should be enough for any response
data = tcp.recv(4096)
# we are done with the query, now we need to parse it
tcp.close()
response = decrypt(data)
return response
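# A minimal offline sanity check (no device or network needed): the autokey
# cipher above should round-trip, i.e. decrypt(encrypt(s)) == s. The sample
# JSON below is only an illustrative payload, not a verified device command.
if __name__ == "__main__":
    sample = '{"system": {"get_sysinfo": {}}}'
    assert decrypt(bytes(encrypt(sample))) == sample
    print("autokey cipher round-trip OK")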
|
the-stack_0_3589 | # coding: utf-8
"""
Feed API
<p>The <strong>Feed API</strong> lets sellers upload input files, download reports and files including their status, filter reports using URI parameters, and retrieve customer service metrics task details.</p> # noqa: E501
OpenAPI spec version: v1.3.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InventoryFilterCriteria(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'creation_date_range': 'DateRange',
'modified_date_range': 'DateRange',
'listing_format': 'str',
'listing_status': 'str'
}
attribute_map = {
'creation_date_range': 'creationDateRange',
'modified_date_range': 'modifiedDateRange',
'listing_format': 'listingFormat',
'listing_status': 'listingStatus'
}
def __init__(self, creation_date_range=None, modified_date_range=None, listing_format=None, listing_status=None): # noqa: E501
"""InventoryFilterCriteria - a model defined in Swagger""" # noqa: E501
self._creation_date_range = None
self._modified_date_range = None
self._listing_format = None
self._listing_status = None
self.discriminator = None
if creation_date_range is not None:
self.creation_date_range = creation_date_range
if modified_date_range is not None:
self.modified_date_range = modified_date_range
if listing_format is not None:
self.listing_format = listing_format
if listing_status is not None:
self.listing_status = listing_status
@property
def creation_date_range(self):
"""Gets the creation_date_range of this InventoryFilterCriteria. # noqa: E501
:return: The creation_date_range of this InventoryFilterCriteria. # noqa: E501
:rtype: DateRange
"""
return self._creation_date_range
@creation_date_range.setter
def creation_date_range(self, creation_date_range):
"""Sets the creation_date_range of this InventoryFilterCriteria.
:param creation_date_range: The creation_date_range of this InventoryFilterCriteria. # noqa: E501
:type: DateRange
"""
self._creation_date_range = creation_date_range
@property
def modified_date_range(self):
"""Gets the modified_date_range of this InventoryFilterCriteria. # noqa: E501
:return: The modified_date_range of this InventoryFilterCriteria. # noqa: E501
:rtype: DateRange
"""
return self._modified_date_range
@modified_date_range.setter
def modified_date_range(self, modified_date_range):
"""Sets the modified_date_range of this InventoryFilterCriteria.
:param modified_date_range: The modified_date_range of this InventoryFilterCriteria. # noqa: E501
:type: DateRange
"""
self._modified_date_range = modified_date_range
@property
def listing_format(self):
"""Gets the listing_format of this InventoryFilterCriteria. # noqa: E501
The type of buying option for the order. Supports LMS_ACTIVE_INVENTORY_REPORT. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/feed/types/api:ListingFormatEnum'>eBay API documentation</a> # noqa: E501
:return: The listing_format of this InventoryFilterCriteria. # noqa: E501
:rtype: str
"""
return self._listing_format
@listing_format.setter
def listing_format(self, listing_format):
"""Sets the listing_format of this InventoryFilterCriteria.
The type of buying option for the order. Supports LMS_ACTIVE_INVENTORY_REPORT. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/feed/types/api:ListingFormatEnum'>eBay API documentation</a> # noqa: E501
:param listing_format: The listing_format of this InventoryFilterCriteria. # noqa: E501
:type: str
"""
self._listing_format = listing_format
@property
def listing_status(self):
"""Gets the listing_status of this InventoryFilterCriteria. # noqa: E501
The status of the listing (whether the listing was unsold or is active). The UNSOLD value does not apply to LMS_ACTIVE_INVENTORY_REPORT feed types. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/feed/types/api:ListingStatusEnum'>eBay API documentation</a> # noqa: E501
:return: The listing_status of this InventoryFilterCriteria. # noqa: E501
:rtype: str
"""
return self._listing_status
@listing_status.setter
def listing_status(self, listing_status):
"""Sets the listing_status of this InventoryFilterCriteria.
The status of the listing (whether the listing was unsold or is active). The UNSOLD value does not apply to LMS_ACTIVE_INVENTORY_REPORT feed types. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/feed/types/api:ListingStatusEnum'>eBay API documentation</a> # noqa: E501
:param listing_status: The listing_status of this InventoryFilterCriteria. # noqa: E501
:type: str
"""
self._listing_status = listing_status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InventoryFilterCriteria, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InventoryFilterCriteria):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_3591 | # _*_ coding: utf-8 _*_
"""
parse.py by xianhu
"""
import logging
import multiprocessing
from .base import TPEnum, BaseThread
from ...utilities import CONFIG_ERROR_MESSAGE, check_url_legal, get_dict_buildin
class ParseThread(BaseThread):
"""
class of ParseThread, as the subclass of BaseThread
"""
def __init__(self, name, worker, pool):
"""
constructor
"""
BaseThread.__init__(self, name, worker, pool)
        self._pool_multiprocessing = multiprocessing.Pool()
return
def working(self):
"""
procedure of parsing, auto running, and return True
"""
        # ----1---- fetch a batch of pending parse tasks from the pool
        task_list = [self._pool.get_a_task(TPEnum.HTM_PARSE) for _ in range(max(1, self._pool.get_number_dict(TPEnum.HTM_PARSE_NOT)))]
        # ----2---- parse each task asynchronously in the process pool
        result_list = [self._pool_multiprocessing.apply_async(self._worker.working, args=task) for task in task_list]
for index in range(len(task_list)):
priority, url, keys, deep, content = task_list[index]
parse_state, url_list, item = result_list[index].get(timeout=None)
            # ----3---- report the parse result state back to the pool
self._pool.accept_state_from_task(TPEnum.HTM_PARSE, parse_state, (priority, url, keys, deep, content))
            # ----4---- on success, queue newly found urls for fetching and the item for saving
if parse_state > 0:
self._pool.update_number_dict(TPEnum.HTM_PARSE_SUCC, +1)
for _url, _keys, _priority in filter(lambda x: check_url_legal(x[0]), url_list):
self._pool.add_a_task(TPEnum.URL_FETCH, (_priority, _url, _keys, deep+1, 0))
if item:
self._pool.add_a_task(TPEnum.ITEM_SAVE, (priority, url, keys, deep, item))
else:
self._pool.update_number_dict(TPEnum.HTM_PARSE_FAIL, +1)
logging.error("%s error: %s, %s", url_list[0], url_list[1], CONFIG_ERROR_MESSAGE % (priority, get_dict_buildin(keys), deep, url))
            # ----5---- mark this parse task as finished
self._pool.finish_a_task(TPEnum.HTM_PARSE)
        # ----6---- keep the parsing thread alive
return True
|
the-stack_0_3595 | r"""
Genomics operations
"""
import collections
import os
import re
from functools import reduce
from itertools import chain
from operator import add
from typing import Any, Callable, List, Mapping, Optional, Union
import anndata
import networkx as nx
import numpy as np
import pandas as pd
import pybedtools
from pybedtools import BedTool
from pybedtools.cbedtools import Interval
from .check import check_deps
from .graph import compose_multigraph, reachable_vertices
from .typehint import RandomState
from .utils import ConstrainedDataFrame, logged, smart_tqdm, get_rs
class Bed(ConstrainedDataFrame):
r"""
BED format data frame
"""
COLUMNS = pd.Index([
"chrom", "chromStart", "chromEnd", "name", "score",
"strand", "thickStart", "thickEnd", "itemRgb",
"blockCount", "blockSizes", "blockStarts"
])
@classmethod
def rectify(cls, df: pd.DataFrame) -> pd.DataFrame:
df = super(Bed, cls).rectify(df)
COLUMNS = cls.COLUMNS.copy(deep=True)
for item in COLUMNS:
if item in df:
if item in ("chromStart", "chromEnd"):
df[item] = df[item].astype(int)
else:
df[item] = df[item].astype(str)
elif item not in ("chrom", "chromStart", "chromEnd"):
df[item] = "."
else:
raise ValueError(f"Required column {item} is missing!")
return df.loc[:, COLUMNS]
@classmethod
def verify(cls, df: pd.DataFrame) -> None:
super(Bed, cls).verify(df)
if len(df.columns) != len(cls.COLUMNS) or np.any(df.columns != cls.COLUMNS):
raise ValueError("Invalid BED format!")
@classmethod
def read_bed(cls, fname: os.PathLike) -> "Bed":
r"""
Read BED file
Parameters
----------
fname
BED file
Returns
-------
bed
Loaded :class:`Bed` object
"""
COLUMNS = cls.COLUMNS.copy(deep=True)
loaded = pd.read_csv(fname, sep="\t", header=None, comment="#")
loaded.columns = COLUMNS[:loaded.shape[1]]
return cls(loaded)
def write_bed(self, fname: os.PathLike, ncols: Optional[int] = None) -> None:
r"""
Write BED file
Parameters
----------
fname
BED file
ncols
Number of columns to write (by default write all columns)
"""
if ncols and ncols < 3:
raise ValueError("`ncols` must be larger than 3!")
df = self.df.iloc[:, :ncols] if ncols else self
df.to_csv(fname, sep="\t", header=False, index=False)
def to_bedtool(self) -> pybedtools.BedTool:
r"""
Convert to a :class:`pybedtools.BedTool` object
Returns
-------
bedtool
Converted :class:`pybedtools.BedTool` object
"""
return BedTool(Interval(
row["chrom"], row["chromStart"], row["chromEnd"],
name=row["name"], score=row["score"], strand=row["strand"]
) for _, row in self.iterrows())
def nucleotide_content(self, fasta: os.PathLike) -> pd.DataFrame:
r"""
Compute nucleotide content in the BED regions
Parameters
----------
fasta
Genomic sequence file in FASTA format
Returns
-------
nucleotide_stat
Data frame containing nucleotide content statistics for each region
"""
result = self.to_bedtool().nucleotide_content(fi=os.fspath(fasta), s=True) # pylint: disable=unexpected-keyword-arg
result = pd.DataFrame(
np.stack([interval.fields[6:15] for interval in result]),
columns=[
r"%AT", r"%GC",
r"#A", r"#C", r"#G", r"#T", r"#N",
r"#other", r"length"
]
).astype({
r"%AT": float, r"%GC": float,
r"#A": int, r"#C": int, r"#G": int, r"#T": int, r"#N": int,
r"#other": int, r"length": int
})
pybedtools.cleanup()
return result
def strand_specific_start_site(self) -> "Bed":
r"""
Convert to strand-specific start sites of genomic features
Returns
-------
start_site_bed
A new :class:`Bed` object, containing strand-specific start sites
of the current :class:`Bed` object
"""
if set(self["strand"]) != set(["+", "-"]):
raise ValueError("Not all features are strand specific!")
df = pd.DataFrame(self, copy=True)
pos_strand = df.query("strand == '+'").index
neg_strand = df.query("strand == '-'").index
df.loc[pos_strand, "chromEnd"] = df.loc[pos_strand, "chromStart"] + 1
df.loc[neg_strand, "chromStart"] = df.loc[neg_strand, "chromEnd"] - 1
return type(self)(df)
def strand_specific_end_site(self) -> "Bed":
r"""
Convert to strand-specific end sites of genomic features
Returns
-------
end_site_bed
A new :class:`Bed` object, containing strand-specific end sites
of the current :class:`Bed` object
"""
if set(self["strand"]) != set(["+", "-"]):
raise ValueError("Not all features are strand specific!")
df = pd.DataFrame(self, copy=True)
pos_strand = df.query("strand == '+'").index
neg_strand = df.query("strand == '-'").index
df.loc[pos_strand, "chromStart"] = df.loc[pos_strand, "chromEnd"] - 1
df.loc[neg_strand, "chromEnd"] = df.loc[neg_strand, "chromStart"] + 1
return type(self)(df)
def expand(
self, upstream: int, downstream: int,
chr_len: Optional[Mapping[str, int]] = None
) -> "Bed":
r"""
Expand genomic features towards upstream and downstream
Parameters
----------
upstream
Number of bps to expand in the upstream direction
downstream
Number of bps to expand in the downstream direction
chr_len
Length of each chromosome
Returns
-------
expanded_bed
A new :class:`Bed` object, containing expanded features
of the current :class:`Bed` object
Note
----
Starting position < 0 after expansion is always trimmed.
Ending position exceeding chromosome length is trimed only if
``chr_len`` is specified.
"""
if upstream == downstream == 0:
return self
df = pd.DataFrame(self, copy=True)
if upstream == downstream: # symmetric
df["chromStart"] -= upstream
df["chromEnd"] += downstream
else: # asymmetric
if set(df["strand"]) != set(["+", "-"]):
raise ValueError("Not all features are strand specific!")
pos_strand = df.query("strand == '+'").index
neg_strand = df.query("strand == '-'").index
if upstream:
df.loc[pos_strand, "chromStart"] -= upstream
df.loc[neg_strand, "chromEnd"] += upstream
if downstream:
df.loc[pos_strand, "chromEnd"] += downstream
df.loc[neg_strand, "chromStart"] -= downstream
df["chromStart"] = np.maximum(df["chromStart"], 0)
if chr_len:
chr_len = df["chrom"].map(chr_len)
df["chromEnd"] = np.minimum(df["chromEnd"], chr_len)
return type(self)(df)
class Gtf(ConstrainedDataFrame): # gffutils is too slow
r"""
GTF format data frame
"""
COLUMNS = pd.Index([
"seqname", "source", "feature", "start", "end",
"score", "strand", "frame", "attribute"
]) # Additional columns after "attribute" is allowed
@classmethod
def rectify(cls, df: pd.DataFrame) -> pd.DataFrame:
df = super(Gtf, cls).rectify(df)
COLUMNS = cls.COLUMNS.copy(deep=True)
for item in COLUMNS:
if item in df:
if item in ("start", "end"):
df[item] = df[item].astype(int)
else:
df[item] = df[item].astype(str)
elif item not in ("seqname", "start", "end"):
df[item] = "."
else:
raise ValueError(f"Required column {item} is missing!")
return df.sort_index(axis=1, key=cls._column_key)
@classmethod
def _column_key(cls, x: pd.Index) -> np.ndarray:
x = cls.COLUMNS.get_indexer(x)
x[x < 0] = x.max() + 1 # Put additional columns after "attribute"
return x
@classmethod
def verify(cls, df: pd.DataFrame) -> None:
super(Gtf, cls).verify(df)
if len(df.columns) < len(cls.COLUMNS) or \
np.any(df.columns[:len(cls.COLUMNS)] != cls.COLUMNS):
raise ValueError("Invalid GTF format!")
@classmethod
def read_gtf(cls, fname: os.PathLike) -> "Gtf":
r"""
Read GTF file
Parameters
----------
fname
GTF file
Returns
-------
gtf
Loaded :class:`Gtf` object
"""
COLUMNS = cls.COLUMNS.copy(deep=True)
loaded = pd.read_csv(fname, sep="\t", header=None, comment="#")
loaded.columns = COLUMNS[:loaded.shape[1]]
return cls(loaded)
def split_attribute(self) -> "Gtf":
r"""
Extract all attributes from the "attribute" column
and append them to existing columns
Returns
-------
splitted
Gtf with splitted attribute columns appended
"""
pattern = re.compile(r'([^\s]+) "([^"]+)";')
splitted = pd.DataFrame.from_records(np.vectorize(lambda x: {
key: val for key, val in pattern.findall(x)
})(self["attribute"]), index=self.index)
if set(self.COLUMNS).intersection(splitted.columns):
self.logger.warning(
"Splitted attribute names overlap standard GTF fields! "
"The standard fields are overwritten!"
)
return self.assign(**splitted)
def to_bed(self, name: Optional[str] = None) -> Bed:
r"""
Convert GTF to BED format
Parameters
----------
name
Specify a column to be converted to the "name" column in bed format,
otherwise the "name" column would be filled with "."
Returns
-------
bed
Converted :class:`Bed` object
"""
bed_df = pd.DataFrame(self, copy=True).loc[
:, ("seqname", "start", "end", "score", "strand")
]
bed_df.insert(3, "name", np.repeat(
".", len(bed_df)
) if name is None else self[name])
bed_df["start"] -= 1 # Convert to zero-based
bed_df.columns = (
"chrom", "chromStart", "chromEnd", "name", "score", "strand"
)
return Bed(bed_df)
def interval_dist(x: Interval, y: Interval) -> int:
r"""
Compute distance and relative position between two bed intervals
Parameters
----------
x
First interval
y
Second interval
Returns
-------
dist
Signed distance between ``x`` and ``y``
"""
if x.chrom != y.chrom:
return np.inf * (-1 if x.chrom < y.chrom else 1)
if x.start < y.stop and y.start < x.stop:
return 0
if x.stop <= y.start:
return x.stop - y.start - 1
if y.stop <= x.start:
return x.start - y.stop + 1
def window_graph(
left: Union[Bed, str], right: Union[Bed, str], window_size: int,
left_sorted: bool = False, right_sorted: bool = False,
attr_fn: Optional[Callable[[Interval, Interval, float], Mapping[str, Any]]] = None
) -> nx.MultiDiGraph:
r"""
Construct a window graph between two sets of genomic features, where
features pairs within a window size are connected.
Parameters
----------
left
First feature set, either a :class:`Bed` object or path to a bed file
right
Second feature set, either a :class:`Bed` object or path to a bed file
window_size
Window size (in bp)
left_sorted
Whether ``left`` is already sorted
right_sorted
Whether ``right`` is already sorted
attr_fn
Function to compute edge attributes for connected features,
should accept the following three positional arguments:
- l: left interval
- r: right interval
- d: signed distance between the intervals
By default no edge attribute is created.
Returns
-------
graph
Window graph
"""
check_deps("bedtools")
if isinstance(left, Bed):
pbar_total = len(left)
left = left.to_bedtool()
else:
pbar_total = None
left = pybedtools.BedTool(left)
if not left_sorted:
left = left.sort(stream=True)
left = iter(left) # Resumable iterator
if isinstance(right, Bed):
right = right.to_bedtool()
else:
right = pybedtools.BedTool(right)
if not right_sorted:
right = right.sort(stream=True)
right = iter(right) # Resumable iterator
attr_fn = attr_fn or (lambda l, r, d: {})
if pbar_total is not None:
left = smart_tqdm(left, total=pbar_total)
graph = nx.MultiDiGraph()
window = collections.OrderedDict() # Used as ordered set
for l in left:
for r in list(window.keys()): # Allow remove during iteration
d = interval_dist(l, r)
if -window_size <= d <= window_size:
graph.add_edge(l.name, r.name, **attr_fn(l, r, d))
elif d > window_size:
del window[r]
else: # dist < -window_size
break # No need to expand window
else:
for r in right: # Resume from last break
d = interval_dist(l, r)
if -window_size <= d <= window_size:
graph.add_edge(l.name, r.name, **attr_fn(l, r, d))
elif d > window_size:
continue
window[r] = None # Placeholder
if d < -window_size:
break
pybedtools.cleanup()
return graph
def dist_power_decay(x: int) -> float:
r"""
Distance-based power decay weight, computed as
:math:`w = {\left( \frac {d + 1000} {1000} \right)} ^ {-0.75}`
Parameters
----------
x
Distance (in bp)
Returns
-------
weight
Decaying weight
"""
return ((x + 1000) / 1000) ** (-0.75)
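# Worked example for the decay above (values approximate): a feature touching
# the gene region (x = 0) keeps full weight 1.0, one 10 kb away gets
# ((10000 + 1000) / 1000) ** -0.75 ≈ 0.17, and one 150 kb away only ≈ 0.02.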
@logged
def rna_anchored_prior_graph(
rna: anndata.AnnData, *others: anndata.AnnData,
gene_region: str = "combined", promoter_len: int = 2000,
extend_range: int = 0, extend_fn: Callable[[int], float] = dist_power_decay,
signs: Optional[List[int]] = None, propagate_highly_variable: bool = True,
corrupt_rate: float = 0.0, random_state: RandomState = None
) -> nx.MultiDiGraph:
r"""
Build prior regulatory graph anchored on RNA genes
Parameters
----------
rna
Anchor RNA dataset
*others
Other datasets
gene_region
Defines the genomic region of genes, must be one of
``{"gene_body", "promoter", "combined"}``.
promoter_len
Defines the length of gene promoters (bp upstream of TSS)
extend_range
Maximal extend distance beyond gene regions
extend_fn
Distance-decreasing weight function for the extended regions
(by default :func:`dist_power_decay`)
signs
Sign of edges between RNA genes and features in each ``*others``
dataset, must have the same length as ``*others``. Signs must be
one of ``{-1, 1}``. By default, all edges have positive signs of ``1``.
propagate_highly_variable
Whether to propagate highly variable genes to other datasets,
datasets in ``*others`` would be modified in place.
corrupt_rate
**CAUTION: DO NOT USE**, only for evaluation purpose
random_state
**CAUTION: DO NOT USE**, only for evaluation purpose
Returns
-------
graph
Prior regulatory graph
Note
----
In this function, features in the same dataset can only connect to
anchor genes via the same edge sign. For more flexibility, please
construct the prior graph manually.
"""
signs = signs or [1] * len(others)
if len(others) != len(signs):
raise RuntimeError("Length of ``others`` and ``signs`` must match!")
if set(signs).difference({-1, 1}):
raise RuntimeError("``signs`` can only contain {-1, 1}!")
rna_bed = Bed(rna.var.assign(name=rna.var_names))
other_beds = [Bed(other.var.assign(name=other.var_names)) for other in others]
if gene_region == "promoter":
rna_bed = rna_bed.strand_specific_start_site().expand(promoter_len, 0)
elif gene_region == "combined":
rna_bed = rna_bed.expand(promoter_len, 0)
elif gene_region != "gene_body":
raise ValueError("Unrecognized `gene_range`!")
graphs = [window_graph(
rna_bed, other_bed, window_size=extend_range,
attr_fn=lambda l, r, d, s=sign: {
"dist": abs(d), "weight": extend_fn(abs(d)), "sign": s
}
) for other_bed, sign in zip(other_beds, signs)]
graph = compose_multigraph(*graphs)
corrupt_num = round(corrupt_rate * graph.number_of_edges())
if corrupt_num:
rna_anchored_prior_graph.logger.warning("Corrupting prior graph!")
rs = get_rs(random_state)
rna_var_names = rna.var_names.tolist()
other_var_names = reduce(add, [other.var_names.tolist() for other in others])
corrupt_remove = set(rs.choice(graph.number_of_edges(), corrupt_num, replace=False))
corrupt_remove = set(edge for i, edge in enumerate(graph.edges) if i in corrupt_remove)
corrupt_add = []
while len(corrupt_add) < corrupt_num:
corrupt_add += [
(u, v) for u, v in zip(
rs.choice(rna_var_names, corrupt_num - len(corrupt_add)),
rs.choice(other_var_names, corrupt_num - len(corrupt_add))
) if not graph.has_edge(u, v)
]
graph.add_edges_from([
(add[0], add[1], graph.edges[remove])
for add, remove in zip(corrupt_add, corrupt_remove)
])
graph.remove_edges_from(corrupt_remove)
if propagate_highly_variable:
hvg_reachable = reachable_vertices(graph, rna.var.query("highly_variable").index)
for other in others:
other.var["highly_variable"] = [
item in hvg_reachable for item in other.var_names
]
graph = compose_multigraph(graph, graph.reverse())
all_features = set(chain.from_iterable(
map(lambda x: x.var_names, [rna, *others])
))
for item in all_features:
graph.add_edge(item, item, weight=1.0, sign=1)
return graph
def get_chr_len_from_fai(fai: os.PathLike) -> Mapping[str, int]:
r"""
Get chromosome length information from fasta index file
Parameters
----------
fai
Fasta index file
Returns
-------
chr_len
Length of each chromosome
"""
return pd.read_table(fai, header=None, index_col=0)[1].to_dict()
def ens_trim_version(x: str) -> str:
r"""
Trim version suffix from Ensembl ID
Parameters
----------
x
Ensembl ID
Returns
-------
trimmed
Ensembl ID with version suffix trimmed
"""
return re.sub(r"\.[0-9_-]+$", "", x)
# Aliases
read_bed = Bed.read_bed
read_gtf = Gtf.read_gtf
|
the-stack_0_3597 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Any, Dict, Optional
from flask import Request
from marshmallow import ValidationError
from superset import cache
from superset.charts.commands.exceptions import (
ChartDataCacheLoadError,
ChartDataQueryFailedError,
)
from superset.charts.schemas import ChartDataQueryContextSchema
from superset.commands.base import BaseCommand
from superset.common.query_context import QueryContext
from superset.exceptions import CacheLoadError
from superset.extensions import async_query_manager
from superset.tasks.async_queries import load_chart_data_into_cache
logger = logging.getLogger(__name__)
class ChartDataCommand(BaseCommand):
def __init__(self) -> None:
self._form_data: Dict[str, Any]
self._query_context: QueryContext
self._async_channel_id: str
def run(self, **kwargs: Any) -> Dict[str, Any]:
# caching is handled in query_context.get_df_payload
# (also evals `force` property)
cache_query_context = kwargs.get("cache", False)
force_cached = kwargs.get("force_cached", False)
try:
payload = self._query_context.get_payload(
cache_query_context=cache_query_context, force_cached=force_cached
)
except CacheLoadError as exc:
raise ChartDataCacheLoadError(exc.message)
# TODO: QueryContext should support SIP-40 style errors
for query in payload["queries"]:
if query.get("error"):
raise ChartDataQueryFailedError(f"Error: {query['error']}")
return_value = {
"query_context": self._query_context,
"queries": payload["queries"],
}
if cache_query_context:
return_value.update(cache_key=payload["cache_key"])
return return_value
def run_async(self, user_id: Optional[str]) -> Dict[str, Any]:
job_metadata = async_query_manager.init_job(self._async_channel_id, user_id)
load_chart_data_into_cache.delay(job_metadata, self._form_data)
return job_metadata
def set_query_context(self, form_data: Dict[str, Any]) -> QueryContext:
self._form_data = form_data
try:
self._query_context = ChartDataQueryContextSchema().load(self._form_data)
except KeyError:
raise ValidationError("Request is incorrect")
except ValidationError as error:
raise error
return self._query_context
def validate(self) -> None:
self._query_context.raise_for_access()
def validate_async_request(self, request: Request) -> None:
jwt_data = async_query_manager.parse_jwt_from_request(request)
self._async_channel_id = jwt_data["channel"]
def load_query_context_from_cache( # pylint: disable=no-self-use
self, cache_key: str
) -> Dict[str, Any]:
cache_value = cache.get(cache_key)
if not cache_value:
raise ChartDataCacheLoadError("Cached data not found")
return cache_value["data"]
|
the-stack_0_3598 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default value constants exposed by core utilities."""
DEFAULT_REGISTRY = 'gcr.io'
REGIONAL_REGISTRIES = ['us.gcr.io', 'eu.gcr.io', 'asia.gcr.io']
BUCKET_REGISTRIES = ['b.gcr.io', 'bucket.gcr.io']
APPENGINE_REGISTRY = 'appengine.gcr.io'
SPECIALTY_REGISTRIES = BUCKET_REGISTRIES + [APPENGINE_REGISTRY]
ALL_SUPPORTED_REGISTRIES = ([DEFAULT_REGISTRY] + REGIONAL_REGISTRIES
+ SPECIALTY_REGISTRIES)
DEFAULT_DEVSHELL_IMAGE = (DEFAULT_REGISTRY +
'/dev_con/cloud-dev-common:prod')
METADATA_IMAGE = DEFAULT_REGISTRY + '/google_appengine/faux-metadata:latest'
|
the-stack_0_3599 | from os import path
from splashgen import MetaTags, SplashSite, launch
from splashgen.integrations import MailchimpSignup
site = SplashSite(title="ZenWeb – Python Internal Web Apps",
logo=path.join(path.dirname(__file__), "zenweb-logo.png"),
theme="dark")
site.headline = "Effortless internal tools for your backend services"
site.subtext = """
Write simple code that plugs directly into your infrastructure, and let ZenWeb
turn it into a web app that anyone on your team can use.
Stop getting pinged every time an on-call engineer needs a script run,
and start automating your domain expertise.
"""
site.meta = MetaTags(title=site.headline,
description="Automate your domain expertise. Sign up to join our pilot program!",
image="https://t3dmedia.s3.amazonaws.com/_notvideos/zwbg.png",
canonical_url="https://zenweb.dev")
site.call_to_action = MailchimpSignup(
"http://eepurl.com/hw4od9", button_text="Join our pilot")
launch(site)
|
the-stack_0_3600 | import math
from typing import Any
import torch
import torch.nn as nn
from torch.nn import functional as f
import numpy as np
BETA_START = 0.4
BETA_FRAMES = 100000
class NoisyLinear(nn.Linear):
def __init__(self, in_features, out_features, sigma_init=0.017, bias=True):
super(NoisyLinear, self).__init__(in_features, out_features, bias=bias)
w = torch.full((out_features, in_features), sigma_init)
self.sigma_weight = nn.Parameter(w)
z = torch.zeros(out_features, in_features)
self.register_buffer("epsilon_weight", z)
if bias:
w = torch.full((out_features,), sigma_init)
self.sigma_bias = nn.Parameter(w)
z = torch.zeros(out_features)
self.register_buffer("epsilon_bias", z)
self.reset_parameters()
def reset_parameters(self):
std = math.sqrt(3 / self.in_features)
self.weight.data.uniform_(-std, std)
self.bias.data.uniform_(-std, std)
def forward(self, x):
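        # Fresh Gaussian noise is drawn on every forward pass, so exploration
        # comes from the learned sigma parameters instead of epsilon-greedy.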
self.epsilon_weight.normal_()
bias = self.bias
if bias is not None:
self.epsilon_bias.normal_()
bias = bias + self.sigma_bias * self.epsilon_bias.data
v = self.sigma_weight * self.epsilon_weight.data + self.weight
return f.linear(x, v, bias)
def _forward_unimplemented(self, *input_forward: Any) -> None:
pass
class NoisyDQN(nn.Module):
def __init__(self, input_shape, num_actions):
super(NoisyDQN, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU()
)
conv_out_size = self._get_conv_out(input_shape)
self.noisy_layers = [
NoisyLinear(conv_out_size, 512),
NoisyLinear(512, num_actions)
]
self.fc = nn.Sequential(
self.noisy_layers[0],
nn.ReLU(),
self.noisy_layers[1]
)
def _get_conv_out(self, shape):
o = self.conv(torch.zeros(1, *shape))
return int(np.prod(o.size()))
def forward(self, x):
fx = x.float() / 256
conv_out = self.conv(fx).view(fx.size()[0], -1)
return self.fc(conv_out)
def noisy_layers_sigma_snr(self):
return [
((layer.weight ** 2).mean().sqrt() / (layer.sigma_weight ** 2).mean().sqrt()).item()
for layer in self.noisy_layers
]
def _forward_unimplemented(self, *input_forward: Any) -> None:
pass
class PrioritizedReplayBuffer:
def __init__(self, exp_source, buf_size, prob_alpha=0.6):
self.exp_source_iter = iter(exp_source)
self.prob_alpha = prob_alpha
self.capacity = buf_size
self.pos = 0
self.buffer = []
self.priorities = np.zeros((buf_size,), dtype=np.float32)
self.beta = BETA_START
def update_beta(self, idx):
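        # Anneal beta linearly from BETA_START towards 1.0 over BETA_FRAMES
        # frames, gradually removing the bias introduced by prioritized sampling.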
v = BETA_START + idx * (1.0 - BETA_START) / BETA_FRAMES
self.beta = min(1.0, v)
return self.beta
def __len__(self):
return len(self.buffer)
def populate(self, count):
max_priority = self.priorities.max(initial=1.0) if self.buffer else 1.0
for _ in range(count):
sample = next(self.exp_source_iter)
if len(self.buffer) < self.capacity:
self.buffer.append(sample)
else:
self.buffer[self.pos] = sample
self.priorities[self.pos] = max_priority
self.pos = (self.pos + 1) % self.capacity
def sample(self, batch_size):
if len(self.buffer) == self.capacity:
priorities = self.priorities
else:
priorities = self.priorities[:self.pos]
probabilities = priorities ** self.prob_alpha
probabilities /= probabilities.sum()
indices = np.random.choice(len(self.buffer), batch_size, p=probabilities)
samples = [self.buffer[idx] for idx in indices]
total = len(self.buffer)
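        # Importance-sampling correction: w_i = (N * P(i)) ** (-beta), normalized
        # by the largest weight so gradient updates are only ever scaled down.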
weights = (total * probabilities[indices]) ** (-self.beta)
weights /= weights.max()
return samples, indices, np.array(weights, dtype=np.float32)
def update_priorities(self, batch_indices, batch_priorities):
for idx, priority in zip(batch_indices, batch_priorities):
self.priorities[idx] = priority
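# A minimal shape sanity check for the network above (assumes Atari-style
# stacked frames of shape 4x84x84 and 6 discrete actions; adjust as needed):
if __name__ == "__main__":
    net = NoisyDQN((4, 84, 84), num_actions=6)
    dummy = torch.zeros(1, 4, 84, 84, dtype=torch.uint8)
    print(net(dummy).shape)  # expected: torch.Size([1, 6])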
|
the-stack_0_3603 | """empty message
Revision ID: 914d00d1492a
Revises:
Create Date: 2020-07-01 23:19:51.549022
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '914d00d1492a'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('name', sa.String(length=32), nullable=False),
sa.Column('_password', sa.String(length=128), nullable=False),
sa.Column('is_delete', sa.Boolean(), nullable=False),
sa.Column('extension', sa.Integer(), nullable=True),
sa.Column('permission', sa.Integer(), nullable=False),
sa.Column('gender', sa.String(length=2), nullable=True),
sa.Column('is_super', sa.Boolean(), nullable=True),
sa.Column('address', sa.String(length=128), nullable=True),
sa.Column('e_mail', sa.String(length=128), nullable=True),
sa.Column('phone', sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
# ### end Alembic commands ###
|
the-stack_0_3604 | import logging
import time
class Stage:
def __init__(self, total_tasks, seconds_per_tasks):
self.current_task_number = 0
self.total_tasks = total_tasks
self.seconds_per_task = seconds_per_tasks
def update_task_number(self, task_number):
self.current_task_number = task_number
def seconds_remaining(self):
return self.seconds_per_task * (self.total_tasks - self.current_task_number)
class Progress:
def __init__(self, localconfig):
self.time_start = time.time()
self.stages = {}
self.localconfig = localconfig
def add_stage(self, stage_name, num_tasks, seconds_per_task):
self.stages[stage_name] = Stage(num_tasks, seconds_per_task)
def report_message(self, message):
if self.localconfig.job:
self.localconfig.job.set_message(message)
logging.info({"message": message})
def report(self, num, message, stage_name="default"):
if stage_name in self.stages:
stage = self.stages[stage_name]
stage.update_task_number(num)
else:
logging.critical(f"Bad stage={stage_name} in {type(self).__name__}.report()")
return
seconds_left = sum(stage.seconds_remaining() for stage in self.stages.values())
# Write to db
if self.localconfig.job:
estimated_completion_timestamp = int(time.time() + seconds_left)
self.localconfig.job.set_in_progress(message, estimated_completion_timestamp)
else:
logging.info(
"message: %s, seconds_left: %s, time_elapsed: %s", message, seconds_left, time.time() - self.time_start
)
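# Minimal usage sketch (assumes a localconfig-like object whose ``job`` attribute
# is falsy, so progress updates are only logged rather than written to a job):
if __name__ == "__main__":
    class _LocalConfig:
        job = None
    progress = Progress(_LocalConfig())
    progress.add_stage("download", 10, 2)  # 10 tasks, ~2 seconds each
    for i in range(1, 11):
        progress.report(i, "downloaded file {}/10".format(i), stage_name="download")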
|
the-stack_0_3607 | import gc
import os
import time
# Import required modules
from pyaedt import Circuit
from pyaedt.generic.filesystem import Scratch
from pyaedt.generic.TouchstoneParser import read_touchstone
# Setup paths for module imports
from _unittest.conftest import local_path, scratch_path, config
try:
import pytest # noqa: F401
except ImportError:
import _unittest_ironpython.conf_unittest as pytest # noqa: F401
original_project_name = "Galileo_t21"
test_project_name = "Galileo_t21"
netlist1 = "netlist_small.cir"
netlist2 = "Schematic1.qcv"
touchstone = "SSN_ssn.s6p"
touchstone2 = "Galileo_V3P3S0.ts"
ami_project = "AMI_Example"
class TestClass:
def setup_class(self):
with Scratch(scratch_path) as self.local_scratch:
time.sleep(2)
example_project = os.path.join(local_path, "example_models", original_project_name + ".aedt")
netlist_file1 = os.path.join(local_path, "example_models", netlist1)
netlist_file2 = os.path.join(local_path, "example_models", netlist2)
touchstone_file = os.path.join(local_path, "example_models", touchstone)
touchstone_file2 = os.path.join(local_path, "example_models", touchstone2)
self.test_project = self.local_scratch.copyfile(
example_project, os.path.join(self.local_scratch.path, test_project_name + ".aedt")
)
self.local_scratch.copyfile(netlist_file1)
self.local_scratch.copyfile(netlist_file2)
self.local_scratch.copyfile(touchstone_file)
self.local_scratch.copyfile(touchstone_file2)
self.local_scratch.copyfolder(
os.path.join(local_path, "example_models", original_project_name + ".aedb"),
os.path.join(self.local_scratch.path, test_project_name + ".aedb"),
)
ami_example_project = os.path.join(local_path, "example_models", ami_project + ".aedt")
self.ami_example_project = self.local_scratch.copyfile(ami_example_project)
self.local_scratch.copyfolder(
os.path.join(local_path, "example_models", ami_project + ".aedb"),
os.path.join(self.local_scratch.path, ami_project + ".aedb"),
)
self.aedtapp = Circuit(self.test_project)
def teardown_class(self):
self.aedtapp._desktop.ClearMessages("", "", 3)
for proj in self.aedtapp.project_list:
try:
self.aedtapp.close_project(proj, saveproject=False)
except:
pass
self.local_scratch.remove()
gc.collect()
def test_01_create_inductor(self):
myind = self.aedtapp.modeler.schematic.create_inductor(value=1e-9, location=[0.2, 0.2])
assert type(myind.id) is int
assert myind.parameters["L"] == "1e-09"
def test_02_create_resistor(self):
myres = self.aedtapp.modeler.schematic.create_resistor(value=50, location=[0.4, 0.2])
assert type(myres.id) is int
assert myres.parameters["R"] == "50"
def test_03_create_capacitor(self):
mycap = self.aedtapp.modeler.schematic.create_capacitor(value=1e-12, location=[0.6, 0.2])
assert type(mycap.id) is int
assert mycap.parameters["C"] == "1e-12"
def test_04_getpin_names(self):
mycap2 = self.aedtapp.modeler.schematic.create_capacitor(value=1e-12)
pinnames = self.aedtapp.modeler.schematic.get_pins(mycap2)
pinnames2 = self.aedtapp.modeler.schematic.get_pins(mycap2.id)
pinnames3 = self.aedtapp.modeler.schematic.get_pins(mycap2.composed_name)
assert pinnames2 == pinnames3
assert type(pinnames) is list
assert len(pinnames) == 2
def test_05_getpin_location(self):
for el in self.aedtapp.modeler.schematic.components:
pinnames = self.aedtapp.modeler.schematic.get_pins(el)
for pinname in pinnames:
pinlocation = self.aedtapp.modeler.schematic.get_pin_location(el, pinname)
assert len(pinlocation) == 2
def test_06_add_3dlayout_component(self):
myedb = self.aedtapp.modeler.schematic.add_subcircuit_3dlayout("Galileo_G87173_204")
assert type(myedb.id) is int
def test_07_add_hfss_component(self):
my_model, myname = self.aedtapp.modeler.schematic.create_field_model(
"uUSB", "Setup1 : Sweep", ["usb_N_conn", "usb_N_pcb", "usb_P_conn", "usb_P_pcb"]
)
assert type(my_model) is int
def test_07a_push_excitation(self):
setup_name = "LNA"
LNA_setup = self.aedtapp.create_setup(setup_name)
assert self.aedtapp.push_excitations(instance_name="U1", setup_name="LNA", thevenin_calculation=False)
assert self.aedtapp.push_excitations(instance_name="U1", setup_name="LNA", thevenin_calculation=True)
def test_08_import_mentor_netlist(self):
self.aedtapp.insert_design("MentorSchematicImport")
assert self.aedtapp.create_schematic_from_mentor_netlist(os.path.join(self.local_scratch.path, netlist2))
pass
def test_09_import_netlist(self):
self.aedtapp.insert_design("SchematicImport")
assert self.aedtapp.create_schematic_from_netlist(os.path.join(self.local_scratch.path, netlist1))
def test_10_import_touchstone(self):
self.aedtapp.insert_design("Touchstone_import")
ports = self.aedtapp.import_touchstone_solution(os.path.join(self.local_scratch.path, touchstone))
ports2 = self.aedtapp.import_touchstone_solution(os.path.join(self.local_scratch.path, touchstone2))
numports = len(ports)
assert numports == 6
numports2 = len(ports2)
assert numports2 == 3
tx = ports[: int(numports / 2)]
rx = ports[int(numports / 2) :]
insertions = ["dB(S({},{}))".format(i, j) for i, j in zip(tx, rx)]
assert self.aedtapp.create_touchstone_report("Insertion Losses", insertions)
touchstone_data = self.aedtapp.get_touchstone_data(insertions)
assert touchstone_data
def test_11_export_fullwave(self):
output = self.aedtapp.export_fullwave_spice(
os.path.join(self.local_scratch.path, touchstone), is_solution_file=True
)
assert output
def test_12_connect_components(self):
myind = self.aedtapp.modeler.schematic.create_inductor("L100", 1e-9)
myres = self.aedtapp.modeler.schematic.create_resistor("R100", 50)
mycap = self.aedtapp.modeler.schematic.create_capacitor("C100", 1e-12)
portname = self.aedtapp.modeler.schematic.create_interface_port("Port1")
assert "Port1" in portname.name
assert self.aedtapp.modeler.connect_schematic_components(myind.id, myind.id, pinnum_second=2)
assert self.aedtapp.modeler.connect_schematic_components(myres.id, mycap.id, pinnum_first=1)
# create_interface_port
L1_pins = myind.pins
L1_pin2location = {}
for pin in L1_pins:
L1_pin2location[pin.name] = pin.location
C1_pins = mycap.pins
C1_pin2location = {}
for pin in C1_pins:
C1_pin2location[pin.name] = pin.location
portname = self.aedtapp.modeler.schematic.create_interface_port(
"P1_1", [L1_pin2location["n1"][0], L1_pin2location["n1"][1]]
)
assert "P1_1" in portname.name
portname = self.aedtapp.modeler.schematic.create_interface_port(
"P2_2", [C1_pin2location["negative"][0], C1_pin2location["negative"][1]]
)
assert "P2_2" in portname.name
# create_page_port
portname = self.aedtapp.modeler.schematic.create_page_port(
"Link_1", [L1_pin2location["n2"][0], L1_pin2location["n2"][1]]
)
assert "Link_1" in portname.name
portname = self.aedtapp.modeler.schematic.create_page_port(
"Link_2", [C1_pin2location["positive"][0], C1_pin2location["positive"][1]], 180
)
assert "Link_2" in portname.name
def test_13_properties(self):
assert self.aedtapp.modeler.model_units
def test_14_move(self):
assert self.aedtapp.modeler.move("L100", [0.00508, 0.00508])
assert self.aedtapp.modeler.move("L100", [200, 200], "mil")
def test_15_rotate(self):
assert self.aedtapp.modeler.rotate("L100")
def test_16_read_touchstone(self):
data = read_touchstone(os.path.join(self.local_scratch.path, touchstone))
assert len(data.expressions) > 0
assert data.data_real()
assert data.data_imag()
assert data.data_db()
def test_17_create_setup(self):
setup_name = "Dom_LNA"
LNA_setup = self.aedtapp.create_setup(setup_name)
LNA_setup.SweepDefinition = [
("Variable", "Freq"),
("Data", "LIN 1GHz 5GHz 1001"),
("OffsetF1", False),
("Synchronize", 0),
]
assert LNA_setup.update()
@pytest.mark.skipif(os.name == "posix", reason="To be investigated on linux.")
def test_18_export_touchstone(self):
assert self.aedtapp.analyze_nominal()
time.sleep(30)
assert self.aedtapp.export_touchstone("Dom_LNA", "Dom_LNA", os.path.join(self.local_scratch.path, "new.s2p"))
def test_19A_create_sweeps(self):
setup_name = "Sweep_LNA"
LNA_setup = self.aedtapp.create_setup(setup_name)
LNA_setup.add_sweep_step("Freq", 1, 2, 0.01, "GHz", override_existing_sweep=True)
assert LNA_setup.props["SweepDefinition"]["Data"] == "LIN 1GHz 2GHz 0.01GHz"
LNA_setup.add_sweep_points("Freq", [11, 12, 13.4], "GHz", override_existing_sweep=False)
assert "13.4GHz" in LNA_setup.props["SweepDefinition"]["Data"]
assert "LIN 1GHz 2GHz 0.01GHz" in LNA_setup.props["SweepDefinition"]["Data"]
LNA_setup.add_sweep_count("Temp", 20, 100, 81, "cel", count_type="Decade", override_existing_sweep=True)
assert isinstance(LNA_setup.props["SweepDefinition"], list)
assert LNA_setup.props["SweepDefinition"][1]["Variable"] == "Temp"
assert LNA_setup.props["SweepDefinition"][1]["Data"] == "DEC 20cel 100cel 81"
def test_19B_create_EyE_setups(self):
setup_name = "Dom_Verify"
assert self.aedtapp.create_setup(setup_name, "NexximVerifEye")
setup_name = "Dom_Quick"
assert self.aedtapp.create_setup(setup_name, "NexximQuickEye")
setup_name = "Dom_AMI"
assert self.aedtapp.create_setup(setup_name, "NexximAMI")
def test_20_create_AMI_plots(self):
self.aedtapp.load_project(self.ami_example_project, close_active_proj=True)
report_name = "MyReport"
assert (
self.aedtapp.post.create_ami_initial_response_plot(
"AMIAnalysis",
"b_input_15",
self.aedtapp.available_variations.nominal,
plot_type="Rectangular Stacked Plot",
plot_final_response=True,
plot_intermediate_response=True,
plotname=report_name,
)
== report_name
)
setup_name = "Dom_Verify"
assert self.aedtapp.create_setup(setup_name, "NexximVerifEye")
setup_name = "Dom_Quick"
assert self.aedtapp.create_setup(setup_name, "NexximQuickEye")
assert (
self.aedtapp.post.create_ami_statistical_eye_plot(
"AMIAnalysis", "b_output4_14", self.aedtapp.available_variations.nominal, plotname="MyReport1"
)
== "MyReport1"
)
assert (
self.aedtapp.post.create_statistical_eye_plot(
"Dom_Quick",
"b_input_15.int_ami_rx.eye_probe",
self.aedtapp.available_variations.nominal,
plotname="MyReportQ",
)
== "MyReportQ"
)
@pytest.mark.skipif(config["desktopVersion"] > "2021.2", reason="Skipped on versions higher than 2021.2")
def test_20B_create_AMI_plots(self):
assert (
self.aedtapp.post.create_statistical_eye_plot(
"Dom_Verify",
"b_input_15.int_ami_rx.eye_probe",
self.aedtapp.available_variations.nominal,
plotname="MyReportV",
)
== "MyReportV"
)
def test_21_assign_voltage_sinusoidal_excitation_to_ports(self):
settings = ["123 V", "10deg", "", "", "0V", "15GHz", "0s", "0", "0deg", ""]
ports_list = ["P1_1", "P2_2"]
assert self.aedtapp.assign_voltage_sinusoidal_excitation_to_ports(ports_list, settings)
def test_22_assign_current_sinusoidal_excitation_to_ports(self):
settings = ["", "", "20A", "50A", "4A", "", "0s", "0", "0deg", "1", "20Hz"]
ports_list = ["P1_1"]
assert self.aedtapp.assign_current_sinusoidal_excitation_to_ports(ports_list, settings)
def test_23_assign_power_sinusoidal_excitation_to_ports(self):
settings = ["", "", "", "", "20W", "14GHz", "0s", "0", "0deg", "0Hz"]
ports_list = ["P2_2"]
assert self.aedtapp.assign_power_sinusoidal_excitation_to_ports(ports_list, settings)
def test_24_new_connect_components(self):
self.aedtapp.insert_design("Components")
myind = self.aedtapp.modeler.schematic.create_inductor("L100", 1e-9)
myres = self.aedtapp.modeler.components.create_resistor("R100", 50)
mycap = self.aedtapp.modeler.components.create_capacitor("C100", 1e-12)
myind2 = self.aedtapp.modeler.components.create_inductor("L101", 1e-9)
port = self.aedtapp.modeler.components.create_interface_port("Port1")
assert self.aedtapp.modeler.schematic.connect_components_in_series([myind, myres.composed_name])
assert self.aedtapp.modeler.schematic.connect_components_in_parallel([mycap, port, myind2.id])
def test_25_import_model(self):
self.aedtapp.insert_design("Touch_import")
touch = os.path.join(local_path, "example_models", "SSN_ssn.s6p")
t1 = self.aedtapp.modeler.schematic.create_touchsthone_component(touch)
assert t1
assert len(t1.pins) == 6
t2 = self.aedtapp.modeler.schematic.create_touchsthone_component(touch)
assert t2
|
the-stack_0_3611 | import sys
import json
import time
from itertools import combinations
import requests
from ratelimit import limits, sleep_and_retry
def loop_possibilities(upper_bound, n_combinations=3):
return combinations(range(1, upper_bound + 1), n_combinations)
@sleep_and_retry
@limits(calls=5, period=30)  # at most 5 calls per 30 seconds; sleep_and_retry blocks until a slot is free
def _http_request(url, method='GET', params=None, json_data=None):
    '''Sends an HTTP request to the given URL and returns the response.
    input(s): any URL, plus an optional HTTP method and JSON body
    output: requests.Response object
    '''
prep = requests.Request(method, url).prepare()
if json_data:
prep.prepare_body(data=json_data, files=None, json=True)
return requests.Session().send(prep)
def main():
    # fetch the endpoint that returns the list of possibilities from the form
    # that precedes the challenges endpoint
list_possibilities = loop_possibilities(27, 3)
list_data = []
for combination in list_possibilities:
url = 'https://www.brfhub.com/backend/public/api/get-challenges?language=pt&profile=startup&areas%5B%5D={}&areas%5B%5D={}&areas%5B%5D={}'.format(combination[0], combination[1], combination[2])
r = _http_request(url, 'GET')
if r.status_code != 200:
continue
dict_data = r.json()
        list_data.append(dict_data['data'])  # TODO: review this later
return list_data
if __name__ == '__main__':
print(main())
|
the-stack_0_3614 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
from pkg_resources import get_distribution
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Do not generate APIdocs for members missing docstrings (undoc-members)
os.environ['APIDOC_OPTIONS'] = 'members,show-inheritance,inherited-members'
# Set APIDOC options
#os.environ['SPHINX_APIDOC_OPTIONS'] = 'members,undoc-members,show-inheritance,special-members'
os.environ['SPHINX_APIDOC_OPTIONS'] = 'members'
# -- Project information -----------------------------------------------------
project = 'VerMoUTH'
copyright = '2018, University of Groningen'
author = 'Peter C Kroon, Jonathan Barnoud, Tsjerk A Wassenaar, Siewert-Jan Marrink'
# The full version, including alpha/beta/rc tags
release = get_distribution('vermouth').version
# The short X.Y version
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinxcontrib.apidoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
nitpick_ignore = [
('py:class', 'networkx.algorithms.isomorphism.vf2userfunc.GraphMatcher'),
('py:class', 'networkx.algorithms.isomorphism.isomorphvf2.GraphMatcher'),
('py:class', 'networkx.classes.graph.Graph'),
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'VerMoUTHdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'VerMoUTH.tex', 'VerMoUTH Documentation',
author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'vermouth', 'VerMoUTH Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'VerMoUTH', 'VerMoUTH Documentation',
author, 'VerMoUTH', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
apidoc_module_dir = '../../vermouth'
apidoc_output_dir = 'api'
apidoc_separate_modules = True
apidoc_excluded_paths = ['tests', 'redistributed']
autoclass_content = 'both'
autodoc_default_options = {'members': None,
'undoc-members': None,
'show-inheritance': None}
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'networkx': ('https://networkx.github.io/documentation/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
}
|
the-stack_0_3615 | # Copyright 2018 Lenovo (Beijing) Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import base as object_base
from cyborg.objects import base
from cyborg.objects.control_path import ControlpathID
from cyborg.objects.device import Device
from cyborg.objects.driver_objects.driver_controlpath_id import \
DriverControlPathID
from cyborg.objects.driver_objects.driver_deployable import DriverDeployable
from cyborg.objects import fields as object_fields
@base.CyborgObjectRegistry.register
class DriverDevice(base.DriverObjectBase,
object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'vendor': object_fields.StringField(nullable=False),
'model': object_fields.StringField(nullable=False),
'type': object_fields.DeviceTypeField(nullable=False),
'std_board_info': object_fields.StringField(nullable=True),
# vendor board info should be a dict for driver-specific resource
# provider.
'vendor_board_info': object_fields.StringField(nullable=True),
        # hostname will be set by the agent, so the driver doesn't need to report it.
# Each controlpath_id corresponds to a different PF. For now
# we are sticking with a single cpid.
'controlpath_id': object_fields.ObjectField('DriverControlPathID',
nullable=False),
'deployable_list': object_fields.ListOfObjectsField('DriverDeployable',
default=[],
nullable=False),
'stub': object_fields.BooleanField(nullable=False, default=False)
}
def create(self, context, host):
"""Create a driver-side Device Object into DB. This object will be
stored in many db tables: device, deployable, attach_handle,
controlpath_id etc. by calling related Object.
"""
# first store in device table through Device Object.
device_obj = Device(context=context,
type=self.type,
vendor=self.vendor,
model=self.model,
hostname=host
)
if hasattr(self, 'std_board_info'):
device_obj.std_board_info = self.std_board_info
if hasattr(self, 'vendor_board_info'):
device_obj.vendor_board_info = self.vendor_board_info
device_obj.create(context)
# for the controlpath_id, call driver_controlpath_id to create.
cpid_obj = self.controlpath_id.create(context, device_obj.id)
# for deployable_list, call internal layer object: driver_deployable
# to create.
for driver_deployable in self.deployable_list:
driver_deployable.create(context, device_obj.id, cpid_obj.id)
def destroy(self, context, host):
"""Delete a driver-side Device Object from db. This should
delete the internal layer objects.
"""
# get dev_obj_list from hostname
device_obj = self.get_device_obj(context, host)
# delete deployable_list first.
for driver_deployable in self.deployable_list:
driver_deployable.destroy(context, device_obj.id)
if hasattr(self.controlpath_id, 'cpid_info'):
cpid_obj = ControlpathID.get_by_device_id_cpidinfo(
context, device_obj.id, self.controlpath_id.cpid_info)
# delete controlpath_id
cpid_obj.destroy(context)
# delete the device
device_obj.destroy(context)
def get_device_obj(self, context, host):
"""Get a driver-side Device Object from db.
:param context: requested context.
:param host: hostname of the node.
        :return: a device object matching the current driver device object. It will
        return one value because it has controlpath_id.
"""
# get dev_obj_list from hostname
device_obj_list = Device.get_list_by_hostname(context, host)
        # use controlpath_id.cpid_info to identify one Device.
for device_obj in device_obj_list:
# get cpid_obj, could be empty or only one value.
cpid_obj = ControlpathID.get_by_device_id_cpidinfo(
context, device_obj.id, self.controlpath_id.cpid_info)
# find the one cpid_obj with cpid_info
if cpid_obj is not None:
return device_obj
@classmethod
def list(cls, context, host):
"""Form driver-side device object list from DB for one host.
        A list may contain driver_device_objects without controlpath_id. (In
        case some controlpath_ids could not be stored successfully but their
        devices were stored successfully.)
"""
# get dev_obj_list from hostname
dev_obj_list = Device.get_list_by_hostname(context, host)
driver_dev_obj_list = []
for dev_obj in dev_obj_list:
cpid = DriverControlPathID.get(context, dev_obj.id)
# NOTE: will not return device without controlpath_id.
if cpid is not None:
driver_dev_obj = \
cls(context=context, vendor=dev_obj.vendor,
model=dev_obj.model, type=dev_obj.type,
std_board_info=dev_obj.std_board_info,
vendor_board_info=dev_obj.vendor_board_info,
controlpath_id=cpid,
deployable_list=DriverDeployable.list(context,
dev_obj.id)
)
driver_dev_obj_list.append(driver_dev_obj)
return driver_dev_obj_list
def get_device_obj_by_device_id(self, context, device_id):
"""Get device object by device id.
:param context: requested context.
        :param device_id: id of the device.
        :return: a device object matching the current driver device object. It will
        return one value because it has controlpath_id.
"""
        # get the device object by device_id
        device_obj = Device.get_by_device_id(context, device_id)
        # use controlpath_id.cpid_info to identify one Device.
# get cpid_obj, could be empty or only one value.
ControlpathID.get_by_device_id_cpidinfo(
context, device_obj.id, self.controlpath_id.cpid_info)
# find the one cpid_obj with cpid_info
return device_obj
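    # Illustrative sketch (not part of the upstream API): how a driver might
    # assemble and persist one of these objects. All attribute values below are
    # hypothetical examples.
    #
    #   dev = DriverDevice(context=ctx, vendor='0x8086', model='FPGA',
    #                      type='FPGA', controlpath_id=cpid,
    #                      deployable_list=[deployable], stub=False)
    #   dev.create(ctx, host='compute-1')    # persists device, cpid, deployables
    #   DriverDevice.list(ctx, 'compute-1')  # reads them back per host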
|
the-stack_0_3616 | from django.utils.html import escape
from core.templatetags import register
from django.utils.safestring import mark_safe
@register.simple_tag(takes_context=True)
def text_element(
context,
id,
label,
errors=None,
data_mode=None,
name=None,
textarea=None,
value=None,
hint=None,
password=False,
readonly=False,
autocomplete=None,
):
"""
    Render a labelled text form element (input or textarea) with optional hint and error message
"""
output = []
type = "password" if password else "text"
readonly = "readonly" if readonly else ""
if autocomplete:
autocomplete = f'autocomplete="{autocomplete}" '
else:
autocomplete = ""
if value is None:
value = context.get(id, "")
else:
value = escape(value)
output.append('<div class="form-group type-text ')
if id and errors and id in errors:
output.append("form-group-error ")
output.append('"')
if data_mode:
output.append(' data-attach="Typeahead"')
output.append(">")
output.append(f'<label class="form-label" for="{ id }">{ label }')
if hint:
output.append(f'<span class="form-hint">{ hint }</span>')
output.append("</label>")
name = name or id
if name and errors and name in errors:
message = errors[name]
output.append(f'<span class="error-message" id="{ name }_error">{ message }</span>')
if data_mode: # for typeahead elements
output.append(
f'<input { autocomplete } class="form-control" id="{ id }" type="text" data-mode="{ data_mode }" name="{ name }" { readonly } value="{ value }">' # noqa: E501
)
elif textarea:
output.append(
f'<textarea class="form-control" id="{ id }" name="{ name }" { readonly }>{ value }</textarea>' # noqa: E501
)
else:
output.append(
f'<input { autocomplete }class="form-control" id="{ id }" type="{ type }" name="{ name }" value="{ value }" { readonly }>' # noqa: E501
)
output.append("</div>")
return mark_safe("".join(output))
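# Hedged usage sketch: once this tag library is loaded in a template, the tag can
# be invoked roughly as below (the id/label values are made-up examples):
#
#   {% load <this tag library> %}
#   {% text_element "email" "Email address" errors=errors hint="Work address only" %}
#
# which renders a labelled <input type="text"> inside a "form-group" div.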
|
the-stack_0_3617 | from typing import List
class Grid:
def __init__(self):
self.grid_mat = Grid.__initialise_grid_mat()
def visualise(self) -> None:
grid_mat_element_gen = self.__create_grid_mat_element_generator()
str_rows = list()
for ri in range(7):
if ri % 2 == 0:
str_rows.append('+---+---+---+')
continue
str_row = ''
for ci in range(13):
if ci % 4 == 0:
str_row += '|'
elif ci % 2 != 0:
str_row += ' '
else:
str_row += next(grid_mat_element_gen)
str_rows.append(str_row)
print('\n'.join(str_rows))
def visualise_v2(self):
str_rows = [
' {} | {} | {} '.format(*self.grid_mat[0][:]),
'----+---+-----',
' {} | {} | {} '.format(*self.grid_mat[1][:]),
'----+---+-----',
' {} | {} | {} '.format(*self.grid_mat[2][:])
]
print('\n'.join(str_rows))
def __create_grid_mat_element_generator(self):
for ri in range(3):
for ci in range(3):
yield self.grid_mat[ri][ci]
@staticmethod
def __initialise_grid_mat() -> List[List[str]]:
grid = [[' ' for _ in range(3)] for _ in range(3)]
return grid
print('Visualise grid:')
grid = Grid()
print('Step 1')
grid.grid_mat[1][1] = 'X'
grid.visualise()
print()
print('Step 2')
grid.grid_mat[0][2] = 'O'
grid.visualise()
print()
print('New visualisation method:')
grid = Grid()
print('Step 1')
grid.grid_mat[1][1] = 'X'
grid.visualise_v2()
print()
print('Step 2')
grid.grid_mat[0][2] = 'O'
grid.visualise_v2()
print()
|
the-stack_0_3618 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from cinder.api.v2 import types
from cinder.api.views import types as views_types
from cinder import exception
from cinder.openstack.common import timeutils
from cinder import test
from cinder.tests.api import fakes
from cinder.volume import volume_types
def stub_volume_type(id):
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"
}
return dict(
id=id,
name='vol_type_%s' % str(id),
extra_specs=specs,
)
def return_volume_types_get_all_types(context):
return dict(
vol_type_1=stub_volume_type(1),
vol_type_2=stub_volume_type(2),
vol_type_3=stub_volume_type(3)
)
def return_empty_volume_types_get_all_types(context):
return {}
def return_volume_types_get_volume_type(context, id):
if id == "777":
raise exception.VolumeTypeNotFound(volume_type_id=id)
return stub_volume_type(int(id))
def return_volume_types_get_by_name(context, name):
if name == "777":
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
return stub_volume_type(int(name.split("_")[2]))
class VolumeTypesApiTest(test.TestCase):
def setUp(self):
super(VolumeTypesApiTest, self).setUp()
self.controller = types.VolumeTypesController()
def test_volume_types_index(self):
self.stubs.Set(volume_types, 'get_all_types',
return_volume_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/fake/types')
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict['volume_types']))
expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
actual_names = map(lambda e: e['name'], res_dict['volume_types'])
self.assertEqual(set(actual_names), set(expected_names))
for entry in res_dict['volume_types']:
self.assertEqual('value1', entry['extra_specs']['key1'])
def test_volume_types_index_no_data(self):
self.stubs.Set(volume_types, 'get_all_types',
return_empty_volume_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/fake/types')
res_dict = self.controller.index(req)
self.assertEqual(0, len(res_dict['volume_types']))
def test_volume_types_show(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(1, len(res_dict))
self.assertEqual('1', res_dict['volume_type']['id'])
self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
def test_volume_types_show_not_found(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/777')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, '777')
def test_view_builder_show(self):
view_builder = views_types.ViewBuilder()
now = timeutils.isotime()
raw_volume_type = dict(
name='new_type',
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.show(request, raw_volume_type)
self.assertTrue('volume_type' in output)
expected_volume_type = dict(
name='new_type',
extra_specs={},
id=42,
)
self.assertDictMatch(output['volume_type'], expected_volume_type)
def test_view_builder_list(self):
view_builder = views_types.ViewBuilder()
now = timeutils.isotime()
raw_volume_types = []
for i in range(0, 10):
raw_volume_types.append(
dict(
name='new_type',
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.index(request, raw_volume_types)
self.assertTrue('volume_types' in output)
for i in range(0, 10):
expected_volume_type = dict(
name='new_type',
extra_specs={},
id=42 + i
)
self.assertDictMatch(output['volume_types'][i],
expected_volume_type)
class VolumeTypesSerializerTest(test.TestCase):
def _verify_volume_type(self, vtype, tree):
self.assertEqual('volume_type', tree.tag)
self.assertEqual(vtype['name'], tree.get('name'))
self.assertEqual(str(vtype['id']), tree.get('id'))
self.assertEqual(1, len(tree))
extra_specs = tree[0]
self.assertEqual('extra_specs', extra_specs.tag)
seen = set(vtype['extra_specs'].keys())
for child in extra_specs:
self.assertTrue(child.tag in seen)
self.assertEqual(vtype['extra_specs'][child.tag], child.text)
seen.remove(child.tag)
self.assertEqual(len(seen), 0)
def test_index_serializer(self):
serializer = types.VolumeTypesTemplate()
# Just getting some input data
vtypes = return_volume_types_get_all_types(None)
text = serializer.serialize({'volume_types': vtypes.values()})
tree = etree.fromstring(text)
self.assertEqual('volume_types', tree.tag)
self.assertEqual(len(vtypes), len(tree))
for child in tree:
name = child.get('name')
self.assertTrue(name in vtypes)
self._verify_volume_type(vtypes[name], child)
def test_voltype_serializer(self):
serializer = types.VolumeTypeTemplate()
vtype = stub_volume_type(1)
text = serializer.serialize(dict(volume_type=vtype))
tree = etree.fromstring(text)
self._verify_volume_type(vtype, tree)
|
the-stack_0_3619 | def main():
n = int(input())
for i in range(n):
a = int(input())
b = set([int(x) for x in input().split()])
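        # the distinct values form one consecutive run exactly when max - min < number of distinct values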
if (max(b) - min(b)) < len(b):
print("YES")
else:
print("NO")
if __name__ == '__main__':
main()
|
the-stack_0_3620 | import sqlite3
from datetime import datetime
from os import listdir
import os
import re
import json
import shutil
import pandas as pd
from application_logging.logger import App_Logger
class Prediction_Data_validation:
"""
This class shall be used for handling all the validation done on the Raw Prediction Data!!.
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
def __init__(self,path):
self.Batch_Directory = path
self.schema_path = 'schema_prediction.json'
self.logger = App_Logger()
def valuesFromSchema(self):
"""
Method Name: valuesFromSchema
Description: This method extracts all the relevant information from the pre-defined "Schema" file.
Output: LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, Number of Columns
On Failure: Raise ValueError,KeyError,Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
with open(self.schema_path, 'r') as f:
dic = json.load(f)
f.close()
pattern = dic['SampleFileName']
LengthOfDateStampInFile = dic['LengthOfDateStampInFile']
LengthOfTimeStampInFile = dic['LengthOfTimeStampInFile']
column_names = dic['ColName']
NumberofColumns = dic['NumberofColumns']
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
message ="LengthOfDateStampInFile:: %s" %LengthOfDateStampInFile + "\t" + "LengthOfTimeStampInFile:: %s" % LengthOfTimeStampInFile +"\t " + "NumberofColumns:: %s" % NumberofColumns + "\n"
self.logger.log(file,message)
file.close()
except ValueError:
file = open("Prediction_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file,"ValueError:Value not found inside schema_training.json")
file.close()
raise ValueError
except KeyError:
file = open("Prediction_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file, "KeyError:Key value error incorrect key passed")
file.close()
raise KeyError
except Exception as e:
file = open("Prediction_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file, str(e))
file.close()
raise e
return LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, NumberofColumns
def manualRegexCreation(self):
"""
Method Name: manualRegexCreation
Description: This method contains a manually defined regex based on the "FileName" given in "Schema" file.
This Regex is used to validate the filename of the prediction data.
Output: Regex pattern
On Failure: None
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
regex = "['visibility']+['\_'']+[\d_]+[\d]+\.csv"
return regex
def createDirectoryForGoodBadRawData(self):
"""
Method Name: createDirectoryForGoodBadRawData
Description: This method creates directories to store the Good Data and Bad Data
after validating the prediction data.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = os.path.join("Prediction_Raw_Files_Validated/", "Good_Raw/")
if not os.path.isdir(path):
os.makedirs(path)
path = os.path.join("Prediction_Raw_Files_Validated/", "Bad_Raw/")
if not os.path.isdir(path):
os.makedirs(path)
except OSError as ex:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while creating Directory %s:" % ex)
file.close()
raise OSError
def deleteExistingGoodDataTrainingFolder(self):
"""
Method Name: deleteExistingGoodDataTrainingFolder
Description: This method deletes the directory made to store the Good Data
after loading the data in the table. Once the good files are
loaded in the DB,deleting the directory ensures space optimization.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = 'Prediction_Raw_Files_Validated/'
# if os.path.isdir("ids/" + userName):
# if os.path.isdir(path + 'Bad_Raw/'):
# shutil.rmtree(path + 'Bad_Raw/')
if os.path.isdir(path + 'Good_Raw/'):
shutil.rmtree(path + 'Good_Raw/')
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"GoodRaw directory deleted successfully!!!")
file.close()
except OSError as s:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while Deleting Directory : %s" %s)
file.close()
raise OSError
def deleteExistingBadDataTrainingFolder(self):
"""
Method Name: deleteExistingBadDataTrainingFolder
Description: This method deletes the directory made to store the bad Data.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = 'Prediction_Raw_Files_Validated/'
if os.path.isdir(path + 'Bad_Raw/'):
shutil.rmtree(path + 'Bad_Raw/')
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"BadRaw directory deleted before starting validation!!!")
file.close()
except OSError as s:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while Deleting Directory : %s" %s)
file.close()
raise OSError
def moveBadFilesToArchiveBad(self):
"""
Method Name: moveBadFilesToArchiveBad
Description: This method deletes the directory made to store the Bad Data
after moving the data in an archive folder. We archive the bad
files to send them back to the client for invalid data issue.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
now = datetime.now()
date = now.date()
time = now.strftime("%H%M%S")
try:
path= "PredictionArchivedBadData"
if not os.path.isdir(path):
os.makedirs(path)
source = 'Prediction_Raw_Files_Validated/Bad_Raw/'
dest = 'PredictionArchivedBadData/BadData_' + str(date)+"_"+str(time)
if not os.path.isdir(dest):
os.makedirs(dest)
files = os.listdir(source)
for f in files:
if f not in os.listdir(dest):
shutil.move(source + f, dest)
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Bad files moved to archive")
path = 'Prediction_Raw_Files_Validated/'
if os.path.isdir(path + 'Bad_Raw/'):
shutil.rmtree(path + 'Bad_Raw/')
self.logger.log(file,"Bad Raw Data Folder Deleted successfully!!")
file.close()
except OSError as e:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file, "Error while moving bad files to archive:: %s" % e)
file.close()
raise OSError
def validationFileNameRaw(self,regex,LengthOfDateStampInFile,LengthOfTimeStampInFile):
"""
Method Name: validationFileNameRaw
Description: This function validates the name of the prediction csv file as per given name in the schema!
Regex pattern is used to do the validation.If name format do not match the file is moved
to Bad Raw Data folder else in Good raw data.
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# delete the directories for good and bad data in case last run was unsuccessful and folders were not deleted.
self.deleteExistingBadDataTrainingFolder()
self.deleteExistingGoodDataTrainingFolder()
self.createDirectoryForGoodBadRawData()
onlyfiles = [f for f in listdir(self.Batch_Directory)]
try:
f = open("Prediction_Logs/nameValidationLog.txt", 'a+')
for filename in onlyfiles:
if (re.match(regex, filename)):
splitAtDot = re.split('.csv', filename)
splitAtDot = (re.split('_', splitAtDot[0]))
if len(splitAtDot[1]) == LengthOfDateStampInFile:
if len(splitAtDot[2]) == LengthOfTimeStampInFile:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Good_Raw")
self.logger.log(f,"Valid File name!! File moved to GoodRaw Folder :: %s" % filename)
else:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f,"Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
else:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f,"Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
else:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f, "Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
f.close()
except Exception as e:
f = open("Prediction_Logs/nameValidationLog.txt", 'a+')
self.logger.log(f, "Error occured while validating FileName %s" % e)
f.close()
raise e
def validateColumnLength(self,NumberofColumns):
"""
Method Name: validateColumnLength
Description: This function validates the number of columns in the csv files.
                                       It should be the same as given in the schema file.
                                       If it is not the same, the file is not suitable for processing and is thus moved to the Bad Raw Data folder.
                                       If the column number matches, the file is kept in Good Raw Data for processing.
                                       If the csv file is missing the first column name, this function changes the missing name to "Wafer".
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
f = open("Prediction_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f,"Column Length Validation Started!!")
for file in listdir('Prediction_Raw_Files_Validated/Good_Raw/'):
csv = pd.read_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file)
if csv.shape[1] == NumberofColumns:
csv.rename(columns={"Unnamed: 0": "Wafer"}, inplace=True)
csv.to_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file, index=None, header=True)
else:
shutil.move("Prediction_Raw_Files_Validated/Good_Raw/" + file, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f, "Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s" % file)
self.logger.log(f, "Column Length Validation Completed!!")
except OSError:
f = open("Prediction_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f, "Error Occured while moving the file :: %s" % OSError)
f.close()
raise OSError
except Exception as e:
f = open("Prediction_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f, "Error Occured:: %s" % e)
f.close()
raise e
f.close()
def deletePredictionFile(self):
if os.path.exists('Prediction_Output_File/Predictions.csv'):
os.remove('Prediction_Output_File/Predictions.csv')
def validateMissingValuesInWholeColumn(self):
"""
Method Name: validateMissingValuesInWholeColumn
Description: This function validates if any column in the csv file has all values missing.
If all the values are missing, the file is not suitable for processing.
                                       Such files are moved to bad raw data.
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
f = open("Prediction_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Missing Values Validation Started!!")
for file in listdir('Prediction_Raw_Files_Validated/Good_Raw/'):
csv = pd.read_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file)
count = 0
for columns in csv:
if (len(csv[columns]) - csv[columns].count()) == len(csv[columns]):
count+=1
shutil.move("Prediction_Raw_Files_Validated/Good_Raw/" + file,
"Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f,"Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s" % file)
break
if count==0:
csv.rename(columns={"Unnamed: 0": "Wafer"}, inplace=True)
csv.to_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file, index=None, header=True)
except OSError:
f = open("Prediction_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Error Occured while moving the file :: %s" % OSError)
f.close()
raise OSError
except Exception as e:
f = open("Prediction_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Error Occured:: %s" % e)
f.close()
raise e
f.close()
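# Rough usage sketch (the batch directory name is an assumption; the call order
# mirrors the methods above):
#
#   validator = Prediction_Data_validation('Prediction_Batch_files/')
#   regex = validator.manualRegexCreation()
#   date_len, time_len, _cols, n_cols = validator.valuesFromSchema()
#   validator.validationFileNameRaw(regex, date_len, time_len)
#   validator.validateColumnLength(n_cols)
#   validator.validateMissingValuesInWholeColumn()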
|
the-stack_0_3621 | # Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
import unittest
from sawtooth_sdk.consensus.zmq_service import ZmqService
from sawtooth_sdk.messaging.future import Future
from sawtooth_sdk.messaging.future import FutureResult
from sawtooth_sdk.protobuf import consensus_pb2
from sawtooth_sdk.protobuf.validator_pb2 import Message
class TestService(unittest.TestCase):
def setUp(self):
self.mock_stream = unittest.mock.Mock()
self.service = ZmqService(
stream=self.mock_stream,
timeout=10)
def _make_future(self, message_type, content):
fut = Future('test')
fut.set_result(FutureResult(
message_type=message_type,
content=content))
return fut
def test_send_to(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_SEND_TO_RESPONSE,
content=consensus_pb2.ConsensusSendToResponse(
status=consensus_pb2.ConsensusSendToResponse.OK
).SerializeToString())
self.service.send_to(
receiver_id=b'receiver_id',
message_type='message_type',
payload=b'payload')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_SEND_TO_REQUEST,
content=consensus_pb2.ConsensusSendToRequest(
message_type='message_type',
content=b'payload',
receiver_id=b'receiver_id').SerializeToString())
def test_broadcast(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_BROADCAST_RESPONSE,
content=consensus_pb2.ConsensusBroadcastResponse(
status=consensus_pb2.ConsensusBroadcastResponse.OK
).SerializeToString())
self.service.broadcast(
message_type='message_type',
payload=b'payload')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_BROADCAST_REQUEST,
content=consensus_pb2.ConsensusBroadcastRequest(
message_type='message_type',
content=b'payload').SerializeToString())
def test_initialize_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_INITIALIZE_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusInitializeBlockResponse(
status=consensus_pb2.ConsensusInitializeBlockResponse.OK
).SerializeToString())
self.service.initialize_block(previous_id=b'test')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_INITIALIZE_BLOCK_REQUEST,
content=consensus_pb2.ConsensusInitializeBlockRequest(
previous_id=b'test').SerializeToString())
def test_summarize_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_SUMMARIZE_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusSummarizeBlockResponse(
status=consensus_pb2.ConsensusSummarizeBlockResponse.OK,
summary=b'summary').SerializeToString())
result = self.service.summarize_block()
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_SUMMARIZE_BLOCK_REQUEST,
content=consensus_pb2.ConsensusSummarizeBlockRequest()
.SerializeToString())
self.assertEqual(result, b'summary')
def test_finalize_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_FINALIZE_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusFinalizeBlockResponse(
status=consensus_pb2.ConsensusFinalizeBlockResponse.OK,
block_id=b'block_id').SerializeToString())
result = self.service.finalize_block(data=b'test')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_FINALIZE_BLOCK_REQUEST,
content=consensus_pb2.ConsensusFinalizeBlockRequest(
data=b'test').SerializeToString())
self.assertEqual(result, b'block_id')
def test_cancel_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_CANCEL_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusCancelBlockResponse(
status=consensus_pb2.ConsensusCancelBlockResponse.OK
).SerializeToString())
self.service.cancel_block()
request = consensus_pb2.ConsensusCancelBlockRequest()
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_CANCEL_BLOCK_REQUEST,
content=request.SerializeToString())
def test_check_blocks(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_CHECK_BLOCKS_RESPONSE,
content=consensus_pb2.ConsensusCheckBlocksResponse(
status=consensus_pb2.ConsensusCheckBlocksResponse.OK
).SerializeToString())
self.service.check_blocks(priority=[b'test1', b'test2'])
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_CHECK_BLOCKS_REQUEST,
content=consensus_pb2.ConsensusCheckBlocksRequest(
block_ids=[b'test1', b'test2']).SerializeToString())
def test_commit_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_COMMIT_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusCommitBlockResponse(
status=consensus_pb2.ConsensusCommitBlockResponse.OK
).SerializeToString())
self.service.commit_block(block_id=b'test')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_COMMIT_BLOCK_REQUEST,
content=consensus_pb2.ConsensusCommitBlockRequest(
block_id=b'test').SerializeToString())
def test_ignore_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_IGNORE_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusIgnoreBlockResponse(
status=consensus_pb2.ConsensusIgnoreBlockResponse.OK
).SerializeToString())
self.service.ignore_block(block_id=b'test')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_IGNORE_BLOCK_REQUEST,
content=consensus_pb2.ConsensusIgnoreBlockRequest(
block_id=b'test').SerializeToString())
def test_fail_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_FAIL_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusFailBlockResponse(
status=consensus_pb2.ConsensusFailBlockResponse.OK
).SerializeToString())
self.service.fail_block(block_id=b'test')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_FAIL_BLOCK_REQUEST,
content=consensus_pb2.ConsensusFailBlockRequest(
block_id=b'test').SerializeToString())
def test_get_blocks(self):
block_1 = consensus_pb2.ConsensusBlock(
block_id=b'block1',
previous_id=b'block0',
signer_id=b'signer1',
block_num=1,
payload=b'test1')
block_2 = consensus_pb2.ConsensusBlock(
block_id=b'block2',
previous_id=b'block1',
signer_id=b'signer2',
block_num=2,
payload=b'test2')
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_BLOCKS_GET_RESPONSE,
content=consensus_pb2.ConsensusBlocksGetResponse(
status=consensus_pb2.ConsensusBlocksGetResponse.OK,
blocks=[block_1, block_2]).SerializeToString())
blocks = self.service.get_blocks(block_ids=[b'id1', b'id2'])
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_BLOCKS_GET_REQUEST,
content=consensus_pb2.ConsensusBlocksGetRequest(
block_ids=[b'id1', b'id2']).SerializeToString())
self.assertEqual({
block_id: (
block.previous_id,
block.signer_id,
block.block_num,
block.payload)
for block_id, block in blocks.items()
}, {
b'block1': (b'block0', b'signer1', 1, b'test1'),
b'block2': (b'block1', b'signer2', 2, b'test2'),
})
def test_get_chain_head(self):
block = consensus_pb2.ConsensusBlock(
block_id=b'block',
previous_id=b'block0',
signer_id=b'signer',
block_num=1,
payload=b'test')
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_CHAIN_HEAD_GET_RESPONSE,
content=consensus_pb2.ConsensusChainHeadGetResponse(
status=consensus_pb2.ConsensusChainHeadGetResponse.OK,
block=block).SerializeToString())
chain_head = self.service.get_chain_head()
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_CHAIN_HEAD_GET_REQUEST,
content=consensus_pb2.ConsensusChainHeadGetRequest()
.SerializeToString())
self.assertEqual(chain_head.block_id, b'block')
self.assertEqual(chain_head.previous_id, b'block0')
self.assertEqual(chain_head.signer_id, b'signer')
self.assertEqual(chain_head.block_num, 1)
self.assertEqual(chain_head.payload, b'test')
def test_get_settings(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_SETTINGS_GET_RESPONSE,
content=consensus_pb2.ConsensusSettingsGetResponse(
status=consensus_pb2.ConsensusSettingsGetResponse.OK,
entries=[
consensus_pb2.ConsensusSettingsEntry(
key='key1',
value='value1'),
consensus_pb2.ConsensusSettingsEntry(
key='key2',
value='value2')]).SerializeToString())
entries = self.service.get_settings(
block_id=b'test',
settings=['test1', 'test2'])
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_SETTINGS_GET_REQUEST,
content=consensus_pb2.ConsensusSettingsGetRequest(
block_id=b'test',
keys=['test1', 'test2']).SerializeToString())
self.assertEqual(
entries, {
'key1': 'value1',
'key2': 'value2',
})
def test_get_state(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_STATE_GET_RESPONSE,
content=consensus_pb2.ConsensusStateGetResponse(
status=consensus_pb2.ConsensusStateGetResponse.OK,
entries=[
consensus_pb2.ConsensusStateEntry(
address='address1',
data=b'data1'),
consensus_pb2.ConsensusStateEntry(
address='address2',
data=b'data2')]).SerializeToString())
entries = self.service.get_state(
block_id=b'test',
addresses=['test1', 'test2'])
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_STATE_GET_REQUEST,
content=consensus_pb2.ConsensusStateGetRequest(
block_id=b'test',
addresses=['test1', 'test2']).SerializeToString())
self.assertEqual(
entries, {
'address1': b'data1',
'address2': b'data2',
})
|
the-stack_0_3623 | '''
Function:
    Yinyuetai MV downloader: http://www.yinyuetai.com
Author:
Charles
WeChat Official Account:
Charles的皮卡丘
'''
import re
import requests
from utils.utils import *
'''
Input:
    --url: the video URL
    --savepath: directory where the downloaded video is saved
Output:
    --is_success: BOOL indicating whether the download succeeded
'''
class yinyuetai():
def __init__(self):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
self.info_url = 'http://www.yinyuetai.com/insite/get-video-info?flex=true&videoId={}'
    '''External entry point'''
def get(self, url, savepath='videos'):
video_infos = self.__getvideoinfos(url)
is_success = self.__download(video_infos, savepath)
return is_success
    '''Download'''
def __download(self, video_infos, savepath):
checkFolder(savepath)
download_url = video_infos[0]
video_name = 'yinyuetai_' + video_infos[1] + '.mp4'
try:
is_success = downloadBASE(url=download_url, savename=video_name, savepath=savepath, headers=self.headers, stream=True, verify=False)
except:
is_success = False
return is_success
    '''Get video info'''
def __getvideoinfos(self, url):
mvid = url.split('/')[-1]
res = requests.get(self.info_url.format(mvid), headers=self.headers)
pattern = re.compile(r'http://\w*?\.yinyuetai\.com/uploads/videos/common/.*?(?=&br)')
re_result = re.findall(pattern, res.text)
        # pick the highest-quality video
download_url = re_result[-1]
video_infos = [download_url, mvid]
return video_infos
'''test'''
if __name__ == '__main__':
url = 'http://v.yinyuetai.com/video/3247548'
yinyuetai().get(url, savepath='videos') |
the-stack_0_3624 | """
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
import warnings
from django.template import Origin, Template, TemplateDoesNotExist
from django.template.backends.django import copy_exception
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes
from django.utils.inspect import func_supports_parameter
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, loaders):
self.template_cache = {}
self.find_template_cache = {} # RemovedInDjango20Warning
self.get_template_cache = {}
self.loaders = engine.get_template_loaders(loaders)
super(Loader, self).__init__(engine)
def get_contents(self, origin):
return origin.loader.get_contents(origin)
def get_template(self, template_name, template_dirs=None, skip=None):
"""
Perform the caching that gives this loader its name. Often many of the
templates attempted will be missing, so memory use is of concern here.
To keep it in check, caching behavior is a little complicated when a
template is not found. See ticket #26306 for more details.
With template debugging disabled, cache the TemplateDoesNotExist class
for every missing template and raise a new instance of it after
fetching it from the cache.
With template debugging enabled, a unique TemplateDoesNotExist object
is cached for each missing template to preserve debug data. When
raising an exception, Python sets __traceback__, __context__, and
__cause__ attributes on it. Those attributes can contain references to
all sorts of objects up the call chain and caching them creates a
memory leak. Thus, unraised copies of the exceptions are cached and
copies of those copies are raised after they're fetched from the cache.
"""
key = self.cache_key(template_name, template_dirs, skip)
cached = self.get_template_cache.get(key)
if cached:
if isinstance(cached, type) and issubclass(cached, TemplateDoesNotExist):
raise cached(template_name)
elif isinstance(cached, TemplateDoesNotExist):
raise copy_exception(cached)
return cached
try:
template = super(Loader, self).get_template(
template_name, template_dirs, skip,
)
except TemplateDoesNotExist as e:
self.get_template_cache[key] = copy_exception(e) if self.engine.debug else TemplateDoesNotExist
raise
else:
self.get_template_cache[key] = template
return template
def get_template_sources(self, template_name, template_dirs=None):
for loader in self.loaders:
args = [template_name]
# RemovedInDjango20Warning: Add template_dirs for compatibility
# with old loaders
if func_supports_parameter(loader.get_template_sources, 'template_dirs'):
args.append(template_dirs)
for origin in loader.get_template_sources(*args):
yield origin
def cache_key(self, template_name, template_dirs, skip=None):
"""
Generate a cache key for the template name, dirs, and skip.
If skip is provided, only origins that match template_name are included
in the cache key. This ensures each template is only parsed and cached
once if contained in different extend chains like:
x -> a -> a
y -> a -> a
z -> a -> a
"""
dirs_prefix = ''
skip_prefix = ''
if skip:
matching = [origin.name for origin in skip if origin.template_name == template_name]
if matching:
skip_prefix = self.generate_hash(matching)
if template_dirs:
dirs_prefix = self.generate_hash(template_dirs)
return '-'.join(filter(bool, [template_name, skip_prefix, dirs_prefix]))
def generate_hash(self, values):
return hashlib.sha1(force_bytes('|'.join(values))).hexdigest()
@property
def supports_recursion(self):
"""
RemovedInDjango20Warning: This is an internal property used by the
ExtendsNode during the deprecation of non-recursive loaders.
"""
return all(hasattr(loader, 'get_contents') for loader in self.loaders)
def find_template(self, name, dirs=None):
"""
RemovedInDjango20Warning: An internal method to lookup the template
name in all the configured loaders.
"""
key = self.cache_key(name, dirs)
try:
result = self.find_template_cache[key]
except KeyError:
result = None
for loader in self.loaders:
try:
template, display_name = loader(name, dirs)
except TemplateDoesNotExist:
pass
else:
origin = Origin(
name=display_name,
template_name=name,
loader=loader,
)
result = template, origin
break
self.find_template_cache[key] = result
if result:
return result
else:
self.template_cache[key] = TemplateDoesNotExist
raise TemplateDoesNotExist(name)
def load_template(self, template_name, template_dirs=None):
warnings.warn(
'The load_template() method is deprecated. Use get_template() '
'instead.', RemovedInDjango20Warning,
)
key = self.cache_key(template_name, template_dirs)
template_tuple = self.template_cache.get(key)
# A cached previous failure:
if template_tuple is TemplateDoesNotExist:
raise TemplateDoesNotExist(template_name)
elif template_tuple is None:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = Template(template, origin, template_name, self.engine)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
self.template_cache[key] = (template, origin)
self.template_cache[key] = (template, None)
return self.template_cache[key]
def reset(self):
"Empty the template cache."
self.template_cache.clear()
self.find_template_cache.clear() # RemovedInDjango20Warning
self.get_template_cache.clear()
|
the-stack_0_3626 | from __future__ import unicode_literals
import re
import uuid
from datetime import datetime
from random import random, randint
import pytz
from boto3 import Session
from moto.core.exceptions import JsonRESTError
from moto.core import BaseBackend, BaseModel
from moto.core.utils import unix_time
from moto.ec2 import ec2_backends
from copy import copy
from .exceptions import ServiceNotFoundException, TaskDefinitionNotFoundException
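# Hedged usage sketch: these models back the mocked ECS API and are normally
# exercised through moto's decorator with an ordinary boto3 client (the cluster
# name below is an arbitrary example):
#
#   from moto import mock_ecs
#   import boto3
#
#   @mock_ecs
#   def test_create_cluster():
#       client = boto3.client("ecs", region_name="us-east-1")
#       client.create_cluster(clusterName="test_cluster")
#       assert client.list_clusters()["clusterArns"]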
class BaseObject(BaseModel):
def camelCase(self, key):
words = []
for i, word in enumerate(key.split("_")):
if i > 0:
words.append(word.title())
else:
words.append(word)
return "".join(words)
def gen_response_object(self):
response_object = copy(self.__dict__)
for key, value in self.__dict__.items():
if "_" in key:
response_object[self.camelCase(key)] = value
del response_object[key]
return response_object
@property
def response_object(self):
return self.gen_response_object()
class Cluster(BaseObject):
def __init__(self, cluster_name, region_name):
self.active_services_count = 0
self.arn = "arn:aws:ecs:{0}:012345678910:cluster/{1}".format(
region_name, cluster_name
)
self.name = cluster_name
self.pending_tasks_count = 0
self.registered_container_instances_count = 0
self.running_tasks_count = 0
self.status = "ACTIVE"
self.region_name = region_name
@property
def physical_resource_id(self):
return self.name
@property
def response_object(self):
response_object = self.gen_response_object()
response_object["clusterArn"] = self.arn
response_object["clusterName"] = self.name
del response_object["arn"], response_object["name"]
return response_object
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# if properties is not provided, cloudformation will use the default values for all properties
if "Properties" in cloudformation_json:
properties = cloudformation_json["Properties"]
else:
properties = {}
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_cluster(
# ClusterName is optional in CloudFormation, thus create a random
# name if necessary
cluster_name=properties.get(
"ClusterName", "ecscluster{0}".format(int(random() * 10 ** 6))
)
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
if original_resource.name != properties["ClusterName"]:
ecs_backend = ecs_backends[region_name]
ecs_backend.delete_cluster(original_resource.arn)
return ecs_backend.create_cluster(
# ClusterName is optional in CloudFormation, thus create a
# random name if necessary
cluster_name=properties.get(
"ClusterName", "ecscluster{0}".format(int(random() * 10 ** 6))
)
)
else:
# no-op when nothing changed between old and new resources
return original_resource
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return self.arn
raise UnformattedGetAttTemplateException()
class TaskDefinition(BaseObject):
def __init__(
self,
family,
revision,
container_definitions,
region_name,
network_mode=None,
volumes=None,
tags=None,
placement_constraints=None,
):
self.family = family
self.revision = revision
self.arn = "arn:aws:ecs:{0}:012345678910:task-definition/{1}:{2}".format(
region_name, family, revision
)
self.container_definitions = container_definitions
self.tags = tags if tags is not None else []
if volumes is None:
self.volumes = []
else:
self.volumes = volumes
if network_mode is None:
self.network_mode = "bridge"
else:
self.network_mode = network_mode
self.placement_constraints = (
placement_constraints if placement_constraints is not None else []
)
@property
def response_object(self):
response_object = self.gen_response_object()
response_object["taskDefinitionArn"] = response_object["arn"]
del response_object["arn"]
del response_object["tags"]
return response_object
@property
def physical_resource_id(self):
return self.arn
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
family = properties.get(
"Family", "task-definition-{0}".format(int(random() * 10 ** 6))
)
container_definitions = properties["ContainerDefinitions"]
volumes = properties.get("Volumes")
ecs_backend = ecs_backends[region_name]
return ecs_backend.register_task_definition(
family=family, container_definitions=container_definitions, volumes=volumes
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
family = properties.get(
"Family", "task-definition-{0}".format(int(random() * 10 ** 6))
)
container_definitions = properties["ContainerDefinitions"]
volumes = properties.get("Volumes")
if (
original_resource.family != family
or original_resource.container_definitions != container_definitions
or original_resource.volumes != volumes
):
# currently TaskRoleArn isn't stored at TaskDefinition
# instances
ecs_backend = ecs_backends[region_name]
ecs_backend.deregister_task_definition(original_resource.arn)
return ecs_backend.register_task_definition(
family=family,
container_definitions=container_definitions,
volumes=volumes,
)
else:
# no-op when nothing changed between old and new resources
return original_resource
class Task(BaseObject):
def __init__(
self,
cluster,
task_definition,
container_instance_arn,
resource_requirements,
overrides={},
started_by="",
):
self.cluster_arn = cluster.arn
self.task_arn = "arn:aws:ecs:{0}:012345678910:task/{1}".format(
cluster.region_name, str(uuid.uuid4())
)
self.container_instance_arn = container_instance_arn
self.last_status = "RUNNING"
self.desired_status = "RUNNING"
self.task_definition_arn = task_definition.arn
self.overrides = overrides
self.containers = []
self.started_by = started_by
self.stopped_reason = ""
self.resource_requirements = resource_requirements
@property
def response_object(self):
response_object = self.gen_response_object()
return response_object
class Service(BaseObject):
def __init__(
self,
cluster,
service_name,
task_definition,
desired_count,
load_balancers=None,
scheduling_strategy=None,
tags=None,
):
self.cluster_arn = cluster.arn
self.arn = "arn:aws:ecs:{0}:012345678910:service/{1}".format(
cluster.region_name, service_name
)
self.name = service_name
self.status = "ACTIVE"
self.running_count = 0
self.task_definition = task_definition.arn
self.desired_count = desired_count
self.events = []
self.deployments = [
{
"createdAt": datetime.now(pytz.utc),
"desiredCount": self.desired_count,
"id": "ecs-svc/{}".format(randint(0, 32 ** 12)),
"pendingCount": self.desired_count,
"runningCount": 0,
"status": "PRIMARY",
"taskDefinition": task_definition.arn,
"updatedAt": datetime.now(pytz.utc),
}
]
self.load_balancers = load_balancers if load_balancers is not None else []
self.scheduling_strategy = (
scheduling_strategy if scheduling_strategy is not None else "REPLICA"
)
self.tags = tags if tags is not None else []
self.pending_count = 0
@property
def physical_resource_id(self):
return self.arn
@property
def response_object(self):
response_object = self.gen_response_object()
del response_object["name"], response_object["arn"], response_object["tags"]
response_object["serviceName"] = self.name
response_object["serviceArn"] = self.arn
response_object["schedulingStrategy"] = self.scheduling_strategy
for deployment in response_object["deployments"]:
if isinstance(deployment["createdAt"], datetime):
deployment["createdAt"] = unix_time(
deployment["createdAt"].replace(tzinfo=None)
)
if isinstance(deployment["updatedAt"], datetime):
deployment["updatedAt"] = unix_time(
deployment["updatedAt"].replace(tzinfo=None)
)
return response_object
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
if isinstance(properties["Cluster"], Cluster):
cluster = properties["Cluster"].name
else:
cluster = properties["Cluster"]
if isinstance(properties["TaskDefinition"], TaskDefinition):
task_definition = properties["TaskDefinition"].family
else:
task_definition = properties["TaskDefinition"]
service_name = "{0}Service{1}".format(cluster, int(random() * 10 ** 6))
desired_count = properties["DesiredCount"]
# TODO: LoadBalancers
# TODO: Role
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_service(
cluster, service_name, task_definition, desired_count
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
if isinstance(properties["Cluster"], Cluster):
cluster_name = properties["Cluster"].name
else:
cluster_name = properties["Cluster"]
if isinstance(properties["TaskDefinition"], TaskDefinition):
task_definition = properties["TaskDefinition"].family
else:
task_definition = properties["TaskDefinition"]
desired_count = properties["DesiredCount"]
ecs_backend = ecs_backends[region_name]
service_name = original_resource.name
if original_resource.cluster_arn != Cluster(cluster_name, region_name).arn:
# TODO: LoadBalancers
# TODO: Role
ecs_backend.delete_service(cluster_name, service_name)
new_service_name = "{0}Service{1}".format(
cluster_name, int(random() * 10 ** 6)
)
return ecs_backend.create_service(
cluster_name, new_service_name, task_definition, desired_count
)
else:
return ecs_backend.update_service(
cluster_name, service_name, task_definition, desired_count
)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Name":
return self.name
raise UnformattedGetAttTemplateException()
class ContainerInstance(BaseObject):
def __init__(self, ec2_instance_id, region_name):
self.ec2_instance_id = ec2_instance_id
self.agent_connected = True
self.status = "ACTIVE"
self.registered_resources = [
{
"doubleValue": 0.0,
"integerValue": 4096,
"longValue": 0,
"name": "CPU",
"type": "INTEGER",
},
{
"doubleValue": 0.0,
"integerValue": 7482,
"longValue": 0,
"name": "MEMORY",
"type": "INTEGER",
},
{
"doubleValue": 0.0,
"integerValue": 0,
"longValue": 0,
"name": "PORTS",
"stringSetValue": ["22", "2376", "2375", "51678", "51679"],
"type": "STRINGSET",
},
{
"doubleValue": 0.0,
"integerValue": 0,
"longValue": 0,
"name": "PORTS_UDP",
"stringSetValue": [],
"type": "STRINGSET",
},
]
self.container_instance_arn = "arn:aws:ecs:{0}:012345678910:container-instance/{1}".format(
region_name, str(uuid.uuid4())
)
self.pending_tasks_count = 0
self.remaining_resources = [
{
"doubleValue": 0.0,
"integerValue": 4096,
"longValue": 0,
"name": "CPU",
"type": "INTEGER",
},
{
"doubleValue": 0.0,
"integerValue": 7482,
"longValue": 0,
"name": "MEMORY",
"type": "INTEGER",
},
{
"doubleValue": 0.0,
"integerValue": 0,
"longValue": 0,
"name": "PORTS",
"stringSetValue": ["22", "2376", "2375", "51678", "51679"],
"type": "STRINGSET",
},
{
"doubleValue": 0.0,
"integerValue": 0,
"longValue": 0,
"name": "PORTS_UDP",
"stringSetValue": [],
"type": "STRINGSET",
},
]
self.running_tasks_count = 0
self.version_info = {
"agentVersion": "1.0.0",
"agentHash": "4023248",
"dockerVersion": "DockerVersion: 1.5.0",
}
ec2_backend = ec2_backends[region_name]
ec2_instance = ec2_backend.get_instance(ec2_instance_id)
self.attributes = {
"ecs.ami-id": ec2_instance.image_id,
"ecs.availability-zone": ec2_instance.placement,
"ecs.instance-type": ec2_instance.instance_type,
"ecs.os-type": ec2_instance.platform
if ec2_instance.platform == "windows"
else "linux", # options are windows and linux, linux is default
}
@property
def response_object(self):
response_object = self.gen_response_object()
response_object["attributes"] = [
self._format_attribute(name, value)
for name, value in response_object["attributes"].items()
]
return response_object
def _format_attribute(self, name, value):
formatted_attr = {"name": name}
if value is not None:
formatted_attr["value"] = value
return formatted_attr
class ClusterFailure(BaseObject):
def __init__(self, reason, cluster_name, region_name):
self.reason = reason
self.arn = "arn:aws:ecs:{0}:012345678910:cluster/{1}".format(
region_name, cluster_name
)
@property
def response_object(self):
response_object = self.gen_response_object()
response_object["reason"] = self.reason
response_object["arn"] = self.arn
return response_object
class ContainerInstanceFailure(BaseObject):
def __init__(self, reason, container_instance_id, region_name):
self.reason = reason
self.arn = "arn:aws:ecs:{0}:012345678910:container-instance/{1}".format(
region_name, container_instance_id
)
@property
def response_object(self):
response_object = self.gen_response_object()
response_object["reason"] = self.reason
response_object["arn"] = self.arn
return response_object
class EC2ContainerServiceBackend(BaseBackend):
def __init__(self, region_name):
super(EC2ContainerServiceBackend, self).__init__()
self.clusters = {}
self.task_definitions = {}
self.tasks = {}
self.services = {}
self.container_instances = {}
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def describe_task_definition(self, task_definition_str):
task_definition_name = task_definition_str.split("/")[-1]
if ":" in task_definition_name:
family, revision = task_definition_name.split(":")
revision = int(revision)
else:
family = task_definition_name
revision = self._get_last_task_definition_revision_id(family)
if (
family in self.task_definitions
and revision in self.task_definitions[family]
):
return self.task_definitions[family][revision]
else:
raise Exception("{0} is not a task_definition".format(task_definition_name))
def create_cluster(self, cluster_name):
cluster = Cluster(cluster_name, self.region_name)
self.clusters[cluster_name] = cluster
return cluster
def list_clusters(self):
"""
maxSize and pagination not implemented
"""
return [cluster.arn for cluster in self.clusters.values()]
def describe_clusters(self, list_clusters_name=None):
list_clusters = []
failures = []
if list_clusters_name is None:
if "default" in self.clusters:
list_clusters.append(self.clusters["default"].response_object)
else:
for cluster in list_clusters_name:
cluster_name = cluster.split("/")[-1]
if cluster_name in self.clusters:
list_clusters.append(self.clusters[cluster_name].response_object)
else:
failures.append(
ClusterFailure("MISSING", cluster_name, self.region_name)
)
return list_clusters, failures
def delete_cluster(self, cluster_str):
cluster_name = cluster_str.split("/")[-1]
if cluster_name in self.clusters:
return self.clusters.pop(cluster_name)
else:
raise Exception("{0} is not a cluster".format(cluster_name))
def register_task_definition(
self,
family,
container_definitions,
volumes=None,
network_mode=None,
tags=None,
placement_constraints=None,
):
if family in self.task_definitions:
last_id = self._get_last_task_definition_revision_id(family)
revision = (last_id or 0) + 1
else:
self.task_definitions[family] = {}
revision = 1
task_definition = TaskDefinition(
family,
revision,
container_definitions,
self.region_name,
volumes=volumes,
network_mode=network_mode,
tags=tags,
placement_constraints=placement_constraints,
)
self.task_definitions[family][revision] = task_definition
return task_definition
def list_task_definitions(self, family_prefix):
task_arns = []
for task_definition_list in self.task_definitions.values():
task_arns.extend(
[
task_definition.arn
for task_definition in task_definition_list.values()
if family_prefix is None or task_definition.family == family_prefix
]
)
return task_arns
def deregister_task_definition(self, task_definition_str):
task_definition_name = task_definition_str.split("/")[-1]
family, revision = task_definition_name.split(":")
revision = int(revision)
if (
family in self.task_definitions
and revision in self.task_definitions[family]
):
return self.task_definitions[family].pop(revision)
else:
raise Exception("{0} is not a task_definition".format(task_definition_name))
def run_task(self, cluster_str, task_definition_str, count, overrides, started_by):
if cluster_str:
cluster_name = cluster_str.split("/")[-1]
else:
cluster_name = "default"
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
task_definition = self.describe_task_definition(task_definition_str)
if cluster_name not in self.tasks:
self.tasks[cluster_name] = {}
tasks = []
container_instances = list(
self.container_instances.get(cluster_name, {}).keys()
)
if not container_instances:
raise Exception("No instances found in cluster {}".format(cluster_name))
active_container_instances = [
x
for x in container_instances
if self.container_instances[cluster_name][x].status == "ACTIVE"
]
resource_requirements = self._calculate_task_resource_requirements(
task_definition
)
# TODO: return event about unable to place task if not able to place enough tasks to meet count
placed_count = 0
for container_instance in active_container_instances:
container_instance = self.container_instances[cluster_name][
container_instance
]
container_instance_arn = container_instance.container_instance_arn
try_to_place = True
while try_to_place:
can_be_placed, message = self._can_be_placed(
container_instance, resource_requirements
)
if can_be_placed:
task = Task(
cluster,
task_definition,
container_instance_arn,
resource_requirements,
overrides or {},
started_by or "",
)
self.update_container_instance_resources(
container_instance, resource_requirements
)
tasks.append(task)
self.tasks[cluster_name][task.task_arn] = task
placed_count += 1
if placed_count == count:
return tasks
else:
try_to_place = False
return tasks
@staticmethod
def _calculate_task_resource_requirements(task_definition):
resource_requirements = {"CPU": 0, "MEMORY": 0, "PORTS": [], "PORTS_UDP": []}
for container_definition in task_definition.container_definitions:
# cloudformation uses capitalized properties, while boto uses all lower case
# CPU is optional
resource_requirements["CPU"] += container_definition.get(
"cpu", container_definition.get("Cpu", 0)
)
# either memory or memory reservation must be provided
if (
"Memory" in container_definition
or "MemoryReservation" in container_definition
):
resource_requirements["MEMORY"] += container_definition.get(
"Memory", container_definition.get("MemoryReservation")
)
else:
resource_requirements["MEMORY"] += container_definition.get(
"memory", container_definition.get("memoryReservation")
)
port_mapping_key = (
"PortMappings"
if "PortMappings" in container_definition
else "portMappings"
)
for port_mapping in container_definition.get(port_mapping_key, []):
if "hostPort" in port_mapping:
resource_requirements["PORTS"].append(port_mapping.get("hostPort"))
elif "HostPort" in port_mapping:
resource_requirements["PORTS"].append(port_mapping.get("HostPort"))
return resource_requirements
@staticmethod
def _can_be_placed(container_instance, task_resource_requirements):
"""
:param container_instance: The container instance trying to be placed onto
:param task_resource_requirements: The calculated resource requirements of the task in the form of a dict
        :return: A boolean stating whether the given container instance has enough resources for the task to be placed
        on it, together with a message; if the task cannot be placed, the message explains why.
"""
# TODO: Implement default and other placement strategies as well as constraints:
# docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement.html
remaining_cpu = 0
remaining_memory = 0
reserved_ports = []
for resource in container_instance.remaining_resources:
if resource.get("name") == "CPU":
remaining_cpu = resource.get("integerValue")
elif resource.get("name") == "MEMORY":
remaining_memory = resource.get("integerValue")
elif resource.get("name") == "PORTS":
reserved_ports = resource.get("stringSetValue")
if task_resource_requirements.get("CPU") > remaining_cpu:
return False, "Not enough CPU credits"
if task_resource_requirements.get("MEMORY") > remaining_memory:
return False, "Not enough memory"
ports_needed = task_resource_requirements.get("PORTS")
for port in ports_needed:
if str(port) in reserved_ports:
return False, "Port clash"
return True, "Can be placed"
def start_task(
self,
cluster_str,
task_definition_str,
container_instances,
overrides,
started_by,
):
cluster_name = cluster_str.split("/")[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
task_definition = self.describe_task_definition(task_definition_str)
if cluster_name not in self.tasks:
self.tasks[cluster_name] = {}
tasks = []
if not container_instances:
raise Exception("No container instance list provided")
container_instance_ids = [x.split("/")[-1] for x in container_instances]
resource_requirements = self._calculate_task_resource_requirements(
task_definition
)
for container_instance_id in container_instance_ids:
container_instance = self.container_instances[cluster_name][
container_instance_id
]
task = Task(
cluster,
task_definition,
container_instance.container_instance_arn,
resource_requirements,
overrides or {},
started_by or "",
)
tasks.append(task)
self.update_container_instance_resources(
container_instance, resource_requirements
)
self.tasks[cluster_name][task.task_arn] = task
return tasks
def describe_tasks(self, cluster_str, tasks):
cluster_name = cluster_str.split("/")[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
if not tasks:
raise Exception("tasks cannot be empty")
response = []
for cluster, cluster_tasks in self.tasks.items():
for task_arn, task in cluster_tasks.items():
task_id = task_arn.split("/")[-1]
if (
task_arn in tasks
or task.task_arn in tasks
or any(task_id in task for task in tasks)
):
response.append(task)
return response
def list_tasks(
self,
cluster_str,
container_instance,
family,
started_by,
service_name,
desiredStatus,
):
filtered_tasks = []
for cluster, tasks in self.tasks.items():
for arn, task in tasks.items():
filtered_tasks.append(task)
if cluster_str:
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
filtered_tasks = list(
filter(lambda t: cluster_name in t.cluster_arn, filtered_tasks)
)
if container_instance:
filtered_tasks = list(
filter(
lambda t: container_instance in t.container_instance_arn,
filtered_tasks,
)
)
if started_by:
filtered_tasks = list(
filter(lambda t: started_by == t.started_by, filtered_tasks)
)
return [t.task_arn for t in filtered_tasks]
def stop_task(self, cluster_str, task_str, reason):
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
if not task_str:
raise Exception("A task ID or ARN is required")
task_id = task_str.split("/")[-1]
tasks = self.tasks.get(cluster_name, None)
if not tasks:
raise Exception("Cluster {} has no registered tasks".format(cluster_name))
for task in tasks.keys():
if task.endswith(task_id):
container_instance_arn = tasks[task].container_instance_arn
container_instance = self.container_instances[cluster_name][
container_instance_arn.split("/")[-1]
]
self.update_container_instance_resources(
container_instance, tasks[task].resource_requirements, removing=True
)
tasks[task].last_status = "STOPPED"
tasks[task].desired_status = "STOPPED"
tasks[task].stopped_reason = reason
return tasks[task]
raise Exception(
"Could not find task {} on cluster {}".format(task_str, cluster_name)
)
def create_service(
self,
cluster_str,
service_name,
task_definition_str,
desired_count,
load_balancers=None,
scheduling_strategy=None,
tags=None,
):
cluster_name = cluster_str.split("/")[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
task_definition = self.describe_task_definition(task_definition_str)
desired_count = desired_count if desired_count is not None else 0
service = Service(
cluster,
service_name,
task_definition,
desired_count,
load_balancers,
scheduling_strategy,
tags,
)
cluster_service_pair = "{0}:{1}".format(cluster_name, service_name)
self.services[cluster_service_pair] = service
return service
def list_services(self, cluster_str, scheduling_strategy=None):
cluster_name = cluster_str.split("/")[-1]
service_arns = []
for key, value in self.services.items():
if cluster_name + ":" in key:
service = self.services[key]
if (
scheduling_strategy is None
or service.scheduling_strategy == scheduling_strategy
):
service_arns.append(service.arn)
return sorted(service_arns)
def describe_services(self, cluster_str, service_names_or_arns):
cluster_name = cluster_str.split("/")[-1]
result = []
for existing_service_name, existing_service_obj in sorted(
self.services.items()
):
for requested_name_or_arn in service_names_or_arns:
cluster_service_pair = "{0}:{1}".format(
cluster_name, requested_name_or_arn
)
if (
cluster_service_pair == existing_service_name
or existing_service_obj.arn == requested_name_or_arn
):
result.append(existing_service_obj)
return result
def update_service(
self, cluster_str, service_name, task_definition_str, desired_count
):
cluster_name = cluster_str.split("/")[-1]
cluster_service_pair = "{0}:{1}".format(cluster_name, service_name)
if cluster_service_pair in self.services:
if task_definition_str is not None:
self.describe_task_definition(task_definition_str)
self.services[
cluster_service_pair
].task_definition = task_definition_str
if desired_count is not None:
self.services[cluster_service_pair].desired_count = desired_count
return self.services[cluster_service_pair]
else:
raise ServiceNotFoundException(service_name)
def delete_service(self, cluster_name, service_name):
cluster_service_pair = "{0}:{1}".format(cluster_name, service_name)
if cluster_service_pair in self.services:
service = self.services[cluster_service_pair]
if service.desired_count > 0:
raise Exception("Service must have desiredCount=0")
else:
return self.services.pop(cluster_service_pair)
else:
raise Exception(
"cluster {0} or service {1} does not exist".format(
cluster_name, service_name
)
)
def register_container_instance(self, cluster_str, ec2_instance_id):
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
container_instance = ContainerInstance(ec2_instance_id, self.region_name)
if not self.container_instances.get(cluster_name):
self.container_instances[cluster_name] = {}
container_instance_id = container_instance.container_instance_arn.split("/")[-1]
self.container_instances[cluster_name][
container_instance_id
] = container_instance
self.clusters[cluster_name].registered_container_instances_count += 1
return container_instance
def list_container_instances(self, cluster_str):
cluster_name = cluster_str.split("/")[-1]
container_instances_values = self.container_instances.get(
cluster_name, {}
).values()
container_instances = [
ci.container_instance_arn for ci in container_instances_values
]
return sorted(container_instances)
def describe_container_instances(self, cluster_str, list_container_instance_ids):
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
if not list_container_instance_ids:
raise JsonRESTError(
"InvalidParameterException", "Container instance cannot be empty"
)
failures = []
container_instance_objects = []
for container_instance_id in list_container_instance_ids:
container_instance_id = container_instance_id.split("/")[-1]
container_instance = self.container_instances[cluster_name].get(
container_instance_id, None
)
if container_instance is not None:
container_instance_objects.append(container_instance)
else:
failures.append(
ContainerInstanceFailure(
"MISSING", container_instance_id, self.region_name
)
)
return container_instance_objects, failures
def update_container_instances_state(
self, cluster_str, list_container_instance_ids, status
):
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
status = status.upper()
if status not in ["ACTIVE", "DRAINING"]:
raise Exception(
"An error occurred (InvalidParameterException) when calling the UpdateContainerInstancesState operation: \
Container instances status should be one of [ACTIVE,DRAINING]"
)
failures = []
container_instance_objects = []
list_container_instance_ids = [
x.split("/")[-1] for x in list_container_instance_ids
]
for container_instance_id in list_container_instance_ids:
container_instance = self.container_instances[cluster_name].get(
container_instance_id, None
)
if container_instance is not None:
container_instance.status = status
container_instance_objects.append(container_instance)
else:
failures.append(
ContainerInstanceFailure(
"MISSING", container_instance_id, self.region_name
)
)
return container_instance_objects, failures
def update_container_instance_resources(
self, container_instance, task_resources, removing=False
):
resource_multiplier = 1
if removing:
resource_multiplier = -1
for resource in container_instance.remaining_resources:
if resource.get("name") == "CPU":
resource["integerValue"] -= (
task_resources.get("CPU") * resource_multiplier
)
elif resource.get("name") == "MEMORY":
resource["integerValue"] -= (
task_resources.get("MEMORY") * resource_multiplier
)
elif resource.get("name") == "PORTS":
for port in task_resources.get("PORTS"):
if removing:
resource["stringSetValue"].remove(str(port))
else:
resource["stringSetValue"].append(str(port))
container_instance.running_tasks_count += resource_multiplier * 1
def deregister_container_instance(self, cluster_str, container_instance_str, force):
failures = []
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
container_instance_id = container_instance_str.split("/")[-1]
container_instance = self.container_instances[cluster_name].get(
container_instance_id
)
if container_instance is None:
raise Exception("{0} is not a container id in the cluster")
if not force and container_instance.running_tasks_count > 0:
raise Exception("Found running tasks on the instance.")
# Currently assume that people might want to do something based around deregistered instances
# with tasks left running on them - but nothing if no tasks were running already
elif force and container_instance.running_tasks_count > 0:
if not self.container_instances.get("orphaned"):
self.container_instances["orphaned"] = {}
self.container_instances["orphaned"][
container_instance_id
] = container_instance
del self.container_instances[cluster_name][container_instance_id]
self._respond_to_cluster_state_update(cluster_str)
return container_instance, failures
def _respond_to_cluster_state_update(self, cluster_str):
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
pass
def put_attributes(self, cluster_name, attributes=None):
if cluster_name is None or cluster_name not in self.clusters:
raise JsonRESTError(
"ClusterNotFoundException", "Cluster not found", status=400
)
if attributes is None:
raise JsonRESTError(
"InvalidParameterException", "attributes value is required"
)
for attr in attributes:
self._put_attribute(
cluster_name,
attr["name"],
attr.get("value"),
attr.get("targetId"),
attr.get("targetType"),
)
def _put_attribute(
self, cluster_name, name, value=None, target_id=None, target_type=None
):
if target_id is None and target_type is None:
for instance in self.container_instances[cluster_name].values():
instance.attributes[name] = value
elif target_type is None:
# targetId is full container instance arn
try:
arn = target_id.rsplit("/", 1)[-1]
self.container_instances[cluster_name][arn].attributes[name] = value
except KeyError:
raise JsonRESTError(
"TargetNotFoundException", "Could not find {0}".format(target_id)
)
else:
# targetId is container uuid, targetType must be container-instance
try:
if target_type != "container-instance":
raise JsonRESTError(
"TargetNotFoundException",
"Could not find {0}".format(target_id),
)
self.container_instances[cluster_name][target_id].attributes[
name
] = value
except KeyError:
raise JsonRESTError(
"TargetNotFoundException", "Could not find {0}".format(target_id)
)
def list_attributes(
self,
target_type,
cluster_name=None,
attr_name=None,
attr_value=None,
max_results=None,
next_token=None,
):
if target_type != "container-instance":
raise JsonRESTError(
"InvalidParameterException", "targetType must be container-instance"
)
filters = [lambda x: True]
# item will be {0 cluster_name, 1 arn, 2 name, 3 value}
if cluster_name is not None:
filters.append(lambda item: item[0] == cluster_name)
        if attr_name:
            filters.append(lambda item: item[2] == attr_name)
        if attr_value:
            filters.append(lambda item: item[3] == attr_value)
all_attrs = []
for cluster_name, cobj in self.container_instances.items():
for container_instance in cobj.values():
for key, value in container_instance.attributes.items():
all_attrs.append(
(
cluster_name,
container_instance.container_instance_arn,
key,
value,
)
)
return filter(lambda x: all(f(x) for f in filters), all_attrs)
def delete_attributes(self, cluster_name, attributes=None):
if cluster_name is None or cluster_name not in self.clusters:
raise JsonRESTError(
"ClusterNotFoundException", "Cluster not found", status=400
)
if attributes is None:
raise JsonRESTError(
"InvalidParameterException", "attributes value is required"
)
for attr in attributes:
self._delete_attribute(
cluster_name,
attr["name"],
attr.get("value"),
attr.get("targetId"),
attr.get("targetType"),
)
def _delete_attribute(
self, cluster_name, name, value=None, target_id=None, target_type=None
):
if target_id is None and target_type is None:
for instance in self.container_instances[cluster_name].values():
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
elif target_type is None:
# targetId is full container instance arn
try:
arn = target_id.rsplit("/", 1)[-1]
instance = self.container_instances[cluster_name][arn]
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
except KeyError:
raise JsonRESTError(
"TargetNotFoundException", "Could not find {0}".format(target_id)
)
else:
# targetId is container uuid, targetType must be container-instance
try:
if target_type != "container-instance":
raise JsonRESTError(
"TargetNotFoundException",
"Could not find {0}".format(target_id),
)
instance = self.container_instances[cluster_name][target_id]
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
except KeyError:
raise JsonRESTError(
"TargetNotFoundException", "Could not find {0}".format(target_id)
)
def list_task_definition_families(
self, family_prefix=None, status=None, max_results=None, next_token=None
):
for task_fam in self.task_definitions:
if family_prefix is not None and not task_fam.startswith(family_prefix):
continue
yield task_fam
@staticmethod
def _parse_resource_arn(resource_arn):
match = re.match(
"^arn:aws:ecs:(?P<region>[^:]+):(?P<account_id>[^:]+):(?P<service>[^:]+)/(?P<id>.*)$",
resource_arn,
)
if not match:
raise JsonRESTError(
"InvalidParameterException", "The ARN provided is invalid."
)
return match.groupdict()
def list_tags_for_resource(self, resource_arn):
"""Currently implemented only for task definitions and services"""
parsed_arn = self._parse_resource_arn(resource_arn)
if parsed_arn["service"] == "task-definition":
for task_definition in self.task_definitions.values():
for revision in task_definition.values():
if revision.arn == resource_arn:
return revision.tags
else:
raise TaskDefinitionNotFoundException()
elif parsed_arn["service"] == "service":
for service in self.services.values():
if service.arn == resource_arn:
return service.tags
else:
raise ServiceNotFoundException(service_name=parsed_arn["id"])
raise NotImplementedError()
def _get_last_task_definition_revision_id(self, family):
definitions = self.task_definitions.get(family, {})
if definitions:
return max(definitions.keys())
def tag_resource(self, resource_arn, tags):
"""Currently implemented only for services"""
parsed_arn = self._parse_resource_arn(resource_arn)
if parsed_arn["service"] == "service":
for service in self.services.values():
if service.arn == resource_arn:
service.tags = self._merge_tags(service.tags, tags)
return {}
else:
raise ServiceNotFoundException(service_name=parsed_arn["id"])
raise NotImplementedError()
def _merge_tags(self, existing_tags, new_tags):
merged_tags = new_tags
new_keys = self._get_keys(new_tags)
for existing_tag in existing_tags:
if existing_tag["key"] not in new_keys:
merged_tags.append(existing_tag)
return merged_tags
@staticmethod
def _get_keys(tags):
return [tag["key"] for tag in tags]
def untag_resource(self, resource_arn, tag_keys):
"""Currently implemented only for services"""
parsed_arn = self._parse_resource_arn(resource_arn)
if parsed_arn["service"] == "service":
for service in self.services.values():
if service.arn == resource_arn:
service.tags = [
tag for tag in service.tags if tag["key"] not in tag_keys
]
return {}
else:
raise ServiceNotFoundException(service_name=parsed_arn["id"])
raise NotImplementedError()
ecs_backends = {}
for region in Session().get_available_regions("ecs"):
ecs_backends[region] = EC2ContainerServiceBackend(region)
for region in Session().get_available_regions("ecs", partition_name="aws-us-gov"):
ecs_backends[region] = EC2ContainerServiceBackend(region)
for region in Session().get_available_regions("ecs", partition_name="aws-cn"):
ecs_backends[region] = EC2ContainerServiceBackend(region)
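# Illustrative usage sketch (not part of moto): drives the in-memory backend
# defined above directly. The cluster name and container definition fields are
# made-up values for demonstration only.
if __name__ == "__main__":
    backend = EC2ContainerServiceBackend("us-east-1")
    backend.create_cluster("demo-cluster")
    backend.register_task_definition(
        family="demo-family",
        container_definitions=[{"name": "web", "cpu": 64, "memory": 128}],
    )
    print(backend.list_clusters())
    print(backend.list_task_definitions(family_prefix="demo-family"))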
|
the-stack_0_3627 | """
AES IGE implementation in Python.
If available, tgcrypto will be used instead, otherwise
if available, cryptg will be used instead, otherwise
if available, libssl will be used instead, otherwise
the Python implementation will be used.
"""
import os
import pyaes
import logging
from . import libssl
__log__ = logging.getLogger(__name__)
try:
import tgcrypto
__log__.debug('tgcrypto detected, it will be used for encryption')
except ImportError:
tgcrypto = None
try:
import cryptg
__log__.debug('cryptg detected, it will be used for encryption')
except ImportError:
cryptg = None
if libssl.encrypt_ige and libssl.decrypt_ige:
__log__.debug('libssl detected, it will be used for encryption')
else:
__log__.debug('tgcrypto or cryptg modules not installed and libssl not found, '
'falling back to (slower) Python encryption')
class AES:
"""
    Class that serves as an interface to encrypt and decrypt
text through the AES IGE mode.
"""
@staticmethod
def decrypt_ige(cipher_text, key, iv):
"""
        Decrypts the given text in 16-byte blocks by using the
        given key and 32-byte initialization vector.
"""
if tgcrypto:
return tgcrypto.ige256_decrypt(cipher_text, key, iv)
if cryptg:
return cryptg.decrypt_ige(cipher_text, key, iv)
if libssl.decrypt_ige:
return libssl.decrypt_ige(cipher_text, key, iv)
iv1 = iv[:len(iv) // 2]
iv2 = iv[len(iv) // 2:]
aes = pyaes.AES(key)
plain_text = []
blocks_count = len(cipher_text) // 16
cipher_text_block = [0] * 16
for block_index in range(blocks_count):
for i in range(16):
cipher_text_block[i] = \
cipher_text[block_index * 16 + i] ^ iv2[i]
plain_text_block = aes.decrypt(cipher_text_block)
for i in range(16):
plain_text_block[i] ^= iv1[i]
iv1 = cipher_text[block_index * 16:block_index * 16 + 16]
iv2 = plain_text_block
plain_text.extend(plain_text_block)
return bytes(plain_text)
@staticmethod
def encrypt_ige(plain_text, key, iv):
"""
        Encrypts the given text in 16-byte blocks by using the
        given key and 32-byte initialization vector.
"""
padding = len(plain_text) % 16
if padding:
plain_text += os.urandom(16 - padding)
if tgcrypto:
return tgcrypto.ige256_encrypt(plain_text, key, iv)
if cryptg:
return cryptg.encrypt_ige(plain_text, key, iv)
if libssl.encrypt_ige:
return libssl.encrypt_ige(plain_text, key, iv)
iv1 = iv[:len(iv) // 2]
iv2 = iv[len(iv) // 2:]
aes = pyaes.AES(key)
cipher_text = []
blocks_count = len(plain_text) // 16
for block_index in range(blocks_count):
plain_text_block = list(
plain_text[block_index * 16:block_index * 16 + 16]
)
for i in range(16):
plain_text_block[i] ^= iv1[i]
cipher_text_block = aes.encrypt(plain_text_block)
for i in range(16):
cipher_text_block[i] ^= iv2[i]
iv1 = cipher_text_block
iv2 = plain_text[block_index * 16:block_index * 16 + 16]
cipher_text.extend(cipher_text_block)
return bytes(cipher_text)
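# Illustrative round-trip sketch (not part of the library): encrypts and then
# decrypts a 16-byte-aligned payload with a random 32-byte key and 32-byte IV,
# using whichever backend (tgcrypto/cryptg/libssl/pyaes) was detected above.
if __name__ == "__main__":
    _key = os.urandom(32)
    _iv = os.urandom(32)
    _plain = b"sixteen byte msg" * 4  # multiple of 16 bytes, so no padding is added
    _cipher = AES.encrypt_ige(_plain, _key, _iv)
    assert AES.decrypt_ige(_cipher, _key, _iv) == _plain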
|
the-stack_0_3628 | """edsr_slim.py"""
# import mindspore
from src import common
# from src.edsr_model import Upsampler, default_conv
import mindspore.nn as nn
import mindspore.ops as ops
# import mindspore.ops.operations as P
from mindspore import Tensor
class EDSR(nn.Cell):
"""[EDSR]
Args:
nn ([type]): [description]
"""
def __init__(self, args):
super(EDSR, self).__init__()
self.n_colors = args.n_colors
n_resblocks = args.n_resblocks
self.n_feats = args.n_feats
self.kernel_size = 3
scale = args.scale[0]
act = nn.ReLU()
self.rgb_range = args.rgb_range
# self.head = nn.Conv2d(in_channels=args.n_colors, out_channels=self.n_feats, kernel_size=self.kernel_size, pad_mode='pad', padding=self.kernel_size // 2, has_bias=True)
self.head = common.conv(args.n_colors, self.n_feats, self.kernel_size, padding=self.kernel_size//2)
m_body = [
common.ResidualBlock(
self.n_feats, self.kernel_size, act=act, res_scale=args.res_scale
) for _ in range(n_resblocks)
]
self.body = nn.CellList(m_body)
        # self.body = m_body  # if this plain list were used instead, the body parameters would not be trained
self.body_conv = common.conv(self.n_feats, self.n_feats, self.kernel_size, padding=self.kernel_size//2)
self.upsampler = common.Upsampler(scale, self.n_feats)
self.tail_conv = common.conv(self.n_feats, args.n_colors, self.kernel_size, padding=self.kernel_size//2)
def construct(self, x, width_mult=Tensor(1.0)):
"""construct"""
# def construct(self, x, width_mult):
width_mult = width_mult.asnumpy().item()
feature_width = int(self.n_feats * width_mult)
conv2d = ops.Conv2D(out_channel=feature_width, kernel_size=self.kernel_size, mode=1, pad_mode='pad',
pad=self.kernel_size // 2)
biasadd = ops.BiasAdd()
x = common.mean_shift(x, self.rgb_range)
        # slice the full-width head weights down to the active channel count
        # (this was originally written as weight.clone()[...])
        weight = self.head.weight[:feature_width, :self.n_colors, :, :]
        bias = self.head.bias[:feature_width]
x = conv2d(x, weight)
x = biasadd(x, bias)
residual = x
for block in self.body:
residual = block(residual, width_mult)
weight = self.body_conv.weight[:feature_width, :feature_width, :, :]
bias = self.body_conv.bias[:feature_width]
residual = conv2d(residual, weight)
residual = biasadd(residual, bias)
residual += x
x = self.upsampler(residual, width_mult)
weight = self.tail_conv.weight[:self.n_colors, :feature_width, :, :]
bias = self.tail_conv.bias[:self.n_colors]
conv2d = ops.Conv2D(out_channel=self.n_colors, kernel_size=self.kernel_size,
mode=1, pad_mode='pad', pad=self.kernel_size//2)
x = conv2d(x, weight)
x = biasadd(x, bias)
x = common.mean_shift(x, self.rgb_range, sign=1)
return x
|
the-stack_0_3629 | #!/usr/bin/env python3
# coding: UTF-8
from configparser import ConfigParser
from contextlib import contextmanager
import os
import datetime
from os.path import abspath, basename, exists, dirname, join, isdir, expanduser
import platform
import sys
import subprocess
import time
import logging
import logging.config
import click
import termcolor
import colorlog
import pymysql
import telnetlib
logger = logging.getLogger('.utils')
DEBUG_ENABLED = os.environ.get('SEAFILE_DOCKER_VERBOSE', '').lower() in ('true', '1', 'yes')
def eprint(*a, **kw):
kw['file'] = sys.stderr
print(*a, **kw)
def identity(msg, *a, **kw):
return msg
colored = identity if not os.isatty(sys.stdin.fileno()) else termcolor.colored
red = lambda s: colored(s, 'red')
green = lambda s: colored(s, 'green')
def underlined(msg):
return '\x1b[4m{}\x1b[0m'.format(msg)
def sudo(*a, **kw):
call('sudo ' + a[0], *a[1:], **kw)
def _find_flag(args, *opts, **kw):
is_flag = kw.get('is_flag', False)
if is_flag:
return any([opt in args for opt in opts])
else:
for opt in opts:
try:
return args[args.index(opt) + 1]
except ValueError:
pass
def call(*a, **kw):
dry_run = kw.pop('dry_run', False)
quiet = kw.pop('quiet', DEBUG_ENABLED)
cwd = kw.get('cwd', os.getcwd())
check_call = kw.pop('check_call', True)
reduct_args = kw.pop('reduct_args', [])
if not quiet:
toprint = a[0]
args = [x.strip('"') for x in a[0].split() if '=' not in x]
for arg in reduct_args:
value = _find_flag(args, arg)
toprint = toprint.replace(value, '{}**reducted**'.format(value[:3]))
logdbg('calling: ' + green(toprint))
logdbg('cwd: ' + green(cwd))
kw.setdefault('shell', True)
if not dry_run:
if check_call:
return subprocess.check_call(*a, **kw)
else:
return subprocess.Popen(*a, **kw).wait()
@contextmanager
def cd(path):
path = expanduser(path)
olddir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(olddir)
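# Example: run a command from another directory and return to the original one
# automatically (the path is illustrative):
#   with cd('/tmp'):
#       call('ls')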
def must_makedir(p):
p = expanduser(p)
if not exists(p):
logger.info('created folder %s', p)
os.makedirs(p)
else:
logger.debug('folder %s already exists', p)
def setup_colorlog():
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
'colored': {
'()': 'colorlog.ColoredFormatter',
'format': "%(log_color)s[%(asctime)s]%(reset)s %(blue)s%(message)s",
'datefmt': '%m/%d/%Y %H:%M:%S',
},
},
'handlers': {
'default': {
'level': 'INFO',
'formatter': 'colored',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
},
'django.request': {
'handlers': ['default'],
'level': 'WARN',
'propagate': False
},
}
})
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARNING)
def setup_logging(level=logging.INFO):
kw = {
'format': '[%(asctime)s][%(module)s]: %(message)s',
'datefmt': '%m/%d/%Y %H:%M:%S',
'level': level,
'stream': sys.stdout
}
logging.basicConfig(**kw)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARNING)
def get_process_cmd(pid, env=False):
env = 'e' if env else ''
try:
return subprocess.check_output('ps {} -o command {}'.format(env, pid),
shell=True).decode('utf8').strip().splitlines()[1]
    except Exception:
        return None
def get_match_pids(pattern):
pgrep_output = subprocess.check_output(
'pgrep -f "{}" || true'.format(pattern),
shell=True).decode('utf8').strip()
return [int(pid) for pid in pgrep_output.splitlines()]
def ask_for_confirm(msg):
confirm = click.prompt(msg, default='Y')
return confirm.lower() in ('y', 'yes')
def confirm_command_to_run(cmd):
if ask_for_confirm('Run the command: {} ?'.format(green(cmd))):
call(cmd)
else:
sys.exit(1)
def git_current_commit():
return get_command_output('git rev-parse --short HEAD').strip()
def get_command_output(cmd):
shell = not isinstance(cmd, list)
return subprocess.check_output(cmd, shell=shell).decode('utf8')
def ask_yes_or_no(msg, prompt='', default=None):
print('\n' + msg + '\n')
while True:
answer = input(prompt + ' [yes/no] ').lower()
if not answer:
continue
if answer not in ('yes', 'no', 'y', 'n'):
continue
if answer in ('yes', 'y'):
return True
else:
return False
def git_branch_exists(branch):
return call('git rev-parse --short --verify {}'.format(branch)) == 0
def to_unicode(s):
    if isinstance(s, bytes):
        return s.decode('utf-8')
    else:
        return s
def to_utf8(s):
    if isinstance(s, str):
        return s.encode('utf-8')
    else:
        return s
def git_commit_time(refspec):
return int(get_command_output('git log -1 --format="%ct" {}'.format(
refspec)).strip())
def get_seafile_version():
return os.environ['SEAFILE_VERSION']
def get_install_dir():
return join('/opt/seafile/' + get_conf('SEAFILE_SERVER', 'seafile-server') + '-{}'.format(get_seafile_version()))
def get_script(script):
return join(get_install_dir(), script)
_config = None
def get_conf(key, default=None):
key = key.upper()
return os.environ.get(key, default)
def _add_default_context(context):
default_context = {
'current_timestr': datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S'),
}
for k in default_context:
context.setdefault(k, default_context[k])
def render_template(template, target, context):
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(dirname(template)))
_add_default_context(context)
content = env.get_template(basename(template)).render(**context)
with open(target, 'w') as fp:
fp.write(content)
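# Example (template path, target path and context keys are illustrative only):
#   render_template('/templates/nginx.conf.template',
#                   '/etc/nginx/nginx.conf',
#                   {'https': True, 'domain': 'seafile.example.com'})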
def logdbg(msg):
if DEBUG_ENABLED:
msg = '[debug] ' + msg
loginfo(msg)
def loginfo(msg):
msg = '[{}] {}'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), green(msg))
eprint(msg)
def cert_has_valid_days(cert, days):
assert exists(cert)
secs = 86400 * int(days)
retcode = call('openssl x509 -checkend {} -noout -in {}'.format(secs, cert), check_call=False)
return retcode == 0
def get_version_stamp_file():
return '/shared/seafile/seafile-data/current_version'
def read_version_stamp(fn=get_version_stamp_file()):
assert exists(fn), 'version stamp file {} does not exist!'.format(fn)
with open(fn, 'r') as fp:
return fp.read().strip()
def update_version_stamp(version, fn=get_version_stamp_file()):
with open(fn, 'w') as fp:
fp.write(version + '\n')
def wait_for_mysql():
db_host = get_conf('DB_HOST', '127.0.0.1')
db_user = 'root'
db_passwd = get_conf('DB_ROOT_PASSWD', '')
while True:
try:
pymysql.connect(host=db_host, port=3306, user=db_user, passwd=db_passwd)
except Exception as e:
            print('waiting for mysql server to be ready: %s' % e)
time.sleep(2)
continue
logdbg('mysql server is ready')
return
def wait_for_memcached():
while True:
try:
with telnetlib.Telnet(host='memcached', port=11211, timeout=3) as tn:
pass
except Exception as e:
            print('waiting for memcached to be ready: %s' % e)
time.sleep(2)
continue
logdbg('memcached is ready')
return
def wait_for_nginx():
while True:
logdbg('waiting for nginx server to be ready')
output = get_command_output('netstat -nltp')
if ':80 ' in output:
logdbg(output)
logdbg('nginx is ready')
return
time.sleep(2)
def replace_file_pattern(fn, pattern, replacement):
with open(fn, 'r') as fp:
content = fp.read()
with open(fn, 'w') as fp:
fp.write(content.replace(pattern, replacement))
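# Illustrative sketch (not part of the image scripts): exercises
# replace_file_pattern on a throwaway temporary file with made-up contents.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as tmp:
        tmp.write('HOST = {{ host }}\n')
    replace_file_pattern(tmp.name, '{{ host }}', 'seafile.example.com')
    with open(tmp.name) as fp:
        print(fp.read())  # HOST = seafile.example.com
    os.remove(tmp.name)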
|
the-stack_0_3633 | # WxPython Demo
from typing import List, Optional
import os.path
import wx
import QRCodeLib.qrcodelib as qr
from QRCodeLib.qrcodelib import Symbols
class FormMain(wx.Frame):
def __init__(self, **kw) -> None:
super().__init__(**kw)
self._init_widgets()
self._images: List[wx.Bitmap] = []
self._module_size = int()
def _init_widgets(self) -> None:
# self
self.Title = "QR Code"
self.SetSize(700, 550)
self.SetMinSize(self.GetSize())
font = wx.Font(
10,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL
)
self.SetFont(font)
# create panel
self._pnl_top = self._create_top_panel()
self._pnl_middle = self._create_middle_panel()
self._pnl_bottom = self._create_bottom_panel()
# sizer
sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer)
sizer.Add(self._pnl_top, proportion=1, flag=wx.EXPAND)
sizer.Add(self._pnl_middle, flag=wx.EXPAND)
sizer.Add(self._pnl_bottom, flag=wx.EXPAND)
def _create_top_panel(self) -> wx.Panel:
panel = wx.Panel(self)
return panel
def _create_middle_panel(self) -> wx.Panel:
panel = wx.Panel(self, size=(self.GetSize().Width, 120))
# lbl_data
lbl_data = wx.StaticText(panel, label='Data :')
# txt_data
self._txt_data = wx.TextCtrl(
panel,
style=wx.TE_MULTILINE | wx.TE_PROCESS_TAB
)
self._txt_data.SetFocus()
self._txt_data.Bind(wx.EVT_TEXT, self.update_image)
# sizer
sizer = wx.BoxSizer(wx.VERTICAL)
panel.SetSizer(sizer)
sizer.Add(
lbl_data,
flag=wx.TOP | wx.LEFT,
border=10
)
sizer.Add(
self._txt_data,
proportion=1,
flag=wx.EXPAND | wx.BOTTOM | wx.LEFT | wx.RIGHT,
border=10
)
return panel
def _create_bottom_panel(self) -> wx.Panel:
panel = wx.Panel(
self,
size=(self.GetSize().width, 70)
)
        # lbl_ec_level
wx.StaticText(panel, label="Error Correction Level :", pos=(10, 9), size=(143, 21))
# cmb_ec_level
self._cmb_ec_level = wx.ComboBox(
panel,
pos=(160, 5),
size=(48, 21),
choices=["L", "M", "Q", "H"],
style=wx.CB_READONLY
)
self._cmb_ec_level.SetValue("M")
self._cmb_ec_level.Bind(wx.EVT_COMBOBOX, self.update_image)
# lbl_byte_enc
self._lbl_byte_enc = wx.StaticText(
panel,
label="Byte mode Encoding :",
pos=(225, 8)
)
# cmb_byte_enc
self._cmb_byte_enc = wx.ComboBox(
panel,
pos=(358, 5),
size=(315, 21),
choices=["Shift_JIS", "UTF-8"],
style=wx.CB_READONLY
)
self._cmb_byte_enc.SetValue("Shift_JIS")
self._cmb_byte_enc.Bind(wx.EVT_COMBOBOX, self.update_image)
# lbl_max_ver
wx.StaticText(panel, label="Max Version :", pos=(10, 39))
# cmb_max_ver
self._cmb_max_ver = wx.ComboBox(
panel,
pos=(160, 35),
size=(48, 21),
choices=[str(item + 1) for item in range(40)],
style=wx.CB_READONLY
)
self._cmb_max_ver.SetValue(str(40))
self._cmb_max_ver.Bind(wx.EVT_COMBOBOX, self.update_image)
# chk_structured_append
self._chk_structured_append = wx.CheckBox(
panel,
label="Structured Append",
pos=(225, 39)
)
self._chk_structured_append.SetValue(False)
self._chk_structured_append.Bind(wx.EVT_CHECKBOX, self.update_image)
# lbl_module_size
wx.StaticText(panel, label="Module Size :", pos=(380, 39))
# spn_module_size
self._spn_module_size = wx.SpinCtrlDouble(
panel,
pos=(460, 35),
size=(48, 21),
min=1,
max=100,
initial=5
)
self._spn_module_size.Bind(wx.EVT_SPINCTRLDOUBLE, self.update_image)
# btn_save
self._btn_save = wx.Button(
panel,
label="Save",
pos=(553, 35),
size=(120, 23)
)
self._btn_save.Bind(wx.EVT_BUTTON, self.on_btn_save_clicked)
return panel
def create_symbols(self) -> Optional[Symbols]:
data = self._txt_data.GetValue()
if not data:
return None
ec_level = qr.ErrorCorrectionLevel.to_int(self._cmb_ec_level.GetValue())
max_ver = int(self._cmb_max_ver.GetValue())
structured_append = self._chk_structured_append.GetValue()
enc_mode = self._cmb_byte_enc.GetValue()
symbols = qr.Symbols(ec_level, max_ver, structured_append, enc_mode)
try:
symbols.append_text(self._txt_data.GetValue())
except Exception as e:
wx.MessageBox(str(e), parent=self)
return None
return symbols
def update_image(self, event) -> None:
self._pnl_top.DestroyChildren()
symbols = self.create_symbols()
if not symbols:
return
self._images.clear()
sizer = wx.BoxSizer(wx.HORIZONTAL)
self._pnl_top.SetSizer(sizer)
self._pnl_top.Freeze()
module_size = int(self._spn_module_size.GetValue())
for symbol in symbols:
(data, width, height) = symbol.get_rgb_bytes(module_size)
bitmap = wx.Bitmap.FromBuffer(width, height, data)
self._images.append(bitmap)
for image in self._images:
static_bitmap = wx.StaticBitmap(self._pnl_top, bitmap=image)
sizer.Add(static_bitmap, flag=wx.ALL, border=2)
self._pnl_top.Layout()
self._pnl_top.Thaw()
def on_btn_save_clicked(self, event) -> None:
symbols = self.create_symbols()
if not symbols:
return
wildcard = (
"Monochrome Bitmap (*.bmp)|*.bmp|"
"24-bit Bitmap (*.bmp)|*.bmp|"
"Portable Pixmap (*.ppm)|*.ppm|"
"X11 Bitmap (*.xbm)|*.xbm|"
"SVG (*.svg)|*.svg"
)
dlg = wx.FileDialog(self, wildcard=wildcard,
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_CANCEL:
return
(root, ext) = os.path.splitext(dlg.GetPath())
module_size = int(self._spn_module_size.GetValue())
for i, symbol in enumerate(symbols):
if symbols.count == 1:
path = root
else:
path = root + "_" + str(i)
if dlg.FilterIndex == 0:
path += ".bmp"
symbol.save_bitmap(path, module_size, True)
if dlg.FilterIndex == 1:
path += ".bmp"
symbol.save_bitmap(path, module_size, False)
if dlg.FilterIndex == 2:
path += ".ppm"
symbol.save_ppm(path, module_size)
if dlg.FilterIndex == 3:
path += ".xbm"
symbol.save_xbm(path, module_size)
if dlg.FilterIndex == 4:
path += ".svg"
symbol.save_svg(path, module_size)
dlg.Destroy()
def main() -> None:
app = wx.App()
form = FormMain(parent=None)
form.Show()
app.MainLoop()
if __name__ == "__main__":
main()
|
the-stack_0_3635 | # MIT License
# Copyright (c) 2020 Mitchell Lane
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
class Config:
def __init__(self, fileName, contents, configType):
self.fileName = fileName
self.contents = contents
self.configType = configType
with open("config_bundle.ini", "r") as bundleContents:
line = bundleContents.readline()
while line and not line.startswith("["):
line = bundleContents.readline()
configurationsFound = []
while line:
rawConfigHeader = line[1:-2]
if rawConfigHeader == "presets":
break
print(line)
configHeaderComponents = rawConfigHeader.split(":", 1)
configType = configHeaderComponents[0]
fileName = (configHeaderComponents[1] + ".ini").replace(" ", "_")
print("Found config section: " + configHeaderComponents[1])
line = bundleContents.readline()
contents=[]
while line and not line.startswith("["):
contents.append(line)
line = bundleContents.readline()
configurationsFound.append(Config(fileName, contents, configType))
print("//////////////////////////////////////////")
print("-----------------------------------\n" + "Found: " + str(len(configurationsFound)) + " configurations in total")
outputDir = "config_files"
for configuration in configurationsFound:
outputFileName = os.path.join(outputDir, configuration.fileName)
print("Writing configuration to '" + outputFileName + "'")
with open(outputFileName, 'w') as f:
for configLine in configuration.contents:
if configLine.rstrip():
f.write(configLine)
print("All configuration written to seperate files")
|
the-stack_0_3638 | #/*
# *
# * TuneIn Radio for Kodi.
# *
# * Copyright (C) 2013 Diego Fernando Nieto
# *
# * This program is free software: you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation, either version 3 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# */
from random import choice
import urllib.request, urllib.error, urllib.parse
import xml.dom.minidom as minidom
class StreamTheWorld:
## Example XML document we are parsing follows, as the minidom code is so beautiful to follow
# http://playerservices.streamtheworld.com/api/livestream?version=1.4&mount=CARACOL_RADIOAAC&lang=EN
#
#<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
#<live_stream_config version="1.4">
# <mountpoints>
# <mountpoint>
# <status>
# <status-code>200</status-code>
# <status-message>OK</status-message>
# </status>
#
# <transports>
# <transport>http</transport>
# </transports>
#
# <servers>
# <server sid="3653">
# <ip>3653.live.streamtheworld.com</ip>
# <ports>
# <port type="http">80</port>
# <port type="http">3690</port>
# <port type="http">443</port>
# </ports>
# </server>
#
# <server sid="1351">
# <ip>1351.live.streamtheworld.com</ip>
# <ports>
# <port type="http">80</port>
# <port type="http">3690</port>
# <port type="http">443</port>
# </ports>
# </server>
# </servers>
#
# <mount>CARACOL_RADIOAAC</mount>
# <format>FLV</format>
# <bitrate>32000</bitrate>
# <media-format container="flv" cuepoints="andoxml">
# <audio index="0" samplerate="44100" codec="heaacv2" bitrate="32000" channels="2"/>
# </media-format>
# <authentication>0</authentication>
# <timeout>0</timeout>
# </mountpoint>
# </mountpoints>
#</live_stream_config>
''' Parse streamtheworld URL to HTTP Stream'''
def __init__(self, cs):
self.__cs__ = cs
return
def __validate_callsign(self, cs, acc=True):
'''
Normal callsign format is 'WWWWAAA', where 'WWWW' is the radio station
callsign and 'AAA' is always 'AAC'.
'''
if not cs or not isinstance(cs, str):
raise ValueError('callsign \'%s\' is not a string.' % cs)
if len(cs) < 6:
raise ValueError('callsign \'%s\' is too short.' % cs)
if acc and not cs.endswith('AAC'):
cs = cs + 'AAC'
return cs
def __make_request(self, callsign):
''' Make a Call to StreamTheWorld API v1.5'''
host = 'playerservices.streamtheworld.com'
req = urllib.request.Request(
'http://%s/api/livestream?version=1.5&mount=%s&lang=en' %
(host, callsign))
req.add_header('User-Agent', 'Mozilla/5.0')
return req
def __t(self, element):
'''get the text of a DOM element'''
return element.firstChild.data
def __check_status(self, ele):
''' should only be one status element inside a mountpoint'''
status = ele.getElementsByTagName('status')[0]
if self.__t(status.getElementsByTagName('status-code')[0]) != '200':
msg = self.__t(status.getElementsByTagName('status-message')[0])
raise Exception('Error locating stream: ' + msg)
def __create_stream_urls(self, srcfile):
''' Return an array with all URLs'''
doc = minidom.parse(srcfile)
mp = doc.getElementsByTagName('mountpoint')[0]
self.__check_status(mp)
mt = self.__t(mp.getElementsByTagName('mount')[0])
allurls = []
for s in mp.getElementsByTagName('server'):
# a thing of beauty, right?
ip = self.__t(s.getElementsByTagName('ip')[0])
ports = [self.__t(p) for p in s.getElementsByTagName('port')]
# yes, it is always HTTP. We see ports 80, 443, and 3690 usually
urls = ['http://%s:%s/%s' % (ip, p, mt) for p in ports]
allurls.extend(urls)
return allurls
def get_stream_url(self, cs):
''' Get one URL from CS'''
try:
callsign = self.__validate_callsign(cs)
req = self.__make_request(callsign)
result = urllib.request.urlopen(req)
urls = self.__create_stream_urls(result)
except:
callsign = self.__validate_callsign(cs, False)
req = self.__make_request(callsign)
result = urllib.request.urlopen(req)
urls = self.__create_stream_urls(result)
if len(urls) > 0:
            u = choice(urls)
if not u.endswith('_SC'):
u = u + '_SC'
return u
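# Illustrative usage sketch (not part of the addon): 'CARACOL_RADIO' is only an
# example callsign; resolving it performs a live request against the
# playerservices.streamtheworld.com API.
if __name__ == "__main__":
    resolver = StreamTheWorld('CARACOL_RADIO')
    print(resolver.get_stream_url('CARACOL_RADIO'))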
|
the-stack_0_3639 |
from plotly.graph_objs import Layout
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Data(_BaseLayoutHierarchyType):
# area
# ----
@property
def area(self):
"""
The 'area' property is a tuple of instances of
Area that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Area
- A list or tuple of dicts of string/value properties that
will be passed to the Area constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Area]
"""
return self['area']
@area.setter
def area(self, val):
self['area'] = val
# barpolar
# --------
@property
def barpolar(self):
"""
The 'barpolar' property is a tuple of instances of
Barpolar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Barpolar
- A list or tuple of dicts of string/value properties that
will be passed to the Barpolar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Barpolar]
"""
return self['barpolar']
@barpolar.setter
def barpolar(self, val):
self['barpolar'] = val
# bar
# ---
@property
def bar(self):
"""
The 'bar' property is a tuple of instances of
Bar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Bar
- A list or tuple of dicts of string/value properties that
will be passed to the Bar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Bar]
"""
return self['bar']
@bar.setter
def bar(self, val):
self['bar'] = val
# box
# ---
@property
def box(self):
"""
The 'box' property is a tuple of instances of
Box that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Box
- A list or tuple of dicts of string/value properties that
will be passed to the Box constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Box]
"""
return self['box']
@box.setter
def box(self, val):
self['box'] = val
# candlestick
# -----------
@property
def candlestick(self):
"""
The 'candlestick' property is a tuple of instances of
Candlestick that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Candlestick
- A list or tuple of dicts of string/value properties that
will be passed to the Candlestick constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Candlestick]
"""
return self['candlestick']
@candlestick.setter
def candlestick(self, val):
self['candlestick'] = val
# carpet
# ------
@property
def carpet(self):
"""
The 'carpet' property is a tuple of instances of
Carpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Carpet
- A list or tuple of dicts of string/value properties that
will be passed to the Carpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Carpet]
"""
return self['carpet']
@carpet.setter
def carpet(self, val):
self['carpet'] = val
# choropleth
# ----------
@property
def choropleth(self):
"""
The 'choropleth' property is a tuple of instances of
Choropleth that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Choropleth
- A list or tuple of dicts of string/value properties that
will be passed to the Choropleth constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Choropleth]
"""
return self['choropleth']
@choropleth.setter
def choropleth(self, val):
self['choropleth'] = val
# cone
# ----
@property
def cone(self):
"""
The 'cone' property is a tuple of instances of
Cone that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Cone
- A list or tuple of dicts of string/value properties that
will be passed to the Cone constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Cone]
"""
return self['cone']
@cone.setter
def cone(self, val):
self['cone'] = val
# contourcarpet
# -------------
@property
def contourcarpet(self):
"""
The 'contourcarpet' property is a tuple of instances of
Contourcarpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Contourcarpet
- A list or tuple of dicts of string/value properties that
will be passed to the Contourcarpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Contourcarpet]
"""
return self['contourcarpet']
@contourcarpet.setter
def contourcarpet(self, val):
self['contourcarpet'] = val
# contour
# -------
@property
def contour(self):
"""
The 'contour' property is a tuple of instances of
Contour that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Contour
- A list or tuple of dicts of string/value properties that
will be passed to the Contour constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Contour]
"""
return self['contour']
@contour.setter
def contour(self, val):
self['contour'] = val
# heatmapgl
# ---------
@property
def heatmapgl(self):
"""
The 'heatmapgl' property is a tuple of instances of
Heatmapgl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Heatmapgl
- A list or tuple of dicts of string/value properties that
will be passed to the Heatmapgl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Heatmapgl]
"""
return self['heatmapgl']
@heatmapgl.setter
def heatmapgl(self, val):
self['heatmapgl'] = val
# heatmap
# -------
@property
def heatmap(self):
"""
The 'heatmap' property is a tuple of instances of
Heatmap that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Heatmap
- A list or tuple of dicts of string/value properties that
will be passed to the Heatmap constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Heatmap]
"""
return self['heatmap']
@heatmap.setter
def heatmap(self, val):
self['heatmap'] = val
# histogram2dcontour
# ------------------
@property
def histogram2dcontour(self):
"""
The 'histogram2dcontour' property is a tuple of instances of
Histogram2dContour that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram2dContour
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram2dContour constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram2dContour]
"""
return self['histogram2dcontour']
@histogram2dcontour.setter
def histogram2dcontour(self, val):
self['histogram2dcontour'] = val
# histogram2d
# -----------
@property
def histogram2d(self):
"""
The 'histogram2d' property is a tuple of instances of
Histogram2d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram2d
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram2d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram2d]
"""
return self['histogram2d']
@histogram2d.setter
def histogram2d(self, val):
self['histogram2d'] = val
# histogram
# ---------
@property
def histogram(self):
"""
The 'histogram' property is a tuple of instances of
Histogram that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram]
"""
return self['histogram']
@histogram.setter
def histogram(self, val):
self['histogram'] = val
# isosurface
# ----------
@property
def isosurface(self):
"""
The 'isosurface' property is a tuple of instances of
Isosurface that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Isosurface
- A list or tuple of dicts of string/value properties that
will be passed to the Isosurface constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Isosurface]
"""
return self['isosurface']
@isosurface.setter
def isosurface(self, val):
self['isosurface'] = val
# mesh3d
# ------
@property
def mesh3d(self):
"""
The 'mesh3d' property is a tuple of instances of
Mesh3d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Mesh3d
- A list or tuple of dicts of string/value properties that
will be passed to the Mesh3d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Mesh3d]
"""
return self['mesh3d']
@mesh3d.setter
def mesh3d(self, val):
self['mesh3d'] = val
# ohlc
# ----
@property
def ohlc(self):
"""
The 'ohlc' property is a tuple of instances of
Ohlc that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Ohlc
- A list or tuple of dicts of string/value properties that
will be passed to the Ohlc constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Ohlc]
"""
return self['ohlc']
@ohlc.setter
def ohlc(self, val):
self['ohlc'] = val
# parcats
# -------
@property
def parcats(self):
"""
The 'parcats' property is a tuple of instances of
Parcats that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Parcats
- A list or tuple of dicts of string/value properties that
will be passed to the Parcats constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Parcats]
"""
return self['parcats']
@parcats.setter
def parcats(self, val):
self['parcats'] = val
# parcoords
# ---------
@property
def parcoords(self):
"""
The 'parcoords' property is a tuple of instances of
Parcoords that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Parcoords
- A list or tuple of dicts of string/value properties that
will be passed to the Parcoords constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Parcoords]
"""
return self['parcoords']
@parcoords.setter
def parcoords(self, val):
self['parcoords'] = val
# pie
# ---
@property
def pie(self):
"""
The 'pie' property is a tuple of instances of
Pie that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Pie
- A list or tuple of dicts of string/value properties that
will be passed to the Pie constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Pie]
"""
return self['pie']
@pie.setter
def pie(self, val):
self['pie'] = val
# pointcloud
# ----------
@property
def pointcloud(self):
"""
The 'pointcloud' property is a tuple of instances of
Pointcloud that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Pointcloud
- A list or tuple of dicts of string/value properties that
will be passed to the Pointcloud constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Pointcloud]
"""
return self['pointcloud']
@pointcloud.setter
def pointcloud(self, val):
self['pointcloud'] = val
# sankey
# ------
@property
def sankey(self):
"""
The 'sankey' property is a tuple of instances of
Sankey that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Sankey
- A list or tuple of dicts of string/value properties that
will be passed to the Sankey constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Sankey]
"""
return self['sankey']
@sankey.setter
def sankey(self, val):
self['sankey'] = val
# scatter3d
# ---------
@property
def scatter3d(self):
"""
The 'scatter3d' property is a tuple of instances of
Scatter3d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatter3d
- A list or tuple of dicts of string/value properties that
will be passed to the Scatter3d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatter3d]
"""
return self['scatter3d']
@scatter3d.setter
def scatter3d(self, val):
self['scatter3d'] = val
# scattercarpet
# -------------
@property
def scattercarpet(self):
"""
The 'scattercarpet' property is a tuple of instances of
Scattercarpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattercarpet
- A list or tuple of dicts of string/value properties that
will be passed to the Scattercarpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattercarpet]
"""
return self['scattercarpet']
@scattercarpet.setter
def scattercarpet(self, val):
self['scattercarpet'] = val
# scattergeo
# ----------
@property
def scattergeo(self):
"""
The 'scattergeo' property is a tuple of instances of
Scattergeo that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattergeo
- A list or tuple of dicts of string/value properties that
will be passed to the Scattergeo constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattergeo]
"""
return self['scattergeo']
@scattergeo.setter
def scattergeo(self, val):
self['scattergeo'] = val
# scattergl
# ---------
@property
def scattergl(self):
"""
The 'scattergl' property is a tuple of instances of
Scattergl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattergl
- A list or tuple of dicts of string/value properties that
will be passed to the Scattergl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattergl]
"""
return self['scattergl']
@scattergl.setter
def scattergl(self, val):
self['scattergl'] = val
# scattermapbox
# -------------
@property
def scattermapbox(self):
"""
The 'scattermapbox' property is a tuple of instances of
Scattermapbox that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattermapbox
- A list or tuple of dicts of string/value properties that
will be passed to the Scattermapbox constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattermapbox]
"""
return self['scattermapbox']
@scattermapbox.setter
def scattermapbox(self, val):
self['scattermapbox'] = val
# scatterpolargl
# --------------
@property
def scatterpolargl(self):
"""
The 'scatterpolargl' property is a tuple of instances of
Scatterpolargl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterpolargl
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterpolargl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterpolargl]
"""
return self['scatterpolargl']
@scatterpolargl.setter
def scatterpolargl(self, val):
self['scatterpolargl'] = val
# scatterpolar
# ------------
@property
def scatterpolar(self):
"""
The 'scatterpolar' property is a tuple of instances of
Scatterpolar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterpolar
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterpolar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterpolar]
"""
return self['scatterpolar']
@scatterpolar.setter
def scatterpolar(self, val):
self['scatterpolar'] = val
# scatter
# -------
@property
def scatter(self):
"""
The 'scatter' property is a tuple of instances of
Scatter that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatter
- A list or tuple of dicts of string/value properties that
will be passed to the Scatter constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatter]
"""
return self['scatter']
@scatter.setter
def scatter(self, val):
self['scatter'] = val
# scatterternary
# --------------
@property
def scatterternary(self):
"""
The 'scatterternary' property is a tuple of instances of
Scatterternary that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterternary
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterternary constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterternary]
"""
return self['scatterternary']
@scatterternary.setter
def scatterternary(self, val):
self['scatterternary'] = val
# splom
# -----
@property
def splom(self):
"""
The 'splom' property is a tuple of instances of
Splom that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Splom
- A list or tuple of dicts of string/value properties that
will be passed to the Splom constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Splom]
"""
return self['splom']
@splom.setter
def splom(self, val):
self['splom'] = val
# streamtube
# ----------
@property
def streamtube(self):
"""
The 'streamtube' property is a tuple of instances of
Streamtube that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Streamtube
- A list or tuple of dicts of string/value properties that
will be passed to the Streamtube constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Streamtube]
"""
return self['streamtube']
@streamtube.setter
def streamtube(self, val):
self['streamtube'] = val
# sunburst
# --------
@property
def sunburst(self):
"""
The 'sunburst' property is a tuple of instances of
Sunburst that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Sunburst
- A list or tuple of dicts of string/value properties that
will be passed to the Sunburst constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Sunburst]
"""
return self['sunburst']
@sunburst.setter
def sunburst(self, val):
self['sunburst'] = val
# surface
# -------
@property
def surface(self):
"""
The 'surface' property is a tuple of instances of
Surface that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Surface
- A list or tuple of dicts of string/value properties that
will be passed to the Surface constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Surface]
"""
return self['surface']
@surface.setter
def surface(self, val):
self['surface'] = val
# table
# -----
@property
def table(self):
"""
The 'table' property is a tuple of instances of
Table that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Table
- A list or tuple of dicts of string/value properties that
will be passed to the Table constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Table]
"""
return self['table']
@table.setter
def table(self, val):
self['table'] = val
# violin
# ------
@property
def violin(self):
"""
The 'violin' property is a tuple of instances of
Violin that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Violin
- A list or tuple of dicts of string/value properties that
will be passed to the Violin constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Violin]
"""
return self['violin']
@violin.setter
def violin(self, val):
self['violin'] = val
# volume
# ------
@property
def volume(self):
"""
The 'volume' property is a tuple of instances of
Volume that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Volume
- A list or tuple of dicts of string/value properties that
will be passed to the Volume constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Volume]
"""
return self['volume']
@volume.setter
def volume(self, val):
self['volume'] = val
# waterfall
# ---------
@property
def waterfall(self):
"""
The 'waterfall' property is a tuple of instances of
Waterfall that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Waterfall
- A list or tuple of dicts of string/value properties that
will be passed to the Waterfall constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Waterfall]
"""
return self['waterfall']
@waterfall.setter
def waterfall(self, val):
self['waterfall'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'layout.template'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
area
plotly.graph_objs.layout.template.data.Area instance or
dict with compatible properties
barpolar
plotly.graph_objs.layout.template.data.Barpolar
instance or dict with compatible properties
bar
plotly.graph_objs.layout.template.data.Bar instance or
dict with compatible properties
box
plotly.graph_objs.layout.template.data.Box instance or
dict with compatible properties
candlestick
plotly.graph_objs.layout.template.data.Candlestick
instance or dict with compatible properties
carpet
plotly.graph_objs.layout.template.data.Carpet instance
or dict with compatible properties
choropleth
plotly.graph_objs.layout.template.data.Choropleth
instance or dict with compatible properties
cone
plotly.graph_objs.layout.template.data.Cone instance or
dict with compatible properties
contourcarpet
plotly.graph_objs.layout.template.data.Contourcarpet
instance or dict with compatible properties
contour
plotly.graph_objs.layout.template.data.Contour instance
or dict with compatible properties
heatmapgl
plotly.graph_objs.layout.template.data.Heatmapgl
instance or dict with compatible properties
heatmap
plotly.graph_objs.layout.template.data.Heatmap instance
or dict with compatible properties
histogram2dcontour
plotly.graph_objs.layout.template.data.Histogram2dConto
ur instance or dict with compatible properties
histogram2d
plotly.graph_objs.layout.template.data.Histogram2d
instance or dict with compatible properties
histogram
plotly.graph_objs.layout.template.data.Histogram
instance or dict with compatible properties
isosurface
plotly.graph_objs.layout.template.data.Isosurface
instance or dict with compatible properties
mesh3d
plotly.graph_objs.layout.template.data.Mesh3d instance
or dict with compatible properties
ohlc
plotly.graph_objs.layout.template.data.Ohlc instance or
dict with compatible properties
parcats
plotly.graph_objs.layout.template.data.Parcats instance
or dict with compatible properties
parcoords
plotly.graph_objs.layout.template.data.Parcoords
instance or dict with compatible properties
pie
plotly.graph_objs.layout.template.data.Pie instance or
dict with compatible properties
pointcloud
plotly.graph_objs.layout.template.data.Pointcloud
instance or dict with compatible properties
sankey
plotly.graph_objs.layout.template.data.Sankey instance
or dict with compatible properties
scatter3d
plotly.graph_objs.layout.template.data.Scatter3d
instance or dict with compatible properties
scattercarpet
plotly.graph_objs.layout.template.data.Scattercarpet
instance or dict with compatible properties
scattergeo
plotly.graph_objs.layout.template.data.Scattergeo
instance or dict with compatible properties
scattergl
plotly.graph_objs.layout.template.data.Scattergl
instance or dict with compatible properties
scattermapbox
plotly.graph_objs.layout.template.data.Scattermapbox
instance or dict with compatible properties
scatterpolargl
plotly.graph_objs.layout.template.data.Scatterpolargl
instance or dict with compatible properties
scatterpolar
plotly.graph_objs.layout.template.data.Scatterpolar
instance or dict with compatible properties
scatter
plotly.graph_objs.layout.template.data.Scatter instance
or dict with compatible properties
scatterternary
plotly.graph_objs.layout.template.data.Scatterternary
instance or dict with compatible properties
splom
plotly.graph_objs.layout.template.data.Splom instance
or dict with compatible properties
streamtube
plotly.graph_objs.layout.template.data.Streamtube
instance or dict with compatible properties
sunburst
plotly.graph_objs.layout.template.data.Sunburst
instance or dict with compatible properties
surface
plotly.graph_objs.layout.template.data.Surface instance
or dict with compatible properties
table
plotly.graph_objs.layout.template.data.Table instance
or dict with compatible properties
violin
plotly.graph_objs.layout.template.data.Violin instance
or dict with compatible properties
volume
plotly.graph_objs.layout.template.data.Volume instance
or dict with compatible properties
waterfall
plotly.graph_objs.layout.template.data.Waterfall
instance or dict with compatible properties
"""
def __init__(
self,
arg=None,
area=None,
barpolar=None,
bar=None,
box=None,
candlestick=None,
carpet=None,
choropleth=None,
cone=None,
contourcarpet=None,
contour=None,
heatmapgl=None,
heatmap=None,
histogram2dcontour=None,
histogram2d=None,
histogram=None,
isosurface=None,
mesh3d=None,
ohlc=None,
parcats=None,
parcoords=None,
pie=None,
pointcloud=None,
sankey=None,
scatter3d=None,
scattercarpet=None,
scattergeo=None,
scattergl=None,
scattermapbox=None,
scatterpolargl=None,
scatterpolar=None,
scatter=None,
scatterternary=None,
splom=None,
streamtube=None,
sunburst=None,
surface=None,
table=None,
violin=None,
volume=None,
waterfall=None,
**kwargs
):
"""
Construct a new Data object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.layout.template.Data
area
plotly.graph_objs.layout.template.data.Area instance or
dict with compatible properties
barpolar
plotly.graph_objs.layout.template.data.Barpolar
instance or dict with compatible properties
bar
plotly.graph_objs.layout.template.data.Bar instance or
dict with compatible properties
box
plotly.graph_objs.layout.template.data.Box instance or
dict with compatible properties
candlestick
plotly.graph_objs.layout.template.data.Candlestick
instance or dict with compatible properties
carpet
plotly.graph_objs.layout.template.data.Carpet instance
or dict with compatible properties
choropleth
plotly.graph_objs.layout.template.data.Choropleth
instance or dict with compatible properties
cone
plotly.graph_objs.layout.template.data.Cone instance or
dict with compatible properties
contourcarpet
plotly.graph_objs.layout.template.data.Contourcarpet
instance or dict with compatible properties
contour
plotly.graph_objs.layout.template.data.Contour instance
or dict with compatible properties
heatmapgl
plotly.graph_objs.layout.template.data.Heatmapgl
instance or dict with compatible properties
heatmap
plotly.graph_objs.layout.template.data.Heatmap instance
or dict with compatible properties
histogram2dcontour
plotly.graph_objs.layout.template.data.Histogram2dConto
ur instance or dict with compatible properties
histogram2d
plotly.graph_objs.layout.template.data.Histogram2d
instance or dict with compatible properties
histogram
plotly.graph_objs.layout.template.data.Histogram
instance or dict with compatible properties
isosurface
plotly.graph_objs.layout.template.data.Isosurface
instance or dict with compatible properties
mesh3d
plotly.graph_objs.layout.template.data.Mesh3d instance
or dict with compatible properties
ohlc
plotly.graph_objs.layout.template.data.Ohlc instance or
dict with compatible properties
parcats
plotly.graph_objs.layout.template.data.Parcats instance
or dict with compatible properties
parcoords
plotly.graph_objs.layout.template.data.Parcoords
instance or dict with compatible properties
pie
plotly.graph_objs.layout.template.data.Pie instance or
dict with compatible properties
pointcloud
plotly.graph_objs.layout.template.data.Pointcloud
instance or dict with compatible properties
sankey
plotly.graph_objs.layout.template.data.Sankey instance
or dict with compatible properties
scatter3d
plotly.graph_objs.layout.template.data.Scatter3d
instance or dict with compatible properties
scattercarpet
plotly.graph_objs.layout.template.data.Scattercarpet
instance or dict with compatible properties
scattergeo
plotly.graph_objs.layout.template.data.Scattergeo
instance or dict with compatible properties
scattergl
plotly.graph_objs.layout.template.data.Scattergl
instance or dict with compatible properties
scattermapbox
plotly.graph_objs.layout.template.data.Scattermapbox
instance or dict with compatible properties
scatterpolargl
plotly.graph_objs.layout.template.data.Scatterpolargl
instance or dict with compatible properties
scatterpolar
plotly.graph_objs.layout.template.data.Scatterpolar
instance or dict with compatible properties
scatter
plotly.graph_objs.layout.template.data.Scatter instance
or dict with compatible properties
scatterternary
plotly.graph_objs.layout.template.data.Scatterternary
instance or dict with compatible properties
splom
plotly.graph_objs.layout.template.data.Splom instance
or dict with compatible properties
streamtube
plotly.graph_objs.layout.template.data.Streamtube
instance or dict with compatible properties
sunburst
plotly.graph_objs.layout.template.data.Sunburst
instance or dict with compatible properties
surface
plotly.graph_objs.layout.template.data.Surface instance
or dict with compatible properties
table
plotly.graph_objs.layout.template.data.Table instance
or dict with compatible properties
violin
plotly.graph_objs.layout.template.data.Violin instance
or dict with compatible properties
volume
plotly.graph_objs.layout.template.data.Volume instance
or dict with compatible properties
waterfall
plotly.graph_objs.layout.template.data.Waterfall
instance or dict with compatible properties
Returns
-------
Data
"""
super(Data, self).__init__('data')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.template.Data
constructor must be a dict or
an instance of plotly.graph_objs.layout.template.Data"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.layout.template import (data as v_data)
# Initialize validators
# ---------------------
self._validators['area'] = v_data.AreasValidator()
self._validators['barpolar'] = v_data.BarpolarsValidator()
self._validators['bar'] = v_data.BarsValidator()
self._validators['box'] = v_data.BoxsValidator()
self._validators['candlestick'] = v_data.CandlesticksValidator()
self._validators['carpet'] = v_data.CarpetsValidator()
self._validators['choropleth'] = v_data.ChoroplethsValidator()
self._validators['cone'] = v_data.ConesValidator()
self._validators['contourcarpet'] = v_data.ContourcarpetsValidator()
self._validators['contour'] = v_data.ContoursValidator()
self._validators['heatmapgl'] = v_data.HeatmapglsValidator()
self._validators['heatmap'] = v_data.HeatmapsValidator()
self._validators['histogram2dcontour'
] = v_data.Histogram2dContoursValidator()
self._validators['histogram2d'] = v_data.Histogram2dsValidator()
self._validators['histogram'] = v_data.HistogramsValidator()
self._validators['isosurface'] = v_data.IsosurfacesValidator()
self._validators['mesh3d'] = v_data.Mesh3dsValidator()
self._validators['ohlc'] = v_data.OhlcsValidator()
self._validators['parcats'] = v_data.ParcatssValidator()
self._validators['parcoords'] = v_data.ParcoordssValidator()
self._validators['pie'] = v_data.PiesValidator()
self._validators['pointcloud'] = v_data.PointcloudsValidator()
self._validators['sankey'] = v_data.SankeysValidator()
self._validators['scatter3d'] = v_data.Scatter3dsValidator()
self._validators['scattercarpet'] = v_data.ScattercarpetsValidator()
self._validators['scattergeo'] = v_data.ScattergeosValidator()
self._validators['scattergl'] = v_data.ScatterglsValidator()
self._validators['scattermapbox'] = v_data.ScattermapboxsValidator()
self._validators['scatterpolargl'] = v_data.ScatterpolarglsValidator()
self._validators['scatterpolar'] = v_data.ScatterpolarsValidator()
self._validators['scatter'] = v_data.ScattersValidator()
self._validators['scatterternary'] = v_data.ScatterternarysValidator()
self._validators['splom'] = v_data.SplomsValidator()
self._validators['streamtube'] = v_data.StreamtubesValidator()
self._validators['sunburst'] = v_data.SunburstsValidator()
self._validators['surface'] = v_data.SurfacesValidator()
self._validators['table'] = v_data.TablesValidator()
self._validators['violin'] = v_data.ViolinsValidator()
self._validators['volume'] = v_data.VolumesValidator()
self._validators['waterfall'] = v_data.WaterfallsValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('area', None)
self['area'] = area if area is not None else _v
_v = arg.pop('barpolar', None)
self['barpolar'] = barpolar if barpolar is not None else _v
_v = arg.pop('bar', None)
self['bar'] = bar if bar is not None else _v
_v = arg.pop('box', None)
self['box'] = box if box is not None else _v
_v = arg.pop('candlestick', None)
self['candlestick'] = candlestick if candlestick is not None else _v
_v = arg.pop('carpet', None)
self['carpet'] = carpet if carpet is not None else _v
_v = arg.pop('choropleth', None)
self['choropleth'] = choropleth if choropleth is not None else _v
_v = arg.pop('cone', None)
self['cone'] = cone if cone is not None else _v
_v = arg.pop('contourcarpet', None)
self['contourcarpet'
] = contourcarpet if contourcarpet is not None else _v
_v = arg.pop('contour', None)
self['contour'] = contour if contour is not None else _v
_v = arg.pop('heatmapgl', None)
self['heatmapgl'] = heatmapgl if heatmapgl is not None else _v
_v = arg.pop('heatmap', None)
self['heatmap'] = heatmap if heatmap is not None else _v
_v = arg.pop('histogram2dcontour', None)
self['histogram2dcontour'
] = histogram2dcontour if histogram2dcontour is not None else _v
_v = arg.pop('histogram2d', None)
self['histogram2d'] = histogram2d if histogram2d is not None else _v
_v = arg.pop('histogram', None)
self['histogram'] = histogram if histogram is not None else _v
_v = arg.pop('isosurface', None)
self['isosurface'] = isosurface if isosurface is not None else _v
_v = arg.pop('mesh3d', None)
self['mesh3d'] = mesh3d if mesh3d is not None else _v
_v = arg.pop('ohlc', None)
self['ohlc'] = ohlc if ohlc is not None else _v
_v = arg.pop('parcats', None)
self['parcats'] = parcats if parcats is not None else _v
_v = arg.pop('parcoords', None)
self['parcoords'] = parcoords if parcoords is not None else _v
_v = arg.pop('pie', None)
self['pie'] = pie if pie is not None else _v
_v = arg.pop('pointcloud', None)
self['pointcloud'] = pointcloud if pointcloud is not None else _v
_v = arg.pop('sankey', None)
self['sankey'] = sankey if sankey is not None else _v
_v = arg.pop('scatter3d', None)
self['scatter3d'] = scatter3d if scatter3d is not None else _v
_v = arg.pop('scattercarpet', None)
self['scattercarpet'
] = scattercarpet if scattercarpet is not None else _v
_v = arg.pop('scattergeo', None)
self['scattergeo'] = scattergeo if scattergeo is not None else _v
_v = arg.pop('scattergl', None)
self['scattergl'] = scattergl if scattergl is not None else _v
_v = arg.pop('scattermapbox', None)
self['scattermapbox'
] = scattermapbox if scattermapbox is not None else _v
_v = arg.pop('scatterpolargl', None)
self['scatterpolargl'
] = scatterpolargl if scatterpolargl is not None else _v
_v = arg.pop('scatterpolar', None)
self['scatterpolar'] = scatterpolar if scatterpolar is not None else _v
_v = arg.pop('scatter', None)
self['scatter'] = scatter if scatter is not None else _v
_v = arg.pop('scatterternary', None)
self['scatterternary'
] = scatterternary if scatterternary is not None else _v
_v = arg.pop('splom', None)
self['splom'] = splom if splom is not None else _v
_v = arg.pop('streamtube', None)
self['streamtube'] = streamtube if streamtube is not None else _v
_v = arg.pop('sunburst', None)
self['sunburst'] = sunburst if sunburst is not None else _v
_v = arg.pop('surface', None)
self['surface'] = surface if surface is not None else _v
_v = arg.pop('table', None)
self['table'] = table if table is not None else _v
_v = arg.pop('violin', None)
self['violin'] = violin if violin is not None else _v
_v = arg.pop('volume', None)
self['volume'] = volume if volume is not None else _v
_v = arg.pop('waterfall', None)
self['waterfall'] = waterfall if waterfall is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.graph_objs.layout.template import data
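# Usage sketch (illustrative only, not part of the generated module): the Data
# container above holds per-trace-type defaults for a layout template. Assuming
# the plotly 3.x graph_objs API this file belongs to, a template whose scatter
# defaults colour every scatter trace could be built as:
#
#     import plotly.graph_objs as go
#     template = go.layout.Template(
#         data={"scatter": [{"marker": {"color": "royalblue"}}]}
#     )
#     fig = go.Figure(layout={"template": template})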
|
the-stack_0_3640 | import io
import jsonpickle
import logging
import numpy as np
import os
from tqdm import tqdm
from typing import Tuple, List, Optional, Dict, Text, Any
import rasa.utils.io
from rasa.core import utils
from rasa.core.actions.action import ACTION_LISTEN_NAME
from rasa.core.domain import PREV_PREFIX, Domain
from rasa.core.events import ActionExecuted
from rasa.core.trackers import DialogueStateTracker
from rasa.core.training.data import DialogueTrainingData
from rasa.utils.common import is_logging_disabled
logger = logging.getLogger(__name__)
class SingleStateFeaturizer(object):
"""Base class for mechanisms to transform the conversations state into ML formats.
Subclasses of SingleStateFeaturizer decide how the bot will transform
the conversation state to a format which a classifier can read:
feature vector.
"""
def prepare_from_domain(self, domain: Domain) -> None:
"""Helper method to init based on domain."""
pass
def encode(self, state: Dict[Text, float]) -> np.ndarray:
"""Encode user input."""
raise NotImplementedError(
"SingleStateFeaturizer must have "
"the capacity to "
"encode states to a feature vector"
)
@staticmethod
def action_as_one_hot(action: Text, domain: Domain) -> np.ndarray:
"""Encode system action as one-hot vector."""
if action is None:
return np.ones(domain.num_actions, dtype=int) * -1
y = np.zeros(domain.num_actions, dtype=int)
y[domain.index_for_action(action)] = 1
return y
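    # Example (illustrative): with domain.num_actions == 4 and "utter_greet" at
    # index 1, action_as_one_hot("utter_greet", domain) returns array([0, 1, 0, 0]),
    # while action_as_one_hot(None, domain) returns the padding vector
    # array([-1, -1, -1, -1]).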
def create_encoded_all_actions(self, domain: Domain) -> np.ndarray:
"""Create matrix with all actions from domain encoded in rows."""
pass
class BinarySingleStateFeaturizer(SingleStateFeaturizer):
"""Assumes all features are binary.
All features should be either on or off, denoting them with 1 or 0.
"""
def __init__(self):
"""Declares instant variables."""
super(BinarySingleStateFeaturizer, self).__init__()
self.num_features = None
self.input_state_map = None
def prepare_from_domain(self, domain: Domain) -> None:
"""Use Domain to prepare featurizer."""
self.num_features = domain.num_states
self.input_state_map = domain.input_state_map
def encode(self, state: Dict[Text, float]) -> np.ndarray:
"""Returns a binary vector indicating which features are active.
Given a dictionary of states (e.g. 'intent_greet',
'prev_action_listen',...) return a binary vector indicating which
features of `self.input_features` are in the bag. NB it's a
regular double precision float array type.
For example with two active features out of five possible features
this would return a vector like `[0 0 1 0 1]`
If intent features are given with a probability, for example
with two active features and two uncertain intents out
of five possible features this would return a vector
like `[0.3, 0.7, 1.0, 0, 1.0]`.
If this is just a padding vector we set all values to `-1`.
padding vectors are specified by a `None` or `[None]`
value for states.
"""
if not self.num_features:
raise Exception(
"BinarySingleStateFeaturizer was not prepared before encoding."
)
if state is None or None in state:
return np.ones(self.num_features, dtype=np.int32) * -1
# we are going to use floats and convert to int later if possible
used_features = np.zeros(self.num_features, dtype=np.float)
using_only_ints = True
for state_name, prob in state.items():
if state_name in self.input_state_map:
idx = self.input_state_map[state_name]
used_features[idx] = prob
using_only_ints = using_only_ints and utils.is_int(prob)
else:
logger.debug(
"Feature '{}' (value: '{}') could not be found in "
"feature map. Make sure you added all intents and "
"entities to the domain".format(state_name, prob)
)
if using_only_ints:
# this is an optimization - saves us a bit of memory
return used_features.astype(np.int32)
else:
return used_features
def create_encoded_all_actions(self, domain: Domain) -> np.ndarray:
"""Create matrix with all actions from domain encoded in rows as bag of words"""
return np.eye(domain.num_actions)
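    # Example (illustrative): for a domain whose input_state_map is
    # {"intent_greet": 0, "prev_action_listen": 1, "slot_location_0": 2}, calling
    # prepare_from_domain(domain) and then
    # encode({"intent_greet": 1.0, "prev_action_listen": 1.0})
    # yields array([1, 1, 0], dtype=int32).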
class LabelTokenizerSingleStateFeaturizer(SingleStateFeaturizer):
"""Creates bag-of-words feature vectors.
User intents and bot action names are split into tokens
and used to create bag-of-words feature vectors.
Args:
split_symbol: The symbol that separates words in
            intents and action names.
use_shared_vocab: The flag that specifies if to create
the same vocabulary for user intents and bot actions.
"""
def __init__(
self, use_shared_vocab: bool = False, split_symbol: Text = "_"
) -> None:
"""inits vocabulary for label bag of words representation"""
super(LabelTokenizerSingleStateFeaturizer, self).__init__()
self.use_shared_vocab = use_shared_vocab
self.split_symbol = split_symbol
self.num_features = None
self.user_labels = []
self.slot_labels = []
self.bot_labels = []
self.bot_vocab = None
self.user_vocab = None
@staticmethod
def _create_label_token_dict(labels, split_symbol="_"):
"""Splits labels into tokens by using provided symbol.
Creates the lookup dictionary for this tokens.
Values in this dict are used for featurization.
"""
distinct_tokens = set(
[token for label in labels for token in label.split(split_symbol)]
)
return {token: idx for idx, token in enumerate(sorted(distinct_tokens))}
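        # Example (illustrative): _create_label_token_dict(["utter_greet", "action_bye"])
        # splits on "_" and returns {"action": 0, "bye": 1, "greet": 2, "utter": 3}.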
def prepare_from_domain(self, domain: Domain) -> None:
"""Creates internal vocabularies for user intents and bot actions."""
self.user_labels = domain.intent_states + domain.entity_states
self.slot_labels = domain.slot_states + domain.form_states
self.bot_labels = domain.action_names
if self.use_shared_vocab:
self.bot_vocab = self._create_label_token_dict(
self.bot_labels + self.user_labels, self.split_symbol
)
self.user_vocab = self.bot_vocab
else:
self.bot_vocab = self._create_label_token_dict(
self.bot_labels, self.split_symbol
)
self.user_vocab = self._create_label_token_dict(
self.user_labels, self.split_symbol
)
self.num_features = (
len(self.user_vocab) + len(self.slot_labels) + len(self.bot_vocab)
)
def encode(self, state: Dict[Text, float]) -> np.ndarray:
"""Returns a binary vector indicating which tokens are present."""
if not self.num_features:
raise Exception(
"LabelTokenizerSingleStateFeaturizer "
"was not prepared before encoding."
)
if state is None or None in state:
return np.ones(self.num_features, dtype=np.int32) * -1
# we are going to use floats and convert to int later if possible
used_features = np.zeros(self.num_features, dtype=np.float)
using_only_ints = True
for state_name, prob in state.items():
using_only_ints = using_only_ints and utils.is_int(prob)
if state_name in self.user_labels:
if PREV_PREFIX + ACTION_LISTEN_NAME in state:
# else we predict next action from bot action and memory
for t in state_name.split(self.split_symbol):
used_features[self.user_vocab[t]] += prob
elif state_name in self.slot_labels:
offset = len(self.user_vocab)
idx = self.slot_labels.index(state_name)
used_features[offset + idx] += prob
elif state_name[len(PREV_PREFIX) :] in self.bot_labels:
action_name = state_name[len(PREV_PREFIX) :]
for t in action_name.split(self.split_symbol):
offset = len(self.user_vocab) + len(self.slot_labels)
idx = self.bot_vocab[t]
used_features[offset + idx] += prob
else:
logger.warning(
"Feature '{}' could not be found in "
"feature map.".format(state_name)
)
if using_only_ints:
# this is an optimization - saves us a bit of memory
return used_features.astype(np.int32)
else:
return used_features
def create_encoded_all_actions(self, domain: Domain) -> np.ndarray:
"""Create matrix with all actions from domain encoded in rows as bag of words"""
encoded_all_actions = np.zeros(
(domain.num_actions, len(self.bot_vocab)), dtype=np.int32
)
for idx, name in enumerate(domain.action_names):
for t in name.split(self.split_symbol):
encoded_all_actions[idx, self.bot_vocab[t]] = 1
return encoded_all_actions
class TrackerFeaturizer(object):
"""Base class for actual tracker featurizers."""
def __init__(
self,
state_featurizer: Optional[SingleStateFeaturizer] = None,
use_intent_probabilities: bool = False,
) -> None:
self.state_featurizer = state_featurizer
self.use_intent_probabilities = use_intent_probabilities
def _create_states(
self,
tracker: DialogueStateTracker,
domain: Domain,
is_binary_training: bool = False,
) -> List[Dict[Text, float]]:
"""Create states: a list of dictionaries.
If use_intent_probabilities is False (default behaviour),
pick the most probable intent out of all provided ones and
set its probability to 1.0, while all the others to 0.0.
"""
states = tracker.past_states(domain)
# during training we encounter only 1 or 0
if not self.use_intent_probabilities and not is_binary_training:
bin_states = []
for state in states:
# copy state dict to preserve internal order of keys
bin_state = dict(state)
best_intent = None
best_intent_prob = -1.0
for state_name, prob in state:
if state_name.startswith("intent_"):
if prob > best_intent_prob:
# finding the maximum confidence intent
if best_intent is not None:
# delete previous best intent
del bin_state[best_intent]
best_intent = state_name
best_intent_prob = prob
else:
# delete other intents
del bin_state[state_name]
if best_intent is not None:
# set the confidence of best intent to 1.0
bin_state[best_intent] = 1.0
bin_states.append(bin_state)
return bin_states
else:
return [dict(state) for state in states]
def _pad_states(self, states: List[Any]) -> List[Any]:
"""Pads states."""
return states
def _featurize_states(
self, trackers_as_states: List[List[Dict[Text, float]]]
) -> Tuple[np.ndarray, List[int]]:
"""Create X."""
features = []
true_lengths = []
for tracker_states in trackers_as_states:
dialogue_len = len(tracker_states)
# len(trackers_as_states) = 1 means
# it is called during prediction or we have
# only one story, so no padding is needed
if len(trackers_as_states) > 1:
tracker_states = self._pad_states(tracker_states)
story_features = [
self.state_featurizer.encode(state) for state in tracker_states
]
features.append(story_features)
true_lengths.append(dialogue_len)
# noinspection PyPep8Naming
X = np.array(features)
return X, true_lengths
def _featurize_labels(
self, trackers_as_actions: List[List[Text]], domain: Domain
) -> np.ndarray:
"""Create y."""
labels = []
for tracker_actions in trackers_as_actions:
if len(trackers_as_actions) > 1:
tracker_actions = self._pad_states(tracker_actions)
story_labels = [
self.state_featurizer.action_as_one_hot(action, domain)
for action in tracker_actions
]
labels.append(story_labels)
y = np.array(labels)
if y.ndim == 3 and isinstance(self, MaxHistoryTrackerFeaturizer):
# if it is MaxHistoryFeaturizer, remove time axis
y = y[:, 0, :]
return y
def training_states_and_actions(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> Tuple[List[List[Dict]], List[List[Text]]]:
"""Transforms list of trackers to lists of states and actions."""
raise NotImplementedError(
"Featurizer must have the capacity to encode trackers to feature vectors"
)
def featurize_trackers(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> DialogueTrainingData:
"""Create training data."""
if self.state_featurizer is None:
raise ValueError(
"Variable 'state_featurizer' is not set. Provide "
"'SingleStateFeaturizer' class to featurize trackers."
)
self.state_featurizer.prepare_from_domain(domain)
(trackers_as_states, trackers_as_actions) = self.training_states_and_actions(
trackers, domain
)
# noinspection PyPep8Naming
X, true_lengths = self._featurize_states(trackers_as_states)
y = self._featurize_labels(trackers_as_actions, domain)
return DialogueTrainingData(X, y, true_lengths)
def prediction_states(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> List[List[Dict[Text, float]]]:
"""Transforms list of trackers to lists of states for prediction."""
raise NotImplementedError(
"Featurizer must have the capacity to create feature vector"
)
# noinspection PyPep8Naming
def create_X(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> np.ndarray:
"""Create X for prediction."""
trackers_as_states = self.prediction_states(trackers, domain)
X, _ = self._featurize_states(trackers_as_states)
return X
def persist(self, path):
featurizer_file = os.path.join(path, "featurizer.json")
rasa.utils.io.create_directory_for_file(featurizer_file)
# noinspection PyTypeChecker
rasa.utils.io.write_text_file(str(jsonpickle.encode(self)), featurizer_file)
@staticmethod
def load(path):
"""Loads the featurizer from file."""
featurizer_file = os.path.join(path, "featurizer.json")
if os.path.isfile(featurizer_file):
return jsonpickle.decode(rasa.utils.io.read_file(featurizer_file))
else:
logger.error(
"Couldn't load featurizer for policy. "
"File '{}' doesn't exist.".format(featurizer_file)
)
return None
class FullDialogueTrackerFeaturizer(TrackerFeaturizer):
"""Creates full dialogue training data for time distributed architectures.
Creates training data that uses each time output for prediction.
Training data is padded up to the length of the longest dialogue with -1.
"""
def __init__(
self,
state_featurizer: SingleStateFeaturizer,
use_intent_probabilities: bool = False,
) -> None:
super(FullDialogueTrackerFeaturizer, self).__init__(
state_featurizer, use_intent_probabilities
)
self.max_len = None
@staticmethod
def _calculate_max_len(trackers_as_actions):
"""Calculate the length of the longest dialogue."""
if trackers_as_actions:
return max([len(states) for states in trackers_as_actions])
else:
return None
def _pad_states(self, states: List[Any]) -> List[Any]:
"""Pads states up to max_len."""
if len(states) < self.max_len:
states += [None] * (self.max_len - len(states))
return states
def training_states_and_actions(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> Tuple[List[List[Dict]], List[List[Text]]]:
"""Transforms list of trackers to lists of states and actions.
Training data is padded up to the length of the longest dialogue with -1.
"""
trackers_as_states = []
trackers_as_actions = []
logger.debug(
"Creating states and action examples from "
"collected trackers (by {}({}))..."
"".format(type(self).__name__, type(self.state_featurizer).__name__)
)
pbar = tqdm(trackers, desc="Processed trackers", disable=is_logging_disabled())
for tracker in pbar:
states = self._create_states(tracker, domain, is_binary_training=True)
delete_first_state = False
actions = []
for event in tracker.applied_events():
if isinstance(event, ActionExecuted):
if not event.unpredictable:
# only actions which can be
# predicted at a stories start
actions.append(event.action_name)
else:
# unpredictable actions can be
# only the first in the story
if delete_first_state:
raise Exception(
"Found two unpredictable "
"actions in one story."
"Check your story files."
)
else:
delete_first_state = True
if delete_first_state:
states = states[1:]
trackers_as_states.append(states[:-1])
trackers_as_actions.append(actions)
self.max_len = self._calculate_max_len(trackers_as_actions)
logger.debug("The longest dialogue has {} actions.".format(self.max_len))
return trackers_as_states, trackers_as_actions
def prediction_states(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> List[List[Dict[Text, float]]]:
"""Transforms list of trackers to lists of states for prediction."""
trackers_as_states = [
self._create_states(tracker, domain) for tracker in trackers
]
return trackers_as_states
class MaxHistoryTrackerFeaturizer(TrackerFeaturizer):
"""Slices the tracker history into max_history batches.
Creates training data that uses last output for prediction.
Training data is padded up to the max_history with -1.
"""
MAX_HISTORY_DEFAULT = 5
def __init__(
self,
state_featurizer: Optional[SingleStateFeaturizer] = None,
max_history: int = None,
remove_duplicates: bool = True,
use_intent_probabilities: bool = False,
) -> None:
super(MaxHistoryTrackerFeaturizer, self).__init__(
state_featurizer, use_intent_probabilities
)
self.max_history = max_history or self.MAX_HISTORY_DEFAULT
self.remove_duplicates = remove_duplicates
@staticmethod
def slice_state_history(
states: List[Dict[Text, float]], slice_length: int
) -> List[Optional[Dict[Text, float]]]:
"""Slices states from the trackers history.
If the slice is at the array borders, padding will be added to ensure
the slice length.
"""
slice_end = len(states)
slice_start = max(0, slice_end - slice_length)
padding = [None] * max(0, slice_length - slice_end)
# noinspection PyTypeChecker
state_features = padding + states[slice_start:]
return state_features
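        # Example (illustrative): slice_state_history([{"a": 1.0}, {"b": 1.0}], 4)
        # returns [None, None, {"a": 1.0}, {"b": 1.0}]; the left padding keeps every
        # slice at the fixed max_history length.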
@staticmethod
def _hash_example(states, action):
"""Hash states for efficient deduplication."""
frozen_states = tuple(
(s if s is None else frozenset(s.items()) for s in states)
)
frozen_actions = (action,)
return hash((frozen_states, frozen_actions))
def training_states_and_actions(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> Tuple[List[List[Optional[Dict[Text, float]]]], List[List[Text]]]:
"""Transforms list of trackers to lists of states and actions.
Training data is padded up to the max_history with -1.
"""
trackers_as_states = []
trackers_as_actions = []
# from multiple states that create equal featurizations
# we only need to keep one.
hashed_examples = set()
logger.debug(
"Creating states and action examples from "
"collected trackers (by {}({}))..."
"".format(type(self).__name__, type(self.state_featurizer).__name__)
)
pbar = tqdm(trackers, desc="Processed trackers", disable=is_logging_disabled())
for tracker in pbar:
states = self._create_states(tracker, domain, True)
idx = 0
for event in tracker.applied_events():
if isinstance(event, ActionExecuted):
if not event.unpredictable:
# only actions which can be
# predicted at a stories start
sliced_states = self.slice_state_history(
states[: idx + 1], self.max_history
)
if self.remove_duplicates:
hashed = self._hash_example(
sliced_states, event.action_name
)
# only continue with tracker_states that created a
# hashed_featurization we haven't observed
if hashed not in hashed_examples:
hashed_examples.add(hashed)
trackers_as_states.append(sliced_states)
trackers_as_actions.append([event.action_name])
else:
trackers_as_states.append(sliced_states)
trackers_as_actions.append([event.action_name])
pbar.set_postfix(
{"# actions": "{:d}".format(len(trackers_as_actions))}
)
idx += 1
logger.debug("Created {} action examples.".format(len(trackers_as_actions)))
return trackers_as_states, trackers_as_actions
def prediction_states(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> List[List[Dict[Text, float]]]:
"""Transforms list of trackers to lists of states for prediction."""
trackers_as_states = [
self._create_states(tracker, domain) for tracker in trackers
]
trackers_as_states = [
self.slice_state_history(states, self.max_history)
for states in trackers_as_states
]
return trackers_as_states
|
the-stack_0_3641 | import wx
DisplayCrosshairsEventType = wx.NewEventType()
DisplayMagnifierEventType = wx.NewEventType()
FitToPageEventType = wx.NewEventType()
SetNumarrayEventType = wx.NewEventType()
ScaleSizeEventType = wx.NewEventType()
ScaleValuesEventType = wx.NewEventType()
EVT_DISPLAY_CROSSHAIRS = wx.PyEventBinder(DisplayCrosshairsEventType)
EVT_DISPLAY_MAGNIFIER = wx.PyEventBinder(DisplayMagnifierEventType)
EVT_FIT_TO_PAGE = wx.PyEventBinder(FitToPageEventType)
EVT_SET_NUMARRAY = wx.PyEventBinder(SetNumarrayEventType)
EVT_SCALE_SIZE = wx.PyEventBinder(ScaleSizeEventType)
EVT_SCALE_VALUES = wx.PyEventBinder(ScaleValuesEventType)
class DisplayCrosshairsEvent(wx.PyCommandEvent):
def __init__(self, source, display):
wx.PyCommandEvent.__init__(self, DisplayCrosshairsEventType,
source.GetId())
self.SetEventObject(source)
self.display = display
class DisplayMagnifierEvent(wx.PyCommandEvent):
def __init__(self, source, display):
wx.PyCommandEvent.__init__(self, DisplayMagnifierEventType,
source.GetId())
self.SetEventObject(source)
self.display = display
class FitToPageEvent(wx.PyCommandEvent):
def __init__(self, source):
wx.PyCommandEvent.__init__(self, FitToPageEventType, source.GetId())
self.SetEventObject(source)
class SetNumarrayEvent(wx.PyCommandEvent):
def __init__(self, source, array):
wx.PyCommandEvent.__init__(self, SetNumarrayEventType, source.GetId())
self.SetEventObject(source)
self.array = array
def GetNumarray(self):
return self.array
class ScaleSizeEvent(wx.PyCommandEvent):
def __init__(self, source, scale):
wx.PyCommandEvent.__init__(self, ScaleSizeEventType, source.GetId())
self.SetEventObject(source)
self.scale = scale
def GetScale(self):
return self.scale
class ScaleValuesEvent(wx.PyCommandEvent):
def __init__(self, source, valuerange):
wx.PyCommandEvent.__init__(self, ScaleValuesEventType, source.GetId())
self.SetEventObject(source)
self.valuerange = valuerange
def GetValueRange(self):
return self.valuerange
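# Usage sketch (illustrative): a producer widget can announce a new scale factor
# with `wx.PostEvent(widget.GetEventHandler(), ScaleSizeEvent(widget, 2.0))`,
# and a consumer subscribes via `panel.Bind(EVT_SCALE_SIZE, on_scale)`, reading
# the value with `event.GetScale()` inside the handler.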
|
the-stack_0_3642 | from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import textattack
from textattack.commands import TextAttackCommand
from textattack.commands.attack.attack_args import *
from textattack.commands.augment import AUGMENTATION_RECIPE_NAMES
def _cb(s):
return textattack.shared.utils.color_text(str(s), color="blue", method="ansi")
class ListThingsCommand(TextAttackCommand):
"""
The list module:
List default things in textattack.
"""
def _list(self, list_of_things):
""" Prints a list or dict of things. """
if isinstance(list_of_things, list):
list_of_things = sorted(list_of_things)
for thing in list_of_things:
print(_cb(thing))
elif isinstance(list_of_things, dict):
for thing in sorted(list_of_things.keys()):
thing_long_description = list_of_things[thing]
print(f"{_cb(thing)} ({thing_long_description})")
else:
raise TypeError(f"Cannot print list of type {type(list_of_things)}")
@staticmethod
def things():
list_dict = {}
list_dict["models"] = list(HUGGINGFACE_DATASET_BY_MODEL.keys()) + list(
TEXTATTACK_DATASET_BY_MODEL.keys()
)
list_dict["search-methods"] = SEARCH_METHOD_CLASS_NAMES
list_dict["transformations"] = {
**BLACK_BOX_TRANSFORMATION_CLASS_NAMES,
**WHITE_BOX_TRANSFORMATION_CLASS_NAMES,
}
list_dict["constraints"] = CONSTRAINT_CLASS_NAMES
list_dict["goal-functions"] = GOAL_FUNCTION_CLASS_NAMES
list_dict["attack-recipes"] = ATTACK_RECIPE_NAMES
list_dict["augmentation-recipes"] = AUGMENTATION_RECIPE_NAMES
return list_dict
def run(self, args):
try:
list_of_things = ListThingsCommand.things()[args.feature]
except KeyError:
            raise ValueError(f"Unknown list key {args.feature}")
self._list(list_of_things)
@staticmethod
def register_subcommand(main_parser: ArgumentParser):
parser = main_parser.add_parser(
"list",
help="list features in TextAttack",
formatter_class=ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"feature", help=f"the feature to list", choices=ListThingsCommand.things()
)
parser.set_defaults(func=ListThingsCommand())
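        # Usage sketch (illustrative): once registered, running
        # `textattack list attack-recipes` sets args.feature = "attack-recipes"
        # and run() prints every name in ATTACK_RECIPE_NAMES.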
|
the-stack_0_3644 | #!python3
from lib import queries
from tkinter import *
from tkinter import ttk
from lib.gui import elements
from lib.gui import dialog
from lib import xmlhandler  # assumed module path; FormXml() is used by MainFrame.new_form()
flag = 0
dbDict = {}
fDict = {}
class MainFrame(Toplevel):
def __init__(self, dbCon):
Toplevel.__init__(self)
self.geometry("800x600")
self.flag = flag
self.dbDict = dbDict
self.fDict = fDict
self.dbName = ''
self.dbCon = dbCon
# Initialize attribute options
self.ribbonVar = IntVar()
self.imgColVar = IntVar()
        self.dropdownChoices = [ 'From-To', 'Timespan', 'Ισοζύγιο' ]  # 'Ισοζύγιο' = "Balance"; kept in Greek so choice_ribbon() still matches
self.ribbonDropdownVar = StringVar()
self.ribbonType = 0
# Initialize the user interface
self.el = self.init_gui()
        if self.flag == 0:
self.dbDict = queries.loadDatabases(self.dbCon)
# Populate treeview with form names
for k, v in self.dbDict.items():
self.tree.insert('', 'end', iid=k, values=(k, v))
def init_gui(self):
"""Builds GUI."""
return elements.Elements.create_widgets(self)
def update_form(self):
formId = self.tree.focus()
        # Dialog text (Greek): "Are you sure you want to change form No. <formId>?"
        dl = dialog.MyDialog(self, 'Είστε σίγουροι ότι θέλετε να αλλάξετε τη φόρμα No.' + formId + ';').show()
if dl == True:
# INSERT PROGRESS BAR CALL HERE
queries.updateForm(self.dbCon, self.dbName, formId, self.check_ribbon(), self.ribbonType, self.check_imgCol())
def restore_form(self):
formId = self.tree.focus()
        # Dialog text (Greek): "Are you sure you want to restore form No. <formId>?"
        dl = dialog.MyDialog(self, 'Είστε σίγουροι ότι θέλετε να επαναφέρετε τη φόρμα No.' + formId + ';').show()
        if dl == True:
            # Same call signature as update_form(): database name plus the evaluated checkbox getters
            queries.updateForm(self.dbCon, self.dbName, formId, self.check_ribbon(), self.ribbonType, self.check_imgCol())
def new_form(self):
nextId = max(formIds) + 1
self.dbCon.executeScriptsFromFile("scripts\Insert_Form.sql")
xml = xmlhandler.FormXml()
for child in xml.get_xml():
print(child.tag, child.attrib)
def use_database(self):
isUsed = self.flag
if isUsed == 0:
selectedDatabaseId = self.tree.focus()
selectedDatabaseName = queries.getSelectedDatabaseName(self.dbCon, selectedDatabaseId)
self.flag = 1
for i in self.tree.get_children():
self.tree.delete(i)
self.fDict = queries.loadForms(self.dbCon, selectedDatabaseName)
# Populate treeview with form names
for k, v in self.fDict.items():
self.tree.insert('', 'end', iid=k, values=(k, v))
elements.Elements.changeText(self, 'Exit DB')
elements.Elements.showButtons(self)
self.dbName = selectedDatabaseName
elif isUsed == 1:
self.flag = 0
            self.dbDict = queries.loadDatabases(self.dbCon)
# Clear the tree
for i in self.tree.get_children():
self.tree.delete(i)
# Populate treeview with database names
for k, v in self.dbDict.items():
self.tree.insert('', 'end', iid=k, values=(k, v))
elements.Elements.changeText(self, 'Use DB')
elements.Elements.hideButtons(self)
def exit_manager(self):
self.quit()
# Checkbox methods for each attribute
def check_ribbon(self):
return self.ribbonVar.get()
def check_imgCol(self):
return self.imgColVar.get()
def choice_ribbon(self, value):
if value == 'From-To':
self.ribbonType = 0
elif value == 'Timespan':
self.ribbonType = 1
elif value == 'Ισοζύγιο':
self.ribbonType = 2
|
the-stack_0_3645 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '古溪'
import os
import numpy as np
from dataset.data_util import pil_load_img
from dataset.dataload import TextDataset, TextInstance
from util.io import read_lines
import cv2
class Ctw1500Text(TextDataset):
def __init__(self, data_root, is_training=True, transform=None, ignore_list=None):
super().__init__(transform, is_training)
self.data_root = data_root
self.is_training = is_training
self.image_root = os.path.join(data_root, 'train' if is_training else 'test', "text_image")
self.annotation_root = os.path.join(data_root, 'train' if is_training else 'test', "text_label_circum")
self.image_list = os.listdir(self.image_root)
self.annotation_list = ['{}'.format(img_name.replace('.jpg', '')) for img_name in self.image_list]
@staticmethod
def parse_carve_txt(gt_path):
"""
.mat file parser
:param gt_path: (str), mat file path
:return: (list), TextInstance
"""
lines = read_lines(gt_path + ".txt")
polygons = []
for line in lines:
# line = strs.remove_all(line.strip('\ufeff'), '\xef\xbb\xbf')
gt = list(map(int, line.split(',')))
pts = np.stack([gt[4::2], gt[5::2]]).T.astype(np.int32)
pts[:, 0] = pts[:, 0] + gt[0]
pts[:, 1] = pts[:, 1] + gt[1]
polygons.append(TextInstance(pts, 'c', "**"))
return polygons
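        # Line format inferred from the parsing above: each annotation line is a
        # comma-separated list of integers where gt[0], gt[1] give the box origin
        # and the values from index 4 onward are alternating x, y curve points
        # stored relative to that origin.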
def __getitem__(self, item):
image_id = self.image_list[item]
image_path = os.path.join(self.image_root, image_id)
# Read image data
image = pil_load_img(image_path)
try:
h, w, c = image.shape
assert(c == 3)
except:
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.array(image)
# Read annotation
annotation_id = self.annotation_list[item]
annotation_path = os.path.join(self.annotation_root, annotation_id)
polygons = self.parse_carve_txt(annotation_path)
return self.get_training_data(image, polygons, image_id=image_id, image_path=image_path)
def __len__(self):
return len(self.image_list)
if __name__ == '__main__':
from util.augmentation import Augmentation
from util.misc import regularize_sin_cos
from nmslib import lanms
from util.pbox import bbox_transfor_inv, minConnectPath
from util import canvas as cav
import time
means = (0.485, 0.456, 0.406)
stds = (0.229, 0.224, 0.225)
transform = Augmentation(
size=640, mean=means, std=stds
)
trainset = Ctw1500Text(
data_root='../data/ctw1500',
is_training=True,
transform=transform
)
# img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, meta = trainset[944]
for idx in range(0, len(trainset)):
t0 = time.time()
img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi = trainset[idx]
img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi \
= map(lambda x: x.cpu().numpy(), (img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi))
img = img.transpose(1, 2, 0)
img = ((img * stds + means) * 255).astype(np.uint8)
print(idx, img.shape)
top_map = radius_map[:, :, 0]
bot_map = radius_map[:, :, 1]
print(radius_map.shape)
sin_map, cos_map = regularize_sin_cos(sin_map, cos_map)
ret, labels = cv2.connectedComponents(tcl_mask[:, :, 0].astype(np.uint8), connectivity=8)
cv2.imshow("labels0", cav.heatmap(np.array(labels * 255 / np.max(labels), dtype=np.uint8)))
print(np.sum(tcl_mask[:, :, 1]))
t0 = time.time()
for bbox_idx in range(1, ret):
bbox_mask = labels == bbox_idx
text_map = tcl_mask[:, :, 0] * bbox_mask
boxes = bbox_transfor_inv(radius_map, sin_map, cos_map, text_map, wclip=(2, 8))
# nms
boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), 0.25)
boxes = boxes[:, :8].reshape((-1, 4, 2)).astype(np.int32)
if boxes.shape[0] > 1:
center = np.mean(boxes, axis=1).astype(np.int32).tolist()
paths, routes_path = minConnectPath(center)
boxes = boxes[routes_path]
top = np.mean(boxes[:, 0:2, :], axis=1).astype(np.int32).tolist()
bot = np.mean(boxes[:, 2:4, :], axis=1).astype(np.int32).tolist()
boundary_point = top + bot[::-1]
# for index in routes:
for ip, pp in enumerate(top):
if ip == 0:
color = (0, 255, 255)
elif ip == len(top) - 1:
color = (255, 255, 0)
else:
color = (0, 0, 255)
cv2.circle(img, (int(pp[0]), int(pp[1])), 2, color, -1)
for ip, pp in enumerate(bot):
if ip == 0:
color = (0, 255, 255)
elif ip == len(top) - 1:
color = (255, 255, 0)
else:
color = (0, 255, 0)
cv2.circle(img, (int(pp[0]), int(pp[1])), 2, color, -1)
cv2.drawContours(img, [np.array(boundary_point)], -1, (0, 255, 255), 1)
# print("nms time: {}".format(time.time() - t0))
# # cv2.imshow("", img)
# # cv2.waitKey(0)
# print(meta["image_id"])
cv2.imshow('imgs', img)
cv2.imshow("", cav.heatmap(np.array(labels * 255 / np.max(labels), dtype=np.uint8)))
cv2.imshow("tr_mask", cav.heatmap(np.array(tr_mask * 255 / np.max(tr_mask), dtype=np.uint8)))
cv2.imshow("tcl_mask",
cav.heatmap(np.array(tcl_mask[:, :, 1] * 255 / np.max(tcl_mask[:, :, 1]), dtype=np.uint8)))
# cv2.imshow("top_map", cav.heatmap(np.array(top_map * 255 / np.max(top_map), dtype=np.uint8)))
# cv2.imshow("bot_map", cav.heatmap(np.array(bot_map * 255 / np.max(bot_map), dtype=np.uint8)))
cv2.waitKey(0)
|
the-stack_0_3646 | from functools import partial
import torch.nn as nn
from detectron2.layers import (BatchNorm2d, NaiveSyncBatchNorm,
FrozenBatchNorm2d)
from detectron2.utils import env
def get_norm(norm, out_channels, **kwargs):
"""
Args:
norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
or a callable that takes a channel number and returns
the normalization layer as a nn.Module.
kwargs: Additional parameters in normalization layers,
such as, eps, momentum
Returns:
nn.Module or None: the normalization layer
"""
if norm is None:
return None
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": BatchNorm2d,
# Fixed in https://github.com/pytorch/pytorch/pull/36382
"SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (
1, 5) else nn.SyncBatchNorm,
"FrozenBN": FrozenBatchNorm2d,
"GN": lambda channels: nn.GroupNorm(32, channels),
# for debugging:
"nnSyncBN": nn.SyncBatchNorm,
"naiveSyncBN": NaiveSyncBatchNorm,
}[norm]
return norm(out_channels, **kwargs)
def get_activation(activation):
"""
Only support `ReLU` and `LeakyReLU` now.
Args:
activation (str or callable):
Returns:
nn.Module: the activation layer
"""
act = {
"ReLU": nn.ReLU,
"LeakyReLU": nn.LeakyReLU,
}[activation]
if activation == "LeakyReLU":
act = partial(act, negative_slope=0.1)
return act(inplace=True)
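
# Illustrative usage sketch (not part of the original module). The layer choice
# ("BN", "ReLU") and the tensor shapes below are assumptions made up for this
# example; any key accepted by get_norm / get_activation would work the same way.
if __name__ == "__main__":
    import torch

    block = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False),
        get_norm("BN", 16),       # -> BatchNorm2d(16)
        get_activation("ReLU"),   # -> nn.ReLU(inplace=True)
    )
    out = block(torch.randn(2, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([2, 16, 32, 32])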
|
the-stack_0_3647 | """
In this lesson, we'll cover two more advanced data types: lists and dictionaries.
Let's start with lists.
Lists are a great way to store a bunch of stuff in a collection. Here's an example:
"""
myList = ["foo", 5, 7, True, "hello", False, -1]
"""
This list contains a bunch of things. In other languages, making lists (often also
called 'arrays') with different datatypes (our list has integers, strings, and
booleans) is a bit harder. Python ends up taking care of stuff for you under
the hood, but in languages like Java or C you'll have to do it yourself.
For a list to be helpful, we need to be able to grab or manipulate the values
in it. To get the values out of the list, we can 'index' them. Indexing them is
basically just saying "give me the x'th item in this list". In computer science,
arrays start at the 0'th index. So if I want to grab the first thing in the list,
I really have to index the zero'th thing in the list.
"""
# print(myList) # Prints the whole list
# print(myList[0]) # Gets the element at index 0, which is the first element.
# print(myList[4]) # Gets the element at index 4, which is the fifth element.
# print(myList[-1]) # Gets the last element (the -1 kind of 'wraps around')
"""
Let's take a closer look at what we did here: myList[index_number]. We can actually
do a couple more things with this. The formal notation for this is
myList[start:end:step], so you can actually specify exactly what parts of the
list you want. Here are some examples:
"""
# print(myList[0:4]) # Gets the elements 0, 1, 2, 3. Note: the 'end' is not inclusive
# print(myList[3:4]) # Gets only the element at index 3 (the fourth element), because the end is not inclusive
# print(myList[:5]) # Gets the first five elements, the 0 for 'start' is assumed.
# print(myList[::2]) # Here we do not specify a start or end, but we say 'step' is 2,
# so we get every other element.
# print(myList[::-1]) # Prints the reverse of the list because 'step' is -1
"""
Now that we've covered some list indexing and slicing, let's see what else we can do
with lists.
"""
# myList.pop(0) # Removes the item of the list at the 0'th index.
# print(myList) # Now the list is missing its first item, "foo"!
# print(len(myList)) # Prints the length of the list
"""
Now that we have lists under our belt, let's move on to dictionaries. Dictionaries
are similar to lists in that we can store a bunch of values, but dictionaries store
direct mappings of keys to values. Here's an example:
"""
addresses = {
"John Smith": "123 Technology Drive",
"Jane Doe": "615 Main Street",
"George Washington": "923 Providence Circle"
}
"""
There are a couple of things to notice here. First, we use curly braces '{' instead of
the square brackets '[' for lists. Second, we have to have colons between the keys
and values. In our case, the names are called the keys and the addresses are the
values. Lastly, we need commas in between the key-value pairs. Once we have a
dictionary, we can access the values with their keys like this:
"""
# print(addresses["John Smith"])
# print(addresses["Jane Doe"])
"""
As you can see, you can use the same kind of format as lists, but passing in the key
instead of the list index. You can also set the values this way, like this:
"""
# addresses["John Smith"] = "322 1st Street" # Overwrites the value at key "John Doe"
# print(addresses["John Smith"])
# addresses["Adam Jones"] = "817 42nd Street South" # We can add new key-value pairs
# like this too
# print(addresses)
"""
Here are some more things you can do with dictionaries.
"""
# addresses.pop("John Smith") # Removes a key-value pair
# print(addresses)
addresses.update({
"Mark Howard": "123 Seymour Drive",
"Carol Smith": "512 Arden Way"
}) # Can add any number of key-value pairs into the dictionary, or 'combine' two
# print(addresses)
# print(addresses.keys()) # Gets a list of all of the keys in the dictionary
# print(addresses.values()) # Gets a list of all of the values in the dictionary
"""
To hammer a lot of this information in, take a look at "My Files/Python/3 - Practice.py"
for some practice problems.
"""
|
the-stack_0_3648 | """
This schema represents all known key/value pairs for the builder config file.
"""
from strictyaml import (
load,
Map,
MapPattern,
Str,
Int,
Float,
Seq,
YAMLError,
Optional,
Bool
)
stat_schema = Seq(
Map({
"name": Str(),
"tag": Str(),
"values": Seq(
Map({
"name": Str(),
"value": Int() | Float(),
Optional("nominalValue"): Int() | Float(),
Optional("linkedValue"): Int() | Float(),
Optional("rangeMinValue"): Int() | Float(),
Optional("rangeMaxValue"): Int() | Float(),
Optional("flags"): Int()
})
)
}),
)
stat_format4_schema = Seq(
Map({
"name": Str(),
Optional("flags"): Int(),
"location": MapPattern(Str(), Int() | Float()),
})
)
instance_schema = MapPattern(Str(), Seq(
Map({
Optional("familyName"): Str(),
Optional("styleName"): Str(),
"coordinates": MapPattern(Str(), Int() | Float()),
})
))
schema = Map(
{
"sources": Seq(Str()),
Optional("vttSources"): MapPattern(Str(), Str()),
Optional("logLevel"): Str(),
Optional("stylespaceFile"): Str(),
Optional("stat"): stat_schema | MapPattern(Str(), stat_schema),
Optional("statFormat4"): stat_format4_schema | MapPattern(Str(), stat_format4_schema),
Optional("familyName"): Str(),
Optional("includeSourceFixes"): Bool(),
Optional("stylespaceFile"): Str(),
Optional("instances"): instance_schema,
Optional("buildVariable"): Bool(),
Optional("buildStatic"): Bool(),
Optional("buildOTF"): Bool(),
Optional("buildTTF"): Bool(),
Optional("buildWebfont"): Bool(),
Optional("outputDir"): Str(),
Optional("vfDir"): Str(),
Optional("ttDir"): Str(),
Optional("otDir"): Str(),
Optional("woffDir"): Str(),
Optional("cleanUp"): Bool(),
Optional("autohintTTF"): Bool(),
Optional("axisOrder"): Seq(Str()),
Optional("flattenComponents"): Bool(),
Optional("decomposeTransformedComponents"): Bool(),
}
)
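
# Illustrative sketch (not part of the original module): validating a builder
# config against the schema above with strictyaml's load(). The file contents
# below are assumptions made up for this example.
if __name__ == "__main__":
    example_config = """\
sources:
  - sources/MyFamily-Regular.ufo
buildVariable: true
outputDir: fonts
"""
    try:
        config = load(example_config, schema)
        print(config.data)  # plain dicts/lists/strings once validation passes
    except YAMLError as err:
        print("config failed validation:", err)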
|
the-stack_0_3650 | from functools import wraps
from inspect import iscoroutinefunction
import falcon
try:
import jsonschema
except ImportError: # pragma: nocover
pass
def validate(req_schema=None, resp_schema=None, is_async=False):
"""Decorator for validating ``req.media`` using JSON Schema.
This decorator provides standard JSON Schema validation via the
``jsonschema`` package available from PyPI. Semantic validation via
the *format* keyword is enabled for the default checkers implemented
by ``jsonschema.FormatChecker``.
Note:
        The ``jsonschema`` package must be installed separately in order to use
this decorator, as Falcon does not install it by default.
See `json-schema.org <http://json-schema.org/>`_ for more
information on defining a compatible dictionary.
Keyword Args:
req_schema (dict): A dictionary that follows the JSON
Schema specification. The request will be validated against this
schema.
resp_schema (dict): A dictionary that follows the JSON
Schema specification. The response will be validated against this
schema.
is_async (bool): Set to ``True`` for ASGI apps to provide a hint that
the decorated responder is a coroutine function (i.e., that it
is defined with ``async def``) or that it returns an awaitable
coroutine object.
Normally, when the function source is declared using ``async def``,
the resulting function object is flagged to indicate it returns a
coroutine when invoked, and this can be automatically detected.
However, it is possible to use a regular function to return an
awaitable coroutine object, in which case a hint is required to let
the framework know what to expect. Also, a hint is always required
when using a cythonized coroutine function, since Cython does not
flag them in a way that can be detected in advance, even when the
function is declared using ``async def``.
Example:
.. code:: python
from falcon.media.validators import jsonschema
# -- snip --
@jsonschema.validate(my_post_schema)
def on_post(self, req, resp):
# -- snip --
"""
def decorator(func):
if iscoroutinefunction(func) or is_async:
return _validate_async(func, req_schema, resp_schema)
return _validate(func, req_schema, resp_schema)
return decorator
def _validate(func, req_schema=None, resp_schema=None):
@wraps(func)
def wrapper(self, req, resp, *args, **kwargs):
if req_schema is not None:
try:
jsonschema.validate(
req.media, req_schema,
format_checker=jsonschema.FormatChecker()
)
except jsonschema.ValidationError as e:
raise falcon.HTTPBadRequest(
'Request data failed validation',
description=e.message
)
result = func(self, req, resp, *args, **kwargs)
if resp_schema is not None:
try:
jsonschema.validate(
resp.media, resp_schema,
format_checker=jsonschema.FormatChecker()
)
except jsonschema.ValidationError:
raise falcon.HTTPInternalServerError(
'Response data failed validation'
# Do not return 'e.message' in the response to
# prevent info about possible internal response
# formatting bugs from leaking out to users.
)
return result
return wrapper
def _validate_async(func, req_schema=None, resp_schema=None):
@wraps(func)
async def wrapper(self, req, resp, *args, **kwargs):
if req_schema is not None:
m = await req.get_media()
try:
jsonschema.validate(
m, req_schema,
format_checker=jsonschema.FormatChecker()
)
except jsonschema.ValidationError as e:
raise falcon.HTTPBadRequest(
'Request data failed validation',
description=e.message
)
result = await func(self, req, resp, *args, **kwargs)
if resp_schema is not None:
try:
jsonschema.validate(
resp.media, resp_schema,
format_checker=jsonschema.FormatChecker()
)
except jsonschema.ValidationError:
raise falcon.HTTPInternalServerError(
'Response data failed validation'
# Do not return 'e.message' in the response to
# prevent info about possible internal response
# formatting bugs from leaking out to users.
)
return result
return wrapper
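
# Illustrative sketch (not part of the original module): applying the decorator
# with both a request and a response schema. The resource, schema contents, and
# field names below are assumptions made up for this example.
_example_req_schema = {
    'type': 'object',
    'properties': {'name': {'type': 'string'}},
    'required': ['name'],
}
_example_resp_schema = {
    'type': 'object',
    'properties': {'greeting': {'type': 'string'}},
    'required': ['greeting'],
}


class _ExamplePingResource:
    @validate(req_schema=_example_req_schema, resp_schema=_example_resp_schema)
    def on_post(self, req, resp):
        resp.media = {'greeting': 'hello, {}'.format(req.media['name'])}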
|
the-stack_0_3651 | import collections
import copy
import functools
import logging
import sys
import six
from jsonschema import Draft4Validator, ValidationError, draft4_format_checker
from werkzeug.datastructures import FileStorage
from ..exceptions import ExtraParameterProblem
from ..http_facts import FORM_CONTENT_TYPES
from ..json_schema import Draft4RequestValidator, Draft4ResponseValidator
from ..problem import problem
from ..utils import all_json, boolean, is_json_mimetype, is_null, is_nullable
logger = logging.getLogger('connexion.decorators.validation')
TYPE_MAP = {
'integer': int,
'number': float,
'boolean': boolean
}
class TypeValidationError(Exception):
def __init__(self, schema_type, parameter_type, parameter_name):
"""
Exception raise when type validation fails
:type schema_type: str
:type parameter_type: str
:type parameter_name: str
:return:
"""
self.schema_type = schema_type
self.parameter_type = parameter_type
self.parameter_name = parameter_name
def __str__(self):
msg = "Wrong type, expected '{schema_type}' for {parameter_type} parameter '{parameter_name}'"
return msg.format(**vars(self))
def coerce_type(param, value, parameter_type, parameter_name=None):
def make_type(value, type_literal):
type_func = TYPE_MAP.get(type_literal)
return type_func(value)
param_schema = param.get("schema", param)
if is_nullable(param_schema) and is_null(value):
return None
param_type = param_schema.get('type')
parameter_name = parameter_name if parameter_name else param.get('name')
if param_type == "array":
converted_params = []
for v in value:
try:
converted = make_type(v, param_schema["items"]["type"])
except (ValueError, TypeError):
converted = v
converted_params.append(converted)
return converted_params
else:
try:
return make_type(value, param_type)
except ValueError:
raise TypeValidationError(param_type, parameter_type, parameter_name)
except TypeError:
return value
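
# Illustrative behaviour of coerce_type (not part of the original module). The
# parameter definitions are made-up OpenAPI-style snippets:
#
#   coerce_type({'name': 'page', 'type': 'integer'}, '3', 'query')
#   -> 3
#   coerce_type({'name': 'ids', 'type': 'array', 'items': {'type': 'integer'}}, ['1', '2'], 'query')
#   -> [1, 2]
#   coerce_type({'name': 'page', 'type': 'integer'}, 'abc', 'query')
#   -> raises TypeValidationError: "Wrong type, expected 'integer' for query parameter 'page'"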
def validate_parameter_list(request_params, spec_params):
request_params = set(request_params)
spec_params = set(spec_params)
return request_params.difference(spec_params)
class RequestBodyValidator(object):
def __init__(self, schema, consumes, api, is_null_value_valid=False, validator=None,
strict_validation=False):
"""
:param schema: The schema of the request body
:param consumes: The list of content types the operation consumes
:param is_null_value_valid: Flag to indicate if null is accepted as valid value.
:param validator: Validator class that should be used to validate passed data
against API schema. Default is jsonschema.Draft4Validator.
:type validator: jsonschema.IValidator
:param strict_validation: Flag indicating if parameters not in spec are allowed
"""
self.consumes = consumes
self.schema = schema
self.has_default = schema.get('default', False)
self.is_null_value_valid = is_null_value_valid
validatorClass = validator or Draft4RequestValidator
self.validator = validatorClass(schema, format_checker=draft4_format_checker)
self.api = api
self.strict_validation = strict_validation
def validate_formdata_parameter_list(self, request):
request_params = request.form.keys()
spec_params = self.schema.get('properties', {}).keys()
return validate_parameter_list(request_params, spec_params)
def __call__(self, function):
"""
:type function: types.FunctionType
:rtype: types.FunctionType
"""
@functools.wraps(function)
def wrapper(request):
if all_json(self.consumes):
data = request.json
empty_body = not(request.body or request.form or request.files)
if data is None and not empty_body and not self.is_null_value_valid:
try:
ctype_is_json = is_json_mimetype(request.headers.get("Content-Type", ""))
except ValueError:
ctype_is_json = False
if ctype_is_json:
# Content-Type is json but actual body was not parsed
return problem(400,
"Bad Request",
"Request body is not valid JSON"
)
else:
# the body has contents that were not parsed as JSON
return problem(415,
"Unsupported Media Type",
"Invalid Content-type ({content_type}), expected JSON data".format(
content_type=request.headers.get("Content-Type", "")
))
logger.debug("%s validating schema...", request.url)
error = self.validate_schema(data, request.url)
if error and not self.has_default:
return error
elif self.consumes[0] in FORM_CONTENT_TYPES:
data = dict(request.form.items()) or (request.body if len(request.body) > 0 else {})
data.update(dict.fromkeys(request.files, '')) # validator expects string..
logger.debug('%s validating schema...', request.url)
if self.strict_validation:
formdata_errors = self.validate_formdata_parameter_list(request)
if formdata_errors:
raise ExtraParameterProblem(formdata_errors, [])
if data:
props = self.schema.get("properties", {})
errs = []
for k, param_defn in props.items():
if k in data:
try:
data[k] = coerce_type(param_defn, data[k], 'requestBody', k)
except TypeValidationError as e:
errs += [str(e)]
if errs:
return problem(400, 'Bad Request', errs)
error = self.validate_schema(data, request.url)
if error:
return error
response = function(request)
return response
return wrapper
def validate_schema(self, data, url):
# type: (dict, AnyStr) -> Union[ConnexionResponse, None]
if self.is_null_value_valid and is_null(data):
return None
try:
self.validator.validate(data)
except ValidationError as exception:
logger.error("{url} validation error: {error}".format(url=url,
error=exception.message),
extra={'validator': 'body'})
return problem(400, 'Bad Request', str(exception.message))
return None
class ResponseBodyValidator(object):
def __init__(self, schema, validator=None):
"""
:param schema: The schema of the response body
:param validator: Validator class that should be used to validate passed data
against API schema. Default is jsonschema.Draft4Validator.
:type validator: jsonschema.IValidator
"""
ValidatorClass = validator or Draft4ResponseValidator
self.validator = ValidatorClass(schema, format_checker=draft4_format_checker)
def validate_schema(self, data, url):
# type: (dict, AnyStr) -> Union[ConnexionResponse, None]
try:
self.validator.validate(data)
except ValidationError as exception:
logger.error("{url} validation error: {error}".format(url=url,
error=exception),
extra={'validator': 'response'})
six.reraise(*sys.exc_info())
return None
class ParameterValidator(object):
def __init__(self, parameters, api, strict_validation=False):
"""
:param parameters: List of request parameter dictionaries
:param api: api that the validator is attached to
:param strict_validation: Flag indicating if parameters not in spec are allowed
"""
self.parameters = collections.defaultdict(list)
for p in parameters:
self.parameters[p['in']].append(p)
self.api = api
self.strict_validation = strict_validation
@staticmethod
def validate_parameter(parameter_type, value, param, param_name=None):
if value is not None:
if is_nullable(param) and is_null(value):
return
try:
converted_value = coerce_type(param, value, parameter_type, param_name)
except TypeValidationError as e:
return str(e)
param = copy.deepcopy(param)
if 'required' in param:
del param['required']
try:
if parameter_type == 'formdata' and param.get('type') == 'file':
Draft4Validator(
param,
format_checker=draft4_format_checker,
types={'file': FileStorage}).validate(converted_value)
else:
Draft4Validator(
param, format_checker=draft4_format_checker).validate(converted_value)
except ValidationError as exception:
debug_msg = 'Error while converting value {converted_value} from param ' \
'{type_converted_value} of type real type {param_type} to the declared type {param}'
fmt_params = dict(
converted_value=str(converted_value),
type_converted_value=type(converted_value),
param_type=param.get('type'),
param=param
)
logger.info(debug_msg.format(**fmt_params))
return str(exception)
elif param.get('required'):
return "Missing {parameter_type} parameter '{param[name]}'".format(**locals())
def validate_query_parameter_list(self, request):
request_params = request.query.keys()
spec_params = [x['name'] for x in self.parameters.get('query', [])]
return validate_parameter_list(request_params, spec_params)
def validate_formdata_parameter_list(self, request):
request_params = request.form.keys()
spec_params = [x['name'] for x in self.parameters.get('formData', [])]
return validate_parameter_list(request_params, spec_params)
def validate_query_parameter(self, param, request):
"""
Validate a single query parameter (request.args in Flask)
:type param: dict
:rtype: str
"""
val = request.query.get(param['name'])
return self.validate_parameter('query', val, param)
def validate_path_parameter(self, param, request):
val = request.path_params.get(param['name'].replace('-', '_'))
return self.validate_parameter('path', val, param)
def validate_header_parameter(self, param, request):
val = request.headers.get(param['name'])
return self.validate_parameter('header', val, param)
def validate_formdata_parameter(self, param_name, param, request):
if param.get('type') == 'file' or param.get('format') == 'binary':
val = request.files.get(param_name)
else:
val = request.form.get(param_name)
return self.validate_parameter('formdata', val, param)
def __call__(self, function):
"""
:type function: types.FunctionType
:rtype: types.FunctionType
"""
@functools.wraps(function)
def wrapper(request):
logger.debug("%s validating parameters...", request.url)
if self.strict_validation:
query_errors = self.validate_query_parameter_list(request)
formdata_errors = self.validate_formdata_parameter_list(request)
if formdata_errors or query_errors:
raise ExtraParameterProblem(formdata_errors, query_errors)
for param in self.parameters.get('query', []):
error = self.validate_query_parameter(param, request)
if error:
response = problem(400, 'Bad Request', error)
return self.api.get_response(response)
for param in self.parameters.get('path', []):
error = self.validate_path_parameter(param, request)
if error:
response = problem(400, 'Bad Request', error)
return self.api.get_response(response)
for param in self.parameters.get('header', []):
error = self.validate_header_parameter(param, request)
if error:
response = problem(400, 'Bad Request', error)
return self.api.get_response(response)
for param in self.parameters.get('formData', []):
error = self.validate_formdata_parameter(param["name"], param, request)
if error:
response = problem(400, 'Bad Request', error)
return self.api.get_response(response)
return function(request)
return wrapper
|
the-stack_0_3653 | import os
from decouple import config
from flask import Flask, render_template
from core.model import cotacoes
def create_app():
app = Flask('core')
app.config["SECRET_KEY"] = config('SECRET_KEY')
@app.route('/')
def home():
dicionario = cotacoes.cotar()
if dicionario['sucesso']:
template_renderised = render_template("index.html", dicionario=dicionario)
else:
template_renderised = render_template('error.html', dicionario=dicionario)
return template_renderised
return app
if __name__ == '__main__':
app = create_app()
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
|
the-stack_0_3654 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""adam"""
import numpy as np
from mindspore.common import dtype as mstype
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from .optimizer import Optimizer
_learning_rate_update_func = ['linear', 'cos', 'sin']
adam_opt = C.MultitypeFuncGraph("adam_opt")
@adam_opt.register("Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Bool")
def _update_run_op(beta1, beta2, eps, lr, weight_decay_tensor, param, m, v, gradient, decay_flag):
"""
Update parameters.
Args:
beta1 (Tensor): The exponential decay rate for the 1st moment estimates. Should be in range (0.0, 1.0).
beta2 (Tensor): The exponential decay rate for the 2nd moment estimates. Should be in range (0.0, 1.0).
eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0.
lr (Tensor): Learning rate.
weight_decay_tensor (Tensor): Weight decay. Should be equal to or greater than 0.
param (Tensor): Parameters.
m (Tensor): m value of parameters.
v (Tensor): v value of parameters.
gradient (Tensor): Gradient of parameters.
Returns:
Tensor, the new value of v after updating.
"""
op_mul = P.Mul()
op_square = P.Square()
op_sqrt = P.Sqrt()
op_cast = P.Cast()
op_reshape = P.Reshape()
op_shape = P.Shape()
param_fp32 = op_cast(param, mstype.float32)
m_fp32 = op_cast(m, mstype.float32)
v_fp32 = op_cast(v, mstype.float32)
gradient_fp32 = op_cast(gradient, mstype.float32)
next_m = op_mul(beta1, m_fp32) + op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32) - beta1, gradient_fp32)
next_v = op_mul(beta2, v_fp32) + op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32)
- beta2, op_square(gradient_fp32))
update = next_m / (op_sqrt(next_v) + eps)
if decay_flag:
update = update + op_mul(weight_decay_tensor, param_fp32)
update_with_lr = op_mul(lr, update)
next_param = param_fp32 - op_reshape(update_with_lr, op_shape(param_fp32))
next_v = F.depend(next_v, F.assign(param, next_param))
next_v = F.depend(next_v, F.assign(m, next_m))
next_v = F.depend(next_v, F.assign(v, next_v))
return next_v
def _check_param_value(beta1, beta2, eps, weight_decay, prim_name):
"""Check the type of inputs."""
validator.check_value_type("beta1", beta1, [float], prim_name)
validator.check_value_type("beta2", beta2, [float], prim_name)
validator.check_value_type("eps", eps, [float], prim_name)
validator.check_value_type("weight_dacay", weight_decay, [float], prim_name)
validator.check_number_range("beta1", beta1, 0.0, 1.0, Rel.INC_NEITHER, prim_name)
validator.check_number_range("beta2", beta2, 0.0, 1.0, Rel.INC_NEITHER, prim_name)
validator.check_number_range("eps", eps, 0.0, float("inf"), Rel.INC_NEITHER, prim_name)
validator.check_number_range("weight_decay", weight_decay, 0.0, float("inf"), Rel.INC_LEFT, prim_name)
def _check_learning_rate_value(learning_rate, end_learning_rate, decay_steps, power, prim_name):
"""Check the type of inputs."""
validator.check_float_positive('learning_rate', learning_rate, prim_name)
validator.check_float_legal_value('learning_rate', learning_rate, prim_name)
validator.check_float_positive('end_learning_rate', end_learning_rate, prim_name)
validator.check_float_legal_value('end_learning_rate', end_learning_rate, prim_name)
validator.check_float_positive('power', power, prim_name)
validator.check_float_legal_value('power', power, prim_name)
validator.check_integer('decay_steps', decay_steps, 0, Rel.GT, prim_name)
@adam_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Number", "Tensor", "Tensor", "Tensor",
"Tensor")
def _run_opt_with_one_number(opt, lr, beta1_power, beta2_power, beta1, beta2, eps, gradient, params, moment1,
moment2):
"""Apply adam optimizer to the weight parameter using Tensor."""
success = True
success = F.depend(success, opt(params, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
eps, gradient))
return success
class Adam(Optimizer):
r"""
Updates gradients by Adaptive Moment Estimation (Adam) algorithm.
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m = \beta_1 * m + (1 - \beta_1) * g \\
v = \beta_2 * v + (1 - \beta_2) * g * g \\
l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
w = w - l * \frac{m}{\sqrt{v} + \epsilon}
\end{array}
:math:`m` represents the 1st moment vector `moment1`, :math:`v` represents the 2nd moment vector `moment2`,
:math:`g` represents `gradients`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent
`beta1` and `beta2`, :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent
`beta1_power` and `beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `params`,
:math:`\epsilon` represents `eps`.
Args:
params (list[Parameter]): A list of parameter, which will be updated. The element in `params`
should be class mindspore.Parameter.
learning_rate (Union[float, Tensor, Iterable]): A value for the learning rate. When the learning_rate is
Iterable or a Tensor and the dims of the Tensor is 1,
use dynamic learning rate, then the i-th step will
take the i-th value as the learning rate.
When the learning_rate is float or learning_rate is a Tensor
but the dims of the Tensor is 0, use fixed learning rate.
Other cases are not supported. Default: 1e-3.
beta1 (float): The exponential decay rate for the 1st moment estimates. Should be in range (0.0, 1.0). Default:
0.9.
beta2 (float): The exponential decay rate for the 2nd moment estimates. Should be in range (0.0, 1.0). Default:
0.999.
eps (float): Term added to the denominator to improve numerical stability. Should be greater than 0. Default:
1e-8.
use_locking (bool): Whether to enable a lock to protect updating variable tensors.
If True, updating of the var, m, and v tensors will be protected by a lock.
If False, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If True, updates the gradients using NAG.
If False, updates the gradients without using NAG. Default: False.
weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
loss_scale (float): A floating point value for the loss scale. Should be equal to or greater than 1. Default:
1.0.
decay_filter (Function): A function to determine whether to apply weight decay on parameters. Default:
            lambda x: 'beta' not in x.name and 'gamma' not in x.name.
Inputs:
- **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.
Outputs:
Tensor[bool], the value is True.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> optim = nn.Adam(params=net.trainable_params())
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
"""
def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,
use_nesterov=False, weight_decay=0.0, loss_scale=1.0,
decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name):
super(Adam, self).__init__(learning_rate, params, weight_decay, loss_scale, decay_filter)
_check_param_value(beta1, beta2, eps, weight_decay, self.cls_name)
validator.check_value_type("use_locking", use_locking, [bool], self.cls_name)
validator.check_value_type("use_nesterov", use_nesterov, [bool], self.cls_name)
validator.check_value_type("loss_scale", loss_scale, [float], self.cls_name)
validator.check_number_range("loss_scale", loss_scale, 1.0, float("inf"), Rel.INC_LEFT, self.cls_name)
self.beta1 = Tensor(beta1, mstype.float32)
self.beta2 = Tensor(beta2, mstype.float32)
self.beta1_power = Parameter(initializer(1, [1], mstype.float32), name="beta1_power")
self.beta2_power = Parameter(initializer(1, [1], mstype.float32), name="beta2_power")
self.eps = eps
self.moment1 = self.parameters.clone(prefix="moment1", init='zeros')
self.moment2 = self.parameters.clone(prefix="moment2", init='zeros')
self.hyper_map = C.HyperMap()
self.opt = P.Adam(use_locking, use_nesterov)
self.pow = P.Pow()
self.sqrt = P.Sqrt()
self.one = Tensor(np.array([1.0]).astype(np.float32))
self.realdiv = P.RealDiv()
def construct(self, gradients):
params = self.parameters
moment1 = self.moment1
moment2 = self.moment2
gradients = self.decay_weight(gradients)
gradients = self.scale_grad(gradients)
lr = self.get_lr()
beta1_power = self.beta1_power * self.beta1
self.beta1_power = beta1_power
beta2_power = self.beta2_power * self.beta2
self.beta2_power = beta2_power
success = self.hyper_map(F.partial(adam_opt, self.opt, lr, beta1_power, beta2_power, self.beta1,
self.beta2, self.eps),
gradients, params, moment1, moment2)
return success
class AdamWeightDecay(Optimizer):
"""
Implements Adam algorithm weight decay fix.
Args:
params (list[Parameter]): A list of parameter, which will be updated. The element in `params`
should be class mindspore.Parameter.
learning_rate (Union[float, Tensor, Iterable]): A value for the learning rate. When the learning_rate is
Iterable or a Tensor and the dims of the Tensor is 1,
use dynamic learning rate, then the i-th step will
take the i-th value as the learning rate.
When the learning_rate is float or learning_rate is a Tensor
but the dims of the Tensor is 0, use fixed learning rate.
Other cases are not supported. Default: 1e-3.
beta1 (float): The exponential decay rate for the 1st moment estimates. Default: 0.9.
Should be in range (0.0, 1.0).
beta2 (float): The exponential decay rate for the 2nd moment estimates. Default: 0.999.
Should be in range (0.0, 1.0).
eps (float): Term added to the denominator to improve numerical stability. Default: 1e-6.
Should be greater than 0.
weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
decay_filter (Function): A function to determine whether to apply weight decay on parameters. Default:
            lambda x: 'beta' not in x.name and 'gamma' not in x.name.
Inputs:
- **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.
Outputs:
tuple[Parameter], the updated velocity value, the shape is the same as `params`.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> optim = nn.AdamWeightDecay(params=net.trainable_params())
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
"""
def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0,
decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name):
super(AdamWeightDecay, self).__init__(learning_rate, params)
_check_param_value(beta1, beta2, eps, weight_decay, self.cls_name)
self.beta1 = Tensor(np.array([beta1]).astype(np.float32))
self.beta2 = Tensor(np.array([beta2]).astype(np.float32))
self.eps = Tensor(np.array([eps]).astype(np.float32))
self.weight_decay_tensor = Tensor(np.array([weight_decay]).astype(np.float32))
self.params = self.parameters
self.moments1 = self.params.clone(prefix="adam_m", init='zeros')
self.moments2 = self.params.clone(prefix="adam_v", init='zeros')
self.decay_flag = tuple(decay_filter(x) for x in self.params)
self.hyper_map = C.HyperMap()
def construct(self, gradients):
lr = self.get_lr()
updated_velocity = self.hyper_map(F.partial(adam_opt, self.beta1, self.beta2, self.eps, lr,
self.weight_decay_tensor),
self.params, self.moments1, self.moments2, gradients, self.decay_flag)
return updated_velocity
class AdamWeightDecayDynamicLR(Optimizer):
"""
Adam Weight Decay Dynamic Learning Rate (LR).
Args:
params (list[Parameter]): A list of parameter, which will be updated. The element in `params`
should be class mindspore.Parameter.
decay_steps (int): The steps of the decay.
learning_rate (float): A floating point value for the learning rate. Default: 0.001.
end_learning_rate (float): A floating point value for the end learning rate. Default: 0.0001.
power (float): Power. Default: 10.0.
beta1 (float): The exponential decay rate for the 1st moment estimates. Default: 0.9.
Should be in range (0.0, 1.0).
beta2 (float): The exponential decay rate for the 2nd moment estimates. Default: 0.999.
Should be in range (0.0, 1.0).
eps (float): Term added to the denominator to improve numerical stability. Default: 1e-6.
Should be greater than 0.
weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
decay_filter (Function): A function to determine whether to apply weight decay on parameters. Default:
            lambda x: 'beta' not in x.name and 'gamma' not in x.name.
Inputs:
- **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.
Outputs:
tuple[Parameter], the updated velocity value, the shape is the same as `params`.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> optim = nn.AdamWeightDecayDynamicLR(params=net.trainable_params(), decay_steps=10)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
"""
def __init__(self,
params,
decay_steps,
learning_rate=0.001,
end_learning_rate=0.0001,
power=10.0,
beta1=0.9,
beta2=0.999,
eps=1e-6,
weight_decay=0.0,
decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name):
super(AdamWeightDecayDynamicLR, self).__init__(learning_rate, params)
_check_param_value(beta1, beta2, eps, weight_decay, self.cls_name)
_check_learning_rate_value(learning_rate, end_learning_rate, decay_steps, power, self.cls_name)
# turn them to scalar when me support scalar/tensor mix operations
self.global_step = Parameter(initializer(0, [1]), name="global_step")
self.decay_steps = Tensor(np.array([decay_steps]).astype(np.float32))
self.end_learning_rate = Tensor(np.array([end_learning_rate]).astype(np.float32))
self.diff_learning_rate = Tensor(np.array([learning_rate - end_learning_rate]).astype(np.float32))
self.power = power
self.beta1 = Tensor(np.array([beta1]).astype(np.float32))
self.beta2 = Tensor(np.array([beta2]).astype(np.float32))
self.eps = Tensor(np.array([eps]).astype(np.float32))
self.weight_decay_tensor = Tensor(np.array([weight_decay]).astype(np.float32))
self.params = self.parameters
self.moments1 = self.params.clone(prefix="adam_m", init='zeros')
self.moments2 = self.params.clone(prefix="adam_v", init='zeros')
self.decay_flag = tuple(decay_filter(x) for x in self.params)
self.hyper_map = C.HyperMap()
self.min = P.Minimum()
self.pow = P.Pow()
self.one = Tensor(np.array([1.0]).astype(np.float32))
def construct(self, gradients):
step = self.min(self.global_step, self.decay_steps)
p = step / self.decay_steps
lr = self.diff_learning_rate * self.pow(self.one - p, self.power) + self.end_learning_rate
updated_velocity = self.hyper_map(F.partial(adam_opt, self.beta1, self.beta2, self.eps, lr,
self.weight_decay_tensor),
self.params, self.moments1, self.moments2, gradients, self.decay_flag)
added_global_step = self.global_step + self.one
F.control_depend(lr, added_global_step)
self.global_step = added_global_step
return updated_velocity
|
the-stack_0_3655 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
try:
from pandas._testing import makeMissingDataframe
except ImportError:
from pandas.util.testing import makeMissingDataframe
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.testing.pandasutils import PandasOnSparkTestCase, SPARK_CONF_ARROW_ENABLED
from pyspark.testing.sqlutils import SQLTestUtils
class StatsTest(PandasOnSparkTestCase, SQLTestUtils):
def _test_stat_functions(self, pdf_or_pser, psdf_or_psser):
functions = ["max", "min", "mean", "sum", "count"]
for funcname in functions:
self.assert_eq(getattr(psdf_or_psser, funcname)(), getattr(pdf_or_pser, funcname)())
functions = ["std", "var", "product", "sem"]
for funcname in functions:
self.assert_eq(
getattr(psdf_or_psser, funcname)(),
getattr(pdf_or_pser, funcname)(),
check_exact=False,
)
functions = ["std", "var", "sem"]
for funcname in functions:
self.assert_eq(
getattr(psdf_or_psser, funcname)(ddof=0),
getattr(pdf_or_pser, funcname)(ddof=0),
check_exact=False,
)
# NOTE: To test skew, kurt, and median, just make sure they run.
# The numbers are different in spark and pandas.
functions = ["skew", "kurt", "median"]
for funcname in functions:
getattr(psdf_or_psser, funcname)()
def test_stat_functions(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [1, 2, 3, 4], "C": [1, np.nan, 3, np.nan]})
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf.A, psdf.A)
self._test_stat_functions(pdf, psdf)
# empty
self._test_stat_functions(pdf.A.loc[[]], psdf.A.loc[[]])
self._test_stat_functions(pdf.loc[[]], psdf.loc[[]])
def test_stat_functions_multiindex_column(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf.A, psdf.A)
self._test_stat_functions(pdf, psdf)
def test_stat_functions_with_no_numeric_columns(self):
pdf = pd.DataFrame(
{
"A": ["a", None, "c", "d", None, "f", "g"],
"B": ["A", "B", "C", None, "E", "F", None],
}
)
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf, psdf)
def test_sum(self):
pdf = pd.DataFrame({"a": [1, 2, 3, np.nan], "b": [0.1, np.nan, 0.3, np.nan]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sum(), pdf.sum())
self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
self.assert_eq(psdf.sum(min_count=3), pdf.sum(min_count=3))
self.assert_eq(psdf.sum(axis=1, min_count=1), pdf.sum(axis=1, min_count=1))
self.assert_eq(psdf.loc[[]].sum(), pdf.loc[[]].sum())
self.assert_eq(psdf.loc[[]].sum(min_count=1), pdf.loc[[]].sum(min_count=1))
self.assert_eq(psdf["a"].sum(), pdf["a"].sum())
self.assert_eq(psdf["a"].sum(min_count=3), pdf["a"].sum(min_count=3))
self.assert_eq(psdf["b"].sum(min_count=3), pdf["b"].sum(min_count=3))
self.assert_eq(psdf["a"].loc[[]].sum(), pdf["a"].loc[[]].sum())
self.assert_eq(psdf["a"].loc[[]].sum(min_count=1), pdf["a"].loc[[]].sum(min_count=1))
def test_product(self):
pdf = pd.DataFrame(
{"a": [1, -2, -3, np.nan], "b": [0.1, np.nan, -0.3, np.nan], "c": [10, 20, 0, -10]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.product(), pdf.product(), check_exact=False)
self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
self.assert_eq(psdf.product(min_count=3), pdf.product(min_count=3), check_exact=False)
self.assert_eq(psdf.product(axis=1, min_count=1), pdf.product(axis=1, min_count=1))
self.assert_eq(psdf.loc[[]].product(), pdf.loc[[]].product())
self.assert_eq(psdf.loc[[]].product(min_count=1), pdf.loc[[]].product(min_count=1))
self.assert_eq(psdf["a"].product(), pdf["a"].product(), check_exact=False)
self.assert_eq(
psdf["a"].product(min_count=3), pdf["a"].product(min_count=3), check_exact=False
)
self.assert_eq(psdf["b"].product(min_count=3), pdf["b"].product(min_count=3))
self.assert_eq(psdf["c"].product(min_count=3), pdf["c"].product(min_count=3))
self.assert_eq(psdf["a"].loc[[]].product(), pdf["a"].loc[[]].product())
self.assert_eq(
psdf["a"].loc[[]].product(min_count=1), pdf["a"].loc[[]].product(min_count=1)
)
def test_abs(self):
pdf = pd.DataFrame(
{
"A": [1, -2, np.nan, -4, 5],
"B": [1.0, -2, np.nan, -4, 5],
"C": [-6.0, -7, -8, np.nan, 10],
"D": ["a", "b", "c", "d", np.nan],
"E": [True, np.nan, False, True, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.A.abs(), pdf.A.abs())
self.assert_eq(psdf.B.abs(), pdf.B.abs())
self.assert_eq(psdf.E.abs(), pdf.E.abs())
# pandas' bug?
# self.assert_eq(psdf[["B", "C", "E"]].abs(), pdf[["B", "C", "E"]].abs())
self.assert_eq(psdf[["B", "C"]].abs(), pdf[["B", "C"]].abs())
self.assert_eq(psdf[["E"]].abs(), pdf[["E"]].abs())
with self.assertRaisesRegex(
TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
):
psdf.abs()
with self.assertRaisesRegex(
TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
):
psdf.D.abs()
def test_axis_on_dataframe(self):
# The number of each count is intentionally big
# because when data is small, it executes a shortcut.
# Less than 'compute.shortcut_limit' will execute a shortcut
# by using collected pandas dataframe directly.
# now we set the 'compute.shortcut_limit' as 1000 explicitly
with option_context("compute.shortcut_limit", 1000):
pdf = pd.DataFrame(
{
"A": [1, -2, 3, -4, 5] * 300,
"B": [1.0, -2, 3, -4, 5] * 300,
"C": [-6.0, -7, -8, -9, 10] * 300,
"D": [True, False, True, False, False] * 300,
},
index=range(10, 15001, 10),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.count(axis=1), pdf.count(axis=1))
self.assert_eq(psdf.var(axis=1), pdf.var(axis=1))
self.assert_eq(psdf.var(axis=1, ddof=0), pdf.var(axis=1, ddof=0))
self.assert_eq(psdf.std(axis=1), pdf.std(axis=1))
self.assert_eq(psdf.std(axis=1, ddof=0), pdf.std(axis=1, ddof=0))
self.assert_eq(psdf.max(axis=1), pdf.max(axis=1))
self.assert_eq(psdf.min(axis=1), pdf.min(axis=1))
self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
self.assert_eq(psdf.kurtosis(axis=1), pdf.kurtosis(axis=1))
self.assert_eq(psdf.skew(axis=1), pdf.skew(axis=1))
self.assert_eq(psdf.mean(axis=1), pdf.mean(axis=1))
self.assert_eq(psdf.sem(axis=1), pdf.sem(axis=1))
self.assert_eq(psdf.sem(axis=1, ddof=0), pdf.sem(axis=1, ddof=0))
self.assert_eq(
psdf.count(axis=1, numeric_only=True), pdf.count(axis=1, numeric_only=True)
)
self.assert_eq(psdf.var(axis=1, numeric_only=True), pdf.var(axis=1, numeric_only=True))
self.assert_eq(
psdf.var(axis=1, ddof=0, numeric_only=True),
pdf.var(axis=1, ddof=0, numeric_only=True),
)
self.assert_eq(psdf.std(axis=1, numeric_only=True), pdf.std(axis=1, numeric_only=True))
self.assert_eq(
psdf.std(axis=1, ddof=0, numeric_only=True),
pdf.std(axis=1, ddof=0, numeric_only=True),
)
self.assert_eq(
psdf.max(axis=1, numeric_only=True),
pdf.max(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.min(axis=1, numeric_only=True),
pdf.min(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.sum(axis=1, numeric_only=True),
pdf.sum(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.product(axis=1, numeric_only=True),
pdf.product(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.kurtosis(axis=1, numeric_only=True), pdf.kurtosis(axis=1, numeric_only=True)
)
self.assert_eq(
psdf.skew(axis=1, numeric_only=True), pdf.skew(axis=1, numeric_only=True)
)
self.assert_eq(
psdf.mean(axis=1, numeric_only=True), pdf.mean(axis=1, numeric_only=True)
)
self.assert_eq(psdf.sem(axis=1, numeric_only=True), pdf.sem(axis=1, numeric_only=True))
self.assert_eq(
psdf.sem(axis=1, ddof=0, numeric_only=True),
pdf.sem(axis=1, ddof=0, numeric_only=True),
)
def test_corr(self):
# Disable arrow execution since corr() is using UDT internally which is not supported.
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# DataFrame
# we do not handle NaNs for now
pdf = makeMissingDataframe(0.3, 42).fillna(0)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
# Series
pser_a = pdf.A
pser_b = pdf.B
psser_a = psdf.A
psser_b = psdf.B
self.assertAlmostEqual(psser_a.corr(psser_b), pser_a.corr(pser_b))
self.assertRaises(TypeError, lambda: psser_a.corr(psdf))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
# Series
pser_xa = pdf[("X", "A")]
pser_xb = pdf[("X", "B")]
psser_xa = psdf[("X", "A")]
psser_xb = psdf[("X", "B")]
self.assert_eq(psser_xa.corr(psser_xb), pser_xa.corr(pser_xb), almost=True)
def test_cov_corr_meta(self):
# Disable arrow execution since corr() is using UDT internally which is not supported.
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
pdf = pd.DataFrame(
{
"a": np.array([1, 2, 3], dtype="i1"),
"b": np.array([1, 2, 3], dtype="i2"),
"c": np.array([1, 2, 3], dtype="i4"),
"d": np.array([1, 2, 3]),
"e": np.array([1.0, 2.0, 3.0], dtype="f4"),
"f": np.array([1.0, 2.0, 3.0]),
"g": np.array([True, False, True]),
"h": np.array(list("abc")),
},
index=pd.Index([1, 2, 3], name="myindex"),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.corr(), pdf.corr())
def test_stats_on_boolean_dataframe(self):
pdf = pd.DataFrame({"A": [True, False, True], "B": [False, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.min(), pdf.min())
self.assert_eq(psdf.max(), pdf.max())
self.assert_eq(psdf.count(), pdf.count())
self.assert_eq(psdf.sum(), pdf.sum())
self.assert_eq(psdf.product(), pdf.product())
self.assert_eq(psdf.mean(), pdf.mean())
self.assert_eq(psdf.var(), pdf.var(), check_exact=False)
self.assert_eq(psdf.var(ddof=0), pdf.var(ddof=0), check_exact=False)
self.assert_eq(psdf.std(), pdf.std(), check_exact=False)
self.assert_eq(psdf.std(ddof=0), pdf.std(ddof=0), check_exact=False)
self.assert_eq(psdf.sem(), pdf.sem(), check_exact=False)
self.assert_eq(psdf.sem(ddof=0), pdf.sem(ddof=0), check_exact=False)
def test_stats_on_boolean_series(self):
pser = pd.Series([True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(psser.min(), pser.min())
self.assert_eq(psser.max(), pser.max())
self.assert_eq(psser.count(), pser.count())
self.assert_eq(psser.sum(), pser.sum())
self.assert_eq(psser.product(), pser.product())
self.assert_eq(psser.mean(), pser.mean())
self.assert_eq(psser.var(), pser.var(), almost=True)
self.assert_eq(psser.var(ddof=0), pser.var(ddof=0), almost=True)
self.assert_eq(psser.std(), pser.std(), almost=True)
self.assert_eq(psser.std(ddof=0), pser.std(ddof=0), almost=True)
self.assert_eq(psser.sem(), pser.sem(), almost=True)
self.assert_eq(psser.sem(ddof=0), pser.sem(ddof=0), almost=True)
def test_stats_on_non_numeric_columns_should_be_discarded_if_numeric_only_is_true(self):
pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf[["i", "s"]].max(numeric_only=True), pdf[["i", "s"]].max(numeric_only=True)
)
self.assert_eq(
psdf[["b", "s"]].max(numeric_only=True), pdf[["b", "s"]].max(numeric_only=True)
)
self.assert_eq(
psdf[["i", "s"]].min(numeric_only=True), pdf[["i", "s"]].min(numeric_only=True)
)
self.assert_eq(
psdf[["b", "s"]].min(numeric_only=True), pdf[["b", "s"]].min(numeric_only=True)
)
self.assert_eq(psdf.count(numeric_only=True), pdf.count(numeric_only=True))
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
self.assert_eq(psdf.product(numeric_only=True), pdf.product(numeric_only=True))
else:
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
self.assert_eq(
psdf.product(numeric_only=True), pdf.product(numeric_only=True).astype(int)
)
self.assert_eq(psdf.mean(numeric_only=True), pdf.mean(numeric_only=True))
self.assert_eq(psdf.var(numeric_only=True), pdf.var(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.var(ddof=0, numeric_only=True),
pdf.var(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(psdf.std(numeric_only=True), pdf.std(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.std(ddof=0, numeric_only=True),
pdf.std(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(psdf.sem(numeric_only=True), pdf.sem(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.sem(ddof=0, numeric_only=True),
pdf.sem(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(len(psdf.median(numeric_only=True)), len(pdf.median(numeric_only=True)))
self.assert_eq(len(psdf.kurtosis(numeric_only=True)), len(pdf.kurtosis(numeric_only=True)))
self.assert_eq(len(psdf.skew(numeric_only=True)), len(pdf.skew(numeric_only=True)))
# Boolean was excluded because of a behavior change in NumPy
# https://github.com/numpy/numpy/pull/16273#discussion_r641264085 which pandas inherits
# but this behavior is inconsistent in pandas context.
# Boolean column in quantile tests are excluded for now.
# TODO(SPARK-35555): track and match the behavior of quantile to pandas'
pdf = pd.DataFrame({"i": [0, 1, 2], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
len(psdf.quantile(q=0.5, numeric_only=True)),
len(pdf.quantile(q=0.5, numeric_only=True)),
)
self.assert_eq(
len(psdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
len(pdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
)
def test_numeric_only_unsupported(self):
pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
self.assert_eq(
psdf[["i", "b"]].sum(numeric_only=False), pdf[["i", "b"]].sum(numeric_only=False)
)
else:
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
self.assert_eq(
psdf[["i", "b"]].sum(numeric_only=False),
pdf[["i", "b"]].sum(numeric_only=False).astype(int),
)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.sum(numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.s.sum()
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_stats import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
the-stack_0_3657 | from typing import Tuple
import re
RESULT_PATTERN = re.compile(r'([^\*]+)(\**)')
def convert_to_stars(t_value: float) -> str:
t = abs(t_value)
if t < 1.645:
return ''
elif t < 1.96:
return '*'
elif t < 2.576:
return '**'
    elif t >= 2.576:
return '***'
else: # covers nan
return ''
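
# Illustrative behaviour of convert_to_stars (not part of the original module),
# using made-up t-statistics around each significance threshold:
#   convert_to_stars(1.5)  -> ''     (not significant at the 10% level)
#   convert_to_stars(1.7)  -> '*'    (10% level)
#   convert_to_stars(2.0)  -> '**'   (5% level)
#   convert_to_stars(3.0)  -> '***'  (1% level)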
def parse_stars_value(value: str) -> Tuple[str, str]:
match = RESULT_PATTERN.fullmatch(value)
if not match:
return '', ''
result = match.group(1)
stars = match.group(2)
return result, stars |
the-stack_0_3661 | """
SQLite3 CRUD with Python 3
@author parzibyte
More tutorials at: parzibyte.me/blog
"""
import sqlite3
try:
    # Connect to the database
bd = sqlite3.connect("libros.db")
cursor = bd.cursor()
    # List the books
sentencia = "SELECT *,rowid FROM libros;"
cursor.execute(sentencia)
libros = cursor.fetchall()
print("+{:-<20}+{:-<20}+{:-<10}+{:-<50}+{:-<10}+".format("", "", "", "", ""))
print("|{:^20}|{:^20}|{:^10}|{:^50}|{:^10}|".format("Autor", "Género", "Precio", "Título", "Rowid"))
print("+{:-<20}+{:-<20}+{:-<10}+{:-<50}+{:-<10}+".format("", "", "", "", ""))
for autor, genero, precio, titulo, rowid in libros:
print("|{:^20}|{:^20}|{:^10}|{:^50}|{:^10}|".format(autor, genero, precio, titulo, rowid))
print("+{:-<20}+{:-<20}+{:-<10}+{:-<50}+{:-<10}+".format("", "", "", "", ""))
    # Ask for the id of the book to edit
    id_libro = input("\nEnter the id of the book you want to edit: ")
    if not id_libro:
        print("You did not type anything")
        exit()
    # Ask for the new data
    autor = input("\nNew author: ")
    genero = input("\nNew genre: ")
    precio = float(input("\nNew price: "))
    titulo = input("\nNew title: ")
    # Update statement
    sentencia = "UPDATE libros SET autor = ?, genero = ?, precio = ?, titulo = ? WHERE rowid = ?;"
    # Run the update
    cursor.execute(sentencia, [autor, genero, precio, titulo, id_libro])
    bd.commit()
    print("Data saved")
except sqlite3.OperationalError as error:
print("Error al abrir:", error) |
the-stack_0_3662 | # -*- coding: utf-8 -*-
from openerp import models, fields, api, osv
# We just create a new model
class mother(models.Model):
_name = 'test.inherit.mother'
_columns = {
# check interoperability of field inheritance with old-style fields
'name': osv.fields.char('Name'),
'state': osv.fields.selection([('a', 'A'), ('b', 'B')], string='State'),
}
_defaults = {
'name': 'Foo',
}
surname = fields.Char(compute='_compute_surname')
@api.one
@api.depends('name')
def _compute_surname(self):
self.surname = self.name or ''
# We want to inherit from the parent model and we add some fields
# in the child object
class daughter(models.Model):
_name = 'test.inherit.daughter'
_inherits = {'test.inherit.mother': 'template_id'}
template_id = fields.Many2one('test.inherit.mother', 'Template',
required=True, ondelete='cascade')
field_in_daughter = fields.Char('Field1')
# We add a new field in the parent object. Because of a recent refactoring,
# this feature was broken.
# This test and these models try to show the bug and fix it.
class mother(models.Model):
_inherit = 'test.inherit.mother'
field_in_mother = fields.Char()
# extend the name field: make it required and change its default value
name = fields.Char(required=True, default='Bar')
# extend the selection of the state field
state = fields.Selection(selection_add=[('c', 'C')])
# override the computed field, and extend its dependencies
@api.one
@api.depends('field_in_mother')
def _compute_surname(self):
if self.field_in_mother:
self.surname = self.field_in_mother
else:
super(mother, self)._compute_surname()
class mother(models.Model):
_inherit = 'test.inherit.mother'
# extend again the selection of the state field
state = fields.Selection(selection_add=[('d', 'D')])
class daughter(models.Model):
_inherit = 'test.inherit.daughter'
# simply redeclare the field without adding any option
template_id = fields.Many2one()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|