ext | sha | content
---|---|---|
py | 1a37ff4f59ecc56719fbbf86b9ce21fe27e51eca | from flask import render_template,request,redirect,url_for,abort
from . import main
from .forms import UpdateProfile, ReasonForm  # ReasonForm assumed to be defined alongside UpdateProfile
from ..models import User, Reason  # Reason assumed to be defined alongside User (used by new_reason below)
from flask_login import login_required,current_user
from .. import db,photos
import markdown2
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Home - Welcome to pomodoro'
content = "WELCOME TO POMODORO APP"
return render_template('index.html', title = title,content = content)
@main.route('/about')
def about():
return render_template('about.html', title = 'About')
@main.route('/pomodoro')
@login_required
def pomodoro():
'''
    View pomodoro page function that returns the pomodoro page and its data
'''
title = 'Home - Welcome to pomodoro'
content = "WELCOME TO POMODORO APP"
return render_template('pomodoro.html', title = title,content = content)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/reason/new/<int:id>', methods = ['GET','POST'])
@login_required
def new_reason(id):
form = ReasonForm()
reason = get_reason(id)
if form.validate_on_submit():
        title = form.title.data
        reason_text = form.reason.data
        # New reason instance (separate name so the fetched reason object is not shadowed)
        new_reason = Reason(reason_id=reason.id, reason_title=title, reason=reason_text, user=current_user)
        # save reason method
        new_reason.save_reason()
        return redirect(url_for('.reason', id=reason.id))
    title = f'{reason.title} reason'
return render_template('new_reason.html',title = title, reason_form=form, reason=reason) |
py | 1a37ff8acabc66e8c329ec5eceae7a369b9ccf2b | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import copy
import grpc
import six
import threading
from concurrent import futures
from edl.utils import common_pb2
from edl.utils import data_server_pb2
from edl.utils import data_server_pb2_grpc
from edl.utils import error_utils
from edl.utils import exceptions
from edl.utils.log_utils import logger
class PodData(object):
"""
Manage pod's data:
batch_data_ids, file_list, data_server_endpoint
"""
def __init__(self, pod_id, data_server_endpoint):
# batch_data_ids
self._pod_id = pod_id
self._data_server_endpoint = data_server_endpoint
# total ids for filter
self._batch_data_ids = set()
self._queue = collections.deque()
# data_server_pb2.FileListElement
self._file_list_slice = []
self._reach_data_end = False
def append_file_list_element(self, element):
self._file_list_slice.append(element)
@property
def reach_data_end(self):
return self._reach_data_end
@reach_data_end.setter
def reach_data_end(self, r):
self._reach_data_end = r
def get_size(self):
return len(self._queue)
def pop(self, num):
a = []
while len(self._queue) > 0:
if (num > 0 and len(a) < num) or num <= 0:
batch_data_id = self._queue.popleft()
a.append(batch_data_id)
else:
break
logger.debug(
"batch_data_ids:{}, queue:{}".format(
len(self._batch_data_ids), len(self._queue)
)
)
return a
def put(self, data_server_endpoint, batch_data_ids):
self._data_server_endpoint = data_server_endpoint
for batch_data_id in batch_data_ids:
if batch_data_id in self._batch_data_ids:
continue
self._queue.append(batch_data_id)
self._batch_data_ids.add(batch_data_id)
logger.debug(
"batch_data_ids:{}, queue:{}".format(
len(self._batch_data_ids), len(self._queue)
)
)
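# Illustrative usage of PodData (not part of the original module): put() filters
# duplicate batch data ids and pop() drains up to `num` ids from the queue.
#   pod = PodData("pod-0", "127.0.0.1:6000")
#   pod.put("127.0.0.1:6000", ["id-1", "id-2", "id-1"])
#   pod.get_size()  # -> 2
#   pod.pop(1)      # -> ["id-1"]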
class PodsData(object):
"""
Reader's pods data
pod_id=>PodData
"""
def __init__(self, reader_name, file_list, pod_ids):
self._reader_name = reader_name
# pod_id => PodData
self._pod_data = {}
# pod_id => BalanceBatchData
self._balanced_batch_data = {}
self._barrier_ids = set()
self._reach_data_end_ids = set()
self._lock = threading.Lock()
# string list
self._file_list = file_list
self._pod_ids = set(pod_ids)
self._init()
self._total = 0
def _init(self):
for pod_id in self._pod_ids:
self._pod_data[pod_id] = PodData(pod_id, None)
self._balanced_batch_data[pod_id] = [] # array of BatchDataMeta
i = 0
while i < len(self._file_list):
for pod_id in self._pod_ids:
m = data_server_pb2.FileListElement()
m.idx = i
m.path = self._file_list[i]
self._pod_data[pod_id].append_file_list_element(m)
i += 1
if i >= len(self._file_list):
break
def get_pod_file_list(self, pod_id):
pod_data = self._pod_data[pod_id]
return pod_data._file_list_slice
def set_data_end(self, pod_id):
with self._lock:
pod_data = self._pod_data[pod_id]
            pod_data.reach_data_end = True
self._reach_data_end_ids.add(pod_id)
def _get_batch_data_id_from_others(self, avg_num, need_num):
ret = []
for pod_id in self._pod_ids:
src = self._pod_data[pod_id]
if src.get_size() < avg_num:
continue
dst = data_server_pb2.BatchDataMeta()
dst.reader_name = self._reader_name
dst.producer_pod_id = src._pod_id
dst.data_server_endpoint = src._data_server_endpoint
pop_num = src.get_size() - avg_num
ids = src.pop(pop_num)
if len(ids) <= 0:
continue
            dst.batch_data_ids.extend(ids)
ret.append(dst)
need_num -= len(ids)
if need_num <= 0:
break
return ret
def put(self, pod_id, data_server_endpoint, batch_data_ids):
with self._lock:
pod_data = self._pod_data[pod_id]
pod_data.put(data_server_endpoint, batch_data_ids)
total = 0
for _, pod_data in six.iteritems(self._pod_data):
total += pod_data.get_size()
self._barrier_ids.add(pod_id)
if (self._barrier_ids | self._reach_data_end_ids) != self._pod_ids:
logger.debug(
"barrier_ids:{} readch_data_end_ids:{}".format(
len(self._barrier_ids), len(self._reach_data_end_ids)
)
)
return
            avg_num = total // len(self._pod_ids)  # integer division keeps Python 2/3 behaviour consistent
logger.debug("total:{} avg_num:{}".format(total, avg_num))
if avg_num < 1:
return
# get batch_data_ids from pods_data to balance_batch_data
for pod_id in self._pod_ids:
src = self._pod_data[pod_id]
dst = data_server_pb2.BatchDataMeta()
dst.reader_name = self._reader_name
dst.producer_pod_id = src._pod_id
dst.data_server_endpoint = src._data_server_endpoint
ids = src.pop(num=avg_num)
if len(ids) >= avg_num:
dst.batch_data_ids.extend(ids)
self._balanced_batch_data[pod_id].append(dst)
logger.debug(
"balance_data_ids:{}".format(
len(self._balanced_batch_data[pod_id])
)
)
else:
need_num = avg_num - len(ids)
ret = self._get_batch_data_id_from_others(avg_num, need_num)
if len(ret) <= 0:
continue
self._balanced_batch_data[pod_id].extend(ret)
logger.debug(
"balance_data_ids:{}".format(
len(self._balanced_batch_data[pod_id])
)
)
self._barrier_ids = set()
def _is_all_reach_data_end(self):
for _, pod_data in six.iteritems(self._pod_data):
if not pod_data.reach_data_end:
return False
return True
# FIXME(gongwb): avoid global lock of all pods
@error_utils.handle_errors_until_timeout
def pop(self, pod_id, ret, timeout=60):
with self._lock:
balanced_data = self._balanced_batch_data[pod_id]
if len(balanced_data) > 0:
for data in balanced_data:
ret.append(copy.copy(data))
return ret
if self._is_all_reach_data_end():
return None
raise exceptions.EdlDataGenerateError("wait to generate more data")
class DataServerServicer(data_server_pb2_grpc.DataServerServicer):
def __init__(self, trainer_env, reader_name, file_list, pod_ids, local_reader):
self._lock = threading.Lock()
self._trainer_env = trainer_env
# string list
self._file_list = file_list
self._pod_ids = pod_ids
self._local_reader = local_reader
self._reader_name = reader_name
# reader_name=>PodData
self._pod_data = PodsData(reader_name, file_list, pod_ids)
def _check_leader(self):
if self._trainer_env.global_rank != 0:
raise exceptions.EdlNotLeaderError(
"This server rank:{} is not Leader".format(
self._trainer_env.global_rank
)
)
# only leader can do this
def ReportBatchDataMeta(self, request, context):
res = common_pb2.EmptyRet()
try:
self._check_leader()
self._check_pod_id(request.pod_id)
self._check_reader_name(request.reader_name)
if len(request.batch_data_ids) > 0:
self._pod_data.put(
request.pod_id, request.data_server_endpoint, request.batch_data_ids
)
except Exception as e:
import traceback
exceptions.serialize(res, e, traceback.format_exc())
return res
def ReachDataEnd(self, request, context):
res = common_pb2.EmptyRet()
try:
self._check_leader()
self._check_pod_id(request.pod_id)
self._check_reader_name(request.reader_name)
self._pod_data.set_data_end(request.pod_id)
except Exception as e:
import traceback
exceptions.serialize(res, e, traceback.format_exc())
return res
# only leader can do this
def GetBatchDataMeta(self, request, context):
res = data_server_pb2.BatchDataMetaResponse()
try:
self._check_leader()
self._check_pod_id(request.pod_id)
self._check_reader_name(request.reader_name)
self._pod_data.pop(request.pod_id, res.data, timeout=60)
except Exception as e:
import traceback
exceptions.serialize(res, e, traceback.format_exc())
return res
def GetBatchData(self, request, context):
res = data_server_pb2.BatchDataResponse()
try:
datas = self._local_reader.get_local_batch_data(request)
for data in datas:
b = copy.copy(data)
res.datas.append(b)
except Exception as e:
import traceback
exceptions.serialize(res, e, traceback.format_exc())
return res
def _check_file_list(self, file_list):
for i, ele in enumerate(file_list):
if self._file_list[i] != ele.path:
raise exceptions.EdlFileListNotMatchError(
"client:{} server:{}".format(file_list, self._file_list)
)
def _check_pod_id(self, pod_id):
if pod_id not in self._pod_ids:
raise exceptions.EdlPodIDNotExistError(
"pod_id:{} not exist in {}".format(pod_id, self._pod_ids)
)
def _check_reader_name(self, reader_name):
if reader_name != self._reader_name:
raise exceptions.EdlReaderNameError(
"{} not equal {}".format(reader_name, self._reader_name)
)
# only leader can do this
def GetFileList(self, request, context):
"""
Get slice of file list for a pod by pod_id
        No lock is needed because these are read-only.
"""
res = data_server_pb2.FileListResponse()
try:
self._check_leader()
self._check_file_list(request.file_list)
self._check_pod_id(request.pod_id)
self._check_reader_name(request.reader_name)
file_list = self._pod_data.get_pod_file_list(request.pod_id)
for m in file_list:
res.file_list.append(m)
return res
except exceptions.EdlException as e:
exceptions.serialize(res, e)
return res
class DataServer(object):
def __init__(self, trainer_env, reader_name, file_list, local_reader):
self._server = None
self._addr = None
self._port = None
self._endpoint = None
self._trainer_env = trainer_env
self._reader_name = reader_name
self._file_list = file_list
self._local_reader = local_reader
def start(self, addr, cache_capcity=1000, max_workers=100, concurrency=20):
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=max_workers),
options=[
("grpc.max_send_message_length", 1024 * 1024 * 1024),
("grpc.max_receive_message_length", 1024 * 1024 * 1024),
],
maximum_concurrent_rpcs=concurrency,
)
data_server_pb2_grpc.add_DataServerServicer_to_server(
DataServerServicer(
trainer_env=self._trainer_env,
reader_name=self._reader_name,
file_list=self._file_list,
pod_ids=self._trainer_env.pod_ids,
local_reader=self._local_reader,
),
server,
)
self._addr = addr
self._port = server.add_insecure_port("{}:0".format(addr))
assert (
self._port > 0
), "data server start on addr:{} error, selected port is {}".format(
addr, self._port
)
self._endpoint = "{}:{}".format(self._addr, self._port)
server.start()
self._server = server
print("start data_server:", self._endpoint)
@property
def endpoint(self):
return self._endpoint
def wait(self, timeout=None):
if timeout is not None:
self._server.stop(timeout)
return
self._server.wait_for_termination(timeout)
def shutdown(self):
pass
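# Illustrative wiring of DataServer (trainer_env, file_list and local_reader are
# placeholders for the constructor arguments, not values defined in this file):
#   server = DataServer(trainer_env, "reader0", file_list, local_reader)
#   server.start("127.0.0.1")  # binds "<addr>:0", i.e. an OS-selected free port
#   print(server.endpoint)     # "127.0.0.1:<selected_port>"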
|
py | 1a38000cd316c8eb2937577ef226db089eeb3c22 | ## This is the code in Python
# Your task is to create the code for the same in C/C++
items = input("Input comma separated sequence of words")
words = [word for word in items.split(",")]
print(",".join(sorted(list(set(words)))))
|
py | 1a38002e8ef7ea27cfa54b6cc6bbb4b9c9045595 | __title__ = 'django-daiquiri'
__version__ = '0.3'
__author__ = 'Jochen Klar'
__email__ = '[email protected]'
__license__ = 'Apache-2.0'
__copyright__ = 'Copyright 2016-2017 Leibniz Institute for Astrophysics Potsdam (AIP)'
VERSION = __version__
|
py | 1a3800790f9c57d8b6c3985def0ecb484205a72a | # Generated by Django 3.0.5 on 2020-10-31 16:05
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='category',
name='slug',
field=models.SlugField(unique=True, verbose_name='Slug категории'),
),
migrations.AlterField(
model_name='comment',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL, verbose_name='Автор'),
),
migrations.AlterField(
model_name='comment',
name='pub_date',
field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Дата публикации'),
),
migrations.AlterField(
model_name='comment',
name='review',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='api.Review', verbose_name='Отзыв'),
),
migrations.AlterField(
model_name='comment',
name='text',
field=models.TextField(verbose_name='Комментарий'),
),
migrations.AlterField(
model_name='genre',
name='slug',
field=models.SlugField(unique=True, verbose_name='Slug жанра'),
),
migrations.AlterField(
model_name='review',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to=settings.AUTH_USER_MODEL, verbose_name='Автор'),
),
migrations.AlterField(
model_name='review',
name='pub_date',
field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Дата публикации'),
),
migrations.AlterField(
model_name='review',
name='score',
field=models.PositiveSmallIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)], verbose_name='Оценка'),
),
migrations.AlterField(
model_name='review',
name='text',
field=models.TextField(verbose_name='Отзыв'),
),
migrations.AlterField(
model_name='review',
name='title',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='api.Title', verbose_name='Произведение'),
),
migrations.AlterField(
model_name='title',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='titles', to='api.Category', verbose_name='Категория'),
),
migrations.AlterField(
model_name='title',
name='description',
field=models.TextField(blank=True, null=True, verbose_name='Описание'),
),
migrations.AlterField(
model_name='title',
name='genre',
field=models.ManyToManyField(blank=True, related_name='titles', to='api.Genre', verbose_name='Жанр'),
),
migrations.AlterField(
model_name='title',
name='name',
field=models.CharField(max_length=200, unique=True, verbose_name='Название'),
),
migrations.AlterField(
model_name='title',
name='year',
field=models.PositiveSmallIntegerField(blank=True, db_index=True, null=True, validators=[django.core.validators.MinValueValidator(1895), django.core.validators.MaxValueValidator(2020)], verbose_name='Год выпуска'),
),
]
|
py | 1a38009cafc383e714a163986c20739ce04b28bc | import re
from dataclasses import dataclass, field
from typing import Optional, Sequence, Union
from snuba_sdk.column import Column
from snuba_sdk.expressions import (
Expression,
InvalidExpression,
ScalarLiteralType,
ScalarType,
is_literal,
is_scalar,
)
class InvalidFunction(InvalidExpression):
pass
alias_re = re.compile(r"^[a-zA-Z](\w|\.)+$")
# In theory the function matcher should be the same as the column one.
# However legacy API sends curried functions as raw strings, and it
# wasn't worth it to import an entire parsing grammar into the SDK
# just to accommodate that one case. Instead, allow it for now and
# once that use case is eliminated we can remove this.
function_name_re = re.compile(r"^[a-zA-Z](\w|[().,+]| |\[|\])+$")
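# Illustrative example of the looser matcher: a curried function passed as a raw
# string such as "quantile(0.9)(duration)" matches function_name_re even though it
# is not a plain identifier.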
@dataclass(frozen=True)
class CurriedFunction(Expression):
function: str
initializers: Optional[Sequence[Union[ScalarLiteralType, Column]]] = None
parameters: Optional[
Sequence[Union[ScalarType, Column, "CurriedFunction", "Function"]]
] = None
alias: Optional[str] = None
def validate(self) -> None:
if not isinstance(self.function, str):
raise InvalidFunction(f"function '{self.function}' must be a string")
if self.function == "":
# TODO: Have a whitelist of valid functions to check, maybe even with more
# specific parameter type checking
raise InvalidFunction("function cannot be empty")
if not function_name_re.match(self.function):
raise InvalidFunction(
f"function '{self.function}' contains invalid characters"
)
if self.initializers is not None:
if not isinstance(self.initializers, Sequence):
raise InvalidFunction(
f"initializers of function {self.function} must be a Sequence"
)
elif not all(
isinstance(param, Column) or is_literal(param)
for param in self.initializers
):
raise InvalidFunction(
f"initializers to function {self.function} must be a scalar or column"
)
if self.alias is not None:
if not isinstance(self.alias, str) or self.alias == "":
raise InvalidFunction(
f"alias '{self.alias}' of function {self.function} must be None or a non-empty string"
)
if not alias_re.match(self.alias):
raise InvalidFunction(
f"alias '{self.alias}' of function {self.function} contains invalid characters"
)
if self.parameters is not None:
if not isinstance(self.parameters, Sequence):
raise InvalidFunction(
f"parameters of function {self.function} must be a Sequence"
)
for param in self.parameters:
if not isinstance(
param, (Column, CurriedFunction, Function)
) and not is_scalar(param):
assert not isinstance(param, bytes) # mypy
raise InvalidFunction(
f"parameter '{param}' of function {self.function} is an invalid type"
)
def __eq__(self, other: object) -> bool:
# Don't use the alias to compare equality
if not isinstance(other, CurriedFunction):
return False
return (
self.function == other.function
and self.initializers == other.initializers
and self.parameters == other.parameters
)
@dataclass(frozen=True)
class Function(CurriedFunction):
initializers: Optional[Sequence[Union[ScalarLiteralType, Column]]] = field(
init=False, default=None
)
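# Illustrative constructions based on the dataclasses above (values are made up):
#   Function("uniq", parameters=[Column("user_id")], alias="uniq_users")
#   CurriedFunction("quantile", initializers=[0.9], parameters=[Column("duration")], alias="p90")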
|
py | 1a3801e8d17ec3238541780b26bb6ade318ed806 | import numpy as np
import glob
import os
import fridge.Material.Element as Element
import fridge.utilities.utilities as utilities
AVOGADROS_NUMBER = 0.6022140857
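# 0.6022140857 is Avogadro's number x 1e-24, so atom densities computed with it
# come out in atoms/(barn*cm), a common convention in reactor-physics codes.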
cur_dir = os.path.dirname(__file__)
material_dir = os.path.join(cur_dir, '../data/materials/')
class Material(object):
"""Creates a material consisting of elements based on the Material database."""
def __init__(self):
self.atomDensity = 0.0
self.density = 0.0
self.linearCoeffExpansion = 0.0
self.name = ''
self.materialName = ''
self.atomPercent = {}
self.enrichmentDict = {}
self.weightPercent = {}
self.elementDict = {}
self.elements = []
self.zaids = []
self.weightFraction = []
self.enrichmentZaids = []
self.enrichmentIsotopes = []
self.enrichmentVector = []
self.isotopicAtomPercents = []
def set_material(self, material):
self.name = material
self.read_material_data(self.name)
self.create_material_data()
def read_material_data(self, material):
"""Read in the material data from the material database."""
material_yaml_file = glob.glob(os.path.join(material_dir, material + '.yaml'))
inputs = utilities.yaml_reader(material_yaml_file, material_dir, material)
self.name = inputs['Name']
self.materialName = material
self.elements = inputs['Elements']
self.zaids = inputs['Elemental ZAIDs']
self.weightFraction = inputs['Elemental Weight Fractions'] if 'Elemental Weight Fractions' in inputs else []
self.enrichmentZaids = inputs['Elemental Adjustment ZAIDs'] if 'Elemental Adjustment ZAIDs' in inputs else []
self.enrichmentIsotopes = inputs['Isotopic Adjustment ZAIDs'] if 'Isotopic Adjustment ZAIDs' in inputs else []
self.enrichmentVector = inputs['Isotopic Weight Percents'] if 'Isotopic Weight Percents' in inputs else []
self.isotopicAtomPercents = inputs['Isotopic Atom Percents'] if 'Isotopic Atom Percents' in inputs else []
self.density = inputs['Density']
self.linearCoeffExpansion = inputs['Linear Coefficient of Expansion']
def create_material_data(self):
"""Create a material based on the data from the material database."""
for num, zaid in enumerate(self.enrichmentZaids):
enriched_isotope_dict = {}
for isoNum, isotopes in enumerate(self.enrichmentIsotopes[num]):
enriched_isotope_dict[isotopes] = self.enrichmentVector[num][isoNum]
self.enrichmentDict[zaid] = enriched_isotope_dict
for num, element in enumerate(self.elements):
self.elementDict[self.zaids[num]] = Element.Element(element)
if self.isotopicAtomPercents:
self.atomDensity = self.density
self.set_atom_fractions()
else:
self.set_elemental_enrichment()
self.set_weight_percent()
self.atomDensity, self.atomPercent = set_atom_percent(self.weightPercent, self.density,
self.elementDict)
def set_elemental_enrichment(self):
"""Adjust the element's natural abundance to compensate for enrichment."""
for elementEnrichement, zaidVector in self.enrichmentDict.items():
for zaid, enrichmentPercent in zaidVector.items():
self.elementDict[elementEnrichement].weightPercentDict[zaid] = enrichmentPercent
def set_weight_percent(self, void_percent=1.0):
"""Calculates the weight percent of a material."""
weight_total = 0.0
for zaidNum, zaid in enumerate(self.zaids):
for isotope, isotopeFraction in self.elementDict[zaid].weightPercentDict.items():
if isotopeFraction != 0.0:
self.weightPercent[isotope] = isotopeFraction * self.weightFraction[zaidNum] * void_percent
weight_total += self.weightPercent[isotope]
try:
assert np.allclose(weight_total, 1.0 * void_percent)
except AssertionError:
print("Weight percent does not sum to 1.0 for {}. Check the material file.".format(self.name))
def set_void(self, void_percent):
"""Adjust the atom density/atom percent of a material to account for voiding."""
self.set_weight_percent(void_percent)
self.atomDensity, self.atomPercent = set_atom_percent(self.weightPercent, self.density, self.elementDict)
def set_atom_fractions(self, void_percent=1.0):
"""Calculates the atom density of a material given a material with atom densities defined."""
for zaidNum, zaid in enumerate(self.zaids):
for isotope, isotopeFraction in self.elementDict[zaid].atomPercentDict.items():
if zaid in self.isotopicAtomPercents:
print(self.elementDict[zaid].weightPercentDict[isotope])
self.atomPercent[isotope] = self.elementDict[zaid].atomPercentDict[isotope] * \
self.isotopicAtomPercents[zaid] * void_percent
elif isotope in self.isotopicAtomPercents:
self.atomPercent[isotope] = self.isotopicAtomPercents[isotope] * void_percent
assert np.allclose(sum(self.atomPercent.values()), self.density, 3)
def set_atom_percent(weight_percents, density, element_dict):
"""Converts the weight percent of a material to the atom percent and atom density."""
atom_densities = {}
atom_percent = {}
for zaid, weight in weight_percents.items():
element = str(zaid)
if len(element) < 5:
current_element = int(element[:1] + '000')
else:
current_element = int(element[:2] + '000')
atom_densities[zaid] = weight*density*AVOGADROS_NUMBER / element_dict[current_element].molecularMassDict[zaid]
atom_density = sum(atom_densities.values())
for zaid, atomicDensity in atom_densities.items():
atom_percent[zaid] = atomicDensity / atom_density
return atom_density, atom_percent
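# In short, set_atom_percent implements N_i = w_i * rho * N_A / M_i per isotope,
# returns the total atom density sum(N_i), and divides each N_i by that total to
# obtain the atom percents.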
def get_smeared_material(materials, void_material='', void_percent=1.0):
"""Create the material data card for a smeared material."""
smear_material = {}
for material, materialWeightPercent in materials.items():
void_multiplier = 1.0
if material == 'Void':
pass
else:
base_material = Material()
base_material.set_material(material)
if base_material.materialName == void_material:
void_multiplier = void_percent
for isotope, isotopeWeightPercent in base_material.weightPercent.items():
element = str(isotope)
if len(element) < 5:
current_element = element[:1] + '000'
else:
current_element = element[:2] + '000'
current_element = int(current_element)
try:
smear_material[isotope] += isotopeWeightPercent * materialWeightPercent * base_material.density \
* AVOGADROS_NUMBER * void_multiplier / \
base_material.elementDict[current_element].molecularMassDict[isotope]
except KeyError:
smear_material[isotope] = isotopeWeightPercent * materialWeightPercent * base_material.density \
* AVOGADROS_NUMBER * void_multiplier / \
base_material.elementDict[current_element].molecularMassDict[isotope]
smeared_material = Material()
smeared_material.name = "{}".format([val for val in materials])
smeared_material.atomDensity = sum(smear_material.values())
smeared_atom_percent = {}
for k, v in smear_material.items():
smeared_atom_percent[k] = v / smeared_material.atomDensity
smeared_material.atomPercent = smeared_atom_percent
return smeared_material
def smear_coolant_wirewrap(info):
"""Returns a smeared material for the coolant and wire wrap."""
height = info[0]
fuel_radius = info[1] / 2
wirewrap_radius = info[2] / 2
wire_wrap_axial_pitch = info[3]
fuel_pitch = info[4]
coolant_material = info[5]
clad_material = info[6]
fuel_volume = utilities.get_cylinder_volume(fuel_radius, height)
wire_wrap_volume = utilities.get_toroidal_volume(fuel_radius, wirewrap_radius, wire_wrap_axial_pitch, height)
pin_hexagonal_universe_volume = utilities.get_hexagonal_prism_volume(fuel_pitch, height)
coolant_volume = pin_hexagonal_universe_volume - fuel_volume - wire_wrap_volume
total_coolant_wire_wrap_volume = coolant_volume + wire_wrap_volume
wire_wrap_volume_percent = wire_wrap_volume / total_coolant_wire_wrap_volume
coolant_volume_percent = coolant_volume / total_coolant_wire_wrap_volume
smeared_material_dict = {clad_material: wire_wrap_volume_percent, coolant_material: coolant_volume_percent}
return smeared_material_dict
|
py | 1a38022d41be818b2a5cfdc5b70a5306115a1292 | #AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"hello_world": "00_core.ipynb"}
modules = ["core.py"]
git_url = "https://github.com/cbjuan/nbdev_test/tree/master/"
def custom_doc_links(name): return None |
py | 1a3802f3c346e2fff413131471e6aa74d04ccc35 | """
My 5th bot.
I don't actually like those films, but that was an order. And, frankly, a very interesting one!
"""
from bs4 import BeautifulSoup
from datetime import datetime
import requests
import telebot
import json
def search_link(name):
"""Find a link for a film"""
with open("database.json", "r", encoding="utf-8") as f:
database = json.load(f)
try:
return "https://doramalive.ru" + database[name]
# If there is no such film:
except:
return "error"
def parse_dorama_page(link):
"""Parse the film webpage"""
res = requests.get(link)
soup = BeautifulSoup(res.text, 'html.parser')
dorama = {}
# Put the information into the dictionary
dorama["link"] = link
dorama["name"] = " ".join(soup.find("h1").string.split()[1::])
dorama["rating"] = soup.find("div", class_="vote-detail").get_text()
dorama["description"] = soup.find("div", class_="detail-more").get_text()
parametrs = soup.find_all("dl", class_="dl-horizontal")
for parametr in parametrs:
par = parametr.find_all("dd")
dorama["made_in"] = par[1].get_text()
dorama["made_date"] = par[2].get_text()
dorama["genres"] = []
genres = soup.find_all("span", "label label-default genre")
for genre in genres:
dorama["genres"].append(" ".join(genre.find("a").get("title").split()[2::]).title())
return dorama
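# parse_dorama_page returns a dict with the keys: link, name, rating, description,
# made_in, made_date and genres (a list of genre titles).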
# BOT STARTS HERE ###
bot = telebot.TeleBot("2133317357:AAEAEsYGXuZqD0psX-GapGh1YjCrFcNkToU")
print("Bot is active!")
@bot.message_handler(commands=["start"])
def command_start(message):
"""Handler of the first command /start"""
bot.send_message(message.chat.id, "✨")
bot.send_message(message.chat.id, "Привет! Я помогу вам найти информацию о дорамах. "
"Просто напишите мне название, а всё вам о ней расскажу!")
@bot.message_handler(content_types=['text'])
def reply(message):
"""Handler of any text message. It is supposed to be the name of a film"""
print(f"Human: {not (message.from_user.is_bot)} || Name: {message.from_user.first_name} "
f"{message.from_user.last_name} || Id: {message.from_user.id} || Time: {datetime.now().strftime('%H:%M')};")
link = search_link(message.text.lower())
# If there is no such film:
if link == "error":
bot.send_message(message.chat.id, "К сожаленю такой дорамы нет. Или вы неверно "
"ввели название ☹️ Попробуйте, пожалуйста, ещё раз.")
# If there is
else:
dorama = parse_dorama_page(link)
n = round(float(dorama["rating"].split()[0]))
stars = ["⭐" for i in range(n)]
msg = f"<b>Название:</b> {dorama['name']}\n<b>Производство:</b> {dorama['made_in']}\n<b>Дата премьеры:" \
f"</b> {dorama['made_date']}\n<b>Рейтинг: {''.join(stars)}</b> {dorama['rating']}\n<b>Жанры: ▫️</b> " \
f"{'▫️'.join(dorama['genres'])}\n<b>Описание:</b> {dorama['description']}\n<b>Ссылка:</b> " \
f"{dorama['link']}"
bot.send_message(message.chat.id, msg, parse_mode="html")
bot.polling(none_stop=True, interval=0) |
py | 1a380399ef5eb63442bf5a1ff8b556df874d108a | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# pylint: disable=super-init-not-called,arguments-differ
from uamqp import c_uamqp
from uamqp import utils
class AMQPType:
"""Base type for specific AMQP encoded type definitions.
:ivar value: The Python value of the AMQP type.
:ivar c_data: The C AMQP encoded object.
"""
def __init__(self, value):
self._c_type = self._c_wrapper(value)
@property
def value(self):
return self._c_type.value
@property
def c_data(self):
return self._c_type
def _c_wrapper(self, value):
raise NotImplementedError()
class AMQPSymbol(AMQPType):
"""An AMQP symbol object.
:ivar value: The Python value of the AMQP type.
:vartype value: bytes
:ivar c_data: The C AMQP encoded object.
:vartype c_data: c_uamqp.SymbolValue
:param value: The value to encode as an AMQP symbol.
:type value: bytes or str
:param encoding: The encoding to be used if a str is provided.
The default is 'UTF-8'.
:type encoding: str
"""
def __init__(self, value, encoding='UTF-8'):
self._c_type = self._c_wrapper(value, encoding)
def _c_wrapper(self, value, encoding='UTF-8'):
value = value.encode(encoding) if isinstance(value, str) else value
return c_uamqp.symbol_value(value)
class AMQPLong(AMQPType):
"""An AMQP long object.
:ivar value: The Python value of the AMQP type.
:vartype value: int
:ivar c_data: The C AMQP encoded object.
:vartype c_data: ~uamqp.c_uamqp.LongValue
:param value: The value to encode as an AMQP ulong.
:type value: int
:raises: ValueError if value is not within allowed range.
"""
def _c_wrapper(self, value):
try:
return c_uamqp.long_value(int(value))
except TypeError:
raise ValueError("Value must be an integer")
except OverflowError:
raise ValueError("Value {} is too large for a Long value.".format(value))
class AMQPuLong(AMQPType):
"""An AMQP unsigned long object.
:ivar value: The Python value of the AMQP type.
:vartype value: int
:ivar c_data: The C AMQP encoded object.
    :vartype c_data: ~uamqp.c_uamqp.ULongValue
    :param value: The value to encode as an AMQP unsigned Long.
    :type value: int
:raises: ValueError if value is not within allowed range.
"""
def _c_wrapper(self, value):
try:
return c_uamqp.ulong_value(int(value))
except TypeError:
raise ValueError("Value must be an integer")
except OverflowError:
raise ValueError("Value {} is too large for an unsigned Long value.".format(value))
class AMQPArray(AMQPType):
"""An AMQP Array object. All the values in the array
must be of the same type.
:ivar value: The Python values of the AMQP array.
:vartype value: list
:ivar c_data: The C AMQP encoded object.
    :vartype c_data: ~uamqp.c_uamqp.ArrayValue
    :param value: The values to encode as an AMQP Array.
    :type value: list
:raises: ValueError if all values are not the same type.
"""
def _c_wrapper(self, value_array):
value_type = type(value_array[0])
if not all(isinstance(x, value_type) for x in value_array):
raise ValueError("All Array values must be the same type.")
c_array = c_uamqp.array_value()
for value in value_array:
c_array.append(utils.data_factory(value))
return c_array
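# Illustrative usage (assumes the compiled c_uamqp extension is importable):
#   AMQPSymbol("x-opt-partition-key").value  # -> b"x-opt-partition-key"
#   AMQPArray([1, 2, 3])                     # ok: all values share one type
#   AMQPArray([1, "a"])                      # raises ValueError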
|
py | 1a3804a091964cbadb7a088c164b9c1804c5cdc3 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fuzzing engine interface."""
_ENGINES = {}
class Error(Exception):
"""Engine error."""
class FuzzOptions(object):
"""Represents options passed to the engine. Can be overridden to provide more
options."""
def __init__(self, corpus_dir, arguments, strategies):
self.corpus_dir = corpus_dir
self.arguments = arguments
self.strategies = strategies
class Crash(object):
"""Represents a crash found by the fuzzing engine."""
def __init__(self, input_path, stacktrace, reproduce_args, crash_time):
self.input_path = input_path
self.stacktrace = stacktrace
self.reproduce_args = reproduce_args
self.crash_time = crash_time
class FuzzResult(object):
"""Represents a result of a fuzzing session: a list of crashes found and the
stats generated."""
def __init__(self, logs, command, crashes, stats, time_executed):
self.logs = logs
self.command = command
self.crashes = crashes
self.stats = stats
self.time_executed = time_executed
class ReproduceResult(object):
"""Results from running a testcase against a target."""
def __init__(self, command, return_code, time_executed, output):
self.command = command
self.return_code = return_code
self.time_executed = time_executed
self.output = output
class Engine(object):
"""Base interface for a grey box fuzzing engine."""
@property
def name(self):
"""Get the name of the engine."""
raise NotImplementedError
def fuzz_additional_processing_timeout(self, options):
"""Return the maximum additional timeout in seconds for additional
operations in fuzz() (e.g. merging back new items).
Args:
options: A FuzzOptions object.
Returns:
An int representing the number of seconds required.
"""
del options
return 0
def prepare(self, corpus_dir, target_path, build_dir):
"""Prepare for a fuzzing session, by generating options. Returns a
FuzzOptions object.
Args:
corpus_dir: The main corpus directory.
target_path: Path to the target.
build_dir: Path to the build directory.
Returns:
A FuzzOptions object.
"""
raise NotImplementedError
def fuzz(self, target_path, options, reproducers_dir, max_time):
"""Run a fuzz session.
Args:
target_path: Path to the target.
options: The FuzzOptions object returned by prepare().
reproducers_dir: The directory to put reproducers in when crashes
are found.
max_time: Maximum allowed time for the fuzzing to run.
Returns:
A FuzzResult object.
"""
raise NotImplementedError
def reproduce(self, target_path, input_path, arguments, max_time):
"""Reproduce a crash given an input.
Args:
target_path: Path to the target.
input_path: Path to the reproducer input.
arguments: Additional arguments needed for reproduction.
max_time: Maximum allowed time for the reproduction.
Returns:
A ReproduceResult.
Raises:
TimeoutError: If the reproduction exceeds max_time.
"""
raise NotImplementedError
def minimize_corpus(self, target_path, arguments, input_dirs, output_dir,
reproducers_dir, max_time):
"""Optional (but recommended): run corpus minimization.
Args:
target_path: Path to the target.
arguments: Additional arguments needed for corpus minimization.
input_dirs: Input corpora.
output_dir: Output directory to place minimized corpus.
reproducers_dir: The directory to put reproducers in when crashes are
found.
max_time: Maximum allowed time for the minimization.
Returns:
A FuzzResult object.
Raises:
TimeoutError: If the corpus minimization exceeds max_time.
Error: If the merge failed in some other way.
"""
raise NotImplementedError
def minimize_testcase(self, target_path, arguments, input_path, output_path,
max_time):
"""Optional (but recommended): Minimize a testcase.
Args:
target_path: Path to the target.
arguments: Additional arguments needed for testcase minimization.
input_path: Path to the reproducer input.
output_path: Path to the minimized output.
max_time: Maximum allowed time for the minimization.
Returns:
A ReproduceResult.
Raises:
TimeoutError: If the testcase minimization exceeds max_time.
"""
raise NotImplementedError
def cleanse(self, target_path, arguments, input_path, output_path, max_time):
"""Optional (but recommended): Cleanse a testcase.
Args:
target_path: Path to the target.
arguments: Additional arguments needed for testcase cleanse.
input_path: Path to the reproducer input.
output_path: Path to the cleansed output.
max_time: Maximum allowed time for the cleanse.
Returns:
A ReproduceResult.
Raises:
TimeoutError: If the cleanse exceeds max_time.
"""
raise NotImplementedError
def register(name, engine_class):
"""Register a fuzzing engine."""
if name in _ENGINES:
raise ValueError('Engine {name} is already registered'.format(name=name))
_ENGINES[name] = engine_class
def get(name):
"""Get an implemntation of a fuzzing engine, or None if one does not exist."""
engine_class = _ENGINES.get(name)
if engine_class:
return engine_class()
return None
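# Illustrative registration flow (MyEngine is a hypothetical Engine subclass):
#   register('my_engine', MyEngine)
#   engine_impl = get('my_engine')  # -> MyEngine() instance
#   get('unknown')                  # -> None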
|
py | 1a38054ecc6ff1f86fd421b66213c35caa321ed4 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''Training set and test set loader'''
import os
import pickle
import numpy as np
class TrainDataset:
'''Training data loader'''
def __init__(self,
sliding_dir,
train_pkl_path,
valid_pkl_path,
visual_dim,
sentence_embed_dim,
IoU=0.5,
nIoU=0.15,
context_num=1,
context_size=128
):
self.sliding_dir = sliding_dir
self.train_pkl_path = train_pkl_path
self.valid_pkl_path = valid_pkl_path
self.visual_dim = visual_dim
self.sentence_embed_dim = sentence_embed_dim
self.IoU = IoU
self.nIoU = nIoU
self.context_num = context_num
self.context_size = context_size
self.load_data()
def load_data(self):
'''load_data'''
train_csv = pickle.load(open(self.train_pkl_path, 'rb'), encoding='iso-8859-1')
self.clip_sentence_pairs = []
for l in train_csv:
clip_name = l[0]
sent_vecs = l[1]
for sent_vec in sent_vecs:
self.clip_sentence_pairs.append((clip_name, sent_vec))
movie_names_set = set()
self.movie_clip_names = {}
for k in range(len(self.clip_sentence_pairs)):
clip_name = self.clip_sentence_pairs[k][0]
movie_name = clip_name.split("_")[0]
if not movie_name in movie_names_set:
movie_names_set.add(movie_name)
self.movie_clip_names[movie_name] = []
self.movie_clip_names[movie_name].append(k)
self.movie_names = list(movie_names_set)
self.num_samples = len(self.clip_sentence_pairs)
# read sliding windows, and match them with the groundtruths to make training samples
sliding_clips_tmp = os.listdir(self.sliding_dir)
sliding_clips_tmp.sort()
self.clip_sentence_pairs_iou = []
movie_names = set()
for clip_name in sliding_clips_tmp:
if clip_name.split(".")[2] == "npy":
movie_name = clip_name.split("_")[0]
movie_names.add(movie_name)
movie_names = list(movie_names)
movie_names.sort()
for movie_name in self.movie_names:
start_ends = []
clip_names = []
for clip_name in sliding_clips_tmp:
if clip_name.split(".")[2] == "npy":
if clip_name.split("_")[0] == movie_name:
start = int(clip_name.split("_")[1])
end = int(clip_name.split("_")[2].split(".")[0])
start_ends.append((start, end))
clip_names.append(clip_name)
table = {}
for clip_sentence in self.clip_sentence_pairs:
o_start_ends = []
original_clip_name = clip_sentence[0]
original_movie_name = original_clip_name.split("_")[0]
if original_movie_name == movie_name:
o_start = int(original_clip_name.split("_")[1])
o_end = int(original_clip_name.split("_")[2].split(".")[0])
if (o_start, o_end) in table.keys():
match_indexs = table[(o_start, o_end)]
for j in match_indexs:
start, end = start_ends[j]
clip_name = clip_names[j]
start_offset = o_start - start
end_offset = o_end - end
self.clip_sentence_pairs_iou.append(
(clip_sentence[0], clip_sentence[1], clip_name, start_offset, end_offset))
else:
o_start_ends.append((o_start, o_end))
start_ends = np.array(start_ends)
o_start_ends = np.array(list(set(o_start_ends)))
if o_start_ends.shape[0] == 0:
continue
ious = self.calc_IoU(start_ends, o_start_ends)
nIoLs = self.calc_nIoL(o_start_ends, start_ends)
match_indexs = (nIoLs < self.nIoU)[0] & (ious > self.IoU)[:, 0]
match_indexs = np.where(match_indexs)[0]
table[(o_start, o_end)] = match_indexs
for k in match_indexs:
start, end = start_ends[k]
clip_name = clip_names[k]
start_offset = o_start - start
end_offset = o_end - end
self.clip_sentence_pairs_iou.append(
(clip_sentence[0], clip_sentence[1], clip_name, start_offset, end_offset))
self.num_samples_iou = len(self.clip_sentence_pairs_iou)
def calc_nIoL(self, base, sliding_clip):
'''Calculate the nIoL of two fragments'''
A = base.shape[0]
inter = self.calc_inter(base, sliding_clip)
sliding_clip = np.expand_dims(sliding_clip, 0).repeat(A, axis=0)
length = sliding_clip[:, :, 1] - sliding_clip[:, :, 0]
nIoL = 1 - inter / length
return nIoL
def calc_IoU(self, clips_a, clips_b):
'''Calculate the IoU of two fragments'''
inter = self.calc_inter(clips_a, clips_b)
union = self.calc_union(clips_a, clips_b)
return inter / union
def calc_inter(self, clips_a, clips_b):
'''Calculate the intersection of two fragments'''
A = clips_a.shape[0]
B = clips_b.shape[0]
clips_a = np.expand_dims(clips_a, 1).repeat(B, axis=1)
clips_b = np.expand_dims(clips_b, 0).repeat(A, axis=0)
max_min = np.maximum(clips_a[:, :, 0], clips_b[:, :, 0])
min_max = np.minimum(clips_a[:, :, 1], clips_b[:, :, 1])
return np.maximum(min_max - max_min, 0)
def calc_union(self, clips_a, clips_b):
'''Calculate the union of two fragments'''
A = clips_a.shape[0]
B = clips_b.shape[0]
clips_a = np.expand_dims(clips_a, 1).repeat(B, axis=1)
clips_b = np.expand_dims(clips_b, 0).repeat(A, axis=0)
min_min = np.minimum(clips_a[:, :, 0], clips_b[:, :, 0])
max_max = np.maximum(clips_a[:, :, 1], clips_b[:, :, 1])
return max_max - min_min
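    # Quick sanity check of the interval arithmetic above (illustrative):
    #   calc_IoU(np.array([[0, 10]]), np.array([[5, 15]]))  # -> [[1/3]] (inter=5, union=15)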
def get_context_window(self, clip_name):
'''Get the context window of the fragment'''
movie_name = clip_name.split("_")[0]
start = int(clip_name.split("_")[1])
end = int(clip_name.split("_")[2].split(".")[0])
self.context_size = end - start
left_context_feats = np.zeros([self.context_num, self.visual_dim // 3], dtype=np.float32)
right_context_feats = np.zeros([self.context_num, self.visual_dim // 3], dtype=np.float32)
last_left_feat = np.load(os.path.join(self.sliding_dir, clip_name))
last_right_feat = np.load(os.path.join(self.sliding_dir, clip_name))
for k in range(self.context_num):
left_context_start = start - self.context_size * (k + 1)
left_context_end = start - self.context_size * k
right_context_start = end + self.context_size * k
right_context_end = end + self.context_size * (k + 1)
left_context_name = movie_name + "_" + str(left_context_start) + "_" + str(left_context_end) + ".npy"
right_context_name = movie_name + "_" + str(right_context_start) + "_" + str(right_context_end) + ".npy"
left_context_path = os.path.join(self.sliding_dir, left_context_name)
if os.path.exists(left_context_path):
left_context_feat = np.load(left_context_path)
last_left_feat = left_context_feat
else:
left_context_feat = last_left_feat
right_context_path = os.path.join(self.sliding_dir, right_context_name)
if os.path.exists(right_context_path):
right_context_feat = np.load(right_context_path)
last_right_feat = right_context_feat
else:
right_context_feat = last_right_feat
left_context_feats[k] = left_context_feat
right_context_feats[k] = right_context_feat
return np.mean(left_context_feats, axis=0), np.mean(right_context_feats, axis=0)
def __getitem__(self, index):
'''Return a data'''
left_context_feat, right_context_feat = self.get_context_window(self.clip_sentence_pairs_iou[index][2])
feat_path = os.path.join(self.sliding_dir, self.clip_sentence_pairs_iou[index][2])
featmap = np.load(feat_path)
vis = np.hstack((left_context_feat, featmap, right_context_feat))
sent = self.clip_sentence_pairs_iou[index][1][:self.sentence_embed_dim]
p_offset = self.clip_sentence_pairs_iou[index][3]
l_offset = self.clip_sentence_pairs_iou[index][4]
offset = np.array([p_offset, l_offset], dtype=np.float32)
return np.concatenate((vis, sent)), offset
def __len__(self):
'''Return the length of the data set'''
return self.num_samples_iou
class TestingDataSet:
'''TestingDataSet'''
def __init__(self, img_dir, csv_path, batch_size):
self.batch_size = batch_size
self.image_dir = img_dir
self.semantic_size = 4800
csv = pickle.load(open(csv_path, 'rb'), encoding='iso-8859-1')
self.clip_sentence_pairs = []
for l in csv:
clip_name = l[0]
sent_vecs = l[1]
for sent_vec in sent_vecs:
self.clip_sentence_pairs.append((clip_name, sent_vec))
movie_names_set = set()
self.movie_clip_names = {}
for k in range(len(self.clip_sentence_pairs)):
clip_name = self.clip_sentence_pairs[k][0]
movie_name = clip_name.split("_")[0]
if not movie_name in movie_names_set:
movie_names_set.add(movie_name)
self.movie_clip_names[movie_name] = []
self.movie_clip_names[movie_name].append(k)
self.movie_names = list(movie_names_set)
self.movie_names.sort()
self.clip_num_per_movie_max = 0
for movie_name in self.movie_clip_names:
if len(self.movie_clip_names[movie_name]) > self.clip_num_per_movie_max:
self.clip_num_per_movie_max = len(self.movie_clip_names[movie_name])
self.sliding_clip_path = img_dir
sliding_clips_tmp = os.listdir(self.sliding_clip_path)
self.sliding_clip_names = []
for clip_name in sliding_clips_tmp:
if clip_name.split(".")[2] == "npy":
movie_name = clip_name.split("_")[0]
if movie_name in self.movie_clip_names:
self.sliding_clip_names.append(clip_name.split(".")[0]+"."+clip_name.split(".")[1])
self.num_samples = len(self.clip_sentence_pairs)
assert self.batch_size <= self.num_samples
def get_clip_sample(self, sample_num, movie_name, clip_name):
'''Get a clip'''
length = len(os.listdir(self.image_dir+movie_name+"/"+clip_name))
sample_step = 1.0*length/sample_num
sample_pos = np.floor(sample_step*np.array(range(sample_num)))
sample_pos_str = []
img_names = os.listdir(self.image_dir+movie_name+"/"+clip_name)
# sort is very important! to get a correct sequence order
img_names.sort()
for pos in sample_pos:
sample_pos_str.append(self.image_dir+movie_name+"/"+clip_name+"/"+img_names[int(pos)])
return sample_pos_str
def get_context_window(self, clip_name, win_length):
'''Get the context window of the fragment'''
movie_name = clip_name.split("_")[0]
start = int(clip_name.split("_")[1])
end = int(clip_name.split("_")[2].split(".")[0])
        clip_length = 128  # end - start
left_context_feats = np.zeros([win_length, 4096], dtype=np.float32)
right_context_feats = np.zeros([win_length, 4096], dtype=np.float32)
last_left_feat = np.load(self.sliding_clip_path+clip_name)
last_right_feat = np.load(self.sliding_clip_path+clip_name)
for k in range(win_length):
left_context_start = start-clip_length*(k+1)
left_context_end = start-clip_length*k
right_context_start = end+clip_length*k
right_context_end = end+clip_length*(k+1)
left_context_name = movie_name+"_"+str(left_context_start)+"_"+str(left_context_end)+".npy"
right_context_name = movie_name+"_"+str(right_context_start)+"_"+str(right_context_end)+".npy"
if os.path.exists(self.sliding_clip_path+left_context_name):
left_context_feat = np.load(self.sliding_clip_path+left_context_name)
last_left_feat = left_context_feat
else:
left_context_feat = last_left_feat
if os.path.exists(self.sliding_clip_path+right_context_name):
right_context_feat = np.load(self.sliding_clip_path+right_context_name)
last_right_feat = right_context_feat
else:
right_context_feat = last_right_feat
left_context_feats[k] = left_context_feat
right_context_feats[k] = right_context_feat
return np.mean(left_context_feats, axis=0), np.mean(right_context_feats, axis=0)
def load_movie_byclip(self, movie_name, sample_num):
'''Read visual features through clip'''
movie_clip_sentences = []
movie_clip_featmap = []
clip_set = set()
for k in range(len(self.clip_sentence_pairs)):
if movie_name in self.clip_sentence_pairs[k][0]:
movie_clip_sentences.append(
(self.clip_sentence_pairs[k][0], self.clip_sentence_pairs[k][1][:self.semantic_size]))
if not self.clip_sentence_pairs[k][0] in clip_set:
clip_set.add(self.clip_sentence_pairs[k][0])
visual_feature_path = self.image_dir+self.clip_sentence_pairs[k][0]+".npy"
feature_data = np.load(visual_feature_path)
movie_clip_featmap.append((self.clip_sentence_pairs[k][0], feature_data))
return movie_clip_featmap, movie_clip_sentences
def load_movie_slidingclip(self, movie_name, sample_num):
'''Read visual features through slidingclip'''
movie_clip_sentences = []
movie_clip_featmap = []
for k in range(len(self.clip_sentence_pairs)):
if movie_name in self.clip_sentence_pairs[k][0]:
movie_clip_sentences.append(
(self.clip_sentence_pairs[k][0], self.clip_sentence_pairs[k][1][:self.semantic_size]))
for k in range(len(self.sliding_clip_names)):
if movie_name in self.sliding_clip_names[k]:
visual_feature_path = self.sliding_clip_path+self.sliding_clip_names[k]+".npy"
left_context_feat, right_context_feat = self.get_context_window(self.sliding_clip_names[k]+".npy", 1)
feature_data = np.load(visual_feature_path)
comb_feat = np.hstack((left_context_feat, feature_data, right_context_feat))
movie_clip_featmap.append((self.sliding_clip_names[k], comb_feat))
return movie_clip_featmap, movie_clip_sentences
|
py | 1a3805ff34aa29af4fd3867aca86ab953a9b5aa0 | # -*- coding: utf-8 -*_
#
# Copyright (c) 2020, Pureport, Inc.
# All Rights Reserved
from __future__ import absolute_import
from . import run_command_test
from ...utils import utils
def test_get():
run_command_test('gateways', 'get', utils.random_string())
def test_get_bgp_routes():
run_command_test('gateways', 'get-bgp-routes', utils.random_string())
def test_get_connectivity_over_time():
run_command_test('gateways', 'get-connectivity-over-time',
utils.random_string(), {'gt': utils.random_string()})
def test_get_latest_connectivity():
run_command_test('gateways', 'get-latest-connectivity', utils.random_string())
def test_get_tasks():
run_command_test('gateways', 'get-tasks', utils.random_string())
def test_create_task():
run_command_test('gateways', 'create-task', utils.random_string(), {})
|
py | 1a38066082ae9b708009a9c177d424d1c7f5df19 | # Copyright (c) 2013 James King <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from hy.models.string import HyString
class HyLambdaListKeyword(HyString):
"""
Hy LambdaListKeyword. Demarcates arguments in an argument list.
(defun my-fun (x &rest xs &optional (foo "default string")))
becomes:
def my_fun(x, *xs, foo="default string"):
pass
"""
_valid_types = ["&rest", "&optional", "&key", "&kwargs"]
def __init__(self, string):
self += string
|
py | 1a3807123754e66d81a2abb99669bb2aeb837a27 | from datetime import datetime
from flask import Flask
from flask import request, Response, render_template, redirect, url_for, flash, request
from flask_sqlalchemy import SQLAlchemy
from flask_login import current_user, login_required, login_user, LoginManager, logout_user, UserMixin
from werkzeug.security import check_password_hash, generate_password_hash
from forms import SignUpForm, LoginForm
app = Flask(__name__)
#helps with debugging errors while flask app is running
app.config["DEBUG"] = True
#SECRET_KEY generated using python interpreter:
# $ python
# >>> import secrets
# >>> secrets.token_hex(16)
# >>> a65643b9b52d637a11b3182e923e5703
app.config["SECRET_KEY"]= 'a65643b9b52d637a11b3182e923e5703'
login_manager = LoginManager()
login_manager.init_app(app)
#Using SQLite for development
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///hackmerced.db'
db = SQLAlchemy(app)
###***** Users Table ******###
class Users(UserMixin, db.Model):
__tablename__ = "Users"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(89))
fullname = db.Column(db.String(89))
email = db.Column(db.String(89))
ucmId = db.Column(db.String(89))
pwd = db.Column(db.String(128))
bio = db.Column(db.String(500))
major = db.Column(db.String(89))
gradDate = db.Column(db.String(89))
def check_password(self, userinputPwd):
return check_password_hash(self.pwd, userinputPwd)
def get_id(self):
return self.email
###***** Users Table ******###
###***** Tracks Table ******###
class Threads(db.Model):
__tablename__ = "Threads"
id = db.Column(db.Integer, primary_key=True)
views = db.Column(db.Integer, default=0)
title = db.Column(db.String(89))
url = db.Column(db.String(89))
addedTimeStamp = db.Column(db.DateTime, default=datetime.now)
#we might need a different database type to hold comments (can be very long)
description = db.Column(db.String(3000))
'''{"owner": INT , "comment": String},{},{},{}'''
replies = db.Column(db.String(3000), default=" ")
upvotes = db.Column(db.Integer, default=0)
downupvotes = db.Column(db.Integer, default=0)
usersUpvoted = db.Column(db.String(3000), default=" ")
userDownvoted = db.Column(db.String(3000), default=" ")
owner_id = db.Column(db.Integer, db.ForeignKey('Users.id'), nullable=True)
owner = db.relationship('Users', foreign_keys=owner_id)
###***** Tracks Table ******###
@login_manager.user_loader
def load_user(userInputEmail):
return Users.query.filter_by(email=userInputEmail).first()
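# Note: Users.get_id() returns the email address, so Flask-Login hands that email
# back to this loader rather than the integer primary key.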
@app.route("/signout")
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route("/dashboard")
@login_required
def dashboard_home():
return render_template('dashboard.html')
@app.route('/signup', methods= ['GET', 'POST'])
def register():
form = SignUpForm()
if request.method == "POST":
if not form.validate_on_submit():
flash('Please enter valid credentials!', 'danger')
return redirect(url_for('register'))
#Check if username already exists
        #Make password at least 8 chars long
#Take to "finish making profile" one time page
if not Users.query.filter_by(username=request.form['username']).first() and not Users.query.filter_by(email=request.form['email']).first():
print('Query responded with None.')
#create a row in DataBases
newUser = Users(username=request.form['username'],
fullname=request.form['username'],
email=request.form['email'],
pwd= generate_password_hash(str(request.form['password'])))
db.session.add(newUser)
db.session.commit()
flash('Thanks for signing up, you will now be able to login!', 'success')
return redirect(url_for('login'))
if Users.query.filter_by(username=request.form['username']).first():
            flash('That username is taken! Select another.', 'danger')
return redirect(url_for('register'))
if Users.query.filter_by(email=request.form['email']).first():
flash('That email cannot be used.', 'danger')
return redirect(url_for('register'))
return redirect(url_for('register'))
if request.method == "GET":
return render_template('signup.html', form=form)
@app.route('/login', methods= ['GET', 'POST'])
def login():
form = LoginForm()
if request.method == "POST":
if not Users.query.filter_by(email=request.form['email']).first():
flash('No user with that email!', 'danger')
return redirect(url_for('login'))
user = load_user(str(request.form['email']))
if not user.check_password(request.form['password']):
flash('Wrong password!', 'danger')
return redirect(url_for('login'))
print(type(user))
login_user(user)
return redirect(url_for('dashboard_home'))
return render_template('login.html', form=form)
@app.route("/thread", methods=['GET','POST'])
@login_required
def make_thread():
if request.method == "POST":
if(request.form['title'] and request.form['description']):
newThread = Threads(title=request.form['title'],
url = request.form['title'].replace(" ", "-"),
description=request.form['description'],
owner=current_user)
db.session.add(newThread)
db.session.commit()
else:
return render_template("createpost.html")
return render_template('dashboard.html')
@app.route("/<threadTitle>", methods=['GET','POST'])
@login_required
def show_thread(threadTitle):
query = Threads.query.filter_by(url = threadTitle).first()
if query is None:
return redirect(url_for("dashboard_home"))
else:
views = query.views
threadDict = {"title": query.title, "description": query.description, "replies": query.replies, "views": views}
query.views = query.views + 1
db.session.commit()
return render_template('post.html', threadDict = threadDict)
if __name__ == '__main__':
app.run(host="0.0.0.0", port="8081")
|
py | 1a3809afe0da2516f442abc4c50738e391ed8f46 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands for interacting with dogbin(https://del.dog)"""
from requests import get, post, exceptions
import os
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
DOGBIN_URL = "https://del.dog/"
NEKOBIN_URL = "https://nekobin.com/"
@register(outgoing=True, pattern=r"^.paste(?: |$)([\s\S]*)")
async def paste(pstl):
""" For .paste command, pastes the text directly to dogbin. """
dogbin_final_url = ""
match = pstl.pattern_match.group(1).strip()
reply_id = pstl.reply_to_msg_id
if not (match or reply_id):
await pstl.edit("`Elon Musk said I cannot paste void.`")
return
if match:
message = match
elif reply_id:
message = await pstl.get_reply_message()
if message.media:
downloaded_file_name = await pstl.client.download_media(
message, TEMP_DOWNLOAD_DIRECTORY,
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8") + "\r"
os.remove(downloaded_file_name)
else:
message = message.message
# Dogbin
await pstl.edit("`Pasting text . . .`")
resp = post(DOGBIN_URL + "documents", data=message.encode("utf-8"))
if resp.status_code == 200:
response = resp.json()
key = response["key"]
dogbin_final_url = DOGBIN_URL + key
if response["isUrl"]:
reply_text = (
"`Pasted successfully!`\n\n"
f"`Shortened URL:` {dogbin_final_url}\n\n"
"`Original(non-shortened) URLs`\n"
f"`Dogbin URL`: {DOGBIN_URL}v/{key}\n"
)
else:
reply_text = (
"`Pasted successfully!`\n\n" f"`Dogbin URL`: {dogbin_final_url}"
)
else:
reply_text = "`Failed to reach Dogbin`"
await pstl.edit(reply_text)
if BOTLOG:
await pstl.client.send_message(
BOTLOG_CHATID, "Paste query was executed successfully",
)
@register(outgoing=True, pattern="^.getpaste(?: |$)(.*)")
async def get_dogbin_content(dog_url):
""" For .getpaste command, fetches the content of a dogbin URL. """
textx = await dog_url.get_reply_message()
message = dog_url.pattern_match.group(1)
await dog_url.edit("`Getting dogbin content...`")
if textx:
message = str(textx.message)
format_normal = f"{DOGBIN_URL}"
format_view = f"{DOGBIN_URL}v/"
if message.startswith(format_view):
message = message[len(format_view) :]
elif message.startswith(format_normal):
message = message[len(format_normal) :]
elif message.startswith("del.dog/"):
message = message[len("del.dog/") :]
else:
await dog_url.edit("`Is that even a dogbin url?`")
return
resp = get(f"{DOGBIN_URL}raw/{message}")
try:
resp.raise_for_status()
except exceptions.HTTPError as HTTPErr:
await dog_url.edit(
"Request returned an unsuccessful status code.\n\n" + str(HTTPErr)
)
return
except exceptions.Timeout as TimeoutErr:
await dog_url.edit("Request timed out." + str(TimeoutErr))
return
except exceptions.TooManyRedirects as RedirectsErr:
await dog_url.edit(
"Request exceeded the configured number of maximum redirections."
+ str(RedirectsErr)
)
return
reply_text = "`Fetched dogbin URL content successfully!`\n\n`Content:` " + resp.text
await dog_url.edit(reply_text)
if BOTLOG:
await dog_url.client.send_message(
BOTLOG_CHATID, "Get dogbin content query was executed successfully",
)
@register(outgoing=True, pattern=r"^\.neko(?: |$)([\s\S]*)")
async def neko(nekobin):
    """For .neko command, pastes the text directly to nekobin."""
nekobin_final_url = ""
match = nekobin.pattern_match.group(1).strip()
reply_id = nekobin.reply_to_msg_id
if not match and not reply_id:
        return await nekobin.edit("`Cannot paste text.`")
if match:
message = match
elif reply_id:
message = await nekobin.get_reply_message()
if message.media:
downloaded_file_name = await nekobin.client.download_media(
message,
TEMP_DOWNLOAD_DIRECTORY,
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8")
os.remove(downloaded_file_name)
else:
message = message.text
# Nekobin
await nekobin.edit("`Pasting text . . .`")
resp = post(NEKOBIN_URL + "api/documents", json={"content": message})
if resp.status_code == 201:
response = resp.json()
key = response["result"]["key"]
nekobin_final_url = NEKOBIN_URL + key
reply_text = (
"`Pasted successfully!`\n\n"
f"[Nekobin URL]({nekobin_final_url})\n"
f"[View RAW]({NEKOBIN_URL}raw/{key})"
)
else:
reply_text = "`Failed to reach Nekobin`"
await nekobin.edit(reply_text)
if BOTLOG:
await nekobin.client.send_message(
BOTLOG_CHATID,
"Paste query was executed successfully",
)
CMD_HELP.update(
{
"dogbin": ".paste <text/reply>\
\nUsage: Create a paste or a shortened url using dogbin (https://del.dog/)\
\n\n.getpaste\
\nUsage: Gets the content of a paste or shortened url from dogbin (https://del.dog/)\
\n\n.neko <text/reply>\
\nUsage: Create a paste or a shortened url using nekobin (https://nekobin.com/)"
}
)
|
py | 1a3809d426de75da8c380544bb2343e73c578ca9 | # -*- coding:utf-8 -*-
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, and_, or_, func
from sqlalchemy.orm import sessionmaker
# Create the declarative base class
Base = declarative_base()
class User(Base):
"""
    1. Specify the table name
    2. Specify the table structure (columns)
"""
__tablename__ = 'user'
def __init__(self, name=None, age=None, address=None):
self.user_name = name
self.user_age = age
self.user_address = address
id = Column(Integer, primary_key=True, autoincrement=True)
user_name = Column('userName', String(255))
user_age = Column('userAge', Integer)
user_address = Column('userAddress', String(255))
def __str__(self):
return self.user_name
    def __repr__(self):
        # __repr__ must return a string; user_age is an integer
        return str(self.user_age)
# Database connection; echo=True prints the generated SQL
engine = create_engine("mysql+pymysql://root:123456@localhost:3306/java?charset=utf8", echo=True)
# Create the table schema
# Base.metadata.create_all(engine)
# Drop the tables
# Base.metadata.drop_all(engine)
# session
Session = sessionmaker(bind=engine)
session = Session()
if __name__ == '__main__':
    # Create (insert)
u = User('user', 10, 'address')
u1 = User()
u1.user_name = 'user1'
u1.user_age = 11
u1.user_address = 'address'
session.add(u)
session.add_all([u1])
session.commit()
    # Delete
session.query(User).filter(User.id > 10).delete()
session.query(User).filter_by(user_name='user').delete()
session.commit()
    # Update
session.query(User).filter(User.id == 1).update({User.user_name: 'user_name'})
session.query(User).filter_by(user_name='user_name').update({'user_name': 'test_name'})
session.commit()
    # Read (query)
user = session.query(User).first()
# and
users = session.query(User).filter(User.id.in_([1, 2, 3])
, User.user_name == 'test_name').all()
users1 = session.query(User).filter(and_(User.id == 1, User.user_name == 'test_name')).all()
# or
users2 = session.query(User).filter(or_(User.id > 1, User.user_name == 'test_name')).all()
# like
users3 = session.query(User).filter(User.user_name.like('name%')).all()
# limit
users4 = session.query(User)[0:1]
# sort
users5 = session.query(User).order_by(User.id.desc()).all()
# group
users6 = session.query(User).group_by(User.id).all()
# func
max_id = session.query(func.max(User.id)).one()
sum_age = session.query(func.sum(User.user_age)).one()
|
py | 1a3809e43d4ab7109febd12f52bf96973f9471eb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
from HTMLTestRunner import HTMLTestRunner
import unittest
from db_fixture import test_data
sys.path.append('./interface')
sys.path.append('./db_fixture')
# Discover test cases from the ./interface directory under the current folder
test_dir = './interface'
discover = unittest.defaultTestLoader.discover(test_dir, pattern='*_test.py')
if __name__ == "__main__":
    # Initialize the interface test data
test_data.init_data()
now = time.strftime("%Y-%m-%d %H_%M_%S")
filename = './report/' + now + '_result.html'
fp = open(filename, 'wb')
runner = HTMLTestRunner(stream=fp,
title='Guest Manage System Interface Test Report',
description='Implementation Example with: ')
runner.run(discover)
fp.close()
|
py | 1a380a0eff132dab33aa43b0c43cbdf60a0af1ea | from collections import defaultdict
from mesa.time import RandomActivation
class RandomActivationByBreed(RandomActivation):
"""
A scheduler which activates each type of agent once per step, in random
order, with the order reshuffled every step.
This is equivalent to the NetLogo 'ask breed...' and is generally the
default behavior for an ABM.
Assumes that all agents have a step() method.
"""
def __init__(self, model):
super().__init__(model)
self.agents_by_breed = defaultdict(dict)
self.seen_ids = set()
def add(self, agent):
"""
Add an Agent object to the schedule
Args:
agent: An Agent to be added to the schedule.
"""
self._agents[agent.unique_id] = agent
agent_class = type(agent)
self.agents_by_breed[agent_class][agent.unique_id] = agent
def remove(self, agent):
"""
Remove all instances of a given agent from the schedule.
"""
del self._agents[agent.unique_id]
agent_class = type(agent)
del self.agents_by_breed[agent_class][agent.unique_id]
def step(self, by_breed=True):
"""
Executes the step of each agent breed, one at a time, in random order.
Args:
by_breed: If True, run all agents of a single breed before running
the next one.
"""
if by_breed:
for agent_class in self.agents_by_breed:
self.step_breed(agent_class)
self.steps += 1
self.time += 1
else:
super().step()
def step_breed(self, breed):
"""
Shuffle order and run all agents of a given breed.
Args:
breed: Class object of the breed to run.
"""
agent_keys = list(self.agents_by_breed[breed].keys())
self.model.random.shuffle(agent_keys)
for agent_key in agent_keys:
self.agents_by_breed[breed][agent_key].step()
def get_breed_count(self, breed_class):
"""
Returns the current number of agents of certain breed in the queue.
"""
return len(self.agents_by_breed[breed_class].values())
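# A minimal usage sketch (not part of the original module): the Wolf and
# ExampleModel names below are hypothetical and only illustrate how this
# scheduler is typically wired into a Mesa model.
#
#     from mesa import Agent, Model
#
#     class Wolf(Agent):
#         def step(self):
#             pass  # breed-specific behaviour goes here
#
#     class ExampleModel(Model):
#         def __init__(self):
#             super().__init__()
#             self.schedule = RandomActivationByBreed(self)
#             self.schedule.add(Wolf(1, self))
#
#         def step(self):
#             # runs every Wolf before moving on to the next breed
#             self.schedule.step(by_breed=True)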
|
py | 1a380a11868e10aaaf75e88802b01c4af4517c33 | "Miscellaneous utilities"
###########################################################################
# Copyright (C) 2008 William Stein <[email protected]> #
# Distributed under the terms of the GNU General Public License (GPL) #
# http://www.gnu.org/licenses/ #
###########################################################################
from sage.structure.sequence import Sequence
from sage.categories.fields import Fields
_Fields = Fields()
def composite_field(K, L):
"""
Return a canonical field that contains both $K$ and $L$, if possible.
Otherwise, raise a ValueError.
INPUT:
K -- field
L -- field
OUTPUT:
field
EXAMPLES:
sage: composite_field(QQ,QQbar)
doctest:...: DeprecationWarning: The function composite_field() is deprecated. Use get_coercion_model().common_parent() instead
See http://trac.sagemath.org/19415 for details.
Algebraic Field
sage: composite_field(QQ,QQ[sqrt(2)])
Number Field in sqrt2 with defining polynomial x^2 - 2
sage: composite_field(QQ,QQ)
Rational Field
sage: composite_field(QQ,GF(7))
Traceback (most recent call last):
...
ValueError: unable to find a common field
"""
from sage.misc.superseded import deprecation
deprecation(19415, "The function composite_field() is deprecated. Use get_coercion_model().common_parent() instead")
C = Sequence([K(0), L(0)]).universe()
if C not in _Fields:
raise ValueError("unable to find a common field")
return C
|
py | 1a380b7fba5c8a760e2b32d1f5c5ad94a9d5d6a4 | #coding: utf-8
class TestParam(object):
pass
class Test(object):
test_param = TestParam()
print "Test", id(Test)
print "TestParam", id(TestParam)
print "Test.TestParam", id(Test.test_param)
z = Test()
print "Test()", id(z)
print "Test().TestParam", id(z.test_param)
print "2Test().TestParam", id(Test().test_param) |
py | 1a380bcf286c6e563362ee589ce3500c52aa0dba | # Package: JSON Mode
# License: Released under MIT License
# Notice: Copyright (c) 2020 TytusDB Team
# Developer: Luis Espino
import os
import json
path = 'data/json/'
dataPath = path + 'databases'
##################
# Databases CRUD #
##################
# CREATE a database checking their existence
def createDatabase(database: str) -> int:
try:
if not database.isidentifier():
raise Exception()
initCheck()
data = read(dataPath)
if database in data:
return 2
new = {database:{}}
data.update(new)
write(dataPath, data)
return 0
except:
return 1
# READ and show databases by constructing a list
def showDatabases() -> list:
try:
initCheck()
databases = []
data = read(dataPath)
for d in data:
databases.append(d);
return databases
except:
return []
# UPDATE and rename a database name by inserting new_key and deleting old_key
def alterDatabase(databaseOld: str, databaseNew) -> int:
try:
if not databaseOld.isidentifier() or not databaseNew.isidentifier():
raise Exception()
initCheck()
data = read(dataPath)
if not databaseOld in data:
return 2
if databaseNew in data:
return 3
data[databaseNew] = data[databaseOld]
data.pop(databaseOld)
write(dataPath, data)
return 0
except:
return 1
# DELETE a database by pop from dictionary
def dropDatabase(database: str) -> int:
try:
if not database.isidentifier():
raise Exception()
initCheck()
data = read(dataPath)
if not database in data:
return 2
data.pop(database)
write(dataPath, data)
return 0
except:
return 1
###############
# Tables CRUD #
###############
# CREATE a table checking their existence
def createTable(database: str, table: str, numberColumns: int) -> int:
try:
if not database.isidentifier() \
or not table.isidentifier() \
or not isinstance(numberColumns, int):
raise Exception()
initCheck()
data = read(dataPath)
if not database in data:
return 2
if table in data[database]:
return 3
new = {table:{"NCOL":numberColumns}}
data[database].update(new)
write(dataPath, data)
dataTable = {}
write(path+database+'-'+table, dataTable)
return 0
except:
return 1
# READ and show tables of a database by constructing a list
def showTables(database: str) -> list:
try:
initCheck()
tables = []
data = read(dataPath)
if not database in data:
return None
for d in data[database]:
tables.append(d);
return tables
except:
return []
# extract all registers of a table
def extractTable(database: str, table: str) -> list:
try:
initCheck()
rows = []
data = read(dataPath)
if not database in data:
return None
if table not in data[database]:
return None
data = read(path+database+'-'+table)
for d in data:
rows.append(data[d]);
return rows
except:
return None
# extract a range of registers of a table
def extractRangeTable(database: str, table: str, lower: any, upper: any) -> list:
initCheck()
rows = []
with open('data/json/databases') as file:
data = json.load(file)
if not database in data:
return rows
else:
if table not in data[database]:
return rows
with open('data/json/'+database+'-'+table) as file:
data = json.load(file)
for d in data:
if (str(d)<=str(upper) and str(d)>=str(lower)):
rows.append(data[d]);
return rows
# Add a PK list to specific table and database
def alterAddPK(database: str, table: str, columns: list) -> int:
try:
if not database.isidentifier() \
or not table.isidentifier() \
or not isinstance(columns, list):
raise Exception()
initCheck()
data = read(dataPath)
if not database in data:
return 2
if not table in data[database]:
return 3
if "PKEY" in data[database][table]:
return 4
maxi = max(columns)
mini = min(columns)
if not (mini>=0 and maxi<data[database][table]["NCOL"]):
return 5
new = {"PKEY":columns}
data[database][table].update(new)
write(dataPath, data)
return 0
except:
return 1
# Drop the PK list from a specific table and database
def alterDropPK(database: str, table: str) -> int:
initCheck()
dump = False
with open('data/json/databases') as file:
data = json.load(file)
if not database in data:
return 2
else:
if not table in data[database]:
return 3
if "PKEY" not in data[database][table]:
return 4
else:
data[database][table].pop("PKEY")
dump = True
if dump:
with open('data/json/databases', 'w') as file:
json.dump(data, file)
return 0
else:
return 1
# Rename a table name by inserting new_key and deleting old_key
def alterTable(database: str, tableOld: str, tableNew: str) -> int:
try:
if not database.isidentifier() \
or not tableOld.isidentifier() \
or not tableNew.isidentifier() :
raise Exception()
initCheck()
data = read(dataPath)
if not database in data:
return 2
if not tableOld in data[database]:
return 3
if tableNew in data[database]:
return 4
data[database][tableNew] = data[database][tableOld]
data[database].pop(tableOld)
write(dataPath, data)
return 0
except:
return 1
# add a column at the end of register with default value
def alterAddColumn(database: str, table: str, default: any) -> int:
initCheck()
dump = False
with open('data/json/databases') as file:
data = json.load(file)
if not database in data:
return 2
else:
if not table in data[database]:
return 3
data[database][table]['NCOL']+=1
dump = True
if dump:
with open('data/json/databases', 'w') as file:
json.dump(data, file)
with open('data/json/'+database+'-'+table) as file:
data = json.load(file)
for d in data:
data[d].append(default)
with open('data/json/'+database+'-'+table, 'w') as file:
json.dump(data, file)
return 0
else:
return 1
# drop a column and its content (except primary key columns)
def alterDropColumn(database: str, table: str, columnNumber: int) -> int:
initCheck()
dump = False
with open('data/json/databases') as file:
data = json.load(file)
if not database in data:
return 2
else:
if not table in data[database]:
return 3
ncol = data[database][table]['NCOL']
pkey = data[database][table]['PKEY']
if columnNumber in pkey:
return 4
if not ncol >len(pkey):
return 4
if columnNumber<0 or columnNumber>ncol-1:
return 5
data[database][table]['NCOL']-=1
dump = True
if dump:
with open('data/json/databases', 'w') as file:
json.dump(data, file)
with open('data/json/'+database+'-'+table) as file:
data = json.load(file)
for d in data:
data[d].pop(columnNumber)
with open('data/json/'+database+'-'+table, 'w') as file:
json.dump(data, file)
return 0
else:
return 1
# Delete a table by removing its key from the database dictionary
def dropTable(database: str, table: str) -> int:
try:
if not database.isidentifier() \
or not table.isidentifier() :
raise Exception()
initCheck()
data = read(dataPath)
if not database in data:
return 2
if not table in data[database]:
return 3
data[database].pop(table)
write(dataPath,data)
return 0
except:
return 1
##################
# Registers CRUD #
##################
# CREATE or insert a register
def insert(database: str, table: str, register: list) -> int:
try:
if not database.isidentifier() \
or not table.isidentifier() \
or not isinstance(register, list):
raise Exception()
initCheck()
hide = False
ncol = None
pkey = None
pk = ""
data = read(dataPath)
if not database in data:
return 2
if table not in data[database]:
return 3
if len(register)!=data[database][table]["NCOL"]:
return 5
if "PKEY" not in data[database][table]:
# hidden pk
hide = True
else:
# defined pk
pkey = data[database][table]["PKEY"]
ncol = data[database][table]["NCOL"]
data = read(path+database+'-'+table)
if hide:
pk = len(data)
else:
for i in pkey:
pk += str(register[i])+'|'
if pk in data:
return 4
new = {pk:register}
data.update(new)
write(path+database+'-'+table, data)
return 0
except:
return 1
# READ or load a CSV file to a table
def loadCSV(filepath: str, database: str, table: str) -> list:
try:
res = []
import csv
with open(filepath, 'r') as file:
reader = csv.reader(file, delimiter = ',')
for row in reader:
res.append(insert(database,table,row))
return res
except:
return []
# READ or extract a register
def extractRow(database: str, table: str, columns: list) -> list:
initCheck()
hide = False
ncol = None
pkey = None
pk = ""
with open('data/json/databases') as file:
data = json.load(file)
if not database in data:
return []
else:
if table not in data[database]:
return []
if "PKEY" not in data[database][table]:
# hidden pk
hide = True
else:
# defined pk
pkey = data[database][table]["PKEY"]
with open('data/json/'+database+'-'+table) as file:
data = json.load(file)
if hide:
pk = columns[0]
else:
for i in pkey:
pk += str(columns[i])
if not pk in data:
return []
else:
return data[pk]
# UPDATE a register
def update(database: str, table: str, register: dict, columns: list) -> int:
initCheck()
dump = False
hide = False
ncol = None
pkey = None
pk = ""
with open('data/json/databases') as file:
data = json.load(file)
if not database in data:
return 2
else:
if table not in data[database]:
return 3
if "PKEY" not in data[database][table]:
# hidden pk
hide = True
else:
# defined pk
pkey = data[database][table]["PKEY"]
with open('data/json/'+database+'-'+table) as file:
data = json.load(file)
if hide:
pk = columns[0]
else:
for i in pkey:
pk += str(columns[i])
if not pk in data:
return 4
else:
for key in register:
data[pk][key] = register[key]
dump = True
if dump:
with open('data/json/'+database+'-'+table, 'w') as file:
json.dump(data, file)
return 0
else:
return 1
# DELETE a specific register
def delete(database: str, table: str, columns: list) -> int:
initCheck()
dump = False
hide = False
ncol = None
pkey = None
pk = ""
with open('data/json/databases') as file:
data = json.load(file)
if not database in data:
return 2
else:
if table not in data[database]:
return 3
if "PKEY" not in data[database][table]:
# hidden pk
hide = True
else:
# defined pk
pkey = data[database][table]["PKEY"]
with open('data/json/'+database+'-'+table) as file:
data = json.load(file)
if hide:
pk = columns[0]
else:
for i in pkey:
pk += str(columns[i])
if not pk in data:
return 4
else:
data.pop(pk)
dump = True
if dump:
with open('data/json/'+database+'-'+table, 'w') as file:
json.dump(data, file)
return 0
else:
return 1
# DELETE or truncate all registers of the table
def truncate(database: str, table: str) -> int:
initCheck()
dump = False
hide = False
ncol = None
pkey = None
pk = ""
with open('data/json/databases') as file:
data = json.load(file)
if not database in data:
return 2
else:
if table not in data[database]:
return 3
dump = True
if dump:
data = {}
with open('data/json/'+database+'-'+table, 'w') as file:
json.dump(data, file)
return 0
else:
return 1
#############
# Utilities #
#############
# Check the existence of data and json folder and databases file
# Create databases files if not exists
def initCheck():
if not os.path.exists('data'):
os.makedirs('data')
if not os.path.exists('data/json'):
os.makedirs('data/json')
if not os.path.exists('data/json/databases'):
data = {}
with open('data/json/databases', 'w') as file:
json.dump(data, file)
# Read a JSON file
def read(path: str) -> dict:
with open(path) as file:
return json.load(file)
# Write a JSON file
def write(path: str, data: dict):
with open(path, 'w') as file:
json.dump(data, file)
# Show the complete file of databases and tables
def showJSON(fileName: str):
initCheck()
with open('data/json/'+fileName) as file:
data = json.load(file)
print(data)
# Delete all databases and tables by creating a new file
def dropAll():
initCheck()
data = {}
with open('data/json/databases', 'w') as file:
json.dump(data, file)
# show all collection of relational data
def showCollection():
initCheck()
databases = []
tables = []
datatables = []
with open('data/json/databases') as file:
data = json.load(file)
for d in data:
databases.append(d);
for t in data[d]:
tables.append(t)
datatables.append(d+'-'+t)
print('Databases: '+str(databases))
print('Tables: '+str(tables))
for d in datatables:
registers = []
with open('data/json/'+d) as file:
data = json.load(file)
for r in data:
registers.append(r)
print(d+' pkeys: '+str(registers))
#------------ verify database and table existence (jsonMode) ---------
def verificar_Json(database: str, table: str):
try:
if not database.isidentifier() \
or not table.isidentifier() :
raise Exception()
initCheck()
data = read(dataPath)
if not database in data:
return 2
if not table in data[database]:
return 3
return 0
except:
return 1
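# Illustrative usage sketch (added example, not part of the original module):
# a guarded demo of the CRUD helpers above. The database, table and register
# values are made up; running it creates files under data/json/ via initCheck().
if __name__ == "__main__":
    print(createDatabase("demo_db"))            # 0 on success, 2 if it already exists
    print(createTable("demo_db", "people", 2))  # table with 2 columns
    print(alterAddPK("demo_db", "people", [0])) # column 0 becomes the primary key
    print(insert("demo_db", "people", ["1", "Alice"]))
    print(extractTable("demo_db", "people"))    # [['1', 'Alice']]
    print(showDatabases())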
|
py | 1a380c03c03b43761f4cee04dcdecdb88183d9b9 | from flask import render_template, request
from . import main
from .. import db
from ..models import Subscribers
from ..email import welcome_message, notification_message
@main.app_errorhandler(404)
def notfound(error):
"""
Function to render the 404 error page
"""
if request.method == "POST":
new_sub = Subscribers(email = request.form.get("subscriber"))
db.session.add(new_sub)
db.session.commit()
welcome_message("Thank you for subscribing to the CM blog",
"email/welcome", new_sub.email)
return render_template("notfound.html"),404 |
py | 1a380e50ca3bca8dd571c3996db205548784aaa6 | import sys
from colour.utilities.deprecation import ModuleAPI, build_API_changes
from colour.utilities.documentation import is_documentation_building
from colour.hints import Any
from .primitives import MAPPING_PLANE_TO_AXIS, primitive_grid, primitive_cube
from .primitives import PRIMITIVE_METHODS, primitive
from .section import hull_section
from .vertices import (
primitive_vertices_quad_mpl,
primitive_vertices_grid_mpl,
primitive_vertices_cube_mpl,
primitive_vertices_sphere,
)
from .vertices import PRIMITIVE_VERTICES_METHODS, primitive_vertices
__all__ = [
"MAPPING_PLANE_TO_AXIS",
"primitive_grid",
"primitive_cube",
]
__all__ += [
"hull_section",
]
__all__ += [
"PRIMITIVE_METHODS",
"primitive",
]
__all__ += [
"primitive_vertices_quad_mpl",
"primitive_vertices_grid_mpl",
"primitive_vertices_cube_mpl",
"primitive_vertices_sphere",
]
__all__ += [
"PRIMITIVE_VERTICES_METHODS",
"primitive_vertices",
]
# ----------------------------------------------------------------------------#
# --- API Changes and Deprecation Management ---#
# ----------------------------------------------------------------------------#
class geometry(ModuleAPI):
"""Define a class acting like the *geometry* module."""
def __getattr__(self, attribute) -> Any:
"""Return the value from the attribute with given name."""
return super().__getattr__(attribute)
# v0.4.0
API_CHANGES = {
"ObjectRenamed": [
[
"colour.geometry.PLANE_TO_AXIS_MAPPING",
"colour.geometry.MAPPING_PLANE_TO_AXIS",
],
]
}
"""Defines the *colour.geometry* sub-package API changes."""
if not is_documentation_building():
sys.modules["colour.geometry"] = geometry( # type:ignore[assignment]
sys.modules["colour.geometry"], build_API_changes(API_CHANGES)
)
del ModuleAPI, is_documentation_building, build_API_changes, sys
|
py | 1a380eab6cd88a2abf0b1ce191d22770e1c7f968 | # -*- coding: utf-8 -*-
import hashlib
from flask import (render_template, g, session,
jsonify, request,redirect, flash)
from web.app import app
from web.model import (User, UserInfo,UserSetting,BasicUser,
AdvancedUser,FeedSite, Feed, Sub, ReadFeed)
@app.route("/api/pop-feedsite/sub", methods=["POST"])
def sub_pop_feedsite():
feedsiteid = request.form.get("feedsiteid")
feedsite = FeedSite.get_feedsite_by_id(feedsiteid)
    if feedsite is None:
        flash("add feedsite %s failed" % feedsiteid)
return jsonify(dict(rcode=404))
g.user.sub_feedsite(feedsite)
flash("add %s sucessfully"%feedsite.title)
return jsonify(dict(rcode=200))
@app.route("/api/pop-feedsite/<feedsiteid>/", methods=["GET","POST"])
def pop_feeds(feedsiteid=None):
if feedsiteid is None:
return jsonify(dict(rcode=404))
feeds = [feed.to_dict() for feed in Feed.objects(feedsite=feedsiteid).order_by("-create_date")[:15]]
return jsonify(dict(rcode=200, feeds=feeds))
|
py | 1a380eaee62fd6bab266b227545f2c739dba7b95 | # functions
div = lambda a, b: -(-a//b) if a<0 else a//b
def DFS(add, sub, mult, divi, cur, result):
'''
    add, sub, mult, divi := remaining number of allowed uses for each operator
    cur := index of the sequence number to use in the current operation
    result := result of the computation so far
'''
global result_max
global result_min
stack = [(add, sub, mult, divi, cur, result)]
while stack:
add, sub, mult, divi, cur, result = stack.pop()
        if add + sub + mult + divi == 0: # all operators exhausted
result_max = max(result_max, result)
result_min = min(result_min, result)
continue
if add > 0:
stack.append((add - 1, sub, mult, divi, cur + 1, result + A[cur]))
if sub > 0:
stack.append((add, sub - 1, mult, divi, cur + 1, result - A[cur]))
if mult > 0:
stack.append((add, sub, mult - 1, divi, cur + 1, result * A[cur]))
if divi > 0:
stack.append((add, sub, mult, divi - 1, cur + 1, div(result, A[cur])))
# input
N = int(input())
A = tuple(map(int, input().split()))
add, sub, mult, divi = map(int, input().split())
# process
'''
Exhaustive search (brute force) using DFS
'''
result_max = -1000000001
result_min = 1000000001
DFS(add, sub, mult, divi, 1, A[0])
# output
print(result_max)
print(result_min) |
py | 1a380ed2dcf93f5d8721f75aae3bddef243ed234 | """
Test address breakpoints set with shared library of SBAddress work correctly.
"""
import lldb
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class AddressBreakpointTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def test_address_breakpoints(self):
"""Test address breakpoints set with shared library of SBAddress work correctly."""
self.build()
self.address_breakpoints()
def address_breakpoints(self):
"""Test address breakpoints set with shared library of SBAddress work correctly."""
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Now create a breakpoint on main.c by name 'c'.
breakpoint = target.BreakpointCreateBySourceRegex(
"Set a breakpoint here", lldb.SBFileSpec("main.c"))
self.assertTrue(breakpoint and
breakpoint.GetNumLocations() >= 1,
VALID_BREAKPOINT)
# Get the breakpoint location from breakpoint after we verified that,
# indeed, it has one location.
location = breakpoint.GetLocationAtIndex(0)
self.assertTrue(location and
location.IsEnabled(),
VALID_BREAKPOINT_LOCATION)
# Next get the address from the location, and create an address breakpoint using
# that address:
address = location.GetAddress()
target.BreakpointDelete(breakpoint.GetID())
breakpoint = target.BreakpointCreateBySBAddress(address)
        # Leave ASLR enabled by clearing the disable-ASLR launch flag. This will allow us to
        # actually test (on platforms that support this flag) that the breakpoint was able to
        # track the module as it moves between runs.
launch_info = lldb.SBLaunchInfo(None)
flags = launch_info.GetLaunchFlags()
flags &= ~lldb.eLaunchFlagDisableASLR
launch_info.SetLaunchFlags(flags)
error = lldb.SBError()
process = target.Launch(launch_info, error)
self.assertTrue(process, PROCESS_IS_VALID)
# Did we hit our breakpoint?
from lldbsuite.test.lldbutil import get_threads_stopped_at_breakpoint
threads = get_threads_stopped_at_breakpoint(process, breakpoint)
self.assertTrue(
len(threads) == 1,
"There should be a thread stopped at our breakpoint")
# The hit count for the breakpoint should be 1.
self.assertEquals(breakpoint.GetHitCount(), 1)
process.Kill()
# Now re-launch and see that we hit the breakpoint again:
launch_info.Clear()
launch_info.SetLaunchFlags(flags)
process = target.Launch(launch_info, error)
self.assertTrue(process, PROCESS_IS_VALID)
        threads = get_threads_stopped_at_breakpoint(process, breakpoint)
self.assertTrue(
len(threads) == 1,
"There should be a thread stopped at our breakpoint")
# The hit count for the breakpoint should now be 2.
self.assertEquals(breakpoint.GetHitCount(), 2)
|
py | 1a380f3ded84fa7156c639124f8b01b759cbf1b5 | a = 1
b = 2
c = a + b  # was sum(a, b), which fails: sum() expects an iterable, not two numbers
print(c)
|
py | 1a3810c567e12ae5e5b5ab07d5645d6cf66bc226 | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+=@v!1d50k+c9py@x_mk*^9yc*24sc&rrst!j2a*ip%1lr450('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User' |
py | 1a3810dc293218eac7cf942056045fb873b83982 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Policy Settings
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# Third-party modules
import six
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import (
StringField,
ReferenceField,
ListField,
EmbeddedDocumentField,
BooleanField,
)
# NOC modules
from .validationpolicy import ValidationPolicy
@six.python_2_unicode_compatible
class ValidationPolicyItem(EmbeddedDocument):
policy = ReferenceField(ValidationPolicy)
is_active = BooleanField(default=True)
def __str__(self):
return self.policy.name
@six.python_2_unicode_compatible
class ValidationPolicySettings(Document):
meta = {
"collection": "noc.validationpolicysettings",
"strict": False,
"auto_create_index": False,
"indexes": [("model_id", "object_id")],
}
model_id = StringField()
object_id = StringField()
policies = ListField(EmbeddedDocumentField(ValidationPolicyItem))
def __str__(self):
return "%s: %s" % (self.model_id, self.object_id)
|
py | 1a3812c290da729451c1d6995827c0e9e344ab69 | #!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_compute_volume_attachment
short_description: Manage a VolumeAttachment resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a VolumeAttachment resource in Oracle Cloud Infrastructure
- For I(state=present), attaches the specified storage volume to the specified instance.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
device:
description:
- The device name. To retrieve a list of devices for a given instance, see L(ListInstanceDevices,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/Device/ListInstanceDevices).
type: str
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
- Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
aliases: ["name"]
instance_id:
description:
- The OCID of the instance.
- Required for create using I(state=present).
type: str
is_read_only:
description:
- Whether the attachment was created in read-only mode.
type: bool
is_shareable:
description:
- Whether the attachment should be created in shareable mode. If an attachment
is created in shareable mode, then other instances can attach the same volume, provided
that they also create their attachments in shareable mode. Only certain volume types can
be attached in shareable mode. Defaults to false if not specified.
type: bool
type:
description:
- "The type of volume. The only supported values are \\"iscsi\\" and \\"paravirtualized\\"."
- Required for create using I(state=present).
type: str
choices:
- "service_determined"
- "emulated"
- "iscsi"
- "paravirtualized"
volume_id:
description:
- The OCID of the volume.
- Required for create using I(state=present).
type: str
use_chap:
description:
- Whether to use CHAP authentication for the volume attachment. Defaults to false.
- Applicable when type is 'iscsi'
type: bool
encryption_in_transit_type:
description:
- Refer the top-level definition of encryptionInTransitType.
The default value is NONE.
- Applicable when type is 'iscsi'
type: str
choices:
- "NONE"
- "BM_ENCRYPTION_IN_TRANSIT"
is_pv_encryption_in_transit_enabled:
description:
- Whether to enable in-transit encryption for the data volume's paravirtualized attachment. The default value is false.
- Applicable when type is 'paravirtualized'
type: bool
volume_attachment_id:
description:
- The OCID of the volume attachment.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
iscsi_login_state:
description:
- The iscsi login state of the volume attachment. For a multipath volume attachment,
all iscsi sessions need to be all logged-in or logged-out to be in logged-in or logged-out state.
- This parameter is updatable.
type: str
choices:
- "UNKNOWN"
- "LOGGING_IN"
- "LOGIN_SUCCEEDED"
- "LOGIN_FAILED"
- "LOGGING_OUT"
- "LOGOUT_SUCCEEDED"
- "LOGOUT_FAILED"
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
state:
description:
- The state of the VolumeAttachment.
- Use I(state=present) to create or update a VolumeAttachment.
- Use I(state=absent) to delete a VolumeAttachment.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create volume_attachment with type = service_determined
oci_compute_volume_attachment:
# required
instance_id: "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx"
type: service_determined
volume_id: "ocid1.volume.oc1..xxxxxxEXAMPLExxxxxx"
# optional
device: device_example
display_name: display_name_example
is_read_only: true
is_shareable: true
- name: Create volume_attachment with type = emulated
oci_compute_volume_attachment:
# required
instance_id: "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx"
type: emulated
volume_id: "ocid1.volume.oc1..xxxxxxEXAMPLExxxxxx"
# optional
device: device_example
display_name: display_name_example
is_read_only: true
is_shareable: true
- name: Create volume_attachment with type = iscsi
oci_compute_volume_attachment:
# required
instance_id: "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx"
type: iscsi
volume_id: "ocid1.volume.oc1..xxxxxxEXAMPLExxxxxx"
# optional
device: device_example
display_name: display_name_example
is_read_only: true
is_shareable: true
use_chap: true
encryption_in_transit_type: NONE
- name: Create volume_attachment with type = paravirtualized
oci_compute_volume_attachment:
# required
instance_id: "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx"
type: paravirtualized
volume_id: "ocid1.volume.oc1..xxxxxxEXAMPLExxxxxx"
# optional
device: device_example
display_name: display_name_example
is_read_only: true
is_shareable: true
is_pv_encryption_in_transit_enabled: true
- name: Update volume_attachment
oci_compute_volume_attachment:
# required
volume_attachment_id: "ocid1.volumeattachment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
iscsi_login_state: UNKNOWN
- name: Update volume_attachment using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_compute_volume_attachment:
# required
display_name: display_name_example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
iscsi_login_state: UNKNOWN
- name: Delete volume_attachment
oci_compute_volume_attachment:
# required
volume_attachment_id: "ocid1.volumeattachment.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete volume_attachment using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_compute_volume_attachment:
# required
display_name: display_name_example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
"""
RETURN = """
volume_attachment:
description:
- Details of the VolumeAttachment resource acted upon by the current operation
returned: on success
type: complex
contains:
attachment_type:
description:
- The type of volume attachment.
returned: on success
type: str
sample: emulated
availability_domain:
description:
- The availability domain of an instance.
- "Example: `Uocm:PHX-AD-1`"
returned: on success
type: str
sample: Uocm:PHX-AD-1
compartment_id:
description:
- The OCID of the compartment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
device:
description:
- The device name.
returned: on success
type: str
sample: device_example
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
id:
description:
- The OCID of the volume attachment.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
instance_id:
description:
- The OCID of the instance the volume is attached to.
returned: on success
type: str
sample: "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx"
is_read_only:
description:
- Whether the attachment was created in read-only mode.
returned: on success
type: bool
sample: true
is_shareable:
description:
- Whether the attachment should be created in shareable mode. If an attachment
is created in shareable mode, then other instances can attach the same volume, provided
that they also create their attachments in shareable mode. Only certain volume types can
be attached in shareable mode. Defaults to false if not specified.
returned: on success
type: bool
sample: true
lifecycle_state:
description:
- The current state of the volume attachment.
returned: on success
type: str
sample: ATTACHING
time_created:
description:
- The date and time the volume was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
volume_id:
description:
- The OCID of the volume.
returned: on success
type: str
sample: "ocid1.volume.oc1..xxxxxxEXAMPLExxxxxx"
is_pv_encryption_in_transit_enabled:
description:
- Whether in-transit encryption for the data volume's paravirtualized attachment is enabled or not.
returned: on success
type: bool
sample: true
is_multipath:
description:
- Whether the Iscsi or Paravirtualized attachment is multipath or not, it is not applicable to NVMe attachment.
returned: on success
type: bool
sample: true
iscsi_login_state:
description:
- The iscsi login state of the volume attachment. For a Iscsi volume attachment,
all iscsi sessions need to be all logged-in or logged-out to be in logged-in or logged-out state.
returned: on success
type: str
sample: UNKNOWN
chap_secret:
description:
- "The Challenge-Handshake-Authentication-Protocol (CHAP) secret
valid for the associated CHAP user name.
(Also called the \\"CHAP password\\".)"
returned: on success
type: str
sample: chap_secret_example
chap_username:
description:
- The volume's system-generated Challenge-Handshake-Authentication-Protocol
(CHAP) user name. See L(RFC 1994,https://tools.ietf.org/html/rfc1994) for more on CHAP.
- "Example: `ocid1.volume.oc1.phx.<unique_ID>`"
returned: on success
type: str
sample: chap_username_example
ipv4:
description:
- The volume's iSCSI IP address.
- "Example: `169.254.0.2`"
returned: on success
type: str
sample: ipv4_example
iqn:
description:
- The target volume's iSCSI Qualified Name in the format defined
by L(RFC 3720,https://tools.ietf.org/html/rfc3720#page-32).
- "Example: `iqn.2015-12.us.oracle.com:<CHAP_username>`"
returned: on success
type: str
sample: iqn_example
port:
description:
- The volume's iSCSI port, usually port 860 or 3260.
- "Example: `3260`"
returned: on success
type: int
sample: 56
multipath_devices:
description:
- A list of secondary multipath devices
returned: on success
type: complex
contains:
ipv4:
description:
- The volume's iSCSI IP address.
- "Example: `169.254.2.2`"
returned: on success
type: str
sample: ipv4_example
iqn:
description:
- The target volume's iSCSI Qualified Name in the format defined
by L(RFC 3720,https://tools.ietf.org/html/rfc3720#page-32).
- "Example: `iqn.2015-12.com.oracleiaas:40b7ee03-883f-46c6-a951-63d2841d2195`"
returned: on success
type: str
sample: iqn_example
port:
description:
- The volume's iSCSI port, usually port 860 or 3260.
- "Example: `3260`"
returned: on success
type: int
sample: 56
encryption_in_transit_type:
description:
- Refer the top-level definition of encryptionInTransitType.
The default value is NONE.
returned: on success
type: str
sample: NONE
iscsi_attach_commands:
description:
- Commands to attach the iSCSI block volume. Empty if attachment_type is not iscsi.
returned: on success
type: list
sample: [ "sudo iscsiadm -m node -o new -T IQN -p IP:PORT", "sudo iscsiadm -m node -o update ..." ]
iscsi_detach_commands:
description:
- Commands to detach the iSCSI block volume. Empty if attachment_type is not iscsi.
returned: on success
type: list
sample: [ "sudo iscsiadm -m node -T IQN -p IP:PORT -u", "sudo iscsiadm -m node -o delete -T IQN" ]
sample: {
"attachment_type": "emulated",
"availability_domain": "Uocm:PHX-AD-1",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"device": "device_example",
"display_name": "display_name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"instance_id": "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx",
"is_read_only": true,
"is_shareable": true,
"lifecycle_state": "ATTACHING",
"time_created": "2013-10-20T19:20:30+01:00",
"volume_id": "ocid1.volume.oc1..xxxxxxEXAMPLExxxxxx",
"is_pv_encryption_in_transit_enabled": true,
"is_multipath": true,
"iscsi_login_state": "UNKNOWN",
"chap_secret": "chap_secret_example",
"chap_username": "chap_username_example",
"ipv4": "ipv4_example",
"iqn": "iqn_example",
"port": 56,
"multipath_devices": [{
"ipv4": "ipv4_example",
"iqn": "iqn_example",
"port": 56
}],
"encryption_in_transit_type": "NONE",
"iscsi_attach_commands": [ "sudo iscsiadm -m node -o new -T IQN -p IP:PORT", "sudo iscsiadm -m node -o update ..." ],
"iscsi_detach_commands": [ "sudo iscsiadm -m node -T IQN -p IP:PORT -u", "sudo iscsiadm -m node -o delete -T IQN" ]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.core import ComputeClient
from oci.core.models import AttachVolumeDetails
from oci.core.models import UpdateVolumeAttachmentDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class VolumeAttachmentHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
def get_possible_entity_types(self):
return super(VolumeAttachmentHelperGen, self).get_possible_entity_types() + [
"volumeattachment",
"volumeattachments",
"corevolumeattachment",
"corevolumeattachments",
"volumeattachmentresource",
"volumeattachmentsresource",
"core",
]
def get_module_resource_id_param(self):
return "volume_attachment_id"
def get_module_resource_id(self):
return self.module.params.get("volume_attachment_id")
def get_get_fn(self):
return self.client.get_volume_attachment
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_volume_attachment,
volume_attachment_id=self.module.params.get("volume_attachment_id"),
)
def get_required_kwargs_for_list(self):
required_list_method_params = [
"compartment_id",
]
return dict(
(param, self.module.params[param]) for param in required_list_method_params
)
def get_optional_kwargs_for_list(self):
optional_list_method_params = ["instance_id", "volume_id"]
return dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
and (
self._use_name_as_identifier()
or (
not self.module.params.get("key_by")
or param in self.module.params.get("key_by")
)
)
)
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(
self.client.list_volume_attachments, **kwargs
)
def get_create_model_class(self):
return AttachVolumeDetails
def get_exclude_attributes(self):
return ["type", "use_chap"]
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.attach_volume,
call_fn_args=(),
call_fn_kwargs=dict(attach_volume_details=create_details,),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.CREATE_OPERATION_KEY,
),
)
def get_update_model_class(self):
return UpdateVolumeAttachmentDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_volume_attachment,
call_fn_args=(),
call_fn_kwargs=dict(
volume_attachment_id=self.module.params.get("volume_attachment_id"),
update_volume_attachment_details=update_details,
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.UPDATE_OPERATION_KEY,
),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.detach_volume,
call_fn_args=(),
call_fn_kwargs=dict(
volume_attachment_id=self.module.params.get("volume_attachment_id"),
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.DELETE_OPERATION_KEY,
),
)
VolumeAttachmentHelperCustom = get_custom_class("VolumeAttachmentHelperCustom")
class ResourceHelper(VolumeAttachmentHelperCustom, VolumeAttachmentHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
device=dict(type="str"),
display_name=dict(aliases=["name"], type="str"),
instance_id=dict(type="str"),
is_read_only=dict(type="bool"),
is_shareable=dict(type="bool"),
type=dict(
type="str",
choices=["service_determined", "emulated", "iscsi", "paravirtualized"],
),
volume_id=dict(type="str"),
use_chap=dict(type="bool"),
encryption_in_transit_type=dict(
type="str", choices=["NONE", "BM_ENCRYPTION_IN_TRANSIT"]
),
is_pv_encryption_in_transit_enabled=dict(type="bool"),
volume_attachment_id=dict(aliases=["id"], type="str"),
iscsi_login_state=dict(
type="str",
choices=[
"UNKNOWN",
"LOGGING_IN",
"LOGIN_SUCCEEDED",
"LOGIN_FAILED",
"LOGGING_OUT",
"LOGOUT_SUCCEEDED",
"LOGOUT_FAILED",
],
),
compartment_id=dict(type="str"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="volume_attachment",
service_client_class=ComputeClient,
namespace="core",
)
result = dict(changed=False)
if resource_helper.is_delete_using_name():
result = resource_helper.delete_using_name()
elif resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update_using_name():
result = resource_helper.update_using_name()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
|
py | 1a3812c4a9fb12cd7e5d152c2a8e7c10a9557da3 | #!/usr/bin/env python3
all_brivla = [ ("tavla", ["speaker", "listener", "subject", "language"])
, ("dunda", ["donor", "gift", "recipient"])
, ("ctuca", ["instructor", "audience/student(s)", "ideas/methods", "subject", "teaching method"])
, ("citka", ["consumer", "aliment"])
, ("ciska", ["writer", "text/symbols", "display/storage medium", "writing implement"])
, ("klama", ["traveler", "destination", "origin", "route", "means/vehicle"])
, ("bridi", ["predicate relationship", "relation", "arguments"])
, ("djuno", ["knower", "facts", "subject", "epistemology"])
, ("nupre", ["promisor", "promise", "beneficiary/victim"])
, ("cusku", ["expresser", "message", "audience", "expressive medium"])
, ("cizra", ["strange thing", "viewpoint holder", "property"])
, ("cmene", ["name/title", "name posessor", "name-giver/name-user"])
, ("cusku", ["agent", "expressed idea", "audience", "expressive medium"])
, ("djica", ["desirer", "event/state", "purpose"])
, ("gleki", ["happy entity", "event/state"])
, ("jimpe", ["understander", "fact/truth", "subject"])
, ("klama", ["traveler", "destination", "origin", "route", "means/vehicle"])
, ("mutce", ["much/extreme thing", "property", "extreme/direction"])
, ("nelci", ["liker", "object/state"])
, ("pilno", ["user", "instrument", "purpose"])
, ("sipna", ["asleep entity"])
, ("xamgu", ["good object/event", "beneficiary", "standard"])
, ("zgana", ["observer", "observed", "senses/means", "conditions"])
, ("bangu", ["language/dialect", "language user", "communicated idea"])
, ("cliva", ["agent", "point of departure", "route"])
, ("finti", ["inventor/composer", "invention", "purpose", "existing elements/ideas"])
, ("gunka", ["worker", "activity", "goal"])
, ("jundi", ["attentive entity", "object/affair"])
, ("kakne", ["capable entity", "capability", "conditions"])
, ("tcidu", ["reader", "text", "reading material"])
, ("valsi", ["word", "meaning", "language"])
, ("zvati", ["atendee/event", "location"])
, ("cinri", ["interesting abstraction", "interested entity"])
, ("drata", ["entity #1", "entity #2", "standard"])
, ("simsa", ["entity #1", "entity #2", "property/quantity"])
, ("klaku", ["crier", "tears", "reason"])
, ("melbi", ["beautiful entity", "viewpoint holder", "aspect", "aesthetic standard"])
, ("smuni", ["meaning/interpretation", "expression", "opinion holder"])
, ("vecnu", ["seller", "goods/service", "buyer", "price"])
, ("plise", ["apple", "species/strain"])
, ("prenu", ["person"])
, ("cilre", ["learner", "facts", "subject", "source", "method"])
, ("cnino", ["new entity", "observer", "feature", "standard"])
, ("drani", ["correct thing", "property", "situation", "standard"])
, ("fanva", ["translator", "text/utterance", "target language", "source language", "translation result"])
, ("gasnu", ["agent", "event"])
, ("kelci", ["player", "toy"])
, ("milxe", ["mild thing", "property"])
, ("mlatu", ["cat", "species/breed"])
, ("nitcu", ["needing entity", "necessity", "purpose"])
, ("pendo", ["friendly entity", "friendliness experiencer"])
, ("pensi", ["thinking entity", "subject/concept"])
, ("skami", ["computer", "purpose"])
, ("slabu", ["familiar/old thing", "observer", "feature", "standard"])
, ("troci", ["trier", "attempted event/state/property", "actions/method"])
, ("zdani", ["house", "owner/user"])
]
def encode_text(text):
if "#" in text:
return "\"%s\"" % text
else:
return text
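# Note: quoting is applied here because the YAML-style output printed below
# ("word:" followed by indented "x1: value" lines) would otherwise treat the
# '#' in values such as "entity #1" as the start of a comment.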
for brivla in all_brivla:
word = brivla[0]
places = brivla[1]
x1, x2, x3, x4, x5 = [None, None, None, None, None]
try:
x1 = encode_text(places[0])
x2 = encode_text(places[1])
x3 = encode_text(places[2])
x4 = encode_text(places[3])
x5 = encode_text(places[4])
    except IndexError:
pass
print("%s:" % word)
if x1:
print(" x1: %s" % x1)
if x2:
print(" x2: %s" % x2)
if x3:
print(" x3: %s" % x3)
if x4:
print(" x4: %s" % x4)
if x5:
print(" x5: %s" % x5)
print()
|
py | 1a38134b59aa58ef4763449495f62517fa3e414d | from enum import Enum
class LoggerPrefix(Enum):
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
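# Usage sketch (illustrative, not part of the original file): members defined
# with an already-used value become aliases, so FATAL and WARN resolve to the
# same Enum instances as CRITICAL and WARNING.
if __name__ == '__main__':
    assert LoggerPrefix.FATAL is LoggerPrefix.CRITICAL
    assert LoggerPrefix.WARN is LoggerPrefix.WARNING
    print(LoggerPrefix.DEBUG.value)  # prints 10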
|
py | 1a38137659b2e9b07d344f9256ab4e65ad15b789 | """Assets Browser Functions"""
import logging
import os
import sys
from PyQt5 import QtWidgets
logger = logging.getLogger(__name__)
def clear_layout(layout):
"""Clear layout?"""
while layout.count():
child = layout.takeAt(0)
if child.widget():
child.widget().deleteLater()
def show_cwd():
"""Show CWD (Current Work Directory) as a QMessageBox."""
widget = QtWidgets.QWidget()
cwd = os.getcwd()
QtWidgets.QMessageBox.information(widget, "Information", cwd)
def close_app():
"""Terminate/Close App."""
sys.exit()
def restart_app():
"""Restart App.
99% it doesn't restart in an IDE like PyCharm for complex script but
it has been tested to work when execute through Python interpreter.
"""
# QtWidgets.qApp.exit(-123)
os.execv(sys.executable, [sys.executable] + sys.argv)
def ham():
"""When testing or in doubt, it's HAM time!"""
print('HAM! HAM! HAM!')
|
py | 1a381385a893515a46968d588bca8acfc2f349ed | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Taken and modified for fairscale from:
# https://github.com/facebookresearch/fairscale/blob/main/fairscale/nn/data_parallel/sharded_ddp.py
#Commit: 8acbec718f3c70a6b9785470bb9e05cd84fc3f8e
import os
import contextlib
import logging
import time
import functools
import numpy as np
from itertools import chain
from functools import reduce
from collections import deque
from types import MethodType
import paddle
from paddle import nn
import paddle.distributed as dist
from paddle.distributed.collective import _get_global_group
from ...utils.internal_storage import GradStorage
from ...meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2
from .sharding_utils import Taskflow, Type
def _trainable(param):
return param.trainable
class ShardingStage2(nn.Layer):
"""
A wrapper for Sharding Stage2 Layer in Dygraph.
.. warning: ShardingStage2 encapsulates the layer strategy and integrates it into the nn.Layer.
.. ZeRO: https://arxiv.org/pdf/1910.02054.pdf.
"""
# TODO (Baibaifan)
# Feature Notes::
# 1. Unified memory for param and param.grad to InternalStorage.
# 2. Divide param.grad according to rank to centrally apply for and release GPU memory.
    # 3. Dynamically adjust training parameters and models.
# 4. Support offload function.
# 5. Support the establishment of independent communication groups.
def __init__(
self,
layer,
sharding_optimizer,
group=None,
sync_buffers=False,
pertrain_sync_models=True,
buffer_max_size=2**23, #8MB
auto_refresh_trainable=True,
device="gpu",
use_grad_storage=True,
accumulate_grads=False):
super().__init__()
# training options
self._layer = layer
self._sharding_optimizers = [sharding_optimizer] if not isinstance(
sharding_optimizer, list) else sharding_optimizer
assert all(
list(
map(lambda opt: isinstance(opt, ShardingOptimizerStage2),
self._sharding_optimizers))
), "Please use ShardingOptimizerStage2 optimizer"
self._sync_buffers = sync_buffers
self._auto_refresh_trainable = auto_refresh_trainable
# Gradient accumulation, Gradient flip
self._accumulate_grads = accumulate_grads
# Communication related attributes
self._group = group
group = _get_global_group() if group is None else group
self._world_size_scaling = 1.0 / group.nranks
assert group.nranks > 1, "Training must be distributed, ranks must be greater than 1"
self._rank = group.rank
self._global_root_rank = 0 # picking rank 0 as the reference
self._default_device = device
# Global statistical parameters
self._all_params = list(
chain(*[optim.local_params for optim in self._sharding_optimizers]))
self._trainable_params = []
self._grad_reduced = []
self._trainable_param2rank = {}
self._trainable_param2align = {}
self._trainable_mask = list(map(_trainable, self._all_params))
self._param_grads = []
# Set grad storage size & Display param sizes and model sizes
model_size = sum(
[np.prod(p.shape) for p in self._layer.parameters()]).item()
self._buffer_max_size = self._rank_buffer_size(buffer_max_size,
model_size)
self._use_grad_storage = use_grad_storage
self._grad_storages = {} # {dtype: {rank: GradStorage}}
self._has_grad_storage = []
self._grad_storage_list = []
# Offload
# TODO(haohongxiang): Now it's not be supported for multi-optimizers using Offload strategy
self._offload_optims = list(
filter(lambda optim: optim.offload, self._sharding_optimizers))
if len(self._offload_optims) > 0:
assert len(
self._sharding_optimizers
) == 1, "Only support offload strategy for single optimizer"
self._offload = self._sharding_optimizers[0].offload
self._offload_device = "cpu"
# Set backward pass hooks
self._bw_hooks = []
# Synchronous all ranks models
if pertrain_sync_models:
self._sync_params_and_buffers()
# Set tasks flow
self._tasks_flow = deque()
# Define optimizer step and clear_grad
if self._accumulate_grads:
self._redefine_opt_step()
self._redefine_opt_clear()
def forward(self, *inputs, **kwargs):
"""
A wrapper for Sharding Stage2 layer.
- Fresh trainable params or rebuild grad storage
- Sync layer's buffer params
- Clear all flags states
- Forward for origin layers
"""
# Whether to need to reset trainable parameters
needs_fresh = len(self._bw_hooks) == 0 and self.training
if self._auto_refresh_trainable:
needs_fresh |= self._detect_train_change()
# Front hook
self._init_internal_storage(needs_fresh)
# Sync layer's buffers state
if self._sync_buffers:
self.__sync_buffers()
# Normal FW on the base model
fw = self._layer(*inputs, **kwargs)
return fw
def _clear_gradients(self):
"""
Set zero to the gradient of the optimizer's current rank trainable parameters.
"""
# Release grad storages
for dtype in self._grad_storages.keys():
if self._rank in self._grad_storages[dtype].keys():
if not self._offload:
self._grad_storages[dtype][self._rank].buffer.zero_()
# Release params
for param in self._trainable_params:
if param.name in self._param_grads and param.grad is not None:
param.clear_gradient()
def _grad_scale(self):
"""
Before the gradient accumulation, scale the gradient.
"""
if self._offload:
for param in self._trainable_params:
if param.name in self._sharding_optimizers[
0]._master_params.keys():
self._sharding_optimizers[0]._master_params[
param.name].grad.scale_(scale=self._world_size_scaling)
else:
# Scale grad storages
for dtype in self._grad_storages.keys():
if self._rank in self._grad_storages[dtype].keys():
self._grad_storages[dtype][self._rank].buffer.scale_(
scale=self._world_size_scaling)
# Scale params
for param in self._trainable_params:
if param.name in self._param_grads and param.grad is not None:
param.grad.scale_(scale=self._world_size_scaling)
param._reset_grad_inplace_version(True)
def _init_internal_storage(self, needs_fresh):
"""
        Decide whether to refresh the trainable params or just rebuild the grad storages.
"""
if needs_fresh:
self._fresh_trainable()
else:
self._build_grad_storages()
# Clear all flags state
self._clear_counters()
def to(self, device=None, dtype=None, blocking=True):
"""
        Synchronously or asynchronously convert the data type of the layer; changing the device is not supported yet.
"""
assert isinstance(device, str), "Device must be type str"
assert device == self._default_device, "New devices are not supported, because of the optimizer state is not sync"
self._layer.to(device=device, dtype=dtype, blocking=blocking)
# Re-build the buckets, hooks, etc..
self._fresh_trainable()
def _fresh_trainable(self):
""" Whether to update training parameters. """
# Make sure that this is not done while gradients are waiting to be reduced (if no_sync context for instance)
if reduce(lambda x, y: x or y, self._grad_reduced, False):
logging.warning("Grads waiting to be reduced.")
self._trainable_params = list(
filter(lambda x: x.trainable, self._all_params))
self._trainable_params.sort(key=lambda x: np.prod(x.shape))
self._trainable_param2rank = {}
for optim in self._sharding_optimizers:
# Need to be wrappered for Sharding Stage2 Optimizer
if len(optim.param_storages.keys()) == 0:
optim.update_opt_status()
# Get the parameters split by the optimizer according to rank
for per_rank_params in optim.dtype_rank_params.values(
): # all the params from all ranks
for params in per_rank_params:
for param in filter(lambda x: x.trainable, params):
self._trainable_param2rank[
param.name] = optim.param2rank[param.name]
self._trainable_param2align[
param.name] = optim._param2align[param.name]
self._setup_use_grad_storage()
# wait next func hook support
self._setup_backward_hooks()
@paddle.no_grad()
def __sync_buffers(self):
"""
Sync all the param buffers from all ranks (exp: batch norm statistics).
"""
for buffer in self._layer.buffers(include_sublayers=True):
dist.broadcast(
buffer,
self._global_root_rank,
self._group,
use_calc_stream=True)
# Multi stream operation will be supported later
dist.wait(tensor=buffer, group=self._group, use_calc_stream=True)
def __getattr__(self, name):
"""Forward missing attributes to wrapped layer."""
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self._layer, name)
@paddle.no_grad()
def _clear_counters(self):
"""Reset all the grad reduce and call counters."""
if self.training:
self._grad_reduced = [True for _ in self._trainable_params]
if self._use_grad_storage:
for grad_storage in self._grad_storage_list:
grad_storage.reset_checked_in()
def _get_reduce_fn(self, index, param, dst_rank):
"""
There are two ways to reduce gradient.
        - 1. Parameters that do not use grad_storage, or that exceed buffer_max_size, are reduced separately.
        - 2. Otherwise, use grad_storage to reduce the whole storage and obtain the full gradient from different ranks.
"""
if not self._use_grad_storage or not self._has_grad_storage[index]:
# Direct reduction
@paddle.no_grad()
def reduce(*_):
# Skip gradient reduction, do not change status information
if self._grad_reduced[index]:
assert param.grad is not None, "Parameter gradient cannot be None"
# Change reduce information
self._grad_reduced[index] = False
if not self._accumulate_grads:
param.grad.scale_(scale=self._world_size_scaling)
param._reset_grad_inplace_version(True)
# Clear the gradient that does not belong to the current rank through the callback function
def cleanup():
if dst_rank != self._rank:
param.clear_gradient(False)
elif self._offload:
self._sharding_optimizers[0]._master_params[
param.name]._copy_gradient_from(param.grad.cpu(
).cast(dtype=Type.fp32.value))
param.clear_gradient(False)
# Synchronize the reduce parameter gradient
self._tasks_flow.append(
Taskflow(
task=dist.reduce(
tensor=param.grad,
dst=dst_rank,
group=self._group,
use_calc_stream=True),
callback=cleanup))
# Multi stream operation will be supported later
dist.wait(
tensor=param.grad,
group=self._group,
use_calc_stream=True)
# Clear the task flow and trigger callback to clear the redundant gradient
self._clear_task_flow()
else:
# Buffer reduction
@paddle.no_grad()
def reduce(*_):
# Skip gradient reduction, do not change status information
if self._grad_reduced[index]:
assert param.grad is not None, "Parameter gradient cannot be None"
# Change reduce information
self._grad_reduced[index] = False
grad_storage = self._grad_storages[param.dtype][dst_rank]
grad_storage.params_checked_in += 1
if grad_storage.all_checked_in:
assert grad_storage.buffer is not None
# Normalize all ranks grad_storage
if not self._accumulate_grads:
grad_storage.buffer.scale_(
scale=self._world_size_scaling)
# Clearing up the grad_storage buffer
def cleanup():
if dst_rank != self._rank:
for p in grad_storage._params:
p.clear_gradient(False)
p._gradient_set_empty(False)
grad_storage.buffer.value().get_tensor()._clear(
)
elif self._offload:
grad_storage.to(device=self._offload_device)
for param in grad_storage._params:
self._sharding_optimizers[0]._master_params[
param.name]._copy_gradient_from(
param.grad.cast(
dtype=Type.fp32.value))
grad_storage.buffer.value().get_tensor()._clear(
)
# Reduce the bucket
grad_storage.sent = True
self._tasks_flow.append(
Taskflow(
task=dist.reduce(
tensor=grad_storage.buffer,
dst=grad_storage.destination,
group=self._group,
use_calc_stream=True),
callback=cleanup))
# Multi stream operation will be supported later
dist.wait(
tensor=grad_storage.buffer,
group=self._group,
use_calc_stream=True)
# Clear the task flow and trigger callback to clear the redundant gradient
self._clear_task_flow()
return reduce
def _setup_backward_hooks(self):
"""
        Set backward hooks that synchronize gradients across ranks by reducing each gradient to the rank that owns it within the group.
"""
# Remove previous backward hooks
while len(self._bw_hooks) > 0:
self._bw_hooks.pop().remove()
# Go through the parameters, attach the hook
if not self.training:
return
for index, param in enumerate(self._trainable_params):
dst_rank = self._trainable_param2rank[param.name]
reduce_function = self._get_reduce_fn(index, param, dst_rank)
self._bw_hooks.append(
param._register_backward_hook(reduce_function))
@paddle.no_grad()
def _sync_params_and_buffers(self):
"""
Sync all model states for all ranks
"""
for t in self._layer.parameters():
dist.broadcast(
t,
src=self._global_root_rank,
group=self._group,
use_calc_stream=True)
# Multi stream operation will be supported later
dist.wait(tensor=t, group=self._group, use_calc_stream=True)
def _setup_use_grad_storage(self):
"""
Integrate the parameters gradient into a continuous memory according to rank, and support the update of training parameters.
"""
if not self._use_grad_storage:
return
# According to parameters's numel sort, allocate memory of parameter gradient to continuous memory according to rank
self._grad_storages = {}
self._has_grad_storage = [False for _ in self._trainable_params]
for index, param in enumerate(self._trainable_params):
dst_rank = self._trainable_param2rank[param.name]
if param.dtype not in self._grad_storages.keys():
self._grad_storages[param.dtype] = {}
if dst_rank not in self._grad_storages[param.dtype].keys():
self._grad_storages[param.dtype][dst_rank] = GradStorage(
self._buffer_max_size[param.dtype],
dtype=param.dtype,
device=self._default_device,
destination=dst_rank,
parm2align=self._trainable_param2align)
# Criteria to decide whether this parameter is to be put in GradStorage
if self._grad_storages[param.dtype][dst_rank].can_add_grad_view(
param, self._trainable_param2align[param.name]):
self._grad_storages[param.dtype][dst_rank].add_grad(
param, self._trainable_param2align[param.name])
self._has_grad_storage[index] = True
else:
self._param_grads.append(param.name)
print(
"Can not add param: {}, param's shape: {}, param align: {}, grad_storages fill: {}, ".
format(param.name, param.shape, self._trainable_param2align[
param.name], self._grad_storages[param.dtype][dst_rank]
._fill))
self._grad_storage_list = list(
chain(*[
self._grad_storages[dtype].values()
for dtype in self._grad_storages.keys()
]))
def _clear_task_flow(self):
"""Try to consume the previous tasks."""
while len(self._tasks_flow) > 0:
task = self._tasks_flow.popleft()
if task.callback is not None:
task.callback()
def _detect_train_change(self):
# Current trainable parameters
trainable_mask = list(map(_trainable, self._all_params))
# Whether parameters trainability changed
trainability_changed = trainable_mask != self._trainable_mask
if trainability_changed:
logging.warning(
"Trainable params changed, because of eval/train mode or parameter freezing/unfreeze."
)
self._trainable_mask = trainable_mask
return trainability_changed
def _build_grad_storages(self):
"""
Rebuild grad storages.
"""
# Rebuild fp16/fp32 grad storages
for dtype in self._grad_storages.keys():
for dst_rank, grad_storage in self._grad_storages[dtype].items():
if self._offload or dst_rank != self._rank:
grad_storage.manumal_relase()
grad_storage.rebuild()
def _rank_buffer_size(self, buffer_max_size, model_size):
"""
Generate the minimum buffer size for each rank & Display param sizes and model sizes.
"""
# Initialize buffer size
rank_buffer_size = {}
for shard_opt in self._sharding_optimizers:
if shard_opt.rank_buffer_size:
for dtype in shard_opt.rank_buffer_size.keys():
sizes = max(shard_opt.rank_buffer_size[dtype].values())
rank_buffer_size[dtype] = min(sizes, buffer_max_size)
if Type.fp16.value in rank_buffer_size.keys():
# FP16 GradStorage and model size
print(
"====== FP16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======".
format(rank_buffer_size[Type.fp16.value] / 2**19, model_size / 2
**19))
if Type.fp32.value in rank_buffer_size.keys():
# FP32 GradStorage and model size
print(
"====== FP32 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======".
format(rank_buffer_size[Type.fp32.value] / 2**18, model_size / 2
**18))
return rank_buffer_size
def _redefine_opt_step(self):
if not self._accumulate_grads:
return
grad_func = self._grad_scale
for opt in self._sharding_optimizers:
opt_step = opt.step
def _opt_step(self):
grad_func()
opt_step()
opt.step = MethodType(_opt_step, opt)
def _redefine_opt_clear(self):
clear_func = self._clear_gradients
def _opt_clear(self):
clear_func()
for opt in self._sharding_optimizers:
opt.clear_grad = MethodType(_opt_clear, opt)
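# Usage sketch (illustrative only; the ShardingOptimizerStage2 argument names
# below are assumptions, not taken from this file): wrap a dygraph layer and a
# stage-2 sharding optimizer so each rank only reduces and keeps its own
# gradient shards.
#
#   model = paddle.nn.Linear(1024, 1024)
#   inner_opt = paddle.optimizer.AdamW(parameters=model.parameters())
#   sharded_opt = ShardingOptimizerStage2(params=model.parameters(), optim=inner_opt)
#   model = ShardingStage2(model, sharded_opt, buffer_max_size=2**23)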
|
py | 1a38143b08356b44165324fdbbe5c8e26966d1ba | import os
import numpy
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import ChemicalFeatures
from rdkit import RDConfig
from chainer_chemistry.config import WEAVE_DEFAULT_NUM_MAX_ATOMS
from chainer_chemistry.dataset.preprocessors.common \
import construct_atomic_number_array
from chainer_chemistry.dataset.preprocessors.common \
import MolFeatureExtractionError
from chainer_chemistry.dataset.preprocessors.common import type_check_num_atoms
from chainer_chemistry.dataset.preprocessors.mol_preprocessor \
import MolPreprocessor
ATOM = ['H', 'C', 'N', 'O', 'S', 'Cl', 'Br', 'F', 'P', 'I']
MAX_DISTANCE = 2 # 7
# --- Atom feature extraction ---
def construct_atom_type_vec(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS,
atom_list=None, include_unknown_atom=False):
atom_list = atom_list or ATOM
if include_unknown_atom:
        # all atoms not in `atom_list` are considered "unknown atoms"
        # and their index is `len(atom_list)`
n_atom_type = len(atom_list) + 1
else:
n_atom_type = len(atom_list)
n_atom = mol.GetNumAtoms()
atom_type_vec = numpy.zeros((num_max_atoms, n_atom_type),
dtype=numpy.float32)
for i in range(n_atom):
a = mol.GetAtomWithIdx(i)
try:
atom_idx = atom_list.index(a.GetSymbol())
except ValueError as e:
if include_unknown_atom:
atom_idx = len(atom_list)
else:
raise MolFeatureExtractionError(e)
atom_type_vec[i, atom_idx] = 1.0
return atom_type_vec
def construct_formal_charge_vec(mol,
num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
n_atom = mol.GetNumAtoms()
formal_charge_vec = numpy.zeros((num_max_atoms, 1), dtype=numpy.float32)
for i in range(n_atom):
a = mol.GetAtomWithIdx(i)
formal_charge_vec[i, 0] = a.GetFormalCharge()
return formal_charge_vec
def construct_hybridization_vec(mol,
num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
# TODO(Oono)
# Can we enhance preprocessing speed by making factory once
# prior to calling this function many times?
n_atom = mol.GetNumAtoms()
hybridization_vec = numpy.zeros((num_max_atoms, 3), dtype=numpy.float32)
for i in range(n_atom):
a = mol.GetAtomWithIdx(i)
if a.GetHybridization() is None:
continue
hybridization_type = str(a.GetHybridization())
        if hybridization_type in ('SP', 'SP1'):  # RDKit reports sp hybridization as 'SP'
hybridization_vec[i, 0] = 1.0
elif hybridization_type == 'SP2':
hybridization_vec[i, 1] = 1.0
elif hybridization_type == 'SP3':
hybridization_vec[i, 2] = 1.0
return hybridization_vec
def construct_partial_charge_vec(
mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
AllChem.ComputeGasteigerCharges(mol)
n = mol.GetNumAtoms()
partial_charge_vec = numpy.zeros((num_max_atoms, 1), dtype=numpy.float32)
for i in range(n):
a = mol.GetAtomWithIdx(i)
partial_charge_vec[i, 0] = a.GetProp("_GasteigerCharge")
return partial_charge_vec
def construct_atom_ring_vec(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
nAtom = mol.GetNumAtoms()
sssr = Chem.GetSymmSSSR(mol)
ring_feature = numpy.zeros((num_max_atoms, 6,), dtype=numpy.float32)
for ring in sssr:
ring = list(ring)
for i in range(nAtom):
if i in ring:
ring_size = len(ring)
if ring_size >= 3 and ring_size <= 8:
ring_feature[i, ring_size - 3] = 1.0
return ring_feature
def construct_hydrogen_bonding(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
fdefName = os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
factory = ChemicalFeatures.BuildFeatureFactory(fdefName)
feats = factory.GetFeaturesForMol(mol)
hydrogen_bonding_vec = numpy.zeros((num_max_atoms, 2), dtype=numpy.float32)
for f in feats:
if f.GetFamily() == 'Donor':
idx = f.GetAtomIds()[0]
hydrogen_bonding_vec[idx, 0] = 1.0
if f.GetFamily() == 'Acceptor':
idx = f.GetAtomIds()[0]
hydrogen_bonding_vec[idx, 1] = 1.0
return hydrogen_bonding_vec
def construct_num_hydrogens_vec(mol,
num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
n_hydrogen_vec = numpy.zeros((num_max_atoms, 1), dtype=numpy.float32)
n_atom = mol.GetNumAtoms()
for i in range(n_atom):
n = 0
for j in range(n_atom):
if i == j:
continue
a = mol.GetAtomWithIdx(j)
if a.GetSymbol() != 'H':
continue
k = mol.GetBondBetweenAtoms(i, j)
if k is not None:
n += 1
n_hydrogen_vec[i, 0] = n
return n_hydrogen_vec
def construct_aromaticity_vec(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
aromaticity_vec = numpy.zeros((num_max_atoms, 1), dtype=numpy.float32)
aromatix_atoms = mol.GetAromaticAtoms()
for a in aromatix_atoms:
aromaticity_vec[a.GetIdx()] = 1.0
return aromaticity_vec
def construct_atom_feature(mol, add_Hs,
num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS,
atom_list=None, include_unknown_atom=False):
"""construct atom feature
Args:
mol (Mol): mol instance
add_Hs (bool): if the `mol` instance was added Hs, set True.
num_max_atoms (int): number of max atoms
atom_list (list): list of atoms to extract feature. If None, default
`ATOM` is used as `atom_list`
include_unknown_atom (bool): If False, when the `mol` includes atom
which is not in `atom_list`, it will raise
`MolFeatureExtractionError`.
If True, even the atom is not in `atom_list`, `atom_type` is set
as "unknown" atom.
Returns (numpy.ndarray): 2 dimensional array. First axis size is
`num_max_atoms`, representing each atom index.
Second axis for feature.
"""
atom_type_vec = construct_atom_type_vec(
mol, num_max_atoms, atom_list=atom_list,
include_unknown_atom=include_unknown_atom)
# TODO(nakago): Chilarity
formal_charge_vec = construct_formal_charge_vec(
mol, num_max_atoms=num_max_atoms)
partial_charge_vec = construct_partial_charge_vec(
mol, num_max_atoms=num_max_atoms)
atom_ring_vec = construct_atom_ring_vec(
mol, num_max_atoms=num_max_atoms)
hybridization_vec = construct_hybridization_vec(
mol, num_max_atoms=num_max_atoms)
hydrogen_bonding = construct_hydrogen_bonding(
mol, num_max_atoms=num_max_atoms)
aromaticity_vec = construct_aromaticity_vec(
mol, num_max_atoms=num_max_atoms)
if add_Hs:
num_hydrogens_vec = construct_num_hydrogens_vec(
mol, num_max_atoms=num_max_atoms)
feature = numpy.hstack((atom_type_vec, formal_charge_vec,
partial_charge_vec, atom_ring_vec,
hybridization_vec, hydrogen_bonding,
aromaticity_vec, num_hydrogens_vec))
else:
feature = numpy.hstack((atom_type_vec, formal_charge_vec,
partial_charge_vec, atom_ring_vec,
hybridization_vec, hydrogen_bonding,
aromaticity_vec))
return feature
# --- Pair feature extraction ---
def construct_bond_vec(mol, i, j):
bond_feature_vec = numpy.zeros((4, ), dtype=numpy.float32)
k = mol.GetBondBetweenAtoms(i, j)
if k is not None:
bond_type = str(k.GetBondType())
if bond_type == 'SINGLE':
bond_feature_vec[0] = 1.0
elif bond_type == 'DOUBLE':
bond_feature_vec[1] = 1.0
elif bond_type == 'TRIPLE':
bond_feature_vec[2] = 1.0
elif bond_type == 'AROMATIC':
bond_feature_vec[3] = 1.0
else:
raise ValueError("Unknown bond type {}".format(bond_type))
return bond_feature_vec
def construct_distance_vec(distance_matrix, i, j):
distance = min(MAX_DISTANCE, int(distance_matrix[i][j]))
distance_feature = numpy.zeros((MAX_DISTANCE, ), dtype=numpy.float32)
distance_feature[:distance] = 1.0
return distance_feature
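# Note: the slice assignment above yields a cumulative ("thermometer") encoding
# of graph distance, i.e. the first min(distance, MAX_DISTANCE) entries are set
# to 1 rather than one-hot encoding the exact distance.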
def construct_ring_feature_vec(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
n_atom = mol.GetNumAtoms()
sssr = Chem.GetSymmSSSR(mol)
ring_feature_vec = numpy.zeros(
(num_max_atoms ** 2, 1,), dtype=numpy.float32)
for ring in sssr:
ring = list(ring)
n_atom_in_ring = len(ring)
for i in range(n_atom_in_ring):
for j in range(n_atom_in_ring):
a0 = ring[i]
a1 = ring[j]
ring_feature_vec[a0 * n_atom + a1] = 1
return ring_feature_vec
def construct_pair_feature(mol, num_max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS):
"""construct pair feature
Args:
mol (Mol): mol instance
num_max_atoms (int): number of max atoms
Returns (numpy.ndarray): 2 dimensional array. First axis size is
`num_max_atoms` ** 2, representing index of each atom pair.
Second axis for feature.
"""
n_atom = mol.GetNumAtoms()
distance_matrix = Chem.GetDistanceMatrix(mol)
distance_feature = numpy.zeros((num_max_atoms ** 2, MAX_DISTANCE,),
dtype=numpy.float32)
for i in range(n_atom):
for j in range(n_atom):
distance_feature[i * n_atom + j] = construct_distance_vec(
distance_matrix, i, j)
bond_feature = numpy.zeros((num_max_atoms ** 2, 4,), dtype=numpy.float32)
for i in range(n_atom):
for j in range(n_atom):
bond_feature[i * n_atom + j] = construct_bond_vec(mol, i, j)
ring_feature = construct_ring_feature_vec(mol, num_max_atoms=num_max_atoms)
feature = numpy.hstack((distance_feature, bond_feature, ring_feature))
return feature
class WeaveNetPreprocessor(MolPreprocessor):
"""WeaveNetPreprocessor
WeaveNet must have fixed-size atom list for now, zero_padding option
is always set to True.
Args:
max_atoms (int): Max number of atoms for each molecule, if the
number of atoms is more than this value, this data is simply
ignored.
Setting negative value indicates no limit for max atoms.
add_Hs (bool): If True, implicit Hs are added.
use_fixed_atom_feature (bool):
If True, atom feature is extracted used in original paper.
If it is False, atomic number is used instead.
atom_list (list): list of atoms to extract feature. If None, default
`ATOM` is used as `atom_list`
include_unknown_atom (bool): If False, when the `mol` includes atom
which is not in `atom_list`, it will raise
`MolFeatureExtractionError`.
If True, even the atom is not in `atom_list`, `atom_type` is set
as "unknown" atom.
kekulize (bool): If True, Kekulizes the molecule.
"""
def __init__(self, max_atoms=WEAVE_DEFAULT_NUM_MAX_ATOMS, add_Hs=True,
use_fixed_atom_feature=False, atom_list=None,
include_unknown_atom=False, kekulize=False):
super(WeaveNetPreprocessor, self).__init__(
add_Hs=add_Hs, kekulize=kekulize)
zero_padding = True
if zero_padding and max_atoms <= 0:
raise ValueError('max_atoms must be set to positive value when '
'zero_padding is True')
self.max_atoms = max_atoms
self.add_Hs = add_Hs
self.zero_padding = zero_padding
self.use_fixed_atom_feature = use_fixed_atom_feature
self.atom_list = atom_list
self.include_unknown_atom = include_unknown_atom
def get_input_features(self, mol):
"""get input features for WeaveNet
        WeaveNetPreprocessor automatically adds `H` to `mol`
Args:
mol (Mol):
"""
type_check_num_atoms(mol, self.max_atoms)
if self.use_fixed_atom_feature:
# original paper feature extraction
atom_array = construct_atom_feature(mol, self.add_Hs,
self.max_atoms, self.atom_list,
self.include_unknown_atom)
else:
# embed id of atomic numbers
atom_array = construct_atomic_number_array(mol, self.max_atoms)
pair_feature = construct_pair_feature(mol,
num_max_atoms=self.max_atoms)
return atom_array, pair_feature
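# Usage sketch (illustrative, not part of the original module): assumes RDKit is
# installed; the SMILES string below is an arbitrary example molecule.
if __name__ == '__main__':
    example_mol = Chem.AddHs(Chem.MolFromSmiles('CCO'))  # ethanol with explicit Hs
    preprocessor = WeaveNetPreprocessor(max_atoms=20)
    atoms, pairs = preprocessor.get_input_features(example_mol)
    print(atoms.shape, pairs.shape)  # inspect the resulting feature shapes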
|
py | 1a3814eb7ca054d68fcb0885580bbea0daad3e3b | """
ApiGateway for CloudWedge
Provides implementation details for apigateway service. It follows contract
outlined in cloudwedge.models.AWSService
"""
from os import environ
import boto3
import jmespath
from typing import List, Any, Dict, Optional
from cloudwedge.utils.logger import get_logger
from cloudwedge.utils.tags import TagsApi
from cloudwedge.models import AWSService, AWSResource
REGION = environ.get('REGION')
LOGGER = get_logger("cloudwedge.apigateway")
# Model for Service, extending AWSResource
class ApiGatewayResource(AWSResource):
pass
# Class for Service
class ApiGatewayService(AWSService):
# Name of the service, must be unique
name = "apigateway"
# Cloudwatch alarm service specific values
cloudwatch_namespace = "AWS/ApiGateway"
cloudwatch_dashboard_section_title = "Api Gateway"
cloudwatch_dimension = "EnvironmentName"
# Default metric to be used when metrics are not explicit in tags
default_metrics = ["Latency",
"IntegrationLatency", "5XXError", "4XXError"]
# Alarm defaults for the service, applied if metric default doesnt exist
default_alarm_props = {
'Statistic': "Sum"
}
# List of supported metrics and default configurations
supported_metrics = {
'Latency' :{},
'IntegrationLatency' :{},
'5XXError' :{},
'4XXError' :{}
}
# There are dashboard additions that can be added at the metric level
override_dashboard_metric_properties = {}
@staticmethod
def build_dashboard_widgets(resources: List[ApiGatewayResource]) -> List[Any]:
"""
Build dashboard widgets for the resources
"""
# Get widgets with base method (like calling super)
return AWSService.build_dashboard_widgets(ApiGatewayService, resources)
@ staticmethod
def get_resources(session: boto3.session.Session) -> List[ApiGatewayResource]:
"""
Return all AWS ApiGateway resources within scope, based on the tags
"""
try:
# Get things in a neat apigateway resource object
cleaned_resources: List[ApiGatewayResource] = []
# Get paginator for service
paginator = session.client('apigateway').get_paginator(
'get_rest_apis').paginate()
# Collect all resources
for page_resources in paginator:
for rest_api in page_resources['items']:
rest_api_tags = rest_api.get('tags', {})
# Api gateway returns tag as key value dict, convert it to standard format
# e.g. {'STAGE': 'prod', 'cloudwedge:active': 'true'}
converted_tags = TagsApi.convert_dict_to_tags(rest_api_tags)
# If the active monitoring tag is on the instance, include in resource collection
# Stripping key so no whitespace mismatch
if any((tag['Key'].strip() == AWSService.TAG_ACTIVE and tag['Value'] == 'true') for tag in converted_tags):
# This resource has opted in to cloudwedge
# Get values from tags if they exist
owner_from_tag = TagsApi.get_owner_from_tags(converted_tags)
name_from_tag = TagsApi.get_name_from_tags(converted_tags)
rest_api_name = rest_api['name']
# Setup ApiGateway values
service = ApiGatewayService.name
resource_name = name_from_tag or rest_api_name
resource_id = rest_api_name
resource_owner = owner_from_tag
tags = converted_tags
# Create ApiGateway
clean_resource = ApiGatewayResource(
service=service,
name=resource_name,
uniqueId=resource_id,
cloudwatchDimensionId=resource_id,
owner=resource_owner,
tags=tags
)
# Add to collection
cleaned_resources.append(clean_resource)
return cleaned_resources
except Exception as err:
LOGGER.info(
f"Failed to get resources information with error: {err}")
raise err
|
py | 1a3815a1eb5f805cad0962415fe81d25816ff6b7 | #basic python program
print("double quoted: hello world")
print('single quoted: hello world')
|
py | 1a3817f5d423758f9a12e087f66368deda93e149 | import sys
import os
import numpy as np
inDir = sys.argv[1]
print(inDir)
ratesInWins = {}
for fileName in os.listdir(inDir):
if fileName.endswith(".txt"):
print(fileName)
with open(inDir + "/" + fileName, "rt") as f:
sys.stderr.write("reading {}/{}\n".format(inDir, fileName))
first = True
for line in f:
if first:
first = False
else:
chrom, winMid, recRate = line.strip().split()[:3]
if chrom not in ratesInWins:
ratesInWins[chrom] = []
winMid = int(winMid)
recRate = float(recRate)
ratesInWins[chrom].append((winMid, recRate))
def getWinLenForChrom(ratesInWinsForChrom):
prevWin = ratesInWinsForChrom[0][0]
winLens = {}
for win, recRates in ratesInWinsForChrom[1:]:
winLen = win-prevWin
if winLen in winLens:
winLens[winLen] += 1
else:
winLens[winLen] = 1
prevWin = win
if len(winLens) != 1:
sys.stderr.write("window lengths not consistent within chrom arms!! ARRGHHHH!\n")
winLens = sorted(winLens.keys(), key=lambda x: winLens[x])
return winLens[-1]
def getWinLens(ratesInWins):
winLens = {}
for chrom in ratesInWins:
winLens[chrom] = getWinLenForChrom(ratesInWins[chrom])
return winLens
winLens = getWinLens(ratesInWins)
allRates = []
for chrom in ratesInWins:
for win, recRate in ratesInWins[chrom]:
allRates.append(recRate)
allRates.sort()
lenCutoff = 1/np.mean(allRates) * 1e6
rateCutoff = allRates[int(len(allRates)*0.05)]
sys.stderr.write("rate cutoff: {}; length cutoff: {}\n".format(rateCutoff, lenCutoff))
for chrom in ratesInWins:
halfWinLen = int(winLens[chrom]/2)
mode = 0
runLen = 0
runStart = 1
for winMid, recRate in ratesInWins[chrom]:
winStart = winMid - halfWinLen
winEnd = winMid + halfWinLen
if mode == 1:
if recRate <= rateCutoff:
mode = 0
runLen = 1
runStart = winStart
else:
pass
elif mode == 0:
if recRate <= rateCutoff:
runLen += 1
else:
if winStart-runStart >= lenCutoff:
print(chrom, runStart, winStart, winStart-runStart, runLen)
mode = 1
if mode == 0:
if winEnd-runStart >= lenCutoff:
print(chrom, runStart, winEnd, winEnd-runStart, runLen)
|
py | 1a38187ca2c41952fd9f7fa5a1694f9484ee738e | import json
import random
from hashlib import md5
import pytz
from django.conf import settings
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models, transaction
from django.db.models import Q
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.utils.translation import get_language, gettext_lazy as _, override
from rest_framework.authtoken.models import Token
from pretalx.common.urls import build_absolute_uri
class UserManager(BaseUserManager):
"""The user manager class."""
def create_user(self, password: str = None, **kwargs):
user = self.model(**kwargs)
user.set_password(password)
user.save()
return user
def create_superuser(self, password: str, **kwargs):
user = self.create_user(password=password, **kwargs)
user.is_staff = True
user.is_administrator = True
user.is_superuser = False
user.save(update_fields=['is_staff', 'is_administrator', 'is_superuser'])
return user
def assign_code(obj, length=6):
# This omits some character pairs completely because they are hard to read even on screens (1/I and O/0)
# and includes only one of two characters for some pairs because they are sometimes hard to distinguish in
# handwriting (2/Z, 4/A, 5/S, 6/G).
while True:
code = get_random_string(length=length, allowed_chars=User.CODE_CHARSET)
if not User.objects.filter(code__iexact=code).exists():
obj.code = code
return code
class User(PermissionsMixin, AbstractBaseUser):
"""The pretalx user model.
Users describe all kinds of persons who interact with pretalx: Organisers, reviewers, submitters, speakers.
:param code: A user's alphanumeric code is autogenerated, may not be
changed, and is the unique identifier of that user.
:param name: A name fit for public display. Will be used in the user
interface and for public display for all speakers in all of their
events.
:param password: The password is stored using Django's PasswordField. Use
the ``set_password`` and ``check_password`` methods to interact with it.
:param nick: The nickname field has been deprecated and is scheduled to be
deleted. Use the email field instead.
:param groups: Django internals, not used in pretalx.
:param user_permissions: Django internals, not used in pretalx.
"""
EMAIL_FIELD = 'email'
USERNAME_FIELD = 'email'
CODE_CHARSET = list('ABCDEFGHJKLMNPQRSTUVWXYZ3789')
objects = UserManager()
code = models.CharField(max_length=16, unique=True, null=True)
nick = models.CharField(max_length=60, null=True, blank=True)
name = models.CharField(
max_length=120,
verbose_name=_('Name'),
help_text=_('Please enter the name you wish to be displayed publicly. This name will be used for all events you are participating in on this server.'),
)
email = models.EmailField(
unique=True,
verbose_name=_('E-Mail'),
help_text=_(
'Your email address will be used for password resets and notification about your event/submissions.'
),
)
is_active = models.BooleanField(default=True, help_text='Inactive users are not allowed to log in.')
is_staff = models.BooleanField(default=False, help_text='A default Django flag. Not in use in pretalx.')
is_administrator = models.BooleanField(default=False, help_text='Should only be ``True`` for people with administrative access to the server pretalx runs on.')
is_superuser = models.BooleanField(default=False, help_text='Never set this flag to ``True``, since it short-circuits all authorization mechanisms.')
locale = models.CharField(
max_length=32,
default=settings.LANGUAGE_CODE,
choices=settings.LANGUAGES,
verbose_name=_('Preferred language'),
)
timezone = models.CharField(
choices=[(tz, tz) for tz in pytz.common_timezones], max_length=30, default='UTC'
)
avatar = models.ImageField(
null=True,
blank=True,
verbose_name=_('Profile picture'),
help_text=_('If possible, upload an image that is least 120 pixels wide.'),
)
get_gravatar = models.BooleanField(
default=False,
verbose_name=_('Retrieve profile picture via gravatar'),
help_text=_(
'If you have registered with an email address that has a gravatar account, we can retrieve your profile picture from there.'
),
)
pw_reset_token = models.CharField(null=True, max_length=160, verbose_name='Password reset token')
pw_reset_time = models.DateTimeField(null=True, verbose_name='Password reset time')
def __str__(self) -> str:
"""For public consumption as it is used for Select widgets, e.g. on the feedback form."""
return self.name or str(_('Unnamed user'))
def get_display_name(self) -> str:
"""Returns a user's name or 'Unnamed user'."""
return self.name if self.name else str(_('Unnamed user'))
def save(self, *args, **kwargs):
self.email = self.email.lower().strip()
if not self.code:
assign_code(self)
        return super().save(*args, **kwargs)
def event_profile(self, event):
"""Retrieve (and/or create) the event.
:class:`~pretalx.person.models.profile.SpeakerProfile` for this user.
:type event: :class:`pretalx.event.models.event.Event`
:retval: :class:`pretalx.person.models.profile.EventProfile`
"""
from pretalx.person.models.profile import SpeakerProfile
profile = self.profiles.select_related('event').filter(event=event).first()
if profile:
return profile
profile = SpeakerProfile(event=event, user=self)
if self.pk:
profile.save()
return profile
def log_action(self, action: str, data: dict=None, person=None, orga: bool=False):
"""Create a log entry for this user.
:param action: The log action that took place.
:param data: Addition data to be saved.
:param person: The person modifying this user. Defaults to this user.
:type person: :class:`~pretalx.person.models.user.User`
:param orga: Was this action initiated by a privileged user?
"""
from pretalx.common.models import ActivityLog
if data:
data = json.dumps(data)
ActivityLog.objects.create(
person=person or self,
content_object=self,
action_type=action,
data=data,
is_orga_action=orga,
)
def logged_actions(self):
"""Returns all log entries that were made about this user."""
from pretalx.common.models import ActivityLog
return ActivityLog.objects.filter(
content_type=ContentType.objects.get_for_model(type(self)),
object_id=self.pk,
)
def own_actions(self):
"""Returns all log entries that were made by this user."""
from pretalx.common.models import ActivityLog
return ActivityLog.objects.filter(person=self)
def deactivate(self):
"""Delete the user by unsetting all of their information."""
from pretalx.submission.models import Answer
self.email = f'deleted_user_{random.randint(0, 999)}@localhost'
while self.__class__.objects.filter(email__iexact=self.email).exists():
            self.email = f'deleted_user_{random.randint(0, 999)}@localhost'
self.name = 'Deleted User'
self.is_active = False
self.is_superuser = False
self.is_administrator = False
self.locale = 'en'
self.timezone = 'UTC'
self.pw_reset_token = None
self.pw_reset_time = None
self.save()
self.profiles.all().update(biography='')
Answer.objects.filter(
person=self, question__contains_personal_data=True
).delete()
for team in self.teams.all():
team.members.remove(self)
@cached_property
def gravatar_parameter(self) -> str:
return md5(self.email.strip().encode()).hexdigest()
@cached_property
def has_avatar(self) -> bool:
return self.get_gravatar or self.has_local_avatar
@cached_property
def has_local_avatar(self) -> bool:
return self.avatar and self.avatar != 'False'
def get_events_with_any_permission(self):
"""Returns a queryset of events for which this user has any type of
permission."""
from pretalx.event.models import Event
if self.is_administrator:
return Event.objects.all()
return Event.objects.filter(
Q(
organiser_id__in=self.teams.filter(all_events=True).values_list(
'organiser', flat=True
)
)
| Q(id__in=self.teams.values_list('limit_events__id', flat=True))
)
def get_events_for_permission(self, **kwargs):
"""Returns a queryset of events for which this user as all of the given
permissions.
Permissions are given as named arguments, e.g.
``get_events_for_permission(is_reviewer=True)``.
"""
from pretalx.event.models import Event
if self.is_administrator:
return Event.objects.all()
orga_teams = self.teams.filter(**kwargs)
absolute = orga_teams.filter(all_events=True).values_list(
'organiser', flat=True
)
relative = orga_teams.filter(all_events=False).values_list(
'limit_events', flat=True
)
return Event.objects.filter(
models.Q(organiser__in=absolute) | models.Q(pk__in=relative)
).distinct()
def get_permissions_for_event(self, event) -> set:
"""Returns a set of all permission a user has for the given event.
:type event: :class:`~pretalx.event.models.event.Event`
"""
if self.is_administrator:
return {
'can_create_events',
'can_change_teams',
'can_change_organiser_settings',
'can_change_event_settings',
'can_change_submissions',
'is_reviewer',
}
teams = event.teams.filter(members__in=[self])
if not teams:
return set()
return set().union(*[team.permission_set for team in teams])
def remaining_override_votes(self, event) -> int:
"""Returns the amount of override votes a user may still give in
reviews in the given event.
:type event: :class:`~pretalx.event.models.event.Event`
"""
allowed = max(
event.teams.filter(members__in=[self], is_reviewer=True).values_list(
'review_override_votes', flat=True
)
or [0]
)
overridden = self.reviews.filter(
submission__event=event, override_vote__isnull=False
).count()
return max(allowed - overridden, 0)
def regenerate_token(self) -> Token:
"""Generates a new API access token, deleting the old one."""
self.log_action(action='pretalx.user.token.reset')
Token.objects.filter(user=self).delete()
return Token.objects.create(user=self)
@transaction.atomic
def reset_password(self, event, user=None):
from pretalx.mail.models import QueuedMail
self.pw_reset_token = get_random_string(32)
self.pw_reset_time = now()
self.save()
context = {
'name': self.name or '',
'url': build_absolute_uri(
'orga:auth.recover', kwargs={'token': self.pw_reset_token}
),
}
mail_text = _(
'''Hi {name},
you have requested a new password for your pretalx account.
To reset your password, click on the following link:
{url}
If this wasn\'t you, you can just ignore this email.
All the best,
the pretalx robot'''
)
with override(get_language()):
mail = QueuedMail.objects.create(
subject=_('Password recovery'),
text=str(mail_text).format(**context),
)
mail.to_users.add(self)
mail.send()
self.log_action(
action='pretalx.user.password.reset', person=user, orga=bool(user)
)
|
py | 1a38198fbba11b7a1be9dd67b7eaa85be9a59ed2 | # -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage, (C) 2016 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
# my-objectname are dummy values, please replace them with original values.
import time
from datetime import datetime
from minio import Minio, CopyConditions
from minio.error import ResponseError
client = Minio('s3.amazonaws.com',
access_key='YOUR-ACCESSKEY',
secret_key='YOUR-SECRETKEY')
# client.trace_on(sys.stderr)
copy_conditions = CopyConditions()
# Set modified condition, copy object modified since 2014 April.
t = (2014, 4, 0, 0, 0, 0, 0, 0, 0)
mod_since = datetime.utcfromtimestamp(time.mktime(t))
copy_conditions.set_modified_since(mod_since)
# Set unmodified condition, copy object unmodified since 2014 April.
# copy_conditions.set_unmodified_since(mod_since)
# Set matching ETag condition, copy object which matches the following ETag.
# copy_conditions.set_match_etag("31624deb84149d2f8ef9c385918b653a")
# Set matching ETag except condition, copy object which does not match the
# following ETag.
# copy_conditions.set_match_etag_except("31624deb84149d2f8ef9c385918b653a")
try:
copy_result = client.copy_object("my-bucket", "my-object",
"/my-sourcebucket/my-sourceobject",
copy_conditions)
print(copy_result)
except ResponseError as err:
print(err)
|
py | 1a381a0f4140cf754f64a4f6aaa603611a7684b1 | """
Space : O(1)
Time : O(log n)
"""
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return an integer
# def isBadVersion(version):
class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
low = 1
high = n
while(low <= high):
mid = (low + high) // 2
if not isBadVersion(mid):
if low != mid:
low = mid
else:
low = mid + 1
elif isBadVersion(mid):
if not isBadVersion(mid-1) and isBadVersion(mid):
return mid
else:
high = mid
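# Self-test sketch (illustrative, not part of the original solution):
# isBadVersion is normally provided by the judge; the stub below is a stand-in
# so the binary search can be exercised locally.
def _make_is_bad_version(first_bad):
    return lambda version: version >= first_bad

if __name__ == '__main__':
    isBadVersion = _make_is_bad_version(4)
    print(Solution().firstBadVersion(10))  # expected: 4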
|
py | 1a381a44ce62b0cb51029a4fc0527f70b8d0d814 | """PIL/Tkinter based simulator for InkyWHAT and InkyWHAT."""
import numpy
from . import inky
from . import inky_uc8159
class InkyMock(inky.Inky):
"""Base simulator class for Inky."""
def __init__(self, colour, h_flip=False, v_flip=False):
"""Initialise an Inky pHAT Display.
:param colour: one of red, black or yellow, default: black
"""
global tkinter, ImageTk, Image
try:
import tkinter
except ImportError:
raise ImportError('Simulation requires tkinter')
try:
from PIL import ImageTk, Image
except ImportError:
raise ImportError('Simulation requires PIL ImageTk and Image')
resolution = (self.WIDTH, self.HEIGHT)
if resolution not in inky._RESOLUTION.keys():
raise ValueError('Resolution {}x{} not supported!'.format(*resolution))
self.resolution = resolution
self.width, self.height = resolution
self.cols, self.rows, self.rotation = inky._RESOLUTION[resolution]
self.buf = numpy.zeros((self.height, self.width), dtype=numpy.uint8)
if colour not in ('red', 'black', 'yellow', 'multi'):
raise ValueError('Colour {} is not supported!'.format(colour))
self.colour = colour
self.h_flip = h_flip
self.v_flip = v_flip
impression_palette = [57, 48, 57, # black
255, 255, 255, # white
58, 91, 70, # green
61, 59, 94, # blue
156, 72, 75, # red
208, 190, 71, # yellow
177, 106, 73, # orange
255, 255, 255] # clear
bw_inky_palette = [255, 255, 255, # 0 = white
0, 0, 0] # 1 = black
red_inky_palette = [255, 255, 255, # 0 = white
0, 0, 0, # 1 = black
255, 0, 0] # index 2 is red
ylw_inky_palette = [255, 255, 255, # 0 = white
0, 0, 0, # 1 = black
223, 204, 16] # index 2 is yellow
# yellow color value: screen capture from
# https://www.thoughtsmakethings.com/Pimoroni-Inky-pHAT
self.c_palette = {'black': bw_inky_palette,
'red': red_inky_palette,
'yellow': ylw_inky_palette,
'multi': impression_palette}
self._tk_done = False
self.tk_root = tkinter.Tk()
self.tk_root.title('Inky Preview')
self.tk_root.geometry('{}x{}'.format(self.WIDTH, self.HEIGHT))
self.tk_root.aspect(self.WIDTH, self.HEIGHT, self.WIDTH, self.HEIGHT)
self.tk_root.protocol('WM_DELETE_WINDOW', self._close_window)
self.cv = None
self.cvh = self.HEIGHT
self.cvw = self.WIDTH
def wait_for_window_close(self):
"""Wait until the Tkinter window has closed."""
while not self._tk_done:
self.tk_root.update_idletasks()
self.tk_root.update()
def _close_window(self):
self._tk_done = True
self.tk_root.destroy()
def resize(self, event):
"""Resize background image to window size."""
# adapted from:
# https://stackoverflow.com/questions/24061099/tkinter-resize-background-image-to-window-size
# https://stackoverflow.com/questions/19838972/how-to-update-an-image-on-a-canvas
self.cvw = event.width
self.cvh = event.height
self.cv.config(width=self.cvw, height=self.cvh)
image = self.disp_img_copy.resize([self.cvw, self.cvh])
self.photo = ImageTk.PhotoImage(image)
self.cv.itemconfig(self.cvhandle, image=self.photo, anchor='nw')
self.tk_root.update()
def _send_command(self, command, data=None):
pass
def _simulate(self, region):
pass
def _display(self, region):
im = Image.fromarray(region, 'P')
im.putpalette(self.c_palette[self.colour])
self.disp_img_copy = im.copy() # can be changed due to window resizing, so copy
image = self.disp_img_copy.resize([self.cvw, self.cvh])
self.photo = ImageTk.PhotoImage(image)
if self.cv is None:
self.cv = tkinter.Canvas(self.tk_root, width=self.WIDTH, height=self.HEIGHT)
self.cv.pack(side='top', fill='both', expand='yes')
self.cvhandle = self.cv.create_image(0, 0, image=self.photo, anchor='nw')
self.cv.bind('<Configure>', self.resize)
self.tk_root.update()
def show(self, busy_wait=True):
"""Show buffer on display.
:param busy_wait: Ignored. Updates are simulated and instant.
"""
print('>> Simulating {} {}x{}...'.format(self.colour, self.WIDTH, self.HEIGHT))
region = self.buf
if self.v_flip:
region = numpy.fliplr(region)
if self.h_flip:
region = numpy.flipud(region)
if self.rotation:
region = numpy.rot90(region, self.rotation // 90)
self._simulate(region)
class InkyMockPHAT(InkyMock):
"""Inky PHAT (212x104) e-Ink Display Simulator."""
WIDTH = 212
HEIGHT = 104
WHITE = 0
BLACK = 1
RED = 2
YELLOW = 2
def _simulate(self, region):
region = numpy.rot90(region, self.rotation // 90)
region = numpy.flipud(region) # spec: phat rotated -90
region = numpy.fliplr(region) # spec: phat rotated -90
self._display(region)
class InkyMockPHATSSD1608(InkyMock):
"""Inky PHAT SSD1608 (250x122) e-Ink Display Simulator."""
WIDTH = 250
HEIGHT = 122
WHITE = 0
BLACK = 1
RED = 2
YELLOW = 2
def _simulate(self, region):
region = numpy.rot90(region, self.rotation // 90)
region = numpy.flipud(region) # spec: phat rotated -90
region = numpy.fliplr(region) # spec: phat rotated -90
self._display(region)
class InkyMockWHAT(InkyMock):
"""Inky wHAT e-Ink Display Simulator."""
WIDTH = 400
HEIGHT = 300
WHITE = 0
BLACK = 1
RED = 2
YELLOW = 2
def _simulate(self, region):
region = numpy.rot90(region, self.rotation // 90)
region = region.reshape(300, 400) # for display
self._display(region)
class InkyMockImpression(InkyMock):
"""Inky Impression e-Ink Display Simulator."""
BLACK = 0
WHITE = 1
GREEN = 2
BLUE = 3
RED = 4
YELLOW = 5
ORANGE = 6
CLEAN = 7
WIDTH = 600
HEIGHT = 448
def __init__(self):
"""Initialize a new mock Inky Impression."""
InkyMock.__init__(self, 'multi')
def _simulate(self, region):
self._display(region)
def set_pixel(self, x, y, v):
"""Set a single pixel on the display."""
self.buf[y][x] = v & 0xf
def set_image(self, image, saturation=0.5):
"""Copy an image to the display.
:param image: PIL image to copy, must be 600x448
:param saturation: Saturation for quantization palette - higher value results in a more saturated image
"""
if not image.size == (self.width, self.height):
raise ValueError("Image must be ({}x{}) pixels!".format(self.width, self.height))
if not image.mode == "P":
if Image is None:
raise RuntimeError("PIL is required for converting images: sudo apt install python-pil python3-pil")
palette = inky_uc8159.Inky._palette_blend(self, saturation)
# Image size doesn't matter since it's just the palette we're using
palette_image = Image.new("P", (1, 1))
# Set our 7 colour palette (+ clear) and zero out the other 247 colours
palette_image.putpalette(palette + [0, 0, 0] * 248)
# Force source image data to be loaded for `.im` to work
image.load()
image = image.im.convert("P", True, palette_image.im)
self.buf = numpy.array(image, dtype=numpy.uint8).reshape((self.rows, self.cols))
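# --- Hedged usage sketch (added; not part of the original module) ------------
# Minimal example of driving the Impression simulator defined above. It only
# relies on methods visible in this file (set_pixel and show); PIL and tkinter
# must be installed for the window to appear. Purely illustrative.
def _impression_demo():
    display = InkyMockImpression()
    # draw a horizontal red line across the middle of the 600x448 buffer
    for x in range(display.WIDTH):
        display.set_pixel(x, display.HEIGHT // 2, display.RED)
    display.show()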
|
py | 1a381afc76417cb474130043db904626cd45af1b | # Handler for ulno-iot devkit1
import config
if config.display:
import ulno_iot_display as dp
if config.devel:
import ulno_iot_devel as dv
if config.ht:
import ulno_iot_ht as ht
import gc
gc.collect()
import wifi
import machine
from machine import Pin
import time
import ubinascii
from umqtt.simple import MQTTClient
gc.collect()
# make unique
config.mqtt_client_id += b"_" + ubinascii.hexlify(machine.unique_id())
blue_state_topic = config.mqtt_topic + b"/blue"
blue_command_topic = blue_state_topic + b"/set"
if config.devel:
red_state_topic = config.mqtt_topic + b"/red"
red_command_topic = red_state_topic + b"/set"
yellow_state_topic = config.mqtt_topic + b"/yellow"
yellow_command_topic = yellow_state_topic + b"/set"
left_state_topic = config.mqtt_topic + b"/left"
right_state_topic = config.mqtt_topic + b"/right"
bottom_state_topic = config.mqtt_topic + b"/bottom"
if config.ht:
temperature_state_topic = config.mqtt_topic + b"/temperature"
humidity_state_topic = config.mqtt_topic + b"/humidity"
if config.display:
text_command_topic = config.mqtt_topic + b"/text"
OVERFLOW = 1000
onoff = [b'off', b'on']
def publish_status():
global client
try:
client.publish(blue_state_topic, onoff[dv.blue() ^ 1])  # blue LED is active-low (see callback below), hence the inversion
if config.devel:
client.publish(red_state_topic, onoff[dv.red()])
client.publish(yellow_state_topic, onoff[dv.yellow()])
client.publish(left_state_topic, onoff[dv.left_button() ^ 1])
client.publish(right_state_topic, onoff[dv.right_button() ^ 1])
client.publish(bottom_state_topic, onoff[dv.lower_button() ^ 1])
if config.ht:
client.publish(temperature_state_topic, str(ht.temperature()).encode())
client.publish(humidity_state_topic, str(ht.humidity()).encode())
print('Published status.')
except:
print('Trouble publishing.')
init_client()
def callback(topic, msg):
if config.devel and topic == red_command_topic:
print("Received red in callback:", msg)
msg = msg.decode().lower()
if msg.startswith('on'):
dv.red.high()
elif msg.startswith('off'):
dv.red.low()
elif config.devel and topic == yellow_command_topic:
print("Received yellow in callback:", msg)
msg = msg.decode().lower()
if msg.startswith('on'):
dv.yellow.high()
elif msg.startswith('off'):
dv.yellow.low()
elif config.devel and topic == blue_command_topic:
msg = msg.decode().lower()
if msg.startswith('on'):
dv.blue.low()
elif msg.startswith('off'):
dv.blue.high()
elif config.display and topic == text_command_topic:
print("Received text in callback:", msg)
try:
msg = msg.decode()
if msg == "&&clear":
dp.clear()
else:
dp.println(msg)
except:
pass
def init_client():
global client
print("Trying to connect to mqtt broker.")
wifi.connect()
try:
client = MQTTClient(config.mqtt_client_id, config.mqtt_broker, user=config.mqtt_user,
password=config.mqtt_password)
client.set_callback(callback)
client.connect()
print("Connected to {}".format(config.mqtt_broker))
t = config.mqtt_topic + b"/#"
client.subscribe(t)
print("Subscribed to %s topic" % t)
except:
print("Trouble to init mqtt.")
def receive_sub():
global client
try:
client.check_msg() # non blocking
except:
print("Trouble to receive from mqtt.")
def run():
init_client()
counter = 0
last_left = dv.left_button()
last_right = dv.right_button()
last_lower = dv.lower_button()
while True:
if counter % 10 == 0: # every 10th of second
receive_sub()
if last_left != dv.left_button() \
or last_right != dv.right_button() \
or last_lower != dv.lower_button():
last_left = dv.left_button()
last_right = dv.right_button()
last_lower = dv.lower_button()
publish_status()
if counter % 500 == 0: # every 5s
publish_status()
time.sleep(0.01)
counter += 1
if counter >= OVERFLOW:
counter = 0
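# --- Hedged note (added): shape of the assumed config module -----------------
# This handler imports a separate config.py that is not part of this file.
# Judging from the attribute accesses above, a minimal compatible sketch is:
#
#   devel = True                    # ulno_iot_devel board attached
#   display = False                 # ulno_iot_display attached
#   ht = False                      # humidity/temperature sensor attached
#   mqtt_client_id = b"devkit1"     # extended with the machine id at startup
#   mqtt_topic = b"home/devkit1"
#   mqtt_broker = "192.168.1.2"     # placeholder broker address
#   mqtt_user = b"user"
#   mqtt_password = b"secret"
#
# All names and values here are illustrative assumptions, not the real file.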
|
py | 1a381b0ddb2da2aa22a5e99d709a5e741ca05107 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds useful functions for working with dictionaries representing policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import numpy as np
from typing import Dict, List, Tuple, Text
def get_best_response_actions_as_string(
best_response_actions: Dict[bytes, int]) -> Text:
"""Turns a dict<bytes, int> into a bytestring compatible with C++.
i.e. the bytestring can be copy-pasted as the brace initialization for a
{std::unordered_,std::,absl::flat_hash_}map<std::string, int>.
Args:
best_response_actions: A dict mapping bytes to ints.
Returns:
A bytestring that can be copy-pasted to brace-initialize a C++
std::map<std::string, T>.
"""
best_response_keys = sorted(best_response_actions.keys())
best_response_strings = [
"%s: %i" % (k, best_response_actions[k]) for k in best_response_keys
]
return "{%s}" % (", ".join(best_response_strings))
def tabular_policy_to_cpp_map(
policy: Dict[bytes, List[Tuple[int, np.float64]]]) -> Text:
"""Turns a policy into a C++ compatible bytestring for brace-initializing.
Args:
policy: A dict representing a tabular policy. The keys are infostate
bytestrings.
Returns:
A bytestring that can be copy-pasted to brace-initialize a C++
std::map<std::string, open_spiel::ActionsAndProbs>.
"""
cpp_entries = []
policy_keys = sorted(policy.keys())
for key in policy_keys:
tuple_strs = ["{%i, %s}" % (p[0], p[1].astype(str)) for p in policy[key]]
value = "{" + ", ".join(tuple_strs) + "}"
cpp_entries.append('{"%s", %s}' % (key, value))
return "{%s}" % (",\n".join(cpp_entries))
|
py | 1a381b4ebc02cfefe7f2d31dabc8724098cdfe8a | #database.py creates a .db file for performing umls searches.
import sqlite3
import os
import sys
import atexit
features_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if features_dir not in sys.path:
sys.path.append(features_dir)
# find where umls tables are located
from read_config import enabled_modules
enabled = enabled_modules()
umls_tables = enabled['UMLS']
# set to True when create_db() is successful
success = False
db_path = None
conn = None
MRSTY_TABLE_FILE = None
MRCON_TABLE_FILE = None
MRREL_TABLE_FILE = None
LRABR_TABLE_FILE = None
# this ensures files are closed properly and umls.db is removed if not successful
@atexit.register
def umls_db_cleanup():
global success
global conn
global db_path
global MRSTY_TABLE_FILE
global MRCON_TABLE_FILE
global MRREL_TABLE_FILE
global LRABR_TABLE_FILE
if conn is not None:
conn.close()
if MRSTY_TABLE_FILE is not None:
MRSTY_TABLE_FILE.close()
if MRCON_TABLE_FILE is not None:
MRCON_TABLE_FILE.close()
if MRREL_TABLE_FILE is not None:
MRREL_TABLE_FILE.close()
if LRABR_TABLE_FILE is not None:
LRABR_TABLE_FILE.close()
if success is False:
# remove umls.db, it is junk now
if db_path is not None:
os.remove(db_path)
print('\n\tError: umls.db was not created successfully.\n', file=sys.stderr)
def create_db():
global success
global conn
global db_path
global MRSTY_TABLE_FILE
global MRCON_TABLE_FILE
global MRREL_TABLE_FILE
global LRABR_TABLE_FILE
print ("\ncreating umls.db")
#connect to the .db file we are creating.
db_path = os.path.join(umls_tables, 'umls.db')
conn = sqlite3.connect( db_path )
conn.text_factory = str
print ("opening files")
#load data in files.
try:
mrsty_path = os.path.join(umls_tables, 'MRSTY.RRF')
MRSTY_TABLE_FILE = open( mrsty_path, "r" )
except IOError:
print ("\nNo file to use for creating MRSTY.RRF table\n")
sys.exit()
try:
mrcon_path = os.path.join(umls_tables, 'MRCONSO.RRF')
MRCON_TABLE_FILE = open( mrcon_path , "r" )
except IOError:
print ("\nNo file to use for creating MRCONSO.RRF table\n")
sys.exit()
try:
mrrel_path = os.path.join(umls_tables, 'MRREL.RRF')
MRREL_TABLE_FILE = open( mrrel_path , "r" )
except IOError:
print ("\nNo file to use for creating MRREL.RRF table\n")
sys.exit()
try:
lrabr_path = os.path.join(umls_tables, 'LRABR')
LRABR_TABLE_FILE = open( lrabr_path , "r" )
except IOError:
print ("\nNo file to use for creating LRABR table\n")
sys.exit()
print ("creating tables")
c = conn.cursor()
#create tables.
c.execute( "CREATE TABLE MRSTY( CUI, TUI, STN, STY, ATUI, CVF ) ;" )
c.execute( "CREATE TABLE MRCON( CUI, LAT, TS, LUI, STT, SUI, ISPREF, AUI, SAUI, SCUI, SDUI, SAB, TTY, CODE, STR, SRL, SUPPRESS, CVF ) ;" )
c.execute( "CREATE TABLE MRREL( CUI1, AUI1, STYPE1, REL, CUI2, AUI2, STYPE2, RELA, RUI, SRUI, SAB, SL, RG, DIR, SUPPRESS, CVF );")
c.execute( "CREATE TABLE LRABR( EUI1, ABR, TYPE, EUI2, STR);")
print ("inserting data into MRSTY table")
for line in MRSTY_TABLE_FILE:
line = line.strip('\n')
assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 6
c.execute( "INSERT INTO MRSTY( CUI, TUI, STN, STY, ATUI, CVF ) values( ?, ?, ?, ?, ?, ?)" , tuple(line))
print ("inserting data into MRCON table")
for line in MRCON_TABLE_FILE:
line = line.strip('\n')
assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 18
c.execute( "INSERT INTO MRCON( CUI, LAT, TS, LUI, STT, SUI, ISPREF, AUI, SAUI, SCUI, SDUI, SAB, TTY, CODE, STR, SRL, SUPPRESS, CVF ) values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);", tuple(line))
print ("inserting data into MRREL table")
for line in MRREL_TABLE_FILE:
line = line.strip('\n')
assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 16
c.execute( "INSERT INTO MRREL( CUI1, AUI1, STYPE1, REL, CUI2, AUI2, STYPE2, RELA, RUI, SRUI, SAB, SL, RG, DIR, SUPPRESS, CVF ) values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )" , tuple(line))
print ( "inserting into LRABR table")
for line in LRABR_TABLE_FILE:
line = line.strip('\n')
assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])
line = line.split('|')
line.pop()
assert len(line) == 5
c.execute( "INSERT INTO LRABR( EUI1, ABR, TYPE, EUI2, STR) values( ?, ?, ?, ?,?)" , tuple(line) )
print ( "creating indices")
#create indices for faster queries
c.execute( "CREATE INDEX mrsty_cui_map ON MRSTY(CUI)")
c.execute( "CREATE INDEX mrcon_str_map ON MRCON(STR)")
c.execute( "CREATE INDEX mrcon_cui_map ON MRCON(CUI)")
c.execute( "CREATE INDEX mrrel_cui2_map ON MRREL( CUI2 )" )
c.execute( "CREATE INDEX mrrel_cui1_map on MRREL( CUI1 ) " )
c.execute( "CREATE INDEX mrrel_rel_map on MRREL( REL )" )
c.execute( "CREATE INDEX lrabr_abr_map on LRABR(ABR)")
c.execute( "CREATE INDEX lrabr_str_map on LRABR(STR)")
#save changes to .db
conn.commit()
success = True
print ( "\nsqlite database created")
if __name__ == "__main__":
create_db()
|
py | 1a381b8511c7524f3f8331b5c64c3e07385898b1 | #!/usr/bin/env python3
import os
import time
import codecs
import getopt
import sys
import pickle
"""
Generates the C header file that #defines the numerical message log IDs
"""
def write_ids(defname, dstname, strname):
current = 0
strings = {}
seen = set()
with codecs.getreader('utf-8')(open(defname, 'rb')) as src:
with codecs.getwriter('utf-8')(open(dstname, 'wb')) as dst:
name = os.path.basename(dstname).upper().replace('.', '_')
dst.write('\n#ifndef %s\n' % name)
dst.write('#define %s\n\n' % name)
for line in src:
line = line.strip()
if not line:
dst.write('\n')
continue
if line.startswith('#'):
dst.write('/* %s */\n' % line[1:].strip())
continue
idx = line.index(' ')
name, value = line[:idx], line[idx:].strip()
if value in seen:
raise ValueError('Duplicate string ID "%s"' % value)
seen.add(value)
dst.write('#define %s %d // %s\n' % (name, current, value))
strings[current] = value
current += 1
if current == 65536:
raise ValueError('Too many string IDs')
dst.write('#endif\n')
with open(strname, 'wb') as fileobj:
pickle.dump(strings, fileobj)
def usage(code=1):
print('Usage: %s -o output_filename -s strings_filename input_filename' % sys.argv[0])
sys.exit(code)
def main(argv):
try:
opts, args = getopt.getopt(argv, 'ho:s:', ['help', 'output=', 'strings='])
except getopt.GetoptError as exc:
print(exc)
usage()
if len(args) == 0:
print('No input file specified')
usage()
if len(args) > 1:
print('Too many arguments')
usage()
outname = None
strname = None
for opt, val in opts:
if opt in ('-h', '--help'):
usage(0)
if opt in ('-o', '--output'):
outname = val
if opt in ('-s', '--strings'):
strname = val
if outname is None:
print('No output file specified')
usage()
if strname is None:
print('No strings file specified')
usage()
try:
write_ids(args[0], outname, strname)
except Exception as exc:
print(exc)
usage()
if __name__ == '__main__':
main(sys.argv[1:])
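# --- Hedged note (added): expected input format -------------------------------
# write_ids() consumes a plain-text definition file in which '#' lines become
# header comments and every other non-empty line is "<NAME> <message text>".
# The names and strings below are invented for illustration:
#
#   # Boot messages
#   MSG_BOOT_OK system booted
#   MSG_BOOT_FAIL boot failure
#
# which would yield "#define MSG_BOOT_OK 0 // system booted" (IDs start at 0)
# in the generated header, plus a pickled {id: string} map in the strings file.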
|
py | 1a381bc7258251223d8cdeb6fc4416009f3eda63 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from jinja2 import Template
from flexget.plugins.parsers.parser_guessit import ParserGuessit
from flexget.plugins.parsers.parser_internal import ParserInternal
from flexget.utils.qualities import Quality
class TestQualityModule(object):
def test_get(self):
assert not Quality(), 'unknown quality is not false'
assert Quality('foobar') == Quality(), 'unknown not returned'
def test_common_name(self):
for test_val in ('720p', '1280x720'):
got_val = Quality(test_val).name
assert got_val == '720p', got_val
class TestQualityParser(object):
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'], autouse=True)
def parser(self, request):
if request.param == 'internal':
return ParserInternal
if request.param == 'guessit':
return ParserGuessit
@pytest.mark.parametrize("test_quality", [
('Test.File 1080p.web.vp9', '1080p webdl vp9', False),
('Test.File 1080p.web', '1080p webdl'),
('Test.File.2160p.web', '2160p webdl'),
('Test.File.1080.web-random', '1080p webdl'),
('Test.File.1080.webrandom', '1080p'),
('Test.File 1080p.web-dl', '1080p webdl'),
('Test.File.web-dl.1080p', '1080p webdl'),
('Test.File.WebHD.720p', '720p webdl'),
('Test.File.720p.bluray', '720p bluray'),
('Test.File.720hd.bluray', '720p bluray'),
('Test.File.1080p.bluray', '1080p bluray'),
('Test.File.2160p.bluray', '2160p bluray'),
('Test.File.1080p.cam', '1080p cam'),
('A Movie 2011 TS 576P XviD-DTRG', '576p ts xvid'),
('Test.File.720p.bluray.r5', '720p r5'),
('Test.File.1080p.bluray.rc', '1080p r5'),
# 10bit
('Test.File.480p.10bit', '480p 10bit'),
('Test.File.720p.10bit', '720p 10bit'),
('Test.File.720p.bluray.10bit', '720p bluray 10bit'),
('Test.File.1080p.10bit', '1080p 10bit'),
('Test.File.1080p.bluray.10bit', '1080p bluray 10bit'),
('Test.File.720p.web', '720p webdl'),
('Test.File.720p.webdl', '720p webdl'),
('Test.File.1280x720_web dl', '720p webdl'),
('Test.File.720p.h264.web.dl', '720p webdl h264'),
('Test.File.1080p.webhd.x264', '1080p webdl h264'),
('Test.File.480.hdtv.x265', '480p hdtv h265'),
('Test.File.web', 'webdl'),
('Test.File.web-dl', 'webdl'),
('Test.File.720P', '720p'),
('Test.File.1920x1080', '1080p'),
('Test.File.3840x2160', '2160p'),
('Test.File.1080i', '1080i'),
('Test File blurayrip', 'bluray'),
('Test.File.br-rip', 'bluray'),
('Test.File.720px', '720p'),
('Test.File.720p50', '720p'),
('Test.File.720p60', '720p'),
('Test.File.dvd.rip', 'dvdrip'),
('Test.File.dvd.rip.r5', 'r5'),
('Test.File.[576p][00112233].mkv', '576p'),
('Test.TS.FooBar', 'ts'),
('Test.File.360p.avi', '360p'),
('Test.File.[360p].mkv', '360p'),
('Test.File.368.avi', '368p'),
('Test.File.720p.hdtv.avi', '720p hdtv'),
('Test.File.1080p.hdtv.avi', '1080p hdtv'),
('Test.File.720p.preair.avi', '720p preair'),
# ('Test.File.ts.dvdrip.avi', 'ts'), This should not exist. Having Telesync and DVDRip together is nonsense.
('Test.File.HDTS.blah', 'ts'),
# ('Test.File.HDCAM.bluray.lie', 'cam'), This should not exist. Having Cam and Bluray together is nonsense.
# Test qualities as part of words. #1593
('Tsar.File.720p', '720p'),
('Camera.1080p', '1080p'),
# Some audio formats
('Test.File.DTSHDMA', 'dtshd'),
('Test.File.DTSHD.MA', 'dtshd'),
('Test.File.DTS.HDMA', 'dtshd'),
('Test.File.dts.hd.ma', 'dtshd'),
('Test.File.DTS.HD', 'dtshd'),
('Test.File.DTSHD', 'dtshd'),
('Test.File.DTS', 'dts'),
('Test.File.truehd', 'truehd'),
('Test.File.DTSHDMA', 'dtshd'),
('Test.File.DD2.0', 'dd5.1'),
('Test.File.AC35.1', 'ac3')
])
def test_quality_failures(self, parser, test_quality):
# Kind of a hack to get around the awful limitations of Guessit without creating extra tests
guessit = test_quality[2] if len(test_quality) > 2 else False
if not guessit and parser.__name__ == 'ParserGuessit':
return
quality = parser().parse_movie(test_quality[0]).quality
assert str(quality) == test_quality[1], ('`%s` quality should be `%s` not `%s`' % (
test_quality[0], test_quality[1], quality
))
class TestQualityInternalParser(object):
@pytest.mark.parametrize("test_quality", [
('Test.File.DD+5.1', 'dd+5.1'),
('Test.File.DDP5.1', 'dd+5.1'),
('Test.File.DDP7.1', 'dd+5.1'),
('Test.File.DD5.1', 'dd5.1'),
('Test.File.DD4.0', 'dd5.1'),
('Test.File.DD2.1', 'dd5.1'),
('Test.File.FLAC1.0', 'flac'),
])
def test_quality_failures(self, test_quality):
quality = ParserInternal().parse_movie(test_quality[0]).quality
assert str(quality) == test_quality[1], ('`%s` quality should be `%s` not `%s`' % (
test_quality[0], test_quality[1], quality
))
class TestFilterQuality(object):
_config = """
templates:
global:
parsing:
series: {{parser}}
movie: {{parser}}
mock:
- {title: 'Smoke.1280x720'}
- {title: 'Smoke.HDTV'}
- {title: 'Smoke.cam'}
- {title: 'Smoke.HR'}
accept_all: yes
tasks:
qual:
quality:
- hdtv
- 720p
min:
quality: HR+
max:
quality: "<=cam <HR"
min_max:
quality: HR-720i
"""
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'])
def config(self, request):
"""Override and parametrize default config fixture."""
return Template(self._config).render({'parser': request.param})
def test_quality(self, execute_task):
task = execute_task('qual')
entry = task.find_entry('rejected', title='Smoke.cam')
assert entry, 'Smoke.cam should have been rejected'
entry = task.find_entry(title='Smoke.1280x720')
assert entry, 'entry not found?'
assert entry in task.accepted, '720p should be accepted'
assert len(task.rejected) == 2, 'wrong number of entries rejected'
assert len(task.accepted) == 2, 'wrong number of entries accepted'
def test_min(self, execute_task):
task = execute_task('min')
entry = task.find_entry('rejected', title='Smoke.HDTV')
assert entry, 'Smoke.HDTV should have been rejected'
entry = task.find_entry(title='Smoke.1280x720')
assert entry, 'entry not found?'
assert entry in task.accepted, '720p should be accepted'
assert len(task.rejected) == 2, 'wrong number of entries rejected'
assert len(task.accepted) == 2, 'wrong number of entries accepted'
def test_max(self, execute_task):
task = execute_task('max')
entry = task.find_entry('rejected', title='Smoke.1280x720')
assert entry, 'Smoke.1280x720 should have been rejected'
entry = task.find_entry(title='Smoke.cam')
assert entry, 'entry not found?'
assert entry in task.accepted, 'cam should be accepted'
assert len(task.rejected) == 3, 'wrong number of entries rejected'
assert len(task.accepted) == 1, 'wrong number of entries accepted'
def test_min_max(self, execute_task):
task = execute_task('min_max')
entry = task.find_entry('rejected', title='Smoke.1280x720')
assert entry, 'Smoke.1280x720 should have been rejected'
entry = task.find_entry(title='Smoke.HR')
assert entry, 'entry not found?'
assert entry in task.accepted, 'HR should be accepted'
assert len(task.rejected) == 3, 'wrong number of entries rejected'
assert len(task.accepted) == 1, 'wrong number of entries accepted'
class TestQualityAudio(object):
config = """
tasks:
test_dd_audio_channels:
quality: "dd+5.1"
mock:
- {title: 'My Show S01E05 720p HDTV DD+7.1'}
- {title: 'My Show S01E05 720p HDTV DD+5.0'}
test_dd_audio_min:
quality: ">dd5.1"
mock:
- {title: 'My Show S01E05 720p HDTV DD5.1'}
- {title: 'My Show S01E05 720p HDTV DD+2.0'}
test_dd_audio_max:
quality: "<=dd5.1"
mock:
- {title: 'My Show S01E05 720p HDTV DD5.1'}
- {title: 'My Show S01E05 720p HDTV DD+5.1'}
- {title: 'My Show S01E05 720p HDTV DD+7.1'}
"""
def test_dd_audio_channels(self, execute_task):
task = execute_task('test_dd_audio_channels')
entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD+7.1')
assert entry, 'Entry "My Show S01E05 720p HDTV DD+7.1" should not have been rejected'
assert entry['quality'].audio == 'dd+5.1', 'audio "dd+7.1" should have been parsed as dd+5.1'
entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD+5.0')
assert entry['quality'].audio == 'dd+5.1', 'audio "dd+5.0" should have been parsed as dd+5.1'
def test_dd_audio_min(self, execute_task):
task = execute_task('test_dd_audio_min')
assert len(task.rejected) == 1, 'should have rejected one'
entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD+2.0')
assert entry, 'Entry "My Show S01E05 720p HDTV DD+2.0" should not have been rejected'
assert entry['quality'].audio == 'dd+5.1', 'audio should have been parsed as dd+5.1'
def test_dd_audio_max(self, execute_task):
task = execute_task('test_dd_audio_max')
assert len(task.rejected) == 2, 'should have rejected two'
entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD5.1')
assert entry, 'Entry "My Show S01E05 720p HDTV DD5.1" should not have been rejected'
assert entry['quality'].audio == 'dd5.1', 'audio should have been parsed as dd5.1'
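# --- Hedged usage sketch (added; not part of the test suite) ------------------
# The Quality helper exercised above can also be called directly; this mirrors
# TestQualityModule without the test harness.
def _quality_demo():
    # resolution strings normalise to the common name
    assert Quality('1280x720').name == '720p'
    # unknown qualities are falsy and equal to the default/unknown quality
    assert not Quality('foobar')
    assert Quality('foobar') == Quality()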
|
py | 1a381cdd306a97f659d5ecd1c21f210d472af3b7 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import torchtext.data as data
from ..common.torchtext_test_case import TorchtextTestCase
class TestDataset(TorchtextTestCase):
def test_tabular_simple_data(self):
for data_format in ["csv", "tsv", "json"]:
self.write_test_ppid_dataset(data_format=data_format)
if data_format == "json":
question_field = data.Field(sequential=True)
label_field = data.Field(sequential=False)
fields = {"question1": ("q1", question_field),
"question2": ("q2", question_field),
"label": ("label", label_field)}
else:
question_field = data.Field(sequential=True)
label_field = data.Field(sequential=False)
fields = [("id", None), ("q1", question_field),
("q2", question_field), ("label", label_field)]
dataset = data.TabularDataset(
path=self.test_ppid_dataset_path, format=data_format, fields=fields)
assert len(dataset) == 3
expected_examples = [
(["When", "do", "you", "use", "シ", "instead", "of", "し?"],
["When", "do", "you", "use", "\"&\"",
"instead", "of", "\"and\"?"], "0"),
(["Where", "was", "Lincoln", "born?"],
["Which", "location", "was", "Abraham", "Lincoln", "born?"], "1"),
(["What", "is", "2+2"], ["2+2=?"], "1")]
# Ensure examples have correct contents / test __getitem__
for i in range(len(dataset)):
self.assertEqual(dataset[i].q1, expected_examples[i][0])
self.assertEqual(dataset[i].q2, expected_examples[i][1])
self.assertEqual(dataset[i].label, expected_examples[i][2])
# Test __getattr__
for i, (q1, q2, label) in enumerate(zip(dataset.q1, dataset.q2,
dataset.label)):
self.assertEqual(q1, expected_examples[i][0])
self.assertEqual(q2, expected_examples[i][1])
self.assertEqual(label, expected_examples[i][2])
# Test __iter__
for i, example in enumerate(dataset):
self.assertEqual(example.q1, expected_examples[i][0])
self.assertEqual(example.q2, expected_examples[i][1])
self.assertEqual(example.label, expected_examples[i][2])
def test_json_dataset_one_key_multiple_fields(self):
self.write_test_ppid_dataset(data_format="json")
question_field = data.Field(sequential=True)
spacy_tok_question_field = data.Field(sequential=True, tokenize="spacy")
label_field = data.Field(sequential=False)
fields = {"question1": [("q1", question_field),
("q1_spacy", spacy_tok_question_field)],
"question2": [("q2", question_field),
("q2_spacy", spacy_tok_question_field)],
"label": ("label", label_field)}
dataset = data.TabularDataset(
path=self.test_ppid_dataset_path, format="json", fields=fields)
expected_examples = [
(["When", "do", "you", "use", "シ", "instead", "of", "し?"],
["When", "do", "you", "use", "シ", "instead", "of", "し", "?"],
["When", "do", "you", "use", "\"&\"",
"instead", "of", "\"and\"?"],
["When", "do", "you", "use", "\"", "&", "\"",
"instead", "of", "\"", "and", "\"", "?"], "0"),
(["Where", "was", "Lincoln", "born?"],
["Where", "was", "Lincoln", "born", "?"],
["Which", "location", "was", "Abraham", "Lincoln", "born?"],
["Which", "location", "was", "Abraham", "Lincoln", "born", "?"],
"1"),
(["What", "is", "2+2"], ["What", "is", "2", "+", "2"],
["2+2=?"], ["2", "+", "2=", "?"], "1")]
for i, example in enumerate(dataset):
self.assertEqual(example.q1, expected_examples[i][0])
self.assertEqual(example.q1_spacy, expected_examples[i][1])
self.assertEqual(example.q2, expected_examples[i][2])
self.assertEqual(example.q2_spacy, expected_examples[i][3])
self.assertEqual(example.label, expected_examples[i][4])
def test_errors(self):
# Ensure that trying to retrieve a key not in JSON data errors
self.write_test_ppid_dataset(data_format="json")
question_field = data.Field(sequential=True)
label_field = data.Field(sequential=False)
fields = {"qeustion1": ("q1", question_field),
"question2": ("q2", question_field),
"label": ("label", label_field)}
with self.assertRaises(ValueError):
data.TabularDataset(
path=self.test_ppid_dataset_path, format="json", fields=fields)
def test_input_with_newlines_in_text(self):
# Smoke test for ensuring that TabularDataset works with files with newlines
example_with_newlines = [("\"hello \n world\"", "1"),
("\"there is a \n newline\"", "0"),
("\"there is no newline\"", "1")]
fields = [("text", data.Field(lower=True)),
("label", data.Field(sequential=False))]
for delim in [",", "\t"]:
with open(self.test_newline_dataset_path, "wt") as f:
for line in example_with_newlines:
f.write("{}\n".format(delim.join(line)))
format_ = "csv" if delim == "," else "tsv"
dataset = data.TabularDataset(
path=self.test_newline_dataset_path, format=format_, fields=fields)
# if the newline is not parsed correctly, this should raise an error
for example in dataset:
self.assert_(hasattr(example, "text"))
self.assert_(hasattr(example, "label"))
def test_csv_file_with_header(self):
example_with_header = [("text", "label"),
("HELLO WORLD", "0"),
("goodbye world", "1")]
TEXT = data.Field(lower=True, tokenize=lambda x: x.split())
fields = {
"label": ("label", data.Field(use_vocab=False,
sequential=False)),
"text": ("text", TEXT)
}
for format_, delim in zip(["csv", "tsv"], [",", "\t"]):
with open(self.test_has_header_dataset_path, "wt") as f:
for line in example_with_header:
f.write("{}\n".format(delim.join(line)))
# check that an error is raised here if a non-existent field is specified
with self.assertRaises(ValueError):
data.TabularDataset(
path=self.test_has_header_dataset_path, format=format_,
fields={"non_existent": ("label", data.Field())})
dataset = data.TabularDataset(
path=self.test_has_header_dataset_path, format=format_,
skip_header=False, fields=fields)
TEXT.build_vocab(dataset)
for i, example in enumerate(dataset):
self.assertEqual(example.text,
example_with_header[i + 1][0].lower().split())
self.assertEqual(example.label, example_with_header[i + 1][1])
# check that the vocabulary is built correctly (#225)
expected_freqs = {"hello": 1, "world": 2, "goodbye": 1, "text": 0}
for k, v in expected_freqs.items():
self.assertEqual(TEXT.vocab.freqs[k], v)
data_iter = data.Iterator(dataset, device=-1, batch_size=1,
sort_within_batch=False, repeat=False)
next(data_iter.__iter__())
def test_csv_file_no_header_one_col_multiple_fields(self):
self.write_test_ppid_dataset(data_format="csv")
question_field = data.Field(sequential=True)
spacy_tok_question_field = data.Field(sequential=True, tokenize="spacy")
label_field = data.Field(sequential=False)
# Field name/value as nested tuples
fields = [("ids", None),
(("q1", "q1_spacy"), (question_field, spacy_tok_question_field)),
(("q2", "q2_spacy"), (question_field, spacy_tok_question_field)),
("label", label_field)]
dataset = data.TabularDataset(
path=self.test_ppid_dataset_path, format="csv", fields=fields)
expected_examples = [
(["When", "do", "you", "use", "シ", "instead", "of", "し?"],
["When", "do", "you", "use", "シ", "instead", "of", "し", "?"],
["When", "do", "you", "use", "\"&\"",
"instead", "of", "\"and\"?"],
["When", "do", "you", "use", "\"", "&", "\"",
"instead", "of", "\"", "and", "\"", "?"], "0"),
(["Where", "was", "Lincoln", "born?"],
["Where", "was", "Lincoln", "born", "?"],
["Which", "location", "was", "Abraham", "Lincoln", "born?"],
["Which", "location", "was", "Abraham", "Lincoln", "born", "?"],
"1"),
(["What", "is", "2+2"], ["What", "is", "2", "+", "2"],
["2+2=?"], ["2", "+", "2=", "?"], "1")]
for i, example in enumerate(dataset):
self.assertEqual(example.q1, expected_examples[i][0])
self.assertEqual(example.q1_spacy, expected_examples[i][1])
self.assertEqual(example.q2, expected_examples[i][2])
self.assertEqual(example.q2_spacy, expected_examples[i][3])
self.assertEqual(example.label, expected_examples[i][4])
# 6 Fields including None for ids
assert len(dataset.fields) == 6
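# --- Hedged usage sketch (added; not part of the test suite) ------------------
# Outside the test harness, the same TabularDataset construction looks like
# this; the path below is a placeholder for a real two-question TSV file.
def _build_example_dataset(path="questions.tsv"):
    question = data.Field(sequential=True)
    label = data.Field(sequential=False)
    fields = [("id", None), ("q1", question), ("q2", question), ("label", label)]
    return data.TabularDataset(path=path, format="tsv", fields=fields)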
|
py | 1a381ceafbacd67bc57c618035e73056736f4b6a | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The PIVX developers
# Copyright (c) 2020 The Supernode Coin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# -*- coding: utf-8 -*-
from io import BytesIO
from time import sleep
from test_framework.messages import CTransaction, CTxIn, CTxOut, COIN, COutPoint
from test_framework.mininode import network_thread_start
from test_framework.supernodecoin_node import SupernodeCoinTestNode
from test_framework.script import CScript, OP_CHECKSIG
from test_framework.test_framework import SupernodeCoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
p2p_port,
bytes_to_hex_str,
set_node_times,
sync_blocks,
sync_mempools,
)
# filter utxos based on first 5 bytes of scriptPubKey
def getDelegatedUtxos(utxos):
return [x for x in utxos if x["scriptPubKey"][:10] == '76a97b63d1']
class SupernodeCoin_ColdStakingTest(SupernodeCoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [[] for _ in range(self.num_nodes)]  # independent lists, so appending to one node's args does not affect the others
self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')
def setup_chain(self):
# Start with PoW cache: 200 blocks
self._initialize_chain()
self.enable_mocktime()
def init_test(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description)
self.DEFAULT_FEE = 0.05
# Setup the p2p connections and start up the network thread.
self.test_nodes = []
for i in range(self.num_nodes):
self.test_nodes.append(SupernodeCoinTestNode())
self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))
network_thread_start() # Start up network handling in another thread
# Let the test nodes get in sync
for i in range(self.num_nodes):
self.test_nodes[i].wait_for_verack()
def setColdStakingEnforcement(self, fEnable=True):
sporkName = "SPORK_17_COLDSTAKING_ENFORCEMENT"
# update spork 17 with node[0]
if fEnable:
self.log.info("Enabling cold staking with SPORK 17...")
res = self.activate_spork(0, sporkName)
else:
self.log.info("Disabling cold staking with SPORK 17...")
res = self.deactivate_spork(0, sporkName)
assert_equal(res, "success")
sleep(1)
# check that node[1] receives it
assert_equal(fEnable, self.is_spork_active(1, sporkName))
self.log.info("done")
def isColdStakingEnforced(self):
# verify from node[1]
return self.is_spork_active(1, "SPORK_17_COLDSTAKING_ENFORCEMENT")
def run_test(self):
self.description = "Performs tests on the Cold Staking P2CS implementation"
self.init_test()
NUM_OF_INPUTS = 20
INPUT_VALUE = 249
# nodes[0] - coin-owner
# nodes[1] - cold-staker
# 1) nodes[0] and nodes[2] mine 25 blocks each
# --------------------------------------------
print("*** 1 ***")
self.log.info("Mining 50 Blocks...")
for peer in [0, 2]:
for j in range(25):
self.mocktime = self.generate_pow(peer, self.mocktime)
sync_blocks(self.nodes)
# 2) node[1] sends his entire balance (50 mature rewards) to node[2]
# - node[2] stakes a block - node[1] locks the change
print("*** 2 ***")
self.log.info("Emptying node1 balance")
assert_equal(self.nodes[1].getbalance(), 50 * 250)
txid = self.nodes[1].sendtoaddress(self.nodes[2].getnewaddress(), (50 * 250 - 0.01))
assert (txid is not None)
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# lock the change output (so it's not used as stake input in generate_pos)
for x in self.nodes[1].listunspent():
assert (self.nodes[1].lockunspent(False, [{"txid": x['txid'], "vout": x['vout']}]))
# check that it cannot stake
sleep(1)
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
# 3) nodes[0] generates a owner address
# nodes[1] generates a cold-staking address.
# ---------------------------------------------
print("*** 3 ***")
owner_address = self.nodes[0].getnewaddress()
self.log.info("Owner Address: %s" % owner_address)
staker_address = self.nodes[1].getnewstakingaddress()
staker_privkey = self.nodes[1].dumpprivkey(staker_address)
self.log.info("Staking Address: %s" % staker_address)
# 4) Check enforcement.
# ---------------------
print("*** 4 ***")
# Check that SPORK 17 is disabled
assert (not self.isColdStakingEnforced())
self.log.info("Creating a stake-delegation tx before cold staking enforcement...")
assert_raises_rpc_error(-4, "Failed to accept tx in the memory pool (reason: cold-stake-inactive (code 16))\nTransaction canceled.",
self.nodes[0].delegatestake, staker_address, INPUT_VALUE, owner_address, False, False, True)
self.log.info("Good. Cold Staking NOT ACTIVE yet.")
# Enable SPORK
self.setColdStakingEnforcement()
# double check
assert (self.isColdStakingEnforced())
# 5) nodes[0] delegates a number of inputs for nodes[1] to stake em.
# ------------------------------------------------------------------
print("*** 5 ***")
self.log.info("First check warning when using external addresses...")
assert_raises_rpc_error(-5, "Only the owner of the key to owneraddress will be allowed to spend these coins",
self.nodes[0].delegatestake, staker_address, INPUT_VALUE, "yCgCXC8N5VThhfiaVuKaNLkNnrWduzVnoT")
self.log.info("Good. Warning triggered.")
self.log.info("Now force the use of external address creating (but not sending) the delegation...")
res = self.nodes[0].rawdelegatestake(staker_address, INPUT_VALUE, "yCgCXC8N5VThhfiaVuKaNLkNnrWduzVnoT", True)
assert(res is not None and res != "")
self.log.info("Good. Warning NOT triggered.")
self.log.info("Now delegate with internal owner address..")
self.log.info("Try first with a value (0.99) below the threshold")
assert_raises_rpc_error(-8, "Invalid amount",
self.nodes[0].delegatestake, staker_address, 0.99, owner_address)
self.log.info("Nice. it was not possible.")
self.log.info("Then try (creating but not sending) with the threshold value (1.00)")
res = self.nodes[0].rawdelegatestake(staker_address, 1.00, owner_address)
assert(res is not None and res != "")
self.log.info("Good. Warning NOT triggered.")
self.log.info("Now creating %d real stake-delegation txes..." % NUM_OF_INPUTS)
for i in range(NUM_OF_INPUTS):
res = self.nodes[0].delegatestake(staker_address, INPUT_VALUE, owner_address)
assert(res != None and res["txid"] != None and res["txid"] != "")
assert_equal(res["owner_address"], owner_address)
assert_equal(res["staker_address"], staker_address)
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
self.log.info("%d Txes created." % NUM_OF_INPUTS)
# check balances:
self.expected_balance = NUM_OF_INPUTS * INPUT_VALUE
self.expected_immature_balance = 0
self.checkBalances()
# 6) check that the owner (nodes[0]) can spend the coins.
# -------------------------------------------------------
print("*** 6 ***")
self.log.info("Spending back one of the delegated UTXOs...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
assert_equal(NUM_OF_INPUTS, len(delegated_utxos))
assert_equal(len(delegated_utxos), len(self.nodes[0].listcoldutxos()))
u = delegated_utxos[0]
txhash = self.spendUTXOwithNode(u, 0)
assert(txhash != None)
self.log.info("Good. Owner was able to spend - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
# check balances after spend.
self.expected_balance -= float(u["amount"])
self.checkBalances()
self.log.info("Balances check out after spend")
assert_equal(NUM_OF_INPUTS-1, len(self.nodes[0].listcoldutxos()))
# 7) check that the staker CANNOT use the coins to stake yet.
# He needs to whitelist the owner first.
# -----------------------------------------------------------
print("*** 7 ***")
self.log.info("Trying to generate a cold-stake block before whitelisting the owner...")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
self.log.info("Nice. Cold staker was NOT able to create the block yet.")
self.log.info("Whitelisting the owner...")
ret = self.nodes[1].delegatoradd(owner_address)
assert(ret)
self.log.info("Delegator address %s whitelisted" % owner_address)
# 8) check that the staker CANNOT spend the coins.
# ------------------------------------------------
print("*** 8 ***")
self.log.info("Trying to spend one of the delegated UTXOs with the cold-staking key...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
assert_greater_than(len(delegated_utxos), 0)
u = delegated_utxos[0]
assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed (Script failed an OP_CHECKCOLDSTAKEVERIFY operation",
self.spendUTXOwithNode, u, 1)
self.log.info("Good. Cold staker was NOT able to spend (failed OP_CHECKCOLDSTAKEVERIFY)")
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# 9) check that the staker can use the coins to stake a block with internal miner.
# --------------------------------------------------------------------------------
print("*** 9 ***")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], NUM_OF_INPUTS-1)
self.log.info("Generating one valid cold-stake block...")
self.mocktime = self.generate_pos(1, self.mocktime)
self.log.info("New block created by cold-staking. Trying to submit...")
newblockhash = self.nodes[1].getbestblockhash()
self.log.info("Block %s submitted" % newblockhash)
# Verify that nodes[0] accepts it
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())
assert_equal(newblockhash, self.nodes[0].getbestblockhash())
self.log.info("Great. Cold-staked block was accepted!")
# check balances after staked block.
self.expected_balance -= INPUT_VALUE
self.expected_immature_balance += (INPUT_VALUE + 250)
self.checkBalances()
self.log.info("Balances check out after staked block")
# 10) check that the staker can use the coins to stake a block with a rawtransaction.
# ----------------------------------------------------------------------------------
print("*** 10 ***")
self.log.info("Generating another valid cold-stake block...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, staker_privkey)
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
self.log.info("Block %s submitted." % new_block.hash)
assert(ret is None)
# Verify that nodes[0] accepts it
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())
assert_equal(new_block.hash, self.nodes[0].getbestblockhash())
self.log.info("Great. Cold-staked block was accepted!")
self.mocktime += 60
set_node_times(self.nodes, self.mocktime)
# check balances after staked block.
self.expected_balance -= INPUT_VALUE
self.expected_immature_balance += (INPUT_VALUE + 250)
self.checkBalances()
self.log.info("Balances check out after staked block")
# 11) check that the staker cannot stake a block changing the coinstake scriptPubkey.
# ----------------------------------------------------------------------------------
print("*** 11 ***")
self.log.info("Generating one invalid cold-stake block (changing first coinstake output)...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block (with dummy key)
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, "")
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
self.log.info("Block %s submitted." % new_block.hash)
assert("rejected" in ret)
# Verify that nodes[0] rejects it
sync_blocks(self.nodes)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, new_block.hash)
self.log.info("Great. Malicious cold-staked block was NOT accepted!")
self.checkBalances()
self.log.info("Balances check out after (non) staked block")
# 12) neither adding different outputs to the coinstake.
# ------------------------------------------------------
print("*** 12 ***")
self.log.info("Generating another invalid cold-stake block (adding coinstake output)...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, staker_privkey)
# Add output (dummy key address) to coinstake (taking 100 SUNO from the pot)
self.add_output_to_coinstake(new_block, 100)
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
self.log.info("Block %s submitted." % new_block.hash)
assert_equal(ret, "bad-p2cs-outs")
# Verify that nodes[0] rejects it
sync_blocks(self.nodes)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, new_block.hash)
self.log.info("Great. Malicious cold-staked block was NOT accepted!")
self.checkBalances()
self.log.info("Balances check out after (non) staked block")
# 13) Now node[0] gets mad and spends all the delegated coins, voiding the P2CS contracts.
# ----------------------------------------------------------------------------------------
self.log.info("Let's void the contracts.")
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
print("*** 13 ***")
self.log.info("Cancel the stake delegation spending the delegated utxos...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
# remove one utxo to spend later
final_spend = delegated_utxos.pop()
txhash = self.spendUTXOsWithNode(delegated_utxos, 0)
assert(txhash != None)
self.log.info("Good. Owner was able to void the stake delegations - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# deactivate SPORK 17 and check that the owner can still spend the last utxo
self.setColdStakingEnforcement(False)
assert (not self.isColdStakingEnforced())
txhash = self.spendUTXOsWithNode([final_spend], 0)
assert(txhash != None)
self.log.info("Good. Owner was able to void a stake delegation (with SPORK 17 disabled) - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
# check balances after big spend.
self.expected_balance = 0
self.checkBalances()
self.log.info("Balances check out after the delegations have been voided.")
# re-activate SPORK17
self.setColdStakingEnforcement()
assert (self.isColdStakingEnforced())
# 14) check that coinstaker is empty and can no longer stake.
# -----------------------------------------------------------
print("*** 14 ***")
self.log.info("Trying to generate one cold-stake block again...")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
self.log.info("Cigar. Cold staker was NOT able to create any more blocks.")
# 15) check balances when mature.
# -----------------------------------------------------------
print("*** 15 ***")
self.log.info("Staking 100 blocks to mature the cold stakes...")
for i in range(2):
for peer in [0, 2]:
for j in range(25):
self.mocktime = self.generate_pos(peer, self.mocktime)
sync_blocks(self.nodes)
self.expected_balance = self.expected_immature_balance
self.expected_immature_balance = 0
self.checkBalances()
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
txhash = self.spendUTXOsWithNode(delegated_utxos, 0)
assert (txhash != None)
self.log.info("Good. Owner was able to spend the cold staked coins - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
self.expected_balance = 0
self.checkBalances()
def checkBalances(self):
w_info = self.nodes[0].getwalletinfo()
self.log.info("OWNER - Delegated %f / Cold %f [%f / %f]" % (
float(w_info["delegated_balance"]), w_info["cold_staking_balance"],
float(w_info["immature_delegated_balance"]), w_info["immature_cold_staking_balance"]))
assert_equal(float(w_info["delegated_balance"]), self.expected_balance)
assert_equal(float(w_info["immature_delegated_balance"]), self.expected_immature_balance)
assert_equal(float(w_info["cold_staking_balance"]), 0)
w_info = self.nodes[1].getwalletinfo()
self.log.info("STAKER - Delegated %f / Cold %f [%f / %f]" % (
float(w_info["delegated_balance"]), w_info["cold_staking_balance"],
float(w_info["immature_delegated_balance"]), w_info["immature_cold_staking_balance"]))
assert_equal(float(w_info["delegated_balance"]), 0)
assert_equal(float(w_info["cold_staking_balance"]), self.expected_balance)
assert_equal(float(w_info["immature_cold_staking_balance"]), self.expected_immature_balance)
def spendUTXOwithNode(self, utxo, node_n):
new_addy = self.nodes[node_n].getnewaddress()
inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
out_amount = (float(utxo["amount"]) - self.DEFAULT_FEE)
outputs = {}
outputs[new_addy] = out_amount
spendingTx = self.nodes[node_n].createrawtransaction(inputs, outputs)
spendingTx_signed = self.nodes[node_n].signrawtransaction(spendingTx)
return self.nodes[node_n].sendrawtransaction(spendingTx_signed["hex"])
def spendUTXOsWithNode(self, utxos, node_n):
new_addy = self.nodes[node_n].getnewaddress()
inputs = []
outputs = {}
outputs[new_addy] = 0
for utxo in utxos:
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
outputs[new_addy] += float(utxo["amount"])
outputs[new_addy] -= self.DEFAULT_FEE
spendingTx = self.nodes[node_n].createrawtransaction(inputs, outputs)
spendingTx_signed = self.nodes[node_n].signrawtransaction(spendingTx)
return self.nodes[node_n].sendrawtransaction(spendingTx_signed["hex"])
def add_output_to_coinstake(self, block, value, peer=1):
coinstake = block.vtx[1]
if not hasattr(self, 'DUMMY_KEY'):
self.init_dummy_key()
coinstake.vout.append(
CTxOut(value * COIN, CScript([self.DUMMY_KEY.get_pubkey(), OP_CHECKSIG])))
coinstake.vout[1].nValue -= value * COIN
# re-sign coinstake
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(block.prevoutStake))
coinstake.vin[0] = CTxIn(prevout)
stake_tx_signed_raw_hex = self.nodes[peer].signrawtransaction(
bytes_to_hex_str(coinstake.serialize()))['hex']
block.vtx[1] = CTransaction()
block.vtx[1].from_hex(stake_tx_signed_raw_hex)
# re-sign block
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.re_sign_block()
if __name__ == '__main__':
SupernodeCoin_ColdStakingTest().main()
|
py | 1a381cffece244fa67a66b55ab568cc2b812dbf2 | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.12.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ProcessorsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def clear_state(self, id, **kwargs):
"""
Clears the state for a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.clear_state(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:return: ComponentStateEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.clear_state_with_http_info(id, **kwargs)
else:
(data) = self.clear_state_with_http_info(id, **kwargs)
return data
def clear_state_with_http_info(self, id, **kwargs):
"""
Clears the state for a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.clear_state_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:return: ComponentStateEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method clear_state" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `clear_state`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth']
return self.api_client.call_api('/processors/{id}/state/clear-requests', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ComponentStateEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_processor(self, id, **kwargs):
"""
Deletes a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_processor(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:param str version: The revision is used to verify the client is working with the latest version of the flow.
:param str client_id: If the client id is not specified, a new one will be generated. This value (whether specified or generated) is included in the response.
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_processor_with_http_info(id, **kwargs)
else:
(data) = self.delete_processor_with_http_info(id, **kwargs)
return data
def delete_processor_with_http_info(self, id, **kwargs):
"""
Deletes a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_processor_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:param str version: The revision is used to verify the client is working with the latest version of the flow.
:param str client_id: If the client id is not specified, a new one will be generated. This value (whether specified or generated) is included in the response.
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'version', 'client_id', 'disconnected_node_acknowledged']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_processor" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_processor`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'version' in params:
query_params.append(('version', params['version']))
if 'client_id' in params:
query_params.append(('clientId', params['client_id']))
if 'disconnected_node_acknowledged' in params:
query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth']
return self.api_client.call_api('/processors/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProcessorEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_processor(self, id, **kwargs):
"""
Gets a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_processor(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_processor_with_http_info(id, **kwargs)
else:
(data) = self.get_processor_with_http_info(id, **kwargs)
return data
def get_processor_with_http_info(self, id, **kwargs):
"""
Gets a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_processor_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_processor" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_processor`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth']
return self.api_client.call_api('/processors/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProcessorEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_processor_diagnostics(self, id, **kwargs):
"""
Gets diagnostics information about a processor
        Note: This endpoint is subject to change as NiFi and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_processor_diagnostics(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_processor_diagnostics_with_http_info(id, **kwargs)
else:
(data) = self.get_processor_diagnostics_with_http_info(id, **kwargs)
return data
def get_processor_diagnostics_with_http_info(self, id, **kwargs):
"""
Gets diagnostics information about a processor
        Note: This endpoint is subject to change as NiFi and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_processor_diagnostics_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_processor_diagnostics" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_processor_diagnostics`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth']
return self.api_client.call_api('/processors/{id}/diagnostics', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProcessorEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_processor_run_status_details(self, **kwargs):
"""
Submits a query to retrieve the run status details of all processors that are in the given list of Processor IDs
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_processor_run_status_details(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RunStatusDetailsRequestEntity body: The request for the processors that should be included in the results
:return: ProcessorsRunStatusDetailsEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_processor_run_status_details_with_http_info(**kwargs)
else:
(data) = self.get_processor_run_status_details_with_http_info(**kwargs)
return data
def get_processor_run_status_details_with_http_info(self, **kwargs):
"""
Submits a query to retrieve the run status details of all processors that are in the given list of Processor IDs
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_processor_run_status_details_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RunStatusDetailsRequestEntity body: The request for the processors that should be included in the results
:return: ProcessorsRunStatusDetailsEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_processor_run_status_details" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth']
return self.api_client.call_api('/processors/run-status-details/queries', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProcessorsRunStatusDetailsEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_property_descriptor(self, id, property_name, **kwargs):
"""
Gets the descriptor for a processor property
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_property_descriptor(id, property_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:param str property_name: The property name. (required)
        :param str client_id: If the client id is not specified, a new one will be generated. This value (whether specified or generated) is included in the response.
:return: PropertyDescriptorEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_property_descriptor_with_http_info(id, property_name, **kwargs)
else:
(data) = self.get_property_descriptor_with_http_info(id, property_name, **kwargs)
return data
def get_property_descriptor_with_http_info(self, id, property_name, **kwargs):
"""
Gets the descriptor for a processor property
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_property_descriptor_with_http_info(id, property_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:param str property_name: The property name. (required)
        :param str client_id: If the client id is not specified, a new one will be generated. This value (whether specified or generated) is included in the response.
:return: PropertyDescriptorEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'property_name', 'client_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_property_descriptor" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_property_descriptor`")
# verify the required parameter 'property_name' is set
if ('property_name' not in params) or (params['property_name'] is None):
raise ValueError("Missing the required parameter `property_name` when calling `get_property_descriptor`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'client_id' in params:
query_params.append(('clientId', params['client_id']))
if 'property_name' in params:
query_params.append(('propertyName', params['property_name']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth']
return self.api_client.call_api('/processors/{id}/descriptors', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PropertyDescriptorEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_state(self, id, **kwargs):
"""
Gets the state for a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_state(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:return: ComponentStateEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_state_with_http_info(id, **kwargs)
else:
(data) = self.get_state_with_http_info(id, **kwargs)
return data
def get_state_with_http_info(self, id, **kwargs):
"""
Gets the state for a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_state_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:return: ComponentStateEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_state" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_state`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth']
return self.api_client.call_api('/processors/{id}/state', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ComponentStateEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def terminate_processor(self, id, **kwargs):
"""
Terminates a processor, essentially \"deleting\" its threads and any active tasks
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.terminate_processor(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.terminate_processor_with_http_info(id, **kwargs)
else:
(data) = self.terminate_processor_with_http_info(id, **kwargs)
return data
def terminate_processor_with_http_info(self, id, **kwargs):
"""
Terminates a processor, essentially \"deleting\" its threads and any active tasks
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.terminate_processor_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method terminate_processor" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `terminate_processor`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth']
return self.api_client.call_api('/processors/{id}/threads', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProcessorEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_processor(self, id, body, **kwargs):
"""
Updates a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_processor(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:param ProcessorEntity body: The processor configuration details. (required)
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_processor_with_http_info(id, body, **kwargs)
else:
(data) = self.update_processor_with_http_info(id, body, **kwargs)
return data
def update_processor_with_http_info(self, id, body, **kwargs):
"""
Updates a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_processor_with_http_info(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:param ProcessorEntity body: The processor configuration details. (required)
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_processor" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_processor`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_processor`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth']
return self.api_client.call_api('/processors/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProcessorEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_run_status(self, id, body, **kwargs):
"""
Updates run status of a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_run_status(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:param ProcessorRunStatusEntity body: The processor run status. (required)
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_run_status_with_http_info(id, body, **kwargs)
else:
(data) = self.update_run_status_with_http_info(id, body, **kwargs)
return data
def update_run_status_with_http_info(self, id, body, **kwargs):
"""
Updates run status of a processor
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_run_status_with_http_info(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The processor id. (required)
:param ProcessorRunStatusEntity body: The processor run status. (required)
:return: ProcessorEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_run_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_run_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_run_status`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth']
return self.api_client.call_api('/processors/{id}/run-status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProcessorEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
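    # Minimal usage sketch (illustrative only): the ids below are hypothetical and the class is
    # assumed to be the swagger-generated ProcessorsApi instantiated with a configured ApiClient.
    #
    #   api = ProcessorsApi(api_client)
    #   entity = api.get_processor('processor-uuid')     # synchronous call, returns a ProcessorEntity
    #   api.update_processor('processor-uuid', entity)   # push back an edited ProcessorEntity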
|
py | 1a381da11e3ad72cb81052cc7c61e52f2854ae7c | def split_and_join(line):
    split_list = line.split(" ")  # str.split() breaks the string into a list of words.
    join_str = "-".join(split_list)  # str.join() joins the list elements with "-" into a single string.
return join_str
if __name__ == '__main__':
line = input()
    result = split_and_join(line)  # Calling split_and_join to split using str.split() and join using str.join()
print(result) |
py | 1a381f7f92f79a6805733d507344df2b2e093841 | from distutils.core import setup
setup(
name='python3-logstash',
packages=['logstash'],
version='0.4.7',
description='Python logging handler for Logstash.',
long_description=open('README.md').read(),
author='Israel Flores',
author_email='[email protected]',
url='https://github.com/israel-fl/python3-logstash',
classifiers=[
        'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Logging',
]
)
|
py | 1a381fa18e4a3d06ae85ab527ef516dd9e2c5c93 | #!/usr/bin/env python3
"""
Exercise 32: Flip a Dict
"Flip" a dictionary by swapping keys and values.
"""
def flip(dictionary):
    return {v: k for k, v in dictionary.items()}
if __name__ == '__main__':
d = {'a': 1, 'b': 2, 'c': 3}
print(f'{d} -> {flip(d)}')
|
py | 1a381fb75e33547ba3c2149596fcb738b0bead2c | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
----------
Compute upper limit on word perplexity for kenlm ngram models
Command : python3 compute_upper_ppl_kenlm.py --vocab_file [...] --kenlm_preds [...]
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import numpy
from utils import transform_asg
def compute_upper_limit_ppl_for_kenlm(known_words_file, kenlm_file):
with open(known_words_file, "r") as f:
known_words = set(list(map(transform_asg, f.readline().strip().split(" "))))
with open(kenlm_file, "r") as f:
sum_logp = 0
sum_logp_unk = 0
n_words = 0
n_words_unk = 0
n_letters = 0
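        # Each "Total" line of kenlm query output is tab-separated into per-token entries whose last
        # space-separated field is log10(prob); '|' marks a word boundary and '</s>' ends the sentence.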
for line in f:
if "Total" not in line:
continue
line = line.strip().split("\t")
word = ""
word_logp = 0
for token in line:
token_val = token.split("=")[0]
logp = float(token.split(" ")[-1])
if token_val == "|":
if word in known_words:
sum_logp += word_logp + numpy.log(numpy.power(10, logp))
n_words += 1
else:
sum_logp_unk += word_logp + numpy.log(numpy.power(10, logp))
n_words_unk += 1
word = ""
word_logp = 0
elif token_val == "</s>":
sum_logp += numpy.log(numpy.power(10, logp))
n_words += 1
else:
word += token_val
word_logp += numpy.log(numpy.power(10, logp))
n_letters += 1
if token_val == "</s>":
break
loss_letter = -(sum_logp + sum_logp_unk) / n_letters
ppl_word_no_unk = numpy.exp(-sum_logp / n_words)
ppl_word_unk = numpy.exp(-sum_logp_unk / n_words_unk)
ppl_word = numpy.exp(-(sum_logp + sum_logp_unk) / (n_words + n_words_unk))
print(
"Letter loss: {}, letter perplexity: {}".format(
loss_letter, numpy.exp(loss_letter)
)
)
print("Upper word perplexity for all words: {}".format(ppl_word))
print("Upper word perplexity for unknown words: {}".format(ppl_word_unk))
print(
"(Reported in the paper) "
"Upper word perplexity for known words: {}".format(ppl_word_no_unk)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Upper limit on word perplexity for kenlm predictions"
)
parser.add_argument(
"--vocab_file",
help="vocabulary of known words, use file "
"from --limit_vocab_file during word kenLM training.",
)
parser.add_argument(
"--kenlm_preds", help="file with kenlm predictions after query run"
)
args = parser.parse_args()
print("Evaluate file {}".format(args.kenlm_preds))
compute_upper_limit_ppl_for_kenlm(args.vocab_file, args.kenlm_preds)
|
py | 1a381fcd1c4946e470c2286c56c7ebec679894bb | # Generated by Django 4.0.3 on 2022-04-06 04:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0005_rename_tutor_name_course_extra_txt_and_more'),
]
operations = [
migrations.RemoveField(
model_name='profileuser',
name='shop_courses',
),
migrations.RemoveField(
model_name='user',
name='initial_test_performed',
),
migrations.CreateModel(
name='ViewVideo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.video')),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': ('created',),
'abstract': False,
},
),
]
|
py | 1a381ff271df85fe15b83790c71f05e350617f0c | from functools import wraps
from typing import Optional
def _embed_ipython_shell(ns: Optional[dict] = None):
if ns is None:
ns = {}
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.terminal.ipapp import load_default_config
@wraps(_embed_ipython_shell)
def wrapper(namespace=ns, banner=''):
config = load_default_config()
InteractiveShellEmbed.clear_instance()
shell = InteractiveShellEmbed.instance(
banner1=banner, user_ns=namespace, config=config
)
shell()
return wrapper
def start_python_console(namespace: Optional[dict] = None, banner: str = ''):
if namespace is None:
namespace = {}
try:
shell = _embed_ipython_shell()
shell(namespace, banner)
except SystemExit: # raised when invoking exit() hence safe to ignore
pass
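# Minimal usage sketch (assumes IPython is importable; the namespace below is purely illustrative).
if __name__ == '__main__':
    # Drop into an interactive IPython shell pre-populated with a custom namespace.
    start_python_console({'answer': 42}, banner='demo console')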
|
py | 1a38200afca8b22d06ea1de1d45591b368d2b986 | import argparse
import os
import random
import string
import time
from abc import ABC, abstractmethod
from diffimg import diff
from selenium import webdriver
from selenium.webdriver import ChromeOptions, FirefoxOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
class UITester(ABC):
# This image diff threshold is set to an upper bound of 10% for now. We should try our best
# to at least maintain this upper bound.
_SCREENSHOT_DIFF_THRESHOLD_PERCENT = 10
_BASE_PATH = os.path.dirname(os.path.abspath(__file__))
_DEFAULT_USERNAME = os.getenv('CODALAB_USERNAME', 'codalab')
_DEFAULT_PASSWORD = os.getenv('CODALAB_PASSWORD', 'codalab')
def __init__(self, test_name, base_url='http://localhost'):
self._test_name = test_name
self._base_url = base_url
@abstractmethod
def test(self):
pass
def run(self):
def add_headless(browser_options):
if args.headless:
browser_options.add_argument('--headless')
# Test Chrome
options = ChromeOptions()
add_headless(options)
self.browser = webdriver.Chrome(chrome_options=options)
self.test()
self.browser.close()
# Test Firefox
options = FirefoxOptions()
add_headless(options)
self.browser = webdriver.Firefox(log_path='', firefox_options=options)
self.test()
self.browser.close()
    def login(self, username=_DEFAULT_USERNAME, password=_DEFAULT_PASSWORD):
self.browser.get(self.get_url('/home'))
self.click(By.LINK_TEXT, 'LOGIN')
self.fill_field(By.ID, 'id_login', username)
self.fill_field(By.ID, 'id_password', password, press_enter=True)
def add_run_to_worksheet(self, command, use_keyboard_shortcut=False):
if use_keyboard_shortcut:
# ar = Add a new run
self.send_keyboard_shortcut('ar')
else:
self.click(By.CSS_SELECTOR, '[aria-label="Add New Run"]')
self.pause()
self.scroll_to_bottom('worksheet_container')
active_textbox = self.browser.switch_to.active_element
active_textbox.send_keys(command)
self.pause()
if use_keyboard_shortcut:
self.save_edit_keyboard_shortcut(active_textbox)
else:
self.click(By.XPATH, "//span[.='Confirm']")
self.longer_pause()
def rerun_last_bundle(self, use_keyboard_shortcut=False):
if use_keyboard_shortcut:
# Shift + g = Jump to the last bundle
self.send_keyboard_shortcut(Keys.SHIFT + 'g')
# Enter = Expand bundle
self.send_keyboard_shortcut(Keys.ENTER)
# an = Edit and add a rerun
# This keyboard shortcut only works if the bundle is expanded.
self.send_keyboard_shortcut('an')
else:
self.expand_last_bundle()
self.scroll_to_bottom('worksheet_container')
self.click(By.XPATH, "//span[.='Edit and Rerun']")
self.pause()
active_textbox = self.browser.switch_to.active_element
active_textbox.send_keys(' rerunning bundle...')
if use_keyboard_shortcut:
self.save_edit_keyboard_shortcut(active_textbox)
else:
self.scroll_to_bottom('worksheet_container')
self.click(By.XPATH, "//span[.='Confirm']")
self.longer_pause()
def edit_last_bundle_metadata(self, name, description, permission):
def edit_field(field, text):
field.click()
self.browser.switch_to.active_element.send_keys(text)
self.browser.switch_to.active_element.send_keys(Keys.ENTER)
# Edit name and description
self.expand_last_bundle()
editable_fields = self.browser.find_elements(By.CLASS_NAME, 'editable-field')
edit_field(editable_fields[-2], name)
edit_field(editable_fields[-1], description)
# Edit bundle permission
self.scroll_to_bottom('worksheet_container')
self.browser.find_elements_by_tag_name('svg')[-1].click()
select_boxes = self.browser.find_elements_by_tag_name('select')
self.select_option(select_boxes[-1], permission)
self.longer_pause()
def toggle_web_terminal(self, use_keyboard_shortcut=False):
if use_keyboard_shortcut:
# Shift + c = Show/hide web terminal
self.send_keyboard_shortcut(Keys.SHIFT + 'c')
else:
self.browser.find_element_by_id('terminal-button').click()
self.pause()
def edit_source(self, text, use_keyboard_shortcut=False):
if use_keyboard_shortcut:
# Shift + e = Edit source mode
self.send_keyboard_shortcut(Keys.SHIFT + 'e')
else:
self.click(By.CSS_SELECTOR, '[aria-label="Edit Source"]')
source_field = self.browser.switch_to.active_element
source_field.send_keys(Keys.ENTER + Keys.ENTER)
source_field.send_keys(text)
if use_keyboard_shortcut:
self.pause()
self.save_edit_keyboard_shortcut(source_field)
else:
self.click(By.CSS_SELECTOR, '[aria-label="Save Edit"]')
self.longer_pause()
def expand_last_bundle(self):
self.scroll_to_bottom('worksheet_container')
self.browser.find_elements_by_tag_name('button')[-1].click()
self.pause()
def add_text_to_worksheet(self, text, use_keyboard_shortcut=False):
if use_keyboard_shortcut:
# at = Add text
self.send_keyboard_shortcut('at')
else:
self.click(By.CSS_SELECTOR, '[aria-label="Add Text"]')
self.pause()
self.scroll_to_bottom('worksheet_container')
last_text_box = self.browser.find_elements_by_tag_name('textarea')[-1]
self.focus_and_send_keys(last_text_box, text)
if use_keyboard_shortcut:
self.save_edit_keyboard_shortcut(last_text_box)
else:
self.click(By.XPATH, "//span[.='Save']")
self.pause()
def save_edit_keyboard_shortcut(self, element):
# Control + Enter = Save current edit
webdriver.ActionChains(self.browser).move_to_element(element).key_down(
Keys.CONTROL
).key_down(Keys.ENTER).key_up(Keys.ENTER).key_up(Keys.CONTROL).perform()
def refresh_worksheet(self):
# Shift + r = Refresh worksheet
self.send_keyboard_shortcut(Keys.SHIFT + 'r')
def pause(self):
time.sleep(1)
def longer_pause(self):
time.sleep(3)
def set_browser_size(self, width=1500, height=1200):
self.browser.set_window_position(0, 0)
self.browser.set_window_size(width, height)
def click(self, by, selector):
self.browser.find_element(by, selector).click()
def focus_and_send_keys(self, element, keys):
webdriver.ActionChains(self.browser).move_to_element(element).send_keys(keys).perform()
def send_keyboard_shortcut(self, keys):
self.browser.find_element(By.TAG_NAME, 'html').send_keys(keys)
def fill_field(self, by, selector, text, press_enter=False):
textbox = self.browser.find_element(by, selector)
textbox.send_keys(text)
if press_enter:
textbox.send_keys(Keys.ENTER)
def wait_until_worksheet_content_loads(self):
self.wait_until_page_loads('ws-item')
# Wait until placeholder items have been resolved.
by = By.CLASS_NAME
selector = "codalab-item-placeholder"
timeout_message = 'Timed out while waiting for {}: {} to be hidden.'.format(by, selector)
WebDriverWait(self.browser, 300).until(
EC.invisibility_of_element_located((by, selector)), message=timeout_message
)
def wait_until_page_loads(self, selector, by=By.CLASS_NAME):
timeout_message = 'Timed out while waiting for {}: {}.'.format(by, selector)
return WebDriverWait(self.browser, 15).until(
EC.presence_of_element_located((by, selector)), message=timeout_message
)
def switch_to_new_tab(self):
# Just give enough time for the new tab to get opened
self.pause()
self.browser.switch_to.window(
self.browser.window_handles[len(self.browser.window_handles) - 1]
)
def select_option(self, element, to_select):
for option in element.find_elements_by_tag_name('option'):
if option.text in to_select:
option.click()
break
def constructPartialSelector(self, by, partial_selector):
return '//*[contains(@{}, "{}")]'.format(by, partial_selector)
def output_images(self, selector, num_of_screenshots=10):
output_dir = self._get_output_dir('out')
element = "document.getElementById('{}')".format(selector)
scroll_height = float(self.browser.execute_script('return {}.scrollHeight'.format(element)))
for i in range(num_of_screenshots):
y = (i / num_of_screenshots) * scroll_height
self.browser.execute_script('{}.scrollTo(0, {})'.format(element, y))
path = os.path.join(output_dir, '{}{}.png'.format(self._test_name, i + 1))
self.browser.save_screenshot(path)
def compare_to_baselines(self, num_of_screenshots=10):
out_dir = self._get_output_dir('out')
baselines_dir = self._get_output_dir('baselines')
diff_dir = self._get_output_dir('diff')
has_failed = False
for i in range(num_of_screenshots):
screenshot_filename = '{}{}.png'.format(self._test_name, i + 1)
out_img = os.path.join(out_dir, screenshot_filename)
baseline_img = os.path.join(baselines_dir, screenshot_filename)
diff_img = os.path.join(diff_dir, screenshot_filename)
diff_percent = (
diff(baseline_img, out_img, delete_diff_file=True, ignore_alpha=True) * 100
)
print(
'{}% difference in {} for {}'.format(
diff_percent, self._get_browser_name(), screenshot_filename
)
)
if diff_percent > UITester._SCREENSHOT_DIFF_THRESHOLD_PERCENT:
# If an image comparison has failed, generate diff and print an error message in red
has_failed = True
diff(
out_img,
baseline_img,
delete_diff_file=False,
diff_img_file=diff_img,
ignore_alpha=True,
)
print(
'\033[91mScreenshot comparison failed in {} for {} by {}%\033[0m'.format(
self._get_browser_name(), screenshot_filename, diff_percent
)
)
assert not has_failed
def get_url(self, path):
return '{}/{}'.format(self._base_url, path)
def make_name_unique(self, name):
# Appends some unique identifier to the string input
random_id = ''.join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(16)
)
return name + random_id
def scroll_to_bottom(self, selector):
element = "document.getElementById('{}')".format(selector)
scroll_height = float(self.browser.execute_script('return {}.scrollHeight'.format(element)))
self.browser.execute_script('{}.scrollTo(0, {})'.format(element, scroll_height))
def _get_partial_matched_elements(self, by, selector):
return self.browser.find_elements(By.XPATH, self.constructPartialSelector(by, selector))
def _get_output_dir(self, folder_name):
def create_path(path):
if not os.path.isdir(path):
os.mkdir(path)
output_dir = os.path.join(UITester._BASE_PATH, folder_name)
create_path(output_dir)
output_dir = os.path.join(output_dir, self._test_name)
create_path(output_dir)
output_dir = os.path.join(output_dir, self._get_browser_name())
create_path(output_dir)
return output_dir
def _get_browser_name(self):
return self.browser.capabilities['browserName']
class WorksheetTest(UITester):
def __init__(self):
super().__init__('worksheet')
def test(self):
self.login()
self.wait_until_worksheet_content_loads()
# wait for small worksheet to be resolved from place holder item
by = By.LINK_TEXT
selector = "Small Worksheet [cl_small_worksheet]"
timeout_message = 'Timed out while waiting for {}: {}.'.format(by, selector)
WebDriverWait(self.browser, 10).until(
EC.presence_of_element_located((by, selector)), message=timeout_message
)
self.click(By.LINK_TEXT, 'Small Worksheet [cl_small_worksheet]')
self.switch_to_new_tab()
self.wait_until_worksheet_content_loads()
self.output_images('worksheet_container')
self.compare_to_baselines()
class EditWorksheetTest(UITester):
def __init__(self):
super().__init__('edit_worksheet')
def test(self):
self.set_browser_size()
self.login()
self.wait_until_worksheet_content_loads()
# Create a new worksheet
self.click(By.XPATH, '//*[@title="New Worksheet"]')
self.fill_field(By.ID, 'name', self.make_name_unique('test-worksheet'))
self.browser.find_element(By.XPATH, "//span[.='Confirm']").find_element(
By.XPATH, './..'
).click()
self.longer_pause()
# Add a title to the worksheet
self.click(By.CLASS_NAME, 'editable-field')
self.browser.switch_to.active_element.send_keys(
'Some Random Title for the UI Test Edit Worksheet in CodaLab'
)
self.browser.switch_to.active_element.send_keys(Keys.ENTER)
# Add text to the new worksheet
self.add_text_to_worksheet('This is some text. ' * 25)
# Add a bundle and rerun it
self.add_run_to_worksheet('echo hello')
self.rerun_last_bundle()
# Edit metadata of the last bundle
self.edit_last_bundle_metadata(
'New Name Given to this Bundle', 'New Description given to this bundle. ' * 5, 'none'
)
# Test keyboard shortcuts
self.add_run_to_worksheet('echo goodbye', use_keyboard_shortcut=True)
self.rerun_last_bundle(use_keyboard_shortcut=True)
# Select the last two bundles and delete them
# shift + g = Jump to the last bundle
self.send_keyboard_shortcut(Keys.SHIFT + 'g')
# x = Select the bundle row
self.send_keyboard_shortcut('x')
self.send_keyboard_shortcut(Keys.ARROW_UP)
self.send_keyboard_shortcut('x')
# Backspace = Attempt to delete the selected bundles
self.send_keyboard_shortcut(Keys.BACKSPACE)
self.browser.find_elements_by_tag_name('button')[-1].click()
# Wait for bundles to be deleted before proceeding
self.longer_pause()
# Add some more text via keyboard shortcuts
self.add_text_to_worksheet('Some more text. ' * 25, use_keyboard_shortcut=True)
# Edit source
self.edit_source('The End.', use_keyboard_shortcut=True)
# Refresh the page to ensure that new changes are persisted
self.browser.refresh()
self.wait_until_worksheet_content_loads()
self.toggle_web_terminal(use_keyboard_shortcut=True)
self.refresh_worksheet()
# Take screenshots and compare to the existing baseline images
num_of_screenshots = 1
self.output_images('worksheet_container', num_of_screenshots)
self.compare_to_baselines(num_of_screenshots)
def main():
# Add UI tests to the list to run them
all_tests = [
WorksheetTest(),
# TODO: this test is failing intermittently in GHA. Disabling for now.
# EditWorksheetTest()
]
start_time = time.time()
for test in all_tests:
test.run()
duration_seconds = time.time() - start_time
print('Success.')
print('\n--- Completion Time: {} minutes---'.format(duration_seconds / 60))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run frontend automation tests for the CodaLab UI')
parser.add_argument(
'--headless', action='store_true', help='Whether to test using headless browsers'
)
args = parser.parse_args()
main()
|
py | 1a3820362a980e358cd25d7ee2adc5ad877bd92f | import pytest
from shift_array import insert_shift_array
def test_insert_shift_array_even():
""" test the insert_shift_array function with an even # of elements"""
output_array = insert_shift_array([1,2,3,4], 5)
assert output_array[2] == 5
def test_insert_shift_array_odd():
""" test the insert_shift_array function with an odd # of elements"""
output_array = insert_shift_array([4,8,15,23,42], 16)
assert output_array[3] == 16
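# A plausible reference implementation consistent with these tests, kept as a comment because the
# real shift_array module is not shown here (illustrative sketch only):
#
#   def insert_shift_array(arr, value):
#       middle = (len(arr) + 1) // 2  # index 2 for a 4-element list, index 3 for 5 elements
#       return arr[:middle] + [value] + arr[middle:]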
|
py | 1a38216c29f0f5362e0641bbf898b8d85bcefeab | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Cedicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_no_witness_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitVersion1SignatureHash,
SignatureHash,
hash160,
)
from test_framework.test_framework import CedicoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
softfork_active,
hex_str_to_bytes,
assert_raises_rpc_error,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
SEGWIT_HEIGHT = 120
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize())
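    # BIP141: block weight = 3 * base_size + total_size, and virtual size = ceil(weight / 4).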
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_message(msg_witness_tx(tx) if with_witness else msg_tx(tx))
p2p.sync_with_ping()
assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_message(msg_block(block) if with_witness else msg_no_witness_block(block))
p2p.sync_with_ping()
assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
if success:
self.wait_for_getdata(timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(CedicoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
        # This test covers SegWit both pre- and post-activation, so use the normal BIP9 activation.
self.extra_args = [
["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT)],
["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-segwitheight=-1"]
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
self.log.info("Starting tests before segwit activation")
self.segwit_active = False
self.test_non_witness_transaction()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.test_getblocktemplate_before_lockin()
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_standardness_v0()
self.log.info("Advancing to segwit activation")
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
self.test_superfluous_witness()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit active = {})".format(func.__name__, self.segwit_active))
# Assert segwit status is as expected
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
self.sync_blocks()
# Assert segwit status is as expected at end of subtest
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
return func_wrapper
@subtest
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=1)
block.solve()
self.test_node.send_message(msg_no_witness_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert tx.hash in self.nodes[0].getrawmempool()
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
self.nodes[0].generate(1)
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert tx.sha256 != tx.calc_sha256(with_witness=True)
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_no_witness_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
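# 2 is the MSG_BLOCK inv type; OR-ing in MSG_WITNESS_FLAG marks a getdata as a
# request for the witness-serialized form of the block.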
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block(version=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not self.segwit_active:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(), wit_block.serialize())
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert len(block.vtx[0].wit.vtxinwit) == 1
assert len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(), block.serialize())
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize()))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize())
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert block4.sha256 not in self.old_node.getdataset
@subtest
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest
def test_getblocktemplate_before_lockin(self):
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
if node == self.nodes[2]:
# If this is a non-segwit node, we should not get a witness
# commitment.
assert 'default_witness_commitment' not in gbt_results
else:
# For segwit-aware nodes, check the witness
# commitment is correct.
assert 'default_witness_commitment' in gbt_results
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
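# The witness merkle root is built from wtxids: the coinbase's wtxid is
# defined to be 0, and the wallet transaction here carries no witness, so its
# wtxid equals its txid -- hence the two leaves used above.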
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, script.hex())
# Clear out the mempool
self.nodes[0].generate(1)
self.sync_blocks()
@subtest
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert self.old_node.last_message["getdata"].inv[0].type == 1
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
# Delivering this transaction with witness should fail (no matter who
# it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, but not before."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
self.sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if not self.segwit_active:
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
assert not softfork_active(self.nodes[0], 'segwit')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(SEGWIT_HEIGHT - height - 2)
assert not softfork_active(self.nodes[0], 'segwit')
self.nodes[0].generate(1)
assert softfork_active(self.nodes[0], 'segwit')
self.segwit_active = True
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
self.sync_blocks()
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older cedicoind nodes that are not
# segwit-aware would also reject this for failing CLEANSTACK.
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert msg_block(block).serialize() != msg_no_witness_block(block).serialize()
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert len(block_3.vtx[0].vout) == 4 # 3 OP_returns
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
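# A 5,000,000-byte coinbase witness push adds ~5M weight units, i.e. roughly
# 1.25M vbytes, so the block's virtual size is now well over the 1MB limit.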
assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() != block.hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() == block.hash
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert len(self.utxo) > 0
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert parent_tx.vout[0].nValue > 0
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
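# Each witness byte adds 1 weight unit (1/4 vbyte), so 4 extra witness bytes
# are needed per missing vbyte to push the block just past MAX_BLOCK_BASE_SIZE.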
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert len(block.serialize()) > 2 * 1024 * 1024
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() != block.hash
# Now redo commitment with the standard nonce, but let cedicoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(block_2.serialize().hex())
# Tip should not advance!
assert self.nodes[0].getbestblockhash() != block_2.hash
@subtest
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
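# (Each 520-byte push serializes as OP_PUSHDATA2 + 2-byte length + 520 bytes
# of data = 523 bytes; 19 * 523 = 9937, plus 63 OP_DROPs and OP_TRUE = 10001.)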
long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE])
assert len(long_witness_program) == MAX_PROGRAM_LENGTH + 1
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE])
assert len(witness_program) == MAX_PROGRAM_LENGTH
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert tx.vout[0].nValue >= 0
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
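# Note: the witness vector above is written out exactly as constructed, so a
# mismatch between len(vin) and len(wit.vtxinwit) survives serialization --
# which is precisely what the following tests rely on.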
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add a witness that is too large for IsStandard and check that it does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex())
assert vsize != raw_tx["size"]
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit versions are non-standard to spend, but valid in blocks.
Sending to future segwit versions is always allowed.
Can run this before and after segwit activation."""
NUM_SEGWIT_VERSIONS = 17 # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
self.sync_blocks()
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
self.sync_blocks()
assert len(self.nodes[0].getrawmempool()) == 0
# Finally, verify that version 0 -> version 1 transactions
# are standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
# Gets accepted to both policy-enforcing nodes and others.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
self.sync_blocks()
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
self.sync_blocks()
@subtest
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = ECKey()
key.generate(False)
pubkey = key.get_pubkey().get_bytes()
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
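# P2WPKH inputs are signed with the BIP143 (segwit v0) signature hash, which
# uses the P2PKH-style script code and commits to the exact amount being spent.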
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
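# (BIP143 signatures commit to the input amount, so signing with the wrong
# value must make the signature fail validation.)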
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert len(temp_utxos) > num_inputs
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, e.g. by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
self.sync_blocks()
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
# Restart with the new binary
self.stop_node(2)
self.start_node(2, extra_args=["-segwitheight={}".format(SEGWIT_HEIGHT)])
connect_nodes(self.nodes[0], 2)
self.sync_blocks()
# Make sure that this peer thinks segwit has activated.
assert softfork_active(self.nodes[2], 'segwit')
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
while height >= 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
height -= 1
@subtest
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
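        # Each bare CHECKMULTISIG is counted as 20 sigops and each CHECKSIG as 1,
        # so a single spend of these outputs contributes 20*5 + 193 = 293 witness sigops.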
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert extra_sigops_available < 100 # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
self.sync_blocks()
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_superfluous_witness(self):
        # Serialization of a tx that always writes the segwit marker and a flag byte of 3
def serialize_with_bogus_witness(tx):
flags = 3
r = b""
r += struct.pack("<i", tx.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(tx.vin)
r += ser_vector(tx.vout)
if flags & 1:
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
return r
class msg_bogus_tx(msg_tx):
def serialize(self):
return serialize_with_bogus_witness(self.tx)
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
self.nodes[0].generate(1)
unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('bcrt'))
raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Superfluous witness record']):
self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
self.nodes[0].p2p.sync_with_ping()
raw = self.nodes[0].signrawtransactionwithwallet(raw)
assert raw['complete']
raw = raw['hex']
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
self.nodes[0].p2p.sync_with_ping()
if __name__ == '__main__':
SegWitTest().main()
|
py | 1a3821b26ef0ae55024ed8a2df0c698b20384ed6 | from string import punctuation, digits
import numpy as np
import random
# Part I
#pragma: coderesponse template
def get_order(n_samples):
try:
with open(str(n_samples) + '.txt') as fp:
line = fp.readline()
return list(map(int, line.split(',')))
except FileNotFoundError:
random.seed(1)
indices = list(range(n_samples))
random.shuffle(indices)
return indices
#pragma: coderesponse end
#pragma: coderesponse template
def hinge_loss_single(feature_vector, label, theta, theta_0):
"""
Finds the hinge loss on a single data point given specific classification
parameters.
Args:
feature_vector - A numpy array describing the given data point.
label - A real valued number, the correct classification of the data
point.
theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A real number representing the hinge loss associated with the
given data point and parameters.
"""
# Your code here
""" My solution:
return np.maximum(0, 1 - label * (np.dot(feature_vector, theta) + theta_0))
"""
# Instructor's solution: (same)
y = theta @ feature_vector + theta_0
return max(0, 1 - y * label)
#pragma: coderesponse end
#pragma: coderesponse template
def hinge_loss_full(feature_matrix, labels, theta, theta_0):
"""
Finds the total hinge loss on a set of data given specific classification
parameters.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A real number representing the hinge loss associated with the
given dataset and parameters. This number should be the average hinge
loss across all of the points in the feature matrix.
"""
# Your code here
""" My solution:
k = len(feature_matrix)
total = 0
for i in range(k):
total += hinge_loss_single(feature_matrix[i], labels[i], theta, theta_0)
return total / k
"""
# Instructor's solution: (same, though much cleaner)
ys = feature_matrix @ theta + theta_0
loss = np.maximum(1 - ys * labels, np.zeros(len(labels)))
return np.mean(loss)
#pragma: coderesponse end
#pragma: coderesponse template
def perceptron_single_step_update(
feature_vector,
label,
current_theta,
current_theta_0):
"""
Properly updates the classification parameter, theta and theta_0, on a
single step of the perceptron algorithm.
Args:
feature_vector - A numpy array describing a single data point.
label - The correct classification of the feature vector.
current_theta - The current theta being used by the perceptron
algorithm before this update.
current_theta_0 - The current theta_0 being used by the perceptron
algorithm before this update.
Returns: A tuple where the first element is a numpy array with the value of
theta after the current update has completed and the second element is a
real valued number with the value of theta_0 after the current updated has
completed.
"""
# Your code here
""" My solution:
epsilon = 1e-9
if label * (np.dot(current_theta, feature_vector) + current_theta_0) \
<= epsilon:
theta = current_theta + label * feature_vector
theta_0 = current_theta_0 + label
return theta, theta_0
else:
return current_theta, current_theta_0
"""
# Instructor's solution: (same)
if label * (np.dot(current_theta, feature_vector) + current_theta_0) <= 1e-7:
return (current_theta + label * feature_vector, current_theta_0 + label)
return (current_theta, current_theta_0)
#pragma: coderesponse end
#pragma: coderesponse template
def perceptron(feature_matrix, labels, T):
"""
Runs the full perceptron algorithm on a given set of data. Runs T
    iterations through the data set; there is no need to worry about
stopping early.
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
NOTE: Iterate the data matrix by the orders returned by get_order(feature_matrix.shape[0])
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the perceptron algorithm
should iterate through the feature matrix.
Returns: A tuple where the first element is a numpy array with the value of
theta, the linear classification parameter, after T iterations through the
feature matrix and the second element is a real number with the value of
theta_0, the offset classification parameter, after T iterations through
the feature matrix.
"""
# Your code here
""" My solution:
n = len(feature_matrix[0])
theta = np.zeros(n)
theta_0 = 0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
theta, theta_0 = perceptron_single_step_update(feature_matrix[i],
labels[i],
theta, theta_0)
return theta, theta_0
"""
# Instructor's solution: (same)
(nsamples, nfeatures) = feature_matrix.shape
theta = np.zeros(nfeatures)
theta_0 = 0.0
for t in range(T):
for i in get_order(nsamples):
theta, theta_0 = perceptron_single_step_update(
feature_matrix[i], labels[i], theta, theta_0)
return (theta, theta_0)
#pragma: coderesponse end
#pragma: coderesponse template
def average_perceptron(feature_matrix, labels, T):
"""
Runs the average perceptron algorithm on a given set of data. Runs T
    iterations through the data set; there is no need to worry about
stopping early.
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
NOTE: Iterate the data matrix by the orders returned by get_order(feature_matrix.shape[0])
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the perceptron algorithm
should iterate through the feature matrix.
Returns: A tuple where the first element is a numpy array with the value of
the average theta, the linear classification parameter, found after T
iterations through the feature matrix and the second element is a real
number with the value of the average theta_0, the offset classification
parameter, found after T iterations through the feature matrix.
Hint: It is difficult to keep a running average; however, it is simple to
find a sum and divide.
"""
# Your code here
""" My solution:
n = len(feature_matrix[0])
k = len(get_order(feature_matrix.shape[0]))
theta = np.zeros(n)
theta_0 = 0
theta_sum = np.zeros(n)
theta_0_sum = 0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
theta, theta_0 = perceptron_single_step_update(feature_matrix[i],
labels[i],
theta, theta_0)
theta_sum += theta
theta_0_sum += theta_0
return theta_sum/(k*T), theta_0_sum/(k*T)
"""
# Instructor's solution: (same)
(nsamples, nfeatures) = feature_matrix.shape
theta = np.zeros(nfeatures)
theta_sum = np.zeros(nfeatures)
theta_0 = 0.0
theta_0_sum = 0.0
for t in range(T):
for i in get_order(nsamples):
theta, theta_0 = perceptron_single_step_update(
feature_matrix[i], labels[i], theta, theta_0)
theta_sum += theta
theta_0_sum += theta_0
return (theta_sum / (nsamples * T), theta_0_sum / (nsamples * T))
#pragma: coderesponse end
#pragma: coderesponse template
def pegasos_single_step_update(
feature_vector,
label,
L,
eta,
current_theta,
current_theta_0):
"""
Properly updates the classification parameter, theta and theta_0, on a
single step of the Pegasos algorithm
Args:
feature_vector - A numpy array describing a single data point.
label - The correct classification of the feature vector.
        L - The lambda value being used to update the parameters.
eta - Learning rate to update parameters.
current_theta - The current theta being used by the Pegasos
algorithm before this update.
current_theta_0 - The current theta_0 being used by the
Pegasos algorithm before this update.
Returns: A tuple where the first element is a numpy array with the value of
theta after the current update has completed and the second element is a
    real valued number with the value of theta_0 after the current update has
completed.
"""
# Your code here
""" My solution:
epsilon = 1e-7
if label * (np.dot(current_theta, feature_vector) + current_theta_0) - 1 \
<= epsilon:
theta = (1 - eta*L)*current_theta + eta*label*feature_vector
theta_0 = current_theta_0 + eta*label
else:
theta = (1 - eta*L)*current_theta
theta_0 = current_theta_0
return theta, theta_0
"""
# Instructor's solution: (uses 0 instead of epsilon)
mult = 1 - (eta * L)
if label * (np.dot(feature_vector, current_theta) + current_theta_0) <= 1:
return ((mult * current_theta) + (eta * label * feature_vector),
(current_theta_0) + (eta * label))
    return (mult * current_theta, current_theta_0)
#pragma: coderesponse end
#pragma: coderesponse template
def pegasos(feature_matrix, labels, T, L):
"""
Runs the Pegasos algorithm on a given set of data. Runs T
    iterations through the data set; there is no need to worry about
stopping early.
For each update, set learning rate = 1/sqrt(t),
where t is a counter for the number of updates performed so far (between 1
and nT inclusive).
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the algorithm
should iterate through the feature matrix.
        L - The lambda value being used to update the Pegasos
algorithm parameters.
Returns: A tuple where the first element is a numpy array with the value of
the theta, the linear classification parameter, found after T
iterations through the feature matrix and the second element is a real
number with the value of the theta_0, the offset classification
parameter, found after T iterations through the feature matrix.
"""
# Your code here
""" My solution:
n = len(feature_matrix[0])
theta = np.zeros(n)
theta_0 = 0
time = 0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
time += 1
eta = 1 / np.sqrt(time)
theta, theta_0 = pegasos_single_step_update(feature_matrix[i],
labels[i], L, eta,
theta, theta_0)
return theta, theta_0
"""
# Instructor's solution: (same)
(nsamples, nfeatures) = feature_matrix.shape
theta = np.zeros(nfeatures)
theta_0 = 0
count = 0
for t in range(T):
for i in get_order(nsamples):
count += 1
eta = 1.0 / np.sqrt(count)
(theta, theta_0) = pegasos_single_step_update(feature_matrix[i],
labels[i], L, eta,
theta, theta_0)
return (theta, theta_0)
#pragma: coderesponse end
# Part II
#pragma: coderesponse template
def classify(feature_matrix, theta, theta_0):
"""
A classification function that uses theta and theta_0 to classify a set of
data points.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
        theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A numpy array of 1s and -1s where the kth element of the array is
the predicted classification of the kth row of the feature matrix using the
given theta and theta_0. If a prediction is GREATER THAN zero, it should
be considered a positive classification.
"""
# Your code here
""" My solution:
n = len(feature_matrix)
predictions = np.zeros(n)
for k in range(n):
if (np.dot(theta, feature_matrix[k]) + theta_0) > 0:
predictions[k] = 1
else:
predictions[k] = -1
return predictions
"""
# Instructor's solution: (MUCH cleaner!)
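    # The comparison yields a boolean vector; multiplying by 2.0 and subtracting 1
    # maps True -> +1.0 and False -> -1.0, so predictions that are zero or only
    # negligibly positive fall in the negative class.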
return (feature_matrix @ theta + theta_0 > 1e-7) * 2.0 - 1
#pragma: coderesponse end
#pragma: coderesponse template
def classifier_accuracy(
classifier,
train_feature_matrix,
val_feature_matrix,
train_labels,
val_labels,
**kwargs):
"""
Trains a linear classifier and computes accuracy.
The classifier is trained on the train data. The classifier's
accuracy on the train and validation data is then returned.
Args:
classifier - A classifier function that takes arguments
(feature matrix, labels, **kwargs) and returns (theta, theta_0)
train_feature_matrix - A numpy matrix describing the training
data. Each row represents a single data point.
        val_feature_matrix - A numpy matrix describing the validation
data. Each row represents a single data point.
train_labels - A numpy array where the kth element of the array
is the correct classification of the kth row of the training
feature matrix.
val_labels - A numpy array where the kth element of the array
is the correct classification of the kth row of the validation
feature matrix.
**kwargs - Additional named arguments to pass to the classifier
(e.g. T or L)
Returns: A tuple in which the first element is the (scalar) accuracy of the
trained classifier on the training data and the second element is the
accuracy of the trained classifier on the validation data.
"""
# Your code here
""" My solution:
theta, theta_0 = classifier(train_feature_matrix, train_labels, **kwargs)
train_preds = classify(train_feature_matrix, theta, theta_0)
train_accuracy = accuracy(train_preds, train_labels)
val_preds = classify(val_feature_matrix, theta, theta_0)
val_accuracy = accuracy(val_preds, val_labels)
return train_accuracy, val_accuracy
"""
# Instructor's solution: (same)
theta, theta_0 = classifier(train_feature_matrix, train_labels, **kwargs)
train_predictions = classify(train_feature_matrix, theta, theta_0)
val_predictions = classify(val_feature_matrix, theta, theta_0)
train_accuracy = accuracy(train_predictions, train_labels)
validation_accuracy = accuracy(val_predictions, val_labels)
return (train_accuracy, validation_accuracy)
#pragma: coderesponse end
#pragma: coderesponse template
def extract_words(input_string):
"""
Helper function for bag_of_words()
Inputs a text string
Returns a list of lowercase words in the string.
Punctuation and digits are separated out into their own words.
"""
for c in punctuation + digits:
input_string = input_string.replace(c, ' ' + c + ' ')
return input_string.lower().split()
#pragma: coderesponse end
#pragma: coderesponse template
def bag_of_words(texts):
"""
Inputs a list of string reviews
Returns a dictionary of unique unigrams occurring over the input
Feel free to change this code as guided by Problem 9
"""
# Your code here
dictionary = {} # maps word to unique index
# stopwords = np.loadtxt("stopwords.txt", dtype="str")
for text in texts:
word_list = extract_words(text)
for word in word_list:
# if word in stopwords:
# continue
if word not in dictionary:
dictionary[word] = len(dictionary)
return dictionary
#pragma: coderesponse end
#pragma: coderesponse template
def extract_bow_feature_vectors(reviews, dictionary):
"""
Inputs a list of string reviews
Inputs the dictionary of words as given by bag_of_words
Returns the bag-of-words feature matrix representation of the data.
The returned matrix is of shape (n, m), where n is the number of reviews
and m the total number of entries in the dictionary.
Feel free to change this code as guided by Problem 9
"""
# Your code here
num_reviews = len(reviews)
feature_matrix = np.zeros([num_reviews, len(dictionary)])
for i, text in enumerate(reviews):
word_list = extract_words(text)
for word in word_list:
if word in dictionary:
feature_matrix[i, dictionary[word]] = 1
# feature_matrix[i, dictionary[word]] += 1
return feature_matrix
#pragma: coderesponse end
#pragma: coderesponse template
def accuracy(preds, targets):
"""
Given length-N vectors containing predicted and target labels,
    returns the fraction of predictions that are correct.
"""
return (preds == targets).mean()
#pragma: coderesponse end
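# Minimal self-check added for illustration; it is not part of the original
# assignment. It builds a tiny, linearly separable toy dataset (made-up values)
# and exercises the classifiers defined above when the file is run directly.
if __name__ == "__main__":
    toy_features = np.array([[2.0, 2.0], [1.5, 3.0], [-2.0, -1.0], [-1.0, -2.5]])
    toy_labels = np.array([1, 1, -1, -1])
    theta, theta_0 = perceptron(toy_features, toy_labels, T=10)
    print("perceptron:", theta, theta_0,
          "avg hinge loss:", hinge_loss_full(toy_features, toy_labels, theta, theta_0))
    theta, theta_0 = average_perceptron(toy_features, toy_labels, T=10)
    print("average perceptron:", theta, theta_0)
    theta, theta_0 = pegasos(toy_features, toy_labels, T=10, L=0.2)
    print("pegasos:", theta, theta_0,
          "train accuracy:", accuracy(classify(toy_features, theta, theta_0), toy_labels))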
|
py | 1a3822b2286b3c413066797943f523ff566c2e7d | from utils import read_data
import numpy as np
def test_mnist_images():
train_images = read_data.get_mnist_data(read_data.MNIST_TRAIN_IMAGES_URL)
assert train_images.shape == (60000, 28, 28)
test_images = read_data.get_mnist_data(read_data.MNIST_TEST_IMAGES_URL)
assert test_images.shape == (10000, 28, 28)
def test_mnist_labels():
train_labels = read_data.get_mnist_data(read_data.MNIST_TRAIN_LABELS_URL)
assert train_labels.shape == (60000, )
test_labels = read_data.get_mnist_data(read_data.MNIST_TEST_LABELS_URL)
assert test_labels.shape == (10000, )
|
py | 1a3822b311aad35c06a2217b9b674914fe48c61b | from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from django_api.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
|
py | 1a3822bafe4b5a8d3aa60282f2c555002db9cd4d | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Reports builder for BIDS-Apps.
Generalizes report generation across BIDS-Apps
"""
from pathlib import Path
import re
from itertools import compress
from collections import defaultdict
from pkg_resources import resource_filename as pkgrf
from bids.layout import BIDSLayout, add_config_paths
import jinja2
from nipype.utils.filemanip import copyfile
# Add a new figures spec
try:
add_config_paths(figures=pkgrf("niworkflows", "data/nipreps.json"))
except ValueError as e:
if "Configuration 'figures' already exists" != str(e):
raise
PLURAL_SUFFIX = defaultdict(str("s").format, [("echo", "es")])
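# The default factory is "s".format, which is called with no arguments and returns
# "s", so any entity not listed explicitly pluralizes with a plain "s"
# (e.g. "run" -> "runs"), while "echo" pluralizes as "echoes".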
SVG_SNIPPET = [
"""\
<object class="svg-reportlet" type="image/svg+xml" data="./{0}">
Problem loading figure {0}. If the link below works, please try \
reloading the report in your browser.</object>
</div>
<div class="elem-filename">
Get figure file: <a href="./{0}" target="_blank">{0}</a>
</div>
""",
"""\
<img class="svg-reportlet" src="./{0}" style="width: 100%" />
</div>
<div class="elem-filename">
Get figure file: <a href="./{0}" target="_blank">{0}</a>
</div>
""",
]
class Element(object):
"""Just a basic component of a report"""
def __init__(self, name, title=None):
self.name = name
self.title = title
class Reportlet(Element):
"""
A reportlet has title, description and a list of components with either an
HTML fragment or a path to an SVG file, and possibly a caption. This is a
factory class to generate Reportlets reusing the layout from a ``Report``
object.
.. testsetup::
>>> cwd = os.getcwd()
>>> os.chdir(tmpdir)
>>> from pkg_resources import resource_filename
>>> from shutil import copytree
>>> from bids.layout import BIDSLayout
>>> test_data_path = resource_filename('niworkflows', 'data/tests/work')
>>> testdir = Path(tmpdir)
>>> data_dir = copytree(test_data_path, str(testdir / 'work'))
>>> out_figs = testdir / 'out' / 'fmriprep'
>>> bl = BIDSLayout(str(testdir / 'work' / 'reportlets'),
... config='figures', validate=False)
.. doctest::
>>> bl.get(subject='01', desc='reconall') # doctest: +ELLIPSIS
[<BIDSFile filename='.../fmriprep/sub-01/figures/sub-01_desc-reconall_T1w.svg'>]
>>> len(bl.get(subject='01', space='.*', regex_search=True))
2
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title', 'bids': {'datatype': 'figures', 'desc': 'reconall'},
... 'description': 'Some description'})
>>> r.name
'datatype-figures_desc-reconall'
>>> r.components[0][0].startswith('<img')
True
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title', 'bids': {'datatype': 'figures', 'desc': 'reconall'},
... 'description': 'Some description', 'static': False})
>>> r.name
'datatype-figures_desc-reconall'
>>> r.components[0][0].startswith('<object')
True
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title', 'bids': {'datatype': 'figures', 'desc': 'summary'},
... 'description': 'Some description'})
>>> r.components[0][0].startswith('<h3')
True
>>> r.components[0][1] is None
True
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title',
... 'bids': {'datatype': 'figures', 'space': '.*', 'regex_search': True},
... 'caption': 'Some description {space}'})
>>> sorted(r.components)[0][1]
'Some description MNI152NLin2009cAsym'
>>> sorted(r.components)[1][1]
'Some description MNI152NLin6Asym'
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title',
... 'bids': {'datatype': 'fmap', 'space': '.*', 'regex_search': True},
... 'caption': 'Some description {space}'})
>>> r.is_empty()
True
.. testcleanup::
>>> os.chdir(cwd)
"""
def __init__(self, layout, out_dir, config=None):
if not config:
raise RuntimeError("Reportlet must have a config object")
# PY35: Sorted config dict for consistent behavior
self.name = config.get(
"name", "_".join("%s-%s" % i for i in sorted(config["bids"].items()))
)
self.title = config.get("title")
self.subtitle = config.get("subtitle")
self.description = config.get("description")
# Query the BIDS layout of reportlets
files = layout.get(**config["bids"])
self.components = []
for bidsfile in files:
src = Path(bidsfile.path)
ext = "".join(src.suffixes)
desc_text = config.get("caption")
contents = None
if ext == ".html":
contents = src.read_text().strip()
elif ext == ".svg":
entities = dict(bidsfile.entities)
if desc_text:
desc_text = desc_text.format(**entities)
try:
html_anchor = src.relative_to(out_dir)
except ValueError:
html_anchor = src.relative_to(Path(layout.root).parent)
dst = out_dir / html_anchor
dst.parent.mkdir(parents=True, exist_ok=True)
copyfile(src, dst, copy=True, use_hardlink=True)
contents = SVG_SNIPPET[config.get("static", True)].format(html_anchor)
# Our current implementations of dynamic reportlets do this themselves,
# however I'll leave the code here since this is potentially something we
# will want to transfer from every figure generator to this location.
                # The following code misses setting preserveAspectRatio="xMidYMid meet"
# if not is_static:
# # Remove height and width attributes from initial <svg> tag
# svglines = out_file.read_text().splitlines()
# expr = re.compile(r' (height|width)=["\'][0-9]+(\.[0-9]*)?[a-z]*["\']')
# for l, line in enumerate(svglines[:6]):
# if line.strip().startswith('<svg'):
# newline = expr.sub('', line)
# svglines[l] = newline
# out_file.write_text('\n'.join(svglines))
# break
if contents:
self.components.append((contents, desc_text))
def is_empty(self):
return len(self.components) == 0
class SubReport(Element):
"""SubReports are sections within a Report."""
def __init__(self, name, isnested=False, reportlets=None, title=""):
self.name = name
self.title = title
self.reportlets = reportlets or []
self.isnested = isnested
class Report:
"""
The full report object. This object maintains a BIDSLayout to index
all reportlets.
.. testsetup::
>>> cwd = os.getcwd()
>>> os.chdir(tmpdir)
>>> from pkg_resources import resource_filename
>>> from shutil import copytree
>>> from bids.layout import BIDSLayout
>>> test_data_path = resource_filename('niworkflows', 'data/tests/work')
>>> testdir = Path(tmpdir)
>>> data_dir = copytree(test_data_path, str(testdir / 'work'))
>>> out_figs = testdir / 'out' / 'fmriprep'
.. doctest::
>>> robj = Report(testdir / 'out', 'madeoutuuid', subject_id='01', packagename='fmriprep',
... reportlets_dir=testdir / 'work' / 'reportlets')
>>> robj.layout.get(subject='01', desc='reconall') # doctest: +ELLIPSIS
[<BIDSFile filename='.../figures/sub-01_desc-reconall_T1w.svg'>]
>>> robj.generate_report()
0
>>> len((testdir / 'out' / 'fmriprep' / 'sub-01.html').read_text())
36693
.. testcleanup::
>>> os.chdir(cwd)
"""
def __init__(
self,
out_dir,
run_uuid,
config=None,
out_filename="report.html",
packagename=None,
reportlets_dir=None,
subject_id=None,
):
self.root = Path(reportlets_dir or out_dir)
# Initialize structuring elements
self.sections = []
self.errors = []
self.out_dir = Path(out_dir)
self.out_filename = out_filename
self.run_uuid = run_uuid
self.packagename = packagename
self.subject_id = subject_id
if subject_id is not None:
self.subject_id = (
subject_id[4:] if subject_id.startswith("sub-") else subject_id
)
self.out_filename = f"sub-{self.subject_id}.html"
# Default template from niworkflows
self.template_path = Path(pkgrf("niworkflows", "reports/report.tpl"))
self._load_config(Path(config or pkgrf("niworkflows", "reports/default.yml")))
assert self.template_path.exists()
def _load_config(self, config):
from yaml import safe_load as load
settings = load(config.read_text())
self.packagename = self.packagename or settings.get("package", None)
if self.packagename is not None:
self.root = self.root / self.packagename
self.out_dir = self.out_dir / self.packagename
if self.subject_id is not None:
self.root = self.root / "sub-{}".format(self.subject_id)
if "template_path" in settings:
self.template_path = config.parent / settings["template_path"]
self.index(settings["sections"])
def init_layout(self):
self.layout = BIDSLayout(self.root, config="figures", validate=False)
def index(self, config):
"""
Traverse the reports config definition and instantiate reportlets.
This method also places figures in their final location.
"""
# Initialize a BIDS layout
self.init_layout()
for subrep_cfg in config:
# First determine whether we need to split by some ordering
# (ie. sessions / tasks / runs), which are separated by commas.
orderings = [
s for s in subrep_cfg.get("ordering", "").strip().split(",") if s
]
entities, list_combos = self._process_orderings(orderings, self.layout)
if not list_combos: # E.g. this is an anatomical reportlet
reportlets = [
Reportlet(self.layout, self.out_dir, config=cfg)
for cfg in subrep_cfg["reportlets"]
]
else:
# Do not use dictionary for queries, as we need to preserve ordering
# of ordering columns.
reportlets = []
for c in list_combos:
# do not display entities with the value None.
c_filt = list(filter(None, c))
ent_filt = list(compress(entities, c))
# Set a common title for this particular combination c
title = "Reports for: %s." % ", ".join(
[
'%s <span class="bids-entity">%s</span>'
% (ent_filt[i], c_filt[i])
for i in range(len(c_filt))
]
)
for cfg in subrep_cfg["reportlets"]:
cfg["bids"].update({entities[i]: c[i] for i in range(len(c))})
rlet = Reportlet(self.layout, self.out_dir, config=cfg)
if not rlet.is_empty():
rlet.title = title
title = None
reportlets.append(rlet)
# Filter out empty reportlets
reportlets = [r for r in reportlets if not r.is_empty()]
if reportlets:
sub_report = SubReport(
subrep_cfg["name"],
isnested=bool(list_combos),
reportlets=reportlets,
title=subrep_cfg.get("title"),
)
self.sections.append(sub_report)
# Populate errors section
error_dir = (
self.out_dir / "sub-{}".format(self.subject_id) / "log" / self.run_uuid
)
if error_dir.is_dir():
from ..utils.misc import read_crashfile
self.errors = [read_crashfile(str(f)) for f in error_dir.glob("crash*.*")]
def generate_report(self):
"""Once the Report has been indexed, the final HTML can be generated"""
logs_path = self.out_dir / "logs"
boilerplate = []
boiler_idx = 0
if (logs_path / "CITATION.html").exists():
text = (
re.compile("<body>(.*?)</body>", re.DOTALL | re.IGNORECASE)
.findall((logs_path / "CITATION.html").read_text())[0]
.strip()
)
boilerplate.append(
(boiler_idx, "HTML", f'<div class="boiler-html">{text}</div>')
)
boiler_idx += 1
if (logs_path / "CITATION.md").exists():
text = (logs_path / "CITATION.md").read_text()
boilerplate.append((boiler_idx, "Markdown", f"<pre>{text}</pre>\n"))
boiler_idx += 1
if (logs_path / "CITATION.tex").exists():
text = (
re.compile(
r"\\begin{document}(.*?)\\end{document}", re.DOTALL | re.IGNORECASE
)
.findall((logs_path / "CITATION.tex").read_text())[0]
.strip()
)
boilerplate.append(
(
boiler_idx,
"LaTeX",
f"""<pre>{text}</pre>
<h3>Bibliography</h3>
<pre>{Path(pkgrf(self.packagename, 'data/boilerplate.bib')).read_text()}</pre>
""",
)
)
boiler_idx += 1
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath=str(self.template_path.parent)),
trim_blocks=True,
lstrip_blocks=True,
autoescape=False,
)
report_tpl = env.get_template(self.template_path.name)
report_render = report_tpl.render(
sections=self.sections, errors=self.errors, boilerplate=boilerplate
)
# Write out report
self.out_dir.mkdir(parents=True, exist_ok=True)
(self.out_dir / self.out_filename).write_text(report_render, encoding="UTF-8")
return len(self.errors)
@staticmethod
def _process_orderings(orderings, layout):
"""
Generate relevant combinations of orderings with observed values.
Arguments
---------
orderings : :obj:`list` of :obj:`list` of :obj:`str`
Sections prescribing an ordering to select across sessions, acquisitions, runs, etc.
layout : :obj:`bids.layout.BIDSLayout`
The BIDS layout
Returns
-------
entities: :obj:`list` of :obj:`str`
The relevant orderings that had unique values
value_combos: :obj:`list` of :obj:`tuple`
Unique value combinations for the entities
"""
# get a set of all unique entity combinations
all_value_combos = {
tuple(bids_file.get_entities().get(k, None) for k in orderings)
for bids_file in layout.get()
}
# remove the all None member if it exists
none_member = tuple([None for k in orderings])
if none_member in all_value_combos:
all_value_combos.remove(tuple([None for k in orderings]))
# see what values exist for each entity
unique_values = [
{value[idx] for value in all_value_combos} for idx in range(len(orderings))
]
# if all values are None for an entity, we do not want to keep that entity
keep_idx = [
False if (len(val_set) == 1 and None in val_set) or not val_set else True
for val_set in unique_values
]
# the "kept" entities
entities = list(compress(orderings, keep_idx))
# the "kept" value combinations
value_combos = [
tuple(compress(value_combo, keep_idx)) for value_combo in all_value_combos
]
# sort the value combinations alphabetically from the first entity to the last entity
value_combos.sort(
key=lambda entry: tuple(
str(value) if value is not None else "0" for value in entry
)
)
return entities, value_combos
def run_reports(
out_dir,
subject_label,
run_uuid,
config=None,
reportlets_dir=None,
packagename=None,
):
"""
Run the reports.
.. testsetup::
>>> cwd = os.getcwd()
>>> os.chdir(tmpdir)
>>> from pkg_resources import resource_filename
>>> from shutil import copytree
>>> test_data_path = resource_filename('niworkflows', 'data/tests/work')
>>> testdir = Path(tmpdir)
>>> data_dir = copytree(test_data_path, str(testdir / 'work'))
>>> (testdir / 'fmriprep').mkdir(parents=True, exist_ok=True)
.. doctest::
>>> run_reports(testdir / 'out', '01', 'madeoutuuid', packagename='fmriprep',
... reportlets_dir=testdir / 'work' / 'reportlets')
0
.. testcleanup::
>>> os.chdir(cwd)
"""
return Report(
out_dir,
run_uuid,
config=config,
subject_id=subject_label,
packagename=packagename,
reportlets_dir=reportlets_dir,
).generate_report()
def generate_reports(
subject_list, output_dir, run_uuid, config=None, work_dir=None, packagename=None
):
"""Execute run_reports on a list of subjects."""
reportlets_dir = None
if work_dir is not None:
reportlets_dir = Path(work_dir) / "reportlets"
report_errors = [
run_reports(
output_dir,
subject_label,
run_uuid,
config=config,
packagename=packagename,
reportlets_dir=reportlets_dir,
)
for subject_label in subject_list
]
errno = sum(report_errors)
if errno:
import logging
logger = logging.getLogger("cli")
error_list = ", ".join(
"%s (%d)" % (subid, err)
for subid, err in zip(subject_list, report_errors)
if err
)
logger.error(
"Preprocessing did not finish successfully. Errors occurred while processing "
"data from participants: %s. Check the HTML reports for details.",
error_list,
)
return errno
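# Illustrative usage sketch (not part of the module): how a BIDS-App might call
# generate_reports() once its workflow finishes. The paths, subject labels and
# package name below are assumptions made for the example, so it is left
# commented out to avoid side effects on import.
#
#     from uuid import uuid4
#     errors = generate_reports(
#         subject_list=["01", "02"],
#         output_dir="/out",
#         run_uuid=str(uuid4()),
#         work_dir="/work",
#         packagename="fmriprep",
#     )
#     assert errors == 0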
|
py | 1a38258c6c7d3349df3953b596cd4c509d31abf9 | """Test functions for the sparse.linalg._expm_multiply module
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_, assert_equal
from scipy._lib._numpy_compat import suppress_warnings
from scipy.sparse import SparseEfficiencyWarning
import scipy.linalg
from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max,
_onenormest_matrix_power, expm_multiply, _expm_multiply_simple,
_expm_multiply_interval)
def less_than_or_close(a, b):
return np.allclose(a, b) or (a < b)
class TestExpmActionSimple(object):
"""
These tests do not consider the case of multiple time steps in one call.
"""
def test_theta_monotonicity(self):
pairs = sorted(_theta.items())
for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]):
assert_(theta_a < theta_b)
def test_p_max_default(self):
m_max = 55
expected_p_max = 8
observed_p_max = _compute_p_max(m_max)
assert_equal(observed_p_max, expected_p_max)
def test_p_max_range(self):
for m_max in range(1, 55+1):
p_max = _compute_p_max(m_max)
assert_(p_max*(p_max - 1) <= m_max + 1)
p_too_big = p_max + 1
assert_(p_too_big*(p_too_big - 1) > m_max + 1)
def test_onenormest_matrix_power(self):
np.random.seed(1234)
n = 40
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
for p in range(4):
if not p:
M = np.identity(n)
else:
M = np.dot(M, A)
estimated = _onenormest_matrix_power(A, p)
exact = np.linalg.norm(M, 1)
assert_(less_than_or_close(estimated, exact))
assert_(less_than_or_close(exact, 3*estimated))
def test_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
observed = expm_multiply(A, B)
expected = np.dot(scipy.linalg.expm(A), B)
assert_allclose(observed, expected)
def test_matrix_vector_multiply(self):
np.random.seed(1234)
n = 40
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
v = np.random.randn(n)
observed = expm_multiply(A, v)
expected = np.dot(scipy.linalg.expm(A), v)
assert_allclose(observed, expected)
def test_scaled_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
for t in (0.2, 1.0, 1.5):
with np.errstate(invalid='ignore'):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
observed = _expm_multiply_simple(A, B, t=t)
expected = np.dot(scipy.linalg.expm(t*A), B)
assert_allclose(observed, expected)
def test_scaled_expm_multiply_single_timepoint(self):
np.random.seed(1234)
t = 0.1
n = 5
k = 2
A = np.random.randn(n, n)
B = np.random.randn(n, k)
observed = _expm_multiply_simple(A, B, t=t)
expected = scipy.linalg.expm(t*A).dot(B)
assert_allclose(observed, expected)
def test_sparse_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
A = scipy.sparse.rand(n, n, density=0.05)
B = np.random.randn(n, k)
observed = expm_multiply(A, B)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"splu requires CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve is more efficient when sparse b is in the CSC matrix format")
expected = scipy.linalg.expm(A).dot(B)
assert_allclose(observed, expected)
def test_complex(self):
A = np.array([
[1j, 1j],
[0, 1j]], dtype=complex)
B = np.array([1j, 1j])
observed = expm_multiply(A, B)
expected = np.array([
1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)),
1j * np.exp(1j)], dtype=complex)
assert_allclose(observed, expected)
class TestExpmActionInterval(object):
def test_sparse_expm_multiply_interval(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
n = 40
k = 3
endpoint = True
for num in (14, 13, 2):
A = scipy.sparse.rand(n, n, density=0.05)
B = np.random.randn(n, k)
v = np.random.randn(n)
for target in (B, v):
X = expm_multiply(A, target,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"splu requires CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve is more efficient when sparse b is in the CSC matrix format")
for solution, t in zip(X, samples):
assert_allclose(solution,
scipy.linalg.expm(t*A).dot(target))
def test_expm_multiply_interval_vector(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
endpoint = True
for num in (14, 13, 2):
for n in (1, 2, 5, 20, 40):
A = scipy.linalg.inv(np.random.randn(n, n))
v = np.random.randn(n)
X = expm_multiply(A, v,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(v))
def test_expm_multiply_interval_matrix(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
endpoint = True
for num in (14, 13, 2):
for n in (1, 2, 5, 20, 40):
for k in (1, 2):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
X = expm_multiply(A, B,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(B))
def test_sparse_expm_multiply_interval_dtypes(self):
# Test A & B int
A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
B = np.ones(5, dtype=int)
Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
# Test A complex, B int
A = scipy.sparse.diags(-1j*np.arange(5),format='csr', dtype=complex)
B = np.ones(5, dtype=int)
Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)),format='csr')
assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
# Test A int, B complex
A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
B = 1j*np.ones(5, dtype=complex)
Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
def test_expm_multiply_interval_status_0(self):
self._help_test_specific_expm_interval_status(0)
def test_expm_multiply_interval_status_1(self):
self._help_test_specific_expm_interval_status(1)
def test_expm_multiply_interval_status_2(self):
self._help_test_specific_expm_interval_status(2)
def _help_test_specific_expm_interval_status(self, target_status):
np.random.seed(1234)
start = 0.1
stop = 3.2
num = 13
endpoint = True
n = 5
k = 2
nrepeats = 10
nsuccesses = 0
for num in [14, 13, 2] * nrepeats:
A = np.random.randn(n, n)
B = np.random.randn(n, k)
status = _expm_multiply_interval(A, B,
start=start, stop=stop, num=num, endpoint=endpoint,
status_only=True)
if status == target_status:
X, status = _expm_multiply_interval(A, B,
start=start, stop=stop, num=num, endpoint=endpoint,
status_only=False)
assert_equal(X.shape, (num, n, k))
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(B))
nsuccesses += 1
if not nsuccesses:
msg = 'failed to find a status-' + str(target_status) + ' interval'
raise Exception(msg)
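# Quick standalone sanity check added for illustration (not part of the SciPy
# test suite): it compares expm_multiply against the dense matrix exponential
# on a small random problem when this file is run directly.
if __name__ == '__main__':
    np.random.seed(0)
    A_demo = np.random.randn(6, 6) * 0.1
    b_demo = np.random.randn(6)
    assert_allclose(expm_multiply(A_demo, b_demo),
                    scipy.linalg.expm(A_demo).dot(b_demo))
    print('expm_multiply agrees with scipy.linalg.expm on the demo problem')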
|
py | 1a3825edc6228f154b8a64d0497fc0593bc1b18c | import math
import os
import sys
from random import choice
from random import randint
# --------------------------------------------------------------------------- #
# Player Setup #
class player:
def __init__(self):
self.name = ""
self.lvl = 1
self.hp = 10
self.str = 5
self.dex = 5
self.drd = 5
self.exp = 0
my_player = player()
# --------------------------------------------------------------------------- #
|
py | 1a3826de00026389b16dc664916b06671b9d7e9c | from booster import app, db
from datetime import datetime
class News(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(256))
tease = db.Column(db.Text)
body = db.Column(db.Text)
created_at = db.Column(db.DateTime)
updated_at = db.Column(db.DateTime)
expiry = db.Column(db.DateTime)
@staticmethod
def get_expired():
return News.query.filter(News.expiry < datetime.now()).order_by(News.created_at).all()
@staticmethod
def get_current():
return News.query.filter(News.expiry >= datetime.now()).order_by(News.created_at).all()
@property
def expiry_time(self):
return self.expiry.time()
@property
def expiry_date(self):
return self.expiry.date()
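# Illustrative usage sketch (assumes a Flask app context and a created schema;
# the field values are made up), left commented out so importing the module has
# no side effects:
#
#     with app.app_context():
#         item = News(title="Launch", tease="Short teaser", body="Full text",
#                     created_at=datetime.now(), updated_at=datetime.now(),
#                     expiry=datetime(2030, 1, 1))
#         db.session.add(item)
#         db.session.commit()
#         print(News.get_current())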
|
py | 1a3828086e94b14ce6884678bf9db297d4326eae | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.document
from enum import IntEnum
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.document import RedlineDisplayType as RedlineDisplayType
if hasattr(RedlineDisplayType, '_constants') and isinstance(RedlineDisplayType._constants, dict):
RedlineDisplayType._constants['__ooo_ns__'] = 'com.sun.star.document'
RedlineDisplayType._constants['__ooo_full_ns__'] = 'com.sun.star.document.RedlineDisplayType'
RedlineDisplayType._constants['__ooo_type_name__'] = 'const'
def build_enum():
global RedlineDisplayTypeEnum
ls = [f for f in dir(RedlineDisplayType) if not callable(getattr(RedlineDisplayType, f)) and not f.startswith('__')]
_dict = {}
for name in ls:
_dict[name] = getattr(RedlineDisplayType, name)
RedlineDisplayTypeEnum = IntEnum('RedlineDisplayTypeEnum', _dict)
build_enum()
else:
from ...lo.document.redline_display_type import RedlineDisplayType as RedlineDisplayType
class RedlineDisplayTypeEnum(IntEnum):
"""
Enum of Const Class RedlineDisplayType
specifies which changes in a document are displayed.
"""
NONE = RedlineDisplayType.NONE
"""
no changes are displayed.
"""
INSERTED = RedlineDisplayType.INSERTED
"""
only inserted parts are displayed and attributed.
"""
INSERTED_AND_REMOVED = RedlineDisplayType.INSERTED_AND_REMOVED
"""
    inserted and removed parts are displayed and attributed.
"""
REMOVED = RedlineDisplayType.REMOVED
"""
only removed parts are displayed and attributed.
"""
__all__ = ['RedlineDisplayType', 'RedlineDisplayTypeEnum']
|
py | 1a3829e9f8ed05c1c1e8cda28dd2ea8d2d7b6bb5 | #############################################################################
##
## Copyright (c) 2020 Riverbank Computing Limited <[email protected]>
##
## This file is part of PyQt5.
##
## This file may be used under the terms of the GNU General Public License
## version 3.0 as published by the Free Software Foundation and appearing in
## the file LICENSE included in the packaging of this file. Please review the
## following information to ensure the GNU General Public License version 3.0
## requirements will be met: http://www.gnu.org/copyleft/gpl.html.
##
## If you do not wish to use this file under the terms of the GPL version 3.0
## then you may purchase a commercial license. For more information contact
## [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
# If pluginType is MODULE, the plugin loader will call moduleInformation. The
# variable MODULE is inserted into the local namespace by the plugin loader.
pluginType = MODULE
# moduleInformation() must return a tuple (module, widget_list). If "module"
# is "A" and any widget from this module is used, the code generator will write
# "import A". If "module" is "A[.B].C", the code generator will write
# "from A[.B] import C". Each entry in "widget_list" must be unique.
def moduleInformation():
return "PyQt5.QAxContainer", ("QAxWidget", )
|
py | 1a382b120105eedc97fb1aec1879ca526b186747 | from datetime import datetime
from time import sleep
from redis import StrictRedis
from random import random
from rediscache_decorator import Cache
### Comment this section if you don't have redis instance ###
redis = StrictRedis(decode_responses=True)
cache = Cache(redis)
@cache.ttl(300)
def pseudo_calc():
sleep(1)
print("Computation in progress")
return str(datetime.now())
for i in range(10):
print(pseudo_calc())
@cache.ttl(123)
def another():
return "hello"
# Example: caching a dict return value with rediscache_decorator
@cache.dict(60)
def return_a_dict(*args, **kwargs):
sleep(1)
print("Computation in progress")
return {"now": str(datetime.now())}
for i in range(5):
print(return_a_dict())
# Example: caching a float return value with rediscache_decorator
@cache.float(60)
def return_a_float(*args, **kwargs):
return random()
for i in range(5):
print(return_a_float())
|
py | 1a382b48915587726c4bee11f87e7a536a01b358 | import os
# toolchains options
ARCH='sparc-v8'
CPU='bm3803'
CROSS_TOOL='gcc'
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\97981\Downloads\bcc-2.1.1-gcc\bin'
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'sparc-gaisler-elf-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=v8 -nostartfiles'
#DEVICE = ' '
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__'
LFLAGS = DEVICE + ' -Wl,-Map=rtthread-bm3803.map -T bm3803.lds -Ttext=0x40000000'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -Wall'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2 -Wall'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' +\
SIZE + ' $TARGET \n'
|
py | 1a382b5c221cdee83bc29bb37aa65b0cbb157cd6 | """
Only one, either a template, or render() can be specified:
# PY2 - remove '+IGNORE_EXCEPTION_DETAIL' when dropping Python 2 support:
>>> grok.testing.grok(__name__) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
zope.configuration.config.ConfigurationExecutionError:\
martian.error.GrokError: Multiple possible ways to render view\
<class 'grokcore.view.tests.base.view.eithertemplateorrender.CavePainting'>.\
It has both a 'render' method as well as an associated template.
"""
import grokcore.view as grok
class Mammoth(grok.Context):
pass
class CavePainting(grok.View):
def render(self):
pass
cavepainting = grok.PageTemplate("nothing")
|
py | 1a382b7ca5c602723a16904551ce3664740eba2c | """
test duo_piano_midi.py
"""
import unittest
from duo_piano_midi import DuoPiano
from keyread import keyread
# class TestKeyHitWait(unittest.TestCase):
# def test_user_for_P_key(self):
# self.assertTrue(keyhit('p'))
# class TestKeyboardWait(unittest.TestCase):
# def test_user_for_P_key_yn_false(self):
# key = keyread("Press the P Key: And Press Enter...", yn=False)
# self.assertEqual(key, "P")
# def test_user_for_P_key_yn_false(self):
# key = keyread("Press the P Key: And Press Enter...")
# self.assertEqual(key, False)
# def test_timeout(self):
# key = keyread("Dont press anything for 5 seconds")
# self.assertIsNone(key)
class TestDuoPianoMidi(unittest.TestCase):
name = "GENERAL:GENERAL MIDI 1"
    def find_in_list(self, name, ports):
        for port in ports:
            if name in port:
# print (port)
return True
return False
def test_duo_piano_out_port_aquire(self):
duo_piano = DuoPiano()
self.assertEqual(duo_piano.portsout, duo_piano.aquire_out_ports())
def test_duo_piano_in_port_aquire(self):
duo_piano = DuoPiano()
self.assertEqual(duo_piano.portsin, duo_piano.aquire_in_ports())
def test_duo_piano_in_port_recognised_portsin(self):
duo_piano = DuoPiano()
self.assertTrue(self.find_in_list(self.name, duo_piano.portsin))
def test_duo_piano_out_port_recognised_portsout(self):
duo_piano = DuoPiano()
self.assertTrue(self.find_in_list(self.name, duo_piano.portsout))
def test_duo_piano_in_port_recognised_aquire_in(self):
duo_piano = DuoPiano()
self.assertTrue(self.find_in_list(self.name, duo_piano.portsin))
def test_duo_piano_out_port_recognised_aquire_out(self):
duo_piano = DuoPiano()
self.assertTrue(self.find_in_list(self.name, duo_piano.portsout))
def test_recognised_in_port_and_open(self):
"""
Find one and only one (the first) DuoPiano IN MIDI port
"""
duo_piano = DuoPiano()
din = duo_piano.get_duo_in()
self.assertIsInstance(din, type(duo_piano.midiin))
def test_recognised_out_port_and_open(self):
"""
Find one and only one (the first) DuoPiano OUT MIDI port
"""
duo_piano = DuoPiano()
dout = duo_piano.get_duo_out()
self.assertIsInstance(dout, type(duo_piano.midiout))
class TestDuoPianoMidiOut(unittest.TestCase):
# def test_middle_c_quiet_midi_channel_1(self):
# duo_piano = DuoPiano()
# duo_piano.strike_note(1, 60, 10, 0.25)
# keyread("Did You hear that Quiet note:...? (yY/nN")
# duo_piano.strike_note(1, 60, 100, 0.25)
# keyread("Did You hear that Loud Note:...? (yY/nN")
def test_middle_c_loud_midi_channel_1(self):
duo_piano = DuoPiano()
dout = duo_piano.get_duo_out()
duo_piano.strike_note(1,60,10,.25)
self.assertTrue(keyread("Did You hear a note ...? Press Enter if you did\n",yn=False, ret=True))
print('Press a note on the keyboard Ctrl-C to break out. . . \n')
print(duo_piano.read_note())
if __name__ == "__main__":
unittest.main()
|
py | 1a382baa77f5b476479a8056d79916b2478a32a1 | from django.conf.urls import include, url
from .import views
urlpatterns = [
url(r'^editoriales/$', views.EditorialesListView.as_view()),
url(r'^editoriales/nuevo/$', views.CreateEditorialView.as_view()),
url(r'^editoriales/delete/$', views.delete_editoriales_view),
]
|
py | 1a382badb51f1604b9eabdd95a155f3a44f761db | list1 = [x*x for x in range(10)]
print(list1)
# Run the list2 creation statement below with caution; it is kept commented out for now
# list2 = [x*x for x in range(1000000000000000000000000)]
generator1 = (x*x for x in range(1000000000000000000000000))
print(generator1)
print(type(generator1))
generator2 = (x*x for x in range(3))
print(next(generator2))
print(next(generator2))
print(next(generator2))
# print(next(generator2))
generator3 = (x*x for x in range(5))
for index in generator3:
print(index)
def print_a(max):
i = 0
while i < max:
i += 1
yield i
a = print_a(10)
print(a)
print(type(a))
print(next(a))
print(next(a))
print(next(a))
print(next(a))
print(a.__next__())
print(a.__next__())
def print_b(max):
i = 0
while i < max:
i += 1
args = yield i
        print('Received argument: ' + args)
b = print_b(20)
print(next(b))
print(b.send('Python'))
def print_c():
while True:
        print('Running A')
yield None
def print_d():
while True:
        print('Running B')
yield None
c = print_c()
d = print_d()
while True:
c.__next__()
d.__next__() |
py | 1a382bb18f88d99738e9798ec16ff168c5ebf13a | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, MetaData
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from .common import app, db
# The `framework.models.main_database` module.
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
name = referred_cls.__name__.lower()
local_table = local_cls.__table__
if name in local_table.columns:
newname = name + "_"
return newname
return name
def name_for_collection_relationship(base, local_cls, referred_cls, constraint):
name = referred_cls.__name__.lower() + '_collection'
for c in referred_cls.__table__.columns:
if c == name:
name += "_"
return name
def classes(model_map):
classes_found = {}
with app.app_context():
engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI']) # app.config['MAIN_DATABASE_URI'])
metadata = MetaData(engine)
session = Session(engine)
metadata.reflect(bind=engine, only=model_map.keys()) #app.config['MAIN_DATABASE_MODEL_MAP'].keys())
Model = declarative_base(metadata=metadata, cls=(db.Model,), bind=engine)
Base = automap_base(metadata=metadata, declarative_base=Model)
Base.prepare(
name_for_scalar_relationship=name_for_scalar_relationship,
name_for_collection_relationship=name_for_collection_relationship
)
for cls in Base.classes:
cls.__table__.info = {'bind_key': 'main'}
if cls.__table__.name in model_map: #app.config['MAIN_DATABASE_MODEL_MAP']:
#globals()[app.config['MAIN_DATABASE_MODEL_MAP'][cls.__table__.name]] = cls
classes_found[model_map[cls.__table__.name]] = cls
return classes_found
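# Minimal usage sketch (hypothetical table/class names, not part of this module):
#
#   MODEL_MAP = {"users": "User", "orders": "Order"}
#   models = classes(MODEL_MAP)
#   User = models["User"]  # automapped declarative class bound to the 'main' database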
|
py | 1a382c136a7d69aed981369b6f2c3758faba4c11 | # Copyright 2016 Ifwe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .meta import Base, init, destroy
from .apptype_access import ApptypeAccess
from .app_deployment import AppDeployment
from .app_hipchat_rooms import app_hipchat_rooms
from .app_jmx_attribute import app_jmx_attribute
from .app_packages import app_package
from .asset import Asset
from .cname import Cname
from .datacenter import Datacenter
from .default_spec import DefaultSpec
from .deployment import Deployment
from .environment import Environment
from .ganglia import Ganglia
from .hipchat import Hipchat
from .host_deployment import HostDeployment
from .host_interface import HostInterface
from .host_ip import HostIp
from .host_spec import HostSpec
from .hw_chassis import HwChassis
from .host import Host
from .application import AppDefinition
from .iloms import Ilom
from .jmx_attribute import JmxAttribute
from .ldap_group import LdapGroup
from .lock import lock
from .nag_apptypes_services import NagApptypesServices
from .nag_check_commands import NagCheckCommand
from .nag_command_arguments import NagCommandArgument
from .nag_contact_groups import NagContactGroup
from .nag_contacts import NagContact
from .nag_contact_groups_members import nag_contact_groups_members
from .nag_hosts_services import NagHostsServices
from .nag_services_contact_groups import nag_services_contact_groups
from .nag_services import NagService
from .nag_services_arguments import NagServicesArguments
from .nag_services_contacts import nag_services_contacts
from .nag_time_periods import NagTimePeriod
from .net_default_ip import NetDefaultIP
from .net_default_map import NetDefaultMap
from .net_default_trunk import net_default_trunk
from .network_device import NetworkDevice
from .ns_device import NsDevice
from .ns_monitor import NsMonitor
from .ns_service import NsService
from .ns_service_bind import ns_service_bind
from .ns_service_max import NsServiceMax
from .ns_service_param import NsServiceParam
from .ns_vip import NsVip
from .ns_vip_binds import NsVipBinds
from .ns_weight import NsWeight
from .package_name import PackageName
from .package import Package
from .package_definition import PackageDefinition
from .package_location import PackageLocation
from .port import Port
from .project_package import ProjectPackage
from .project import Project
from .service_event import ServiceEvent
from .subnet import Subnet
from .vlan import Vlan
from .vm_info import VmInfo
from .zone import Zone
Application = AppDefinition
__all__ = [
'Base', 'init', 'destroy', 'ApptypeAccess', 'Application',
'AppDefinition', 'AppDeployment', 'app_hipchat_rooms',
'app_jmx_attribute', 'app_package', 'Asset', 'Cname', 'Datacenter',
'DefaultSpec', 'Deployment', 'Environment', 'Ganglia',
'Hipchat', 'HostDeployment', 'HostInterface', 'HostIp',
'HostSpec', 'Host', 'Ilom', 'JmxAttribute', 'LdapGroup', 'lock',
'NagApptypesServices', 'NagCheckCommand', 'NagCommandArgument',
'NagContactGroup', 'NagContact', 'nag_contact_groups_members',
'NagHostsServices', 'nag_services_contact_groups', 'NagService',
'NagServicesArguments', 'nag_services_contacts', 'NagTimePeriod',
'NetDefaultIP', 'NetDefaultMap', 'net_default_trunk',
'NetworkDevice', 'NsDevice', 'NsMonitor', 'NsService',
'ns_service_bind', 'NsServiceMax', 'NsServiceParam', 'NsVip',
'NsVipBinds', 'NsWeight', 'PackageName', 'Package',
'PackageDefinition', 'PackageLocation', 'Port',
'ProjectPackage', 'Project', 'ServiceEvent', 'Subnet', 'Vlan',
'VmInfo', 'Zone'
]
|
py | 1a382c18542d864ca54977da555339283bbfcc4f | from collections import OrderedDict
from django.forms import BoundField, CheckboxInput, CheckboxSelectMultiple, FileInput, RadioSelect
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from django_jinja import library
from bootstrapform_jinja.config import BOOTSTRAP_COLUMN_COUNT
@library.test
def checkbox_field(field):
"""
Jinja test to check if a field is a checkbox
"""
return isinstance(field.field.widget, CheckboxInput)
@library.test
def multiple_checkbox_field(field):
"""
Jinja test to check if a field is a multiple value checkbox
"""
return isinstance(field.field.widget, CheckboxSelectMultiple)
@library.test
def radio_field(field):
"""
Jinja test to check if a field is a radio select
"""
return isinstance(field.field.widget, RadioSelect)
def add_input_classes(field):
"""
Add form-control to class attribute of the widget of the given field.
"""
if not isinstance(field.field.widget, (CheckboxInput, CheckboxSelectMultiple, RadioSelect, FileInput)):
attrs = field.field.widget.attrs
attrs['class'] = attrs.get('class', '') + ' form-control'
@library.filter
def bootstrap(element):
"""
Render field, form or formset with bootstrap styles
"""
return render(element)
@library.filter
def bootstrap_inline(element):
"""
Render field, form or formset with bootstrap styles in single line
"""
return render(element, {'label': 'sr-only'})
@library.filter
def bootstrap_horizontal(element, label_cols=None, max_columns=None):
"""
Render field, form or formset with bootstrap styles in horizontal layout
"""
if not label_cols:
label_cols = ('col-sm-2', 'col-lg-2')
if isinstance(label_cols, str):
label_cols = label_cols.split()
# ensure that label_cols includes only strings and doesn't have duplicates
label_cols = tuple(OrderedDict.fromkeys(map(str, label_cols)).keys())
if not max_columns:
max_columns = BOOTSTRAP_COLUMN_COUNT
cls_value = []
cls_single_value = []
for cl in label_cols:
base, sep, value_nb_cols = cl.rpartition('-')
prefix = base + sep
try:
value_nb_cols = int(value_nb_cols)
except ValueError:
value_nb_cols = max_columns
if value_nb_cols >= max_columns:
split_class = prefix + str(max_columns)
else:
offset_class = prefix + 'offset-' + str(value_nb_cols)
split_class = prefix + str(max_columns - value_nb_cols)
cls_single_value.extend((split_class, offset_class))
cls_value.append(split_class)
classes = {
'label': ' '.join(label_cols),
'value': ' '.join(cls_value),
'single_value': ' '.join(cls_single_value),
}
return render(element, classes)
def render(element, markup_classes=None):
"""
Internal render function used by boostrap filters
"""
classes = {'label': '', 'value': '', 'single_value': ''}
if markup_classes:
classes.update(markup_classes)
if isinstance(element, BoundField):
# InputField
add_input_classes(element)
template = get_template('bootstrapform/field.jinja')
context = {'field': element, 'form': element.form, 'classes': classes}
elif getattr(element, 'management_form', None):
# FormSet
for form in element.forms:
for field in form.visible_fields():
add_input_classes(field)
template = get_template('bootstrapform/formset.jinja')
context = {'formset': element, 'classes': classes}
else:
# Form
for field in element.visible_fields():
add_input_classes(field)
template = get_template('bootstrapform/form.jinja')
context = {'form': element, 'classes': classes}
return mark_safe(template.render(context))
@library.filter
def bootstrap_classes(field):
"""
Filter that adds form-control to given input field
"""
add_input_classes(field)
return mark_safe(field)
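# Template usage sketch (Jinja syntax; the form variable name is illustrative):
#
#   {{ form|bootstrap }}                         {# stacked layout #}
#   {{ form|bootstrap_inline }}                  {# labels rendered sr-only #}
#   {{ form|bootstrap_horizontal('col-sm-3') }}  {# horizontal layout, 3-column labels #}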
|
py | 1a382d2a25e18fd464dc703eea9e24a08246f45e | import json
from pathlib import Path
def save_json(filepath, content, append=False, topcomment=None):
"""
Saves content to a json file
:param filepath: path to a file (must include .json)
:param content: dictionary of stuff to save
"""
fp = Path(filepath)
    if fp.suffix != ".json":
        raise ValueError(f"Filepath {fp} not valid, should point to a .json file")
with open(filepath, "w") as json_file:
json.dump(content, json_file, indent=4)
def load_json(filepath):
"""
Load a json file
:param filepath: path to json file
"""
fp = Path(filepath)
if not fp.exists():
raise ValueError("Unrecognized file path: {}".format(filepath))
with open(filepath) as f:
data = json.load(f)
return data
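# Usage sketch (the file path below is illustrative):
#
#   save_json("config.json", {"threshold": 0.5})
#   config = load_json("config.json")
#   assert config["threshold"] == 0.5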
|
py | 1a382d68de0f9ec54506be9a41354da8a49b6724 | # This code is derived from https://github.com/esa/pykep/pull/127
# originally developed by Moritz v. Looz @mlooz.
# It was modified following suggestions from Waldemar Martens @MartensWaldemar_gitlab.
# Solar orbiter is quite a challenge for state-of-the-art optimizers, but good
# solutions fulfilling the requirements can be found; an example is shown in
# check_good_solution().
#
# See https://www.esa.int/Science_Exploration/Space_Science/Solar_Orbiter
import math
from math import cos, pi, sin, sqrt
from fcmaes import retry, advretry
from fcmaes.optimizer import logger, de_cma, single_objective, de, Bite_cpp
import matplotlib.pyplot as plt
import pygmo as pg
from pykep import RAD2DEG, AU
from solo_mgar_udp import solo_mgar_udp
logger("solarorbiter.log")
def read_solutions(fname):
ys = []
xs = []
with open(fname) as csvfile:
lines = csvfile.readlines()
for line in lines:
row = line.split(' ')
if len(row) < 12:
continue
ys.append(float(row[0]))
x = []
i = -1
while(True):
xi = row[i]
while not xi[-1].isdigit():
xi = xi[:-1]
if not (xi[0].isdigit() or xi[0] == '-'):
xi = xi[1:]
x.insert(0, float(xi))
break
x.insert(0, float(xi))
i -= 1
xs.append(x)
return ys, xs
def verify(ys, xs):
for i in range(len(ys)):
solo_mgar = solo_mgar_udp([7000, 8000])
y0 = ys[i]
x = xs[i]
if len(x) != 10:
continue
lambert_legs = []
resonances = []
solo_mgar._compute_dvs(x, lambert_legs, resonances)
resos = [reso._resonance for reso in resonances]
# assert resos0 == resos
y = solo_mgar.fitness(x)[0]
print(y0, y, y0-y)
        assert abs(y0 - y) < 0.23
def check_good_solution(x):
solo_mgar = solo_mgar_udp([7000, 8000])
prob = pg.problem(solo_mgar)
print (str(prob.fitness(x)))
solo_mgar.pretty(x)
solo_mgar.plot(x)
solo_mgar.plot_distance_and_flybys(x)
def print_good_solutions(xs):
from functools import reduce
for i in range(len(xs)):
solo_mgar = solo_mgar_udp([7000, 8000])
lambert_legs = []
resos = []
x = xs[i]
rvt_outs, rvt_ins, rvt_pls, _, _ = solo_mgar._compute_dvs(x, lambert_legs, resos)
#rvt_outs = [rvt.rotate(solo_mgar._rotation_axis, solo_mgar._theta) for rvt in rvt_outs]
rvt_out = rvt_outs[-1].rotate(solo_mgar._rotation_axis, solo_mgar._theta) # rotate
a, e, incl, _, _, _ = rvt_out.kepler()
# orbit should be as polar as possible, but we do not care about prograde/retrograde
corrected_inclination = abs(abs(incl) % pi - pi / 2) * RAD2DEG
final_perhelion = a * (1 - e) / AU
y = solo_mgar.fitness(x)
resos = [str(resos[i]._resonance) for i in range(len(resos))]
resos = reduce((lambda x, y: x + ',' + y), resos)
print (str(i) + ' ' + str(incl*RAD2DEG) + ' ' + str(final_perhelion) + ' [' + str(y[0]), ', [' + resos + '], ' + str(x) + '],')
def optimize():
solo_mgar = solo_mgar_udp([7000, 8000])
prob = pg.problem(solo_mgar)
fprob = single_objective(prob)
# logger().info('solar orbiter' + ' de -> cmaes c++ smart retry')
# ret = advretry.minimize(fprob.fun, bounds=fprob.bounds, num_retries = 60000,
# logger = logger(), optimizer=de_cma(1500))
logger().info('solar orbiter' + ' BiteOpt parallel retry')
ret = retry.minimize(fprob.fun, bounds=fprob.bounds, num_retries = 32000,
logger = logger(), optimizer=Bite_cpp(120000, M=6))
return ret
def archipelago():
udp = solo_mgar_udp([7000, 8000])
#uda = pg.sga(gen = 6000)
uda = pg.sade(memory=True,variant=1,gen=6000)
# instantiate an unconnected archipelago
for _ in range(1000):
archi = pg.archipelago(t = pg.topologies.unconnected())
for _ in range(32):
alg = pg.algorithm(uda)
#alg.set_verbosity(1)
prob = pg.problem(udp)
pop = pg.population(prob, 20)
isl = pg.island(algo=alg, pop=pop)
archi.push_back(isl)
archi.evolve()
archi.wait_check()
def optimize_pagmo():
solo_mgar = solo_mgar_udp([7000, 8000])
for i in range(6000):
prob = pg.problem(solo_mgar)
pop = pg.population(prob=prob, size=32)
alg = pg.algorithm(pg.sade(memory=True,gen=1))
pop = alg.evolve(pop)
print(i, pop.champion_f, solo_mgar.fitness(pop.champion_x))
if __name__ == '__main__':
#optimize()
#archipelago()
ys, xs = read_solutions('data/solo_results.txt')
#print_good_solutions(xs)
#verify(ys, xs)
check_good_solution(xs[0])
plt.show()
pass
|
py | 1a382de47cc603409e0aa658fc7e1712afc40722 | from unittest.mock import ANY
import pytest
from reconcile.closedbox_endpoint_monitoring_base import (
queries,
get_endpoints,
parse_prober_url,
fill_desired_state,
)
from reconcile.blackbox_exporter_endpoint_monitoring import (
build_probe as blackbox_exporter_probe_builder,
PROVIDER as BLACKBOX_EXPORTER_PROVIDER,
)
from reconcile.signalfx_endpoint_monitoring import (
build_probe as signalfx_probe_builder,
PROVIDER as SIGNALFX_PROVIDER,
)
from reconcile.utils.openshift_resource import ResourceInventory
from .fixtures import Fixtures
fxt = Fixtures("closedbox_exporter_endpoint_monitoring")
def get_endpoint_fixtures(path: str) -> dict:
return fxt.get_anymarkup(path)["appInterface"]["apps"]
def test_invalid_endpoints(mocker):
query = mocker.patch.object(queries, "get_service_monitoring_endpoints")
query.return_value = get_endpoint_fixtures("test_invalid_endpoints.yaml")
endpoints = get_endpoints(BLACKBOX_EXPORTER_PROVIDER)
assert len(endpoints) == 0
def test_blackbox_exporter_endpoint_loading(mocker):
ep_query = mocker.patch.object(queries, "get_service_monitoring_endpoints")
ep_query.return_value = get_endpoint_fixtures("test_endpoint.yaml")
endpoints = get_endpoints(BLACKBOX_EXPORTER_PROVIDER)
assert endpoints is not None
assert len(endpoints) == 1
provider = list(endpoints.keys())[0]
assert provider.provider == BLACKBOX_EXPORTER_PROVIDER
provider_endpoints = endpoints.get(provider)
assert provider_endpoints is not None
assert len(provider_endpoints) == 1
assert len(provider_endpoints[0].monitoring) == 1
def test_parse_prober_url():
assert parse_prober_url("http://host:1234/path") == {
"url": "host:1234",
"scheme": "http",
"path": "/path",
}
assert parse_prober_url("http://host") == {"url": "host", "scheme": "http"}
def test_invalid_prober_url():
# scheme missing
with pytest.raises(ValueError):
parse_prober_url("host:1234/path")
def test_blackbox_exporter_probe_building(mocker):
ep_query = mocker.patch.object(queries, "get_service_monitoring_endpoints")
ep_query.return_value = get_endpoint_fixtures("test_blackbox_probe_building.yaml")
endpoints = get_endpoints(BLACKBOX_EXPORTER_PROVIDER)
assert len(endpoints) == 1
provider = list(endpoints.keys())[0]
provider_endpoints = endpoints.get(provider)
assert provider_endpoints is not None
probe_resource = blackbox_exporter_probe_builder(provider, provider_endpoints)
assert probe_resource is not None
# verify prober url decomposition
spec = probe_resource.body.get("spec")
assert spec.get("prober") == {
"url": "exporterhost:9115",
"scheme": "http",
"path": "/probe",
}
# verify labels
labels = spec["targets"]["staticConfig"]["labels"]
assert labels.get("environment") == "staging"
# verify timeout and interval
assert spec["scrapeTimeout"] == provider.timeout
assert spec["interval"] == provider.checkInterval
# verify targets
assert "https://test1.url" in spec["targets"]["staticConfig"]["static"]
assert "https://test2.url" in spec["targets"]["staticConfig"]["static"]
def test_signalfx_probe_building(mocker):
ep_query = mocker.patch.object(queries, "get_service_monitoring_endpoints")
ep_query.return_value = get_endpoint_fixtures("test_signalfx_probe_building.yaml")
endpoints = get_endpoints(SIGNALFX_PROVIDER)
assert len(endpoints) == 1
provider = list(endpoints.keys())[0]
provider_endpoints = endpoints.get(provider)
assert provider_endpoints is not None
probe_resource = signalfx_probe_builder(provider, provider_endpoints)
assert probe_resource is not None
# verify prober url decomposition
spec = probe_resource.body.get("spec")
assert spec.get("prober") == {
"url": "signalfxexporter:9091",
"scheme": "http",
"path": "/metrics/probe",
}
# verify labels
labels = spec["targets"]["staticConfig"]["labels"]
assert labels.get("environment") == "staging"
# verify timeout and interval
assert spec["scrapeTimeout"] == provider.timeout
assert spec["interval"] == provider.checkInterval
# verify targets
assert "test_1" in spec["targets"]["staticConfig"]["static"]
assert "test_2" in spec["targets"]["staticConfig"]["static"]
# verify relabeling
assert {
"action": "replace",
"regex": "^test_1$",
"replacement": "https://test1.url",
"sourceLabels": ["instance"],
"targetLabel": "instance",
} in spec["targets"]["staticConfig"]["relabelingConfigs"]
assert {
"action": "replace",
"regex": "^test_2$",
"replacement": "https://test2.url",
"sourceLabels": ["instance"],
"targetLabel": "instance",
} in spec["targets"]["staticConfig"]["relabelingConfigs"]
def test_blackbox_exporter_filling_desired_state(mocker):
ep_query = mocker.patch.object(queries, "get_service_monitoring_endpoints")
ep_query.return_value = get_endpoint_fixtures("test_endpoint.yaml")
add_desired_mock = mocker.patch.object(ResourceInventory, "add_desired")
endpoints = get_endpoints(BLACKBOX_EXPORTER_PROVIDER)
provider = list(endpoints.keys())[0]
probe = blackbox_exporter_probe_builder(provider, endpoints[provider])
assert probe is not None
fill_desired_state(provider, probe, ResourceInventory())
assert add_desired_mock.call_count == 1
add_desired_mock.assert_called_with(
cluster="app-sre-stage-01",
namespace="openshift-customer-monitoring",
resource_type="Probe",
name="blackbox-exporter-http-2xx",
value=ANY,
)
def test_signalfx_filling_desired_state(mocker):
ep_query = mocker.patch.object(queries, "get_service_monitoring_endpoints")
ep_query.return_value = get_endpoint_fixtures("test_endpoint.yaml")
add_desired_mock = mocker.patch.object(ResourceInventory, "add_desired")
endpoints = get_endpoints(SIGNALFX_PROVIDER)
provider = list(endpoints.keys())[0]
probe = signalfx_probe_builder(provider, endpoints[provider])
assert probe is not None
fill_desired_state(provider, probe, ResourceInventory())
assert add_desired_mock.call_count == 1
add_desired_mock.assert_called_with(
cluster="app-sre-stage-01",
namespace="openshift-customer-monitoring",
resource_type="Probe",
name="signalfx-exporter-http-2xx",
value=ANY,
)
def test_loading_multiple_providers_per_endpoint(mocker):
ep_query = mocker.patch.object(queries, "get_service_monitoring_endpoints")
ep_query.return_value = get_endpoint_fixtures(
"test_multiple_providers_per_endpoint.yaml"
)
endpoints = get_endpoints(BLACKBOX_EXPORTER_PROVIDER)
assert len(endpoints) == 2
for provider, eps in endpoints.items():
assert provider.provider == BLACKBOX_EXPORTER_PROVIDER
assert len(eps) == 2
|
py | 1a382e8a5b1d5af0171cf4f19630de276ab66cf1 | from ald.rtp import *
from ald.core import *
from ald.abp import *
from ald.microrheology import *
|
py | 1a382fab1dcb4942527154c0e779df8116c55005 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import copy
import functools
import itertools
import multiprocessing.pool
import os
import re
import sys
import time
import weakref
from absl.testing import parameterized
import numpy
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.lang import directives
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function as tf_function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.layers import convolutional
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_sendrecv_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.structured import structured_tensor
from tensorflow.python.platform import test
from tensorflow.python.saved_model.load import load
from tensorflow.python.saved_model.save import save
from tensorflow.python.training import training_ops
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
def total_function_cache(defined):
return defined._list_all_concrete_functions() # pylint: disable=protected-access
def _example_indexed_slices_with_dense_shape():
return indexed_slices.IndexedSlices(
constant_op.constant([1, 2]), constant_op.constant([0, 1]),
constant_op.constant([2]))
def _example_indexed_slices_without_dense_shape():
return indexed_slices.IndexedSlices(
constant_op.constant([1, 2]), constant_op.constant([0, 1]))
def _spec_for_value(value):
"""Returns the (nested) TypeSpec for a value."""
if nest.is_nested(value):
return nest.map_structure(_spec_for_value, value)
elif isinstance(value, (ops.Tensor, composite_tensor.CompositeTensor)):
return type_spec.type_spec_from_value(value)
else:
return value
# This dummy decorator imitates ordinary decorators utilizing tf_decorator.
def dummy_tf_decorator(method):
def wrapper(*args, **kwargs):
return method(*args, **kwargs)
return tf_decorator.make_decorator(method, wrapper)
# TODO(mdan): Organize these tests.
class FunctionTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(FunctionTest, self).setUp()
cpus = config.list_physical_devices('CPU')
# Set 4 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
def testBasic(self):
matmul = def_function.function(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
sq2 = matmul(sq, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
self.assertAllEqual(sq2.numpy().reshape(-1), [52, 76, 74, 108])
def testPythonFunctionNotCallable(self):
with self.assertRaisesRegex(TypeError, 'is not a callable object'):
def_function.function(1)
def testOnExitCallback(self):
values = []
def append_1():
values.append(1)
def append_2():
values.append(2)
def g(x):
old_values = list(values)
ops.add_exit_callback_to_default_func_graph(append_1)
self.assertEqual(old_values, values)
return x + 1
tf_g = def_function.function(g)
def f(x):
old_values = list(values)
ops.add_exit_callback_to_default_func_graph(append_2)
self.assertEqual(old_values, values)
return tf_g(x)
tf_f = def_function.function(f)
self.assertEmpty(values)
tf_f(constant_op.constant(1.0))
self.assertEqual(values, [1, 2]) # Once for g, once for f.
tf_f(constant_op.constant([1.0])) # force a retrace
self.assertEqual(values, [1, 2, 1, 2]) # And again.
def testCannotAddExitCallbackWhenNotInFunctionScope(self):
with self.assertRaisesRegex(RuntimeError, 'when not building a function.'):
ops.add_exit_callback_to_default_func_graph(lambda: None)
def testVariable(self):
v1 = variables.Variable(1.0)
add = def_function.function(lambda x, v: x + v1 + v)
v2 = variables.Variable(1.0)
x = constant_op.constant(1.0)
r = add(x, v2)
self.assertEqual(3.0, self.evaluate(r))
def testVariableOnly(self):
v = variables.Variable(1.0)
add = def_function.function(lambda x: x.assign_add(1.0))
r1 = add(v)
self.assertEqual(2.0, self.evaluate(r1))
c = constant_op.constant(1.0)
with self.assertRaisesRegex(AttributeError, 'no attribute'):
add(c)
@test_util.disable_tfrt('Packed tensor is not supported in tfrt yet.')
def testPackedVariable(self):
with ops.device('/cpu:0'):
v0_0 = resource_variable_ops.ResourceVariable(1.0)
with ops.device('/cpu:1'):
v0_1 = resource_variable_ops.ResourceVariable(2.0)
v1_0 = resource_variable_ops.ResourceVariable(3.0)
with ops.device('/cpu:2'):
v1_1 = resource_variable_ops.ResourceVariable(4.0)
packed_var_0 = ops.pack_eager_tensors([v0_0.handle, v0_1.handle])
packed_var_1 = ops.pack_eager_tensors([v1_0.handle, v1_1.handle])
# TODO(b/145922293): use ResourceVariable.assign_add and
# ResourceVariable.read_value directly once we support packing multiple
# ResourceVariable into one ResourceVariable.
@def_function.function
def read_var():
resource_variable_ops.assign_add_variable_op(
packed_var_0, constant_op.constant(5.0))
resource_variable_ops.assign_add_variable_op(
packed_var_1, constant_op.constant(6.0))
with ops.device('/cpu:0'):
read0 = resource_variable_ops.read_variable_op(
packed_var_0, dtype=dtypes.float32)
with ops.device('/cpu:1'):
read1 = resource_variable_ops.read_variable_op(
packed_var_0, dtype=dtypes.float32)
read2 = resource_variable_ops.read_variable_op(
packed_var_1, dtype=dtypes.float32)
with ops.device('/cpu:2'):
read3 = resource_variable_ops.read_variable_op(
packed_var_1, dtype=dtypes.float32)
return read0, read1, read2, read3
arg_attrs = read_var.get_concrete_function().function_def.arg_attr
self.assertLen(arg_attrs, 2)
self.assertEqual(arg_attrs[0].attr['_composite_device'].s,
compat.as_bytes(packed_var_0.device))
self.assertEqual(arg_attrs[1].attr['_composite_device'].s,
compat.as_bytes(packed_var_1.device))
self.assertAllEqual(read_var(), (1 + 5, 2 + 5, 3 + 6, 4 + 6))
def testImplementsAttributeBasic(self):
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
with context.graph_mode(), self.cached_session():
a = array_ops.placeholder(dtypes.float32, ())
b = array_ops.placeholder(dtypes.float32, ())
v(a, b)
gradients_impl.gradients(v(a, b), [a, b])
fdefs = ops.get_default_graph().as_graph_def().library.function
self.assertLen(fdefs, 3)
not_present = 0
present = 0
for f in fdefs:
name = f.signature.name
if 'forward' in name or 'backward' in name:
not_present += 1
self.assertNotIn(function.IMPLEMENTS_ATTRIBUTE_NAME, f.attr, f)
else:
present += 1
self.assertEqual(f.attr[function.IMPLEMENTS_ATTRIBUTE_NAME].s,
'func'.encode('ascii'), f)
self.assertEqual(not_present, 2, fdefs)
self.assertEqual(present, 1, fdefs)
def testImplementsAttributeAssertsOnSideInput(self):
with context.graph_mode(), self.cached_session():
z = array_ops.zeros(0)
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y + z)
a = array_ops.ones((1,))
b = array_ops.ones((1,))
with self.assertRaisesRegex(AssertionError,
'variables are always captured'):
v(a, b)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertEmpty(functions)
def testImplementsAttributeWorksWithGradientTape(self):
add = lambda x, y: x + y ** 2
add = def_function.function(experimental_implements='MyFunc')(add)
x = variables.Variable(3.0)
y = variables.Variable(2.0)
with backprop.GradientTape() as tape:
g = add(x, y)
dg_dy, dg_dx = tape.gradient(g, [y, x])
self.assertEqual(dg_dy.numpy(), 4.0)
self.assertEqual(dg_dx.numpy(), 1.0)
def testImplementsAttributeWorksOnVariables(self):
with context.graph_mode(), self.cached_session():
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable((1.0,))
b = variables.Variable((1.0,))
r1 = v(a, b)
_ = v(a, a)
functions = ops.get_default_graph().as_graph_def().library.function
# Verify that we created only one function
self.assertLen(functions, 1)
# Verify that eval() reads the current values.
a.initializer.run()
b.initializer.run()
self.assertEqual(r1.eval(), 2)
a.assign_add([1]).eval()
self.assertEqual(r1.eval(), 3)
def testImplementsAttributeWorksOnConstants(self):
with context.graph_mode(), self.cached_session():
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable(1.0)
r1 = v(a, 2.)
r2 = v(2., a)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertLen(functions, 1)
self.assertLen(functions[0].signature.input_arg, 2)
# Verify that eval() reads the current values.
a.initializer.run()
self.assertEqual(r1.eval(), 3)
self.assertEqual(r2.eval(), 3)
def testImplementsAttributeSpecializes(self):
with context.graph_mode(), self.cached_session():
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable(1.0)
r1 = v(a, [2.])
r2 = v([2., 2], a)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertLen(functions, 2)
# Ensure that all parameters are still there and haven't been inlined!
self.assertLen(functions[0].signature.input_arg, 2)
self.assertLen(functions[1].signature.input_arg, 2)
# Verify that eval() reads the current values.
a.initializer.run()
numpy.testing.assert_equal(r1.eval(), [3.])
numpy.testing.assert_equal(r2.eval(), [3., 3.])
def testImplementsWorksWithTensorSpec(self):
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
v = v.get_concrete_function(
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32))
x = v(1., 2.)
self.assertEqual(x.numpy(), 3.)
def testImplementsAttributeAsNameAttrList(self):
implements_attr = (
'name: "embedding_matmul" attr { key: "key1" value { i: 2 } '
'} attr { key: "key2" value { b: false } }')
v = def_function.function(
experimental_implements=implements_attr)(lambda x, y: x + y)
with context.graph_mode(), self.cached_session():
a = array_ops.placeholder(dtypes.float32, ())
b = array_ops.placeholder(dtypes.float32, ())
v(a, b)
gradients_impl.gradients(v(a, b), [a, b])
fdefs = ops.get_default_graph().as_graph_def().library.function
self.assertLen(fdefs, 3)
not_present = 0
present = 0
for f in fdefs:
name = f.signature.name
if 'forward' in name or 'backward' in name:
not_present += 1
self.assertNotIn(function.IMPLEMENTS_ATTRIBUTE_NAME, f.attr, f)
else:
present += 1
attr_value = f.attr[function.IMPLEMENTS_ATTRIBUTE_NAME]
self.assertIsNotNone(attr_value.func, f)
self.assertEqual(attr_value.func.name, 'embedding_matmul')
name_attrs = attr_value.func.attr
self.assertLen(name_attrs, 2)
self.assertEqual(not_present, 2, fdefs)
self.assertEqual(present, 1, fdefs)
def testExternalControlDependency(self):
with ops.Graph().as_default(), self.test_session():
v = variables.Variable(1.0)
v.initializer.run()
op = v.assign_add(1.0)
@function.defun
def f():
with ops.control_dependencies([op]):
return 1.0
self.evaluate(f())
self.assertAllEqual(self.evaluate(v), 2.0)
def testInputShapeFunctionRelaxation(self):
unknown_dim = [False]
@function.defun(experimental_relax_shapes=True)
def func(a):
if a._shape_tuple()[0] is None:
unknown_dim[0] = True
return a + 1
func(constant_op.constant([]))
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 1)
func(constant_op.constant([1.0]))
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
func(constant_op.constant([1.0, 2.0]))
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
def testInputShapeRelaxationOnInstanceMethod(self):
# Test that experimental_relax_shapes is passed during
# instance method bounding.
unknown_dim = [False]
class Foo(object):
@def_function.function(experimental_relax_shapes=True)
def func(self, a):
if a._shape_tuple()[0] is None:
unknown_dim[0] = True
return a + 1
foo = Foo()
foo.func(constant_op.constant([]))
self.assertFalse(unknown_dim[0])
foo.func(constant_op.constant([1.0]))
self.assertFalse(unknown_dim[0])
foo.func(constant_op.constant([1.0, 2.0]))
self.assertTrue(unknown_dim[0])
def testInputShapeFunctionRelaxationWithRaggedTensors(self):
traced_type_spec = [None]
@def_function.function(experimental_relax_shapes=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
check_trace( # Initial call gets traced.
ragged_factory_ops.constant([[1], [2, 3, 4]]),
ragged_tensor.RaggedTensorSpec([2, None], dtypes.int32))
check_trace( # Input TypeSpec is the same -> no retrace.
ragged_factory_ops.constant([[1, 2], [3, 4]]), None)
check_trace( # Even if component tensor shapes change -> no retrace.
ragged_factory_ops.constant([[1, 2], [3, 4, 5, 6]]), None)
check_trace( # Different TypeSpec shape (nrows): retrace
ragged_factory_ops.constant([[1], [2], [3]]),
ragged_tensor.RaggedTensorSpec([3, None], dtypes.int32))
check_trace( # Different nrows again: relax & retrace
ragged_factory_ops.constant([[1], [2], [3], [4]]),
ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32))
check_trace( # Different nrows yet again: not retrace
ragged_factory_ops.constant([[1]]), None)
check_trace( # Different ragged_rank: retrace
ragged_factory_ops.constant([[[1]]]),
ragged_tensor.RaggedTensorSpec([1, None, None], dtypes.int32))
check_trace( # Different ragged_rank again: retrace & relax
ragged_factory_ops.constant([[[1]], [[2]]]),
ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32))
def testInputShapeFunctionRelaxationWithStructuredTensors(self):
traced_type_spec = [None]
@def_function.function(experimental_relax_shapes=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
# If we have TypeSpecs that differ in ways other than just their shape,
# then retrace each time.
check_trace(
structured_tensor.StructuredTensor.from_pyval({'a': [1]}),
structured_tensor.StructuredTensorSpec(
[], {'a': tensor_spec.TensorSpec((1,), dtypes.int32)}))
check_trace(
structured_tensor.StructuredTensor.from_pyval({'b': [1]}),
structured_tensor.StructuredTensorSpec(
[], {'b': tensor_spec.TensorSpec((1,), dtypes.int32)}))
check_trace(
structured_tensor.StructuredTensor.from_pyval({'c': [1]}),
structured_tensor.StructuredTensorSpec(
[], {'c': tensor_spec.TensorSpec((1,), dtypes.int32)}))
# But if we call again with only shape different, then do relax:
check_trace( # retrace
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2]}),
structured_tensor.StructuredTensorSpec(
[], {'a': tensor_spec.TensorSpec((2,), dtypes.int32)}))
check_trace( # relax & retrace
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2, 3]}),
structured_tensor.StructuredTensorSpec(
[], {'a': tensor_spec.TensorSpec((None,), dtypes.int32)}))
check_trace( # use relaxed graph
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2, 3, 4]}),
None)
def testInputShapeFunctionRelaxationWithDatasetIterators(self):
# For dataset iterators, the TypeSpec includes type information that's
# not derivable from the component tensors. Make sure that the TypeSpec
# shapes get relaxed as appropriate.
traced_type_spec = [None]
@def_function.function(experimental_relax_shapes=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
ds_1_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([1, 2]))
ds_2_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([2, 2]))
ds_3_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([3, 2]))
ds_4_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([4, 2]))
ds_2_1 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([2, 1]))
check_trace( # shape=[1, 2]: retrace
dataset_ops.make_one_shot_iterator(ds_1_2),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([1, 2], dtypes.float32)))
check_trace( # shape=[1, 2]: no retrace (use the [1, 2] graph)
dataset_ops.make_one_shot_iterator(ds_1_2), None)
check_trace( # shape=[2, 2]: retrace
dataset_ops.make_one_shot_iterator(ds_2_2),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([2, 2], dtypes.float32)))
check_trace( # shape=[3, 2]: relax to [None, 2] and retrace
dataset_ops.make_one_shot_iterator(ds_3_2),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([None, 2], dtypes.float32)))
check_trace( # shape=[4, 2]: no retrace (use the [None, 2] graph)
dataset_ops.make_one_shot_iterator(ds_4_2), None)
check_trace( # shape=[2, 1]: relax to [None, None] and retrace
dataset_ops.make_one_shot_iterator(ds_2_1),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([None, None], dtypes.float32)))
def testCapturesVariables(self):
a = variables.Variable(1.0, trainable=False)
b = variables.Variable(1.0)
cc = [None]
@def_function.function
def f():
c = cc[0]
if c is None:
c = cc[0] = variables.Variable(1.)
return a + b + c + 1
cf = f.get_concrete_function()
c = cc[0]
captured_variables = {v.ref() for v in (a, b, c)}
trainable_variables = {v.ref() for v in (b, c)}
self.assertEqual({v.ref() for v in cf.variables}, captured_variables)
self.assertEqual({v.ref() for v in cf.trainable_variables},
trainable_variables)
self.assertEqual(cf.variables, cf.graph.variables)
self.assertEqual(cf.trainable_variables, cf.graph.trainable_variables)
def testNestedInputShapeFunctionRelaxation(self):
unknown_dim = [False]
@function.defun(experimental_relax_shapes=True)
def func(a_, b_=None):
del a_ # Only used to check which cache is used.
self.assertEqual(b_[0]._shape_tuple(), ())
if b_[1]._shape_tuple()[0] is None:
unknown_dim[0] = True
return b_[0] + 1
a = 'hi'
b0 = constant_op.constant(1.0)
func(a, b_=[b0, constant_op.constant([])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 1)
func(a, b_=[b0, constant_op.constant([1.0])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
func(a, b_=[b0, constant_op.constant([1.0, 1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
unknown_dim[0] = False
# Now do the same except with a new a which is not a tensor; this should
# change the cache key.
a = 'bye'
func(a, b_=[b0, constant_op.constant([])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 3)
# Since we already marked a cache miss for a function with the same
# non-input signatures, here we will immediately start relaxing shapes.
func(a, b_=[b0, constant_op.constant([1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 3)
def testNestedShapeFunctionRelaxation(self):
got_shape = [None]
# The inner function will go through shape relaxation because the shapes it
# receives will be [1], [2], [3], ...
@def_function.function(experimental_relax_shapes=True)
def bar(x_shape):
got_shape[0] = x_shape._shape_tuple()
return x_shape
# The outer function will not go through shape relaxation because the shapes
# it receives will be [1], [[1]], [[[1]]], ...
@def_function.function(experimental_relax_shapes=True)
def foo(ones):
return bar(array_ops.shape(ones))
for rank in range(1, 6):
x_shape = self.evaluate(foo(array_ops.ones([1] * rank)))
self.assertAllEqual(x_shape, [1] * rank)
if rank < 3:
self.assertEqual(got_shape[0], (rank,))
else:
self.assertEqual(got_shape[0], (None,))
def testNoHash(self):
@def_function.function()
def f(_):
return 1.0
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r'could not be represented through the generic tracing'):
f(set([]))
def testFuncName(self):
@function.defun_with_attributes(attributes={'func_name': 'multiply'})
def add(x, y):
_ = x * y
return x + y
@function.defun
def add_2(x, y):
_ = x * y
return x + y
self.assertEqual(add._name, 'multiply')
self.assertEqual(add_2._name, 'add_2')
def testBasicGraphMode(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = sq(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedInputsGraphMode(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = a_times_b(pair({'a': t}, {'b': t}))
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputsGraphMode(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function()
def pairs_mul(pair_a, pair_b):
return pair(matmul(pair_a.a, pair_b.a), matmul(pair_a.b, pair_b.b))
a = constant_op.constant([[1.0, 2.0], [1.0, 2.0]])
b = constant_op.constant([[3.0, 4.0], [3.0, 4.0]])
out = pairs_mul(pair(a, b), pair(b, a))
expected = pair(math_ops.matmul(a, b).numpy(),
math_ops.matmul(b, a).numpy())
self.assertAllClose(out, expected)
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
def testNestedFunctionGraphNotOutOfDate(self, function_decorator):
@function_decorator
def f():
return constant_op.constant(1.)
class _Model(object):
@function_decorator
def g(self):
self.f = f.get_concrete_function()
model = _Model()
model.g()
concrete = model.f
weak_g_graph = weakref.ref(model.g.get_concrete_function().graph)
self.assertIs(weak_g_graph(), concrete.graph.outer_graph)
weak_g = weakref.ref(model.g)
del model
self.assertIsNone(weak_g())
self.assertIsNone(weak_g_graph())
self.assertIsNotNone(concrete.graph.outer_graph)
self.assertIs(ops.get_default_graph(), concrete.graph.outer_graph)
def testGraphEagerIsolation(self):
@function.defun
def f():
self.v = variables.Variable(1.0)
return self.v.read_value()
self.assertAllEqual(f(), 1.0)
with ops.Graph().as_default():
self.assertEqual(f().shape, ())
def testBasicGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testGetConcreteFunctionThreadSafety(self):
@def_function.function
def sq():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
return math_ops.matmul(t, t)
concrete_functions = []
def thread_func(_):
cf = sq.get_concrete_function()
concrete_functions.append(cf)
num_threads = 100
pool = multiprocessing.pool.ThreadPool(num_threads)
_ = pool.map(thread_func, list(range(num_threads)))
self.assertLen(set(concrete_functions), 1)
def testGetConcreteFunctionThreadSafetyWithArgs(self):
@def_function.function
def add_100(*args):
return math_ops.add_n(args)
p = multiprocessing.pool.ThreadPool(2)
args = (constant_op.constant(1.),) * 100
f1, f2 = p.map(add_100.get_concrete_function, [args] * 2)
# I see about len(args) + max(0, len(args) - 3) arguments expected.
f1(*args)
del f2
def testInputSpecGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
sq_op = sq.get_concrete_function(
tensor_spec.TensorSpec((None, None), dtypes.float32))
self.assertEqual([None, None], sq_op.output_shapes.as_list())
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out1 = sq_op(t1)
self.assertAllEqual(out1, math_ops.matmul(t1, t1).numpy())
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out2 = sq_op(t2)
self.assertAllEqual(out2, math_ops.matmul(t2, t2).numpy())
def testNestedInputSpecGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(mats):
((a, b),) = mats
return matmul(a, b)
sq_op_autonamed = sq.get_concrete_function(
[(tensor_spec.TensorSpec((None, None), dtypes.float32),
tensor_spec.TensorSpec((None, None), dtypes.float32))])
self.assertEqual([None, None], sq_op_autonamed.output_shapes.as_list())
sq_op = sq.get_concrete_function(
[(tensor_spec.TensorSpec((None, None), dtypes.float32,
name='first_mat'),
tensor_spec.TensorSpec((None, None), dtypes.float32,
name='second_mat'))])
self.assertEqual([None, None], sq_op.output_shapes.as_list())
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.4, 2.4], [3.4, 4.4]])
out = sq_op(first_mat=t1, second_mat=t2)
self.assertAllEqual(out, math_ops.matmul(t1, t2).numpy())
self.assertAllEqual(sq_op_autonamed(t1, t2),
math_ops.matmul(t1, t2).numpy())
def testExecutingStatelessDefunConcurrently(self):
@def_function.function
def stateless(x):
return math_ops.multiply(2.0, x)
pool = multiprocessing.pool.ThreadPool()
inputs = [constant_op.constant(1.0 * x) for x in range(100)]
outputs = [float(out) for out in pool.map(stateless, inputs)]
expected = [float(2.0 * x) for x in inputs]
self.assertSequenceEqual(outputs, expected)
def testExecutingManyStatelessDefunsConcurrently(self):
@def_function.function
def stateless(x):
del x
return math_ops.multiply(2.0, 2.0)
pool = multiprocessing.pool.ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
objects = [object() for _ in range(100)]
outputs = [float(out) for out in pool.map(stateless, objects)]
expected = [4.0] * 100
self.assertSequenceEqual(outputs, expected)
@test_util.disable_tfrt('b/169431085: This test is flaky on tfrt')
def testExecutingStatefulDefunConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def stateful(x):
v.assign(x)
pool = multiprocessing.pool.ThreadPool()
inputs = [constant_op.constant(0.0)] * 100
pool.map(stateful, inputs)
self.assertEqual(float(v.read_value()), 0.0)
def testExecutingManyStatefulDefunsConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def stateful(x):
del x
return v.assign(0.0)
pool = multiprocessing.pool.ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
pool.map(stateful, [object() for _ in range(100)])
self.assertEqual(float(v.read_value()), 0.0)
def testShareRendezvous(self):
# Disable grappler from inlining the functions. Note we run the send & recv
# in graph mode since with eager mode the function should automatically be
# inlined.
context.context().set_optimizer_experimental_options(
{'disable_meta_optimizer': True})
cpu = '/device:CPU:0'
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
@def_function.function
def send():
x = constant_op.constant(1)
gen_sendrecv_ops.send(x, 'x', cpu, 0, cpu)
return x
send._shared_rendezvous = True # pylint: disable=protected-access
@def_function.function(input_signature=signature)
def send_body(n):
send()
return n - 1
@def_function.function
def recv():
return gen_sendrecv_ops.recv(dtypes.int32, 'x', cpu, 0, cpu)
recv._shared_rendezvous = True # pylint: disable=protected-access
@def_function.function(input_signature=signature)
def recv_body(n):
recv()
return n - 1
@def_function.function(input_signature=signature)
def cond(n):
return n > 0
# Instead of calling the send & recv functions directly we want to call them
# through a functional while to ensure the rendezvous is shared across the
# while boundary.
@def_function.function
def fn(n):
functional_ops.While([n], cond.get_concrete_function(),
send_body.get_concrete_function())
return functional_ops.While([n], cond.get_concrete_function(),
recv_body.get_concrete_function())
# Use a graph context since functions will not be automatically inlined
with context.graph_mode(), self.cached_session():
self.evaluate(fn(2))
def disabled_testRandomSeed(self):
@def_function.function
def f():
return random_ops.random_normal(())
random_seed.set_random_seed(1)
x = f()
self.assertNotEqual(x, f())
random_seed.set_random_seed(1)
self.assertAllEqual(f(), x)
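  # A namedtuple of dicts of TensorSpecs also works; the flattened tensors are
  # passed to the ConcreteFunction under their spec names.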
def testNestedInputsGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = a_times_b.get_concrete_function(
pair(dict(a=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'a')),
dict(b=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'b'))))
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(a=t, b=t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return (matmul(a, a), {'b': constant_op.constant(1.0)})
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes,
(tensor_shape.TensorShape([2, 2]),
{'b': tensor_shape.TensorShape([])}))
self.assertEqual(sq_op.output_dtypes,
(dtypes.float32, {'b': dtypes.float32}))
(a, b) = sq_op(t)
self.assertAllEqual(a, math_ops.matmul(t, t).numpy())
self.assertAllEqual(b['b'].numpy(), 1.0)
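  # A traced function that returns None reports None output dtypes and shapes.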
def testGraphFunctionNoneOutput(self):
@def_function.function
def fn(unused_a, unused_b):
return None
x = constant_op.constant(1)
fn_op = fn.get_concrete_function(x, x)
self.assertEqual(fn_op.output_dtypes, None)
self.assertEqual(fn_op.output_shapes, None)
self.assertAllEqual(fn_op(x, x), None)
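  # NumPy array arguments are converted to tensors; arrays with the same shape
  # and dtype (including ndarray subclasses) reuse a single cache entry.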
def testDefunNumpyArraysConvertedToTensors(self):
def f(x):
self.assertIsInstance(x, ops.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
defined = function.defun(f)
defined(x)
self.assertLen(total_function_cache(defined), 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertLen(total_function_cache(defined), 1)
np_ones = numpy.ones([], numpy.float32)
np_zeros = numpy.zeros([], numpy.float32)
tf_ones = array_ops.ones([])
tf_zeros = array_ops.zeros([])
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1., defined(np_ones).numpy())
self.assertLen(total_function_cache(defined), 2)
self.assertEqual(0., defined(np_zeros).numpy())
self.assertEqual(1., defined(tf_ones).numpy())
self.assertEqual(0., defined(tf_zeros).numpy())
self.assertLen(total_function_cache(defined), 2)
# Test that mutable inputs are supported.
mutable = numpy.ones([], numpy.float32)
self.assertEqual(1., defined(mutable).numpy())
mutable.fill(0)
self.assertEqual(0., defined(mutable).numpy())
class MyNdarray(numpy.ndarray):
pass
# Test that the subclasses of ndarray are converted too.
self.assertEqual(1., defined(np_ones.view(MyNdarray)).numpy())
self.assertEqual(0., defined(np_zeros.view(MyNdarray)).numpy())
# We should not have triggered any re-tracing of the python function.
self.assertLen(total_function_cache(defined), 2)
def testNumpyDtypeInputSupported(self):
@function.defun
def f(x, dtype):
return constant_op.constant(dtype(x))
self.assertEqual(f(1, numpy.float32).numpy(), numpy.float32(1))
self.assertEqual(f(2, numpy.float32).numpy(), numpy.float32(2))
self.assertEqual(f(1, numpy.int32).numpy(), numpy.int32(1))
self.assertEqual(f(2, numpy.int32).numpy(), numpy.int32(2))
def testDefunNumpyArraysConvertedToTensorsInKwargs(self):
def f(**kwargs):
x = kwargs.pop('x')
self.assertIsInstance(x, ops.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
defined = function.defun(f)
defined(x=x)
self.assertLen(total_function_cache(defined), 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x=x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertLen(total_function_cache(defined), 1)
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1., defined(x=numpy.ones([])).numpy())
self.assertEqual(0., defined(x=numpy.zeros([])).numpy())
self.assertEqual(1., defined(x=array_ops.ones([])).numpy())
self.assertEqual(0., defined(x=array_ops.zeros([])).numpy())
def testDefunCapturedInt32(self):
x = constant_op.constant(1, dtype=dtypes.int32)
@def_function.function
def add_int32s():
return x + x
self.assertEqual(2, int(add_int32s()))
def testDefunReadVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def f():
return v.read_value()
self.assertEqual(1.0, float(f()))
def testDefunAssignAddVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
x = constant_op.constant(2.0)
@def_function.function
def test_assign_add():
v.assign_add(x)
return v.read_value()
self.assertEqual(3.0, float(test_assign_add()))
@test_util.run_in_graph_and_eager_modes
def testTensorInitializationInFunctionRaisesError(self):
@def_function.function
def tensor_init():
with self.assertRaisesRegex(ValueError, 'could not be lifted out'):
resource_variable_ops.ResourceVariable(constant_op.constant(2.0))
tensor_init()
@test_util.run_in_graph_and_eager_modes
def testCallableTensorInitializationInFunction(self):
@def_function.function
def tensor_init():
self.v = resource_variable_ops.ResourceVariable(
lambda: constant_op.constant(2.0))
return self.v.read_value()
value = tensor_init()
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(value), 2.0)
@test_util.also_run_as_tf_function
def testInitScopeTensorInitializationInFunction(self):
@def_function.function
def tensor_init():
with ops.init_scope():
const = constant_op.constant(2.0)
        # Note: this variable bypasses tf.function's variable creation
        # requirements by sidestepping variable_creator_scope, using
        # ResourceVariable instead of Variable.
self.v = resource_variable_ops.ResourceVariable(const)
return self.v.read_value()
value = tensor_init()
self.assertAllEqual(value, 2.0)
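  # get_concrete_function should create the function's variables so they can be
  # initialized before the concrete function is evaluated.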
@test_util.run_in_graph_and_eager_modes
def testGetConcreteFunctionCreatesVariables(self):
v_holder = []
@def_function.function
def tensor_init():
if not v_holder:
v_holder.append(variables.Variable(5.))
return v_holder[0].read_value()
concrete = tensor_init.get_concrete_function()
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(5., self.evaluate(concrete()))
self.assertAllEqual(5., self.evaluate(tensor_init()))
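  # With capture_by_value=True the variable's value at trace time is baked into
  # the graph, so later assignments do not change the function's result.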
def testFuncGraphCaptureByValue(self):
v = variables.Variable(1.0)
def trivial_function():
return v.read_value()
graph_function = function.Function(
trivial_function, 'test', capture_by_value=True)
self.assertAllEqual(graph_function(), 1.0)
v.assign(2.0)
self.assertAllEqual(graph_function(), 1.0)
def testFuncGraphCaptureByValueNested(self):
v = variables.Variable(1.0)
def trivial_function():
return control_flow_ops.cond(
array_ops.placeholder_with_default(True, ()),
v.read_value, v.read_value)
graph_function = function.Function(
trivial_function, 'test', capture_by_value=True)
self.assertAllEqual(graph_function(), 1.0)
v.assign(2.0)
self.assertAllEqual(graph_function(), 1.0)
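  # Shape inference should see the full shape of a captured resource variable,
  # and reading the returned handle recovers that shape.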
def testDefunShapeInferenceWithCapturedResourceVariable(self):
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# We do not return v directly since the tensor conversion function of
# ResourceVariable returns the read value and not the resource itself.
return v._handle
compiled = def_function.function(f)
var_handle = compiled()
self.assertEqual(var_handle.dtype, dtypes.resource)
self.assertEqual(var_handle.shape, tensor_shape.TensorShape([]))
var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)
self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))
def testShapeInferenceForMoreSpecificInput(self):
def f(a):
return array_ops.reshape(a, [-1, 3])
signature = [tensor_spec.TensorSpec(None, dtypes.float32)]
compiled = def_function.function(f, input_signature=signature)
@def_function.function
def use_f():
inputs = array_ops.zeros([10, 10, 3])
self.assertAllEqual(f(inputs).shape, compiled(inputs).shape)
use_f()
def testFuncListAttr(self):
@function.defun
def test_function(val):
def fn1():
return array_ops.ones([10])
fn2 = lambda: array_ops.ones([10]) * 2
def fn3(x=3):
return array_ops.ones([10]) * x
fn4 = functools.partial(fn3, x=4)
fn5 = functools.partial(fn3, 5)
return gen_functional_ops.case(val, [], [dtypes.float32],
[function.defun(f).get_concrete_function()
for f in (fn1, fn2, fn3, fn4, fn5)])
ones = array_ops.ones([10])
self.assertAllEqual([ones], test_function(0))
self.assertAllEqual([ones * 2], test_function(1))
self.assertAllEqual([ones * 3], test_function(2))
self.assertAllEqual([ones * 4], test_function(3))
self.assertAllEqual([ones * 5], test_function(4))
self.assertAllEqual([ones * 5], test_function(22)) # default branch
@test_util.enable_control_flow_v2
def testVariableInLoopInFunction(self):
@function.defun
def test_function():
def loop_test(_):
return False
def loop_body(_):
return variable_scope.get_variable('a', shape=())
return control_flow_ops.while_loop(loop_test, loop_body, [0.0])
self.assertEqual(test_function().shape, [])
def testDefunShapeInferenceWithCapturedResourceVariableInGraphMode(self):
with context.graph_mode():
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# We do not return v directly since the tensor conversion function of
# ResourceVariable returns the read value and not the resource itself.
return v._handle
compiled = def_function.function(f)
var_handle = compiled()
self.assertEqual(var_handle.dtype, dtypes.resource)
self.assertEqual(var_handle.shape, tensor_shape.TensorShape([]))
var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)
self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))
def testDefunShapeInferenceWithCapturedVariableInGraphMode(self):
with context.graph_mode():
v = variables.Variable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# Check that shape inference works while creating the defun
compiled = def_function.function(f)
compiled()
def testDefunShapeInferenceWithCapturedTensorListInGraphMode(self):
with context.graph_mode():
tensor_list = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=ops.convert_to_tensor([], dtype=dtypes.int32))
tensor_list = list_ops.tensor_list_push_back(tensor_list,
constant_op.constant(1.0))
tensor_list = list_ops.tensor_list_push_back(tensor_list,
constant_op.constant(2.0))
def f():
tl, value = list_ops.tensor_list_pop_back(
tensor_list, element_dtype=dtypes.float32)
self.assertEqual(value.shape, tensor_shape.TensorShape([]))
return tl
compiled = def_function.function(f)
output_tensor_list = compiled()
_, value = list_ops.tensor_list_pop_back(
output_tensor_list, element_dtype=dtypes.float32)
self.assertEqual(value.shape, tensor_shape.TensorShape([]))
@test_util.run_in_graph_and_eager_modes
def testDefunForcesResourceVariables(self):
def variable_creator():
self.v = variables.Variable(0.0)
return self.v.read_value()
self.v = None
defined = function.defun(variable_creator)
defined() # Create the variable.
self.assertIsInstance(
self.v, resource_variable_ops.ResourceVariable)
def testRunMetadata(self):
@def_function.function
def f(x):
return x * x
with ops.device('cpu:0'):
context.enable_run_metadata()
f(constant_op.constant(1.0))
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
self.assertLen(run_metadata.partition_graphs, 1)
def testGraphModeCaptureVariable(self):
with context.graph_mode(), self.cached_session():
class HasAVar(object):
def __init__(self):
self.v = resource_variable_ops.ResourceVariable(1.0)
def call(self):
return self.v * 2
o = HasAVar()
self.evaluate(variables.global_variables_initializer())
call = def_function.function(o.call)
op = call()
self.assertAllEqual(self.evaluate(op), 2.0)
def testGraphModeManyFunctions(self):
with ops.Graph().as_default(), self.cached_session():
@def_function.function
def f(x):
return x * x
@def_function.function
def g(x):
return f(x) + 1
self.assertAllEqual(g(constant_op.constant(2.0)), 5.0)
def testDict(self):
@def_function.function
def f(x):
return {'name': x + 1}
self.assertAllEqual(f(constant_op.constant(1.0))['name'], 2.0)
def testWeakrefInputsRejected(self):
@def_function.function
def f(x):
return x
class Dummy:
pass
o = Dummy()
wr = weakref.ref(o)
with self.assertRaisesRegex(ValueError, 'weakref'):
f(wr)
def testTensorConversionWithDefun(self):
@def_function.function
def f(x):
return math_ops.add(x, constant_op.constant(3))
self.assertAllEqual(5, f(constant_op.constant(2)))
def testTensorConversionCall(self):
@def_function.function
def f(x):
return math_ops.add(x, constant_op.constant(3))
@def_function.function
def g(x):
return f(f(x))
self.assertAllEqual(8, g(constant_op.constant(2)))
def testCallShape(self):
@def_function.function
def f(x):
return x + 1
@def_function.function
def g(x):
x = f(x)
self.assertEqual(x.shape.as_list(), [])
return None
g(constant_op.constant(1.0))
def testNestedDefunWithNoOutputAndTapedInput(self):
three = resource_variable_ops.ResourceVariable(3.0, name='v')
@def_function.function
def f(x):
# This function intentionally takes a taped variable as input,
# but does not return any values
math_ops.add(x, three)
@def_function.function
def g(x):
y = math_ops.add(x, three)
f(y)
g(three)
def testGatherResourceWithDefun(self):
with ops.device('cpu:0'):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
defined = def_function.function(sum_gather)
self.assertAllEqual(sum_gather(), defined())
@parameterized.named_parameters([
('IndexedSlicesWithDenseShape',
_example_indexed_slices_with_dense_shape,),
('IndexedSlicesWithoutDenseShape',
_example_indexed_slices_without_dense_shape,),
('RaggedTensorRaggedRank1', ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}),
('RaggedTensorRaggedRank2',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}),
('SparseTensor', sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}),
]) # pyformat: disable
def testReturnCompositeTensorWithDefun(self,
factory_fn,
factory_kwargs={},
input_signature=None):
input_ct = factory_fn(**factory_kwargs)
@def_function.function(input_signature=input_signature)
def f():
return input_ct
output_ct = f()
self.assertIsInstance(output_ct, type(input_ct))
nest.assert_same_structure(input_ct, output_ct, expand_composites=True)
input_flat = nest.flatten(input_ct, expand_composites=True)
output_flat = nest.flatten(output_ct, expand_composites=True)
for (input_component, output_component) in zip(input_flat, output_flat):
self.assertAllEqual(input_component, output_component)
@parameterized.named_parameters([
('IndexedSlicesWithDenseShape',
_example_indexed_slices_with_dense_shape,),
('IndexedSlicesWithoutDenseShape',
_example_indexed_slices_without_dense_shape,),
('RaggedTensorRaggedRank1',
ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}),
('RaggedTensorRaggedRank2',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}),
('SparseTensor',
sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}),
('RaggedTensorRaggedRank1WithSignature',
ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]},
[ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)]),
('RaggedTensorRaggedRank2WithSignature',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]},
[ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32)]),
('SparseTensorWithSignature',
sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]},
[sparse_tensor.SparseTensorSpec([None], dtypes.int32)]),
]) # pyformat: disable
def testCompositeAsArgumentTensorWithDefun(self,
factory_fn,
factory_kwargs={},
input_signature=None):
input_ct = factory_fn(**factory_kwargs)
@def_function.function(input_signature=input_signature)
def f(x):
return x
output_ct = f(input_ct)
self.assertIsInstance(output_ct, type(input_ct))
nest.assert_same_structure(input_ct, output_ct, expand_composites=True)
input_flat = nest.flatten(input_ct, expand_composites=True)
output_flat = nest.flatten(output_ct, expand_composites=True)
for (input_component, output_component) in zip(input_flat, output_flat):
self.assertAllEqual(input_component, output_component)
def testTracedCompositeDiscardsShapeInfo(self):
# SparseTensorSpec intentionally excludes info about the number of elements
# that are in a sparse tensor (which is recorded as st.indices.shape[0] and
# st.values.shape[0]). Similarly, RaggedTensorSpec intentionally excludes
# info about the total number of values in a RaggedTensor (stored as
# rt.values.shape[0]). This test checks that the placeholders created by
# tf.function() properly mask this shape info.
@def_function.function
def f(rt, st):
self.assertEqual(st.indices.shape.as_list()[:1], [None])
self.assertEqual(st.values.shape.as_list(), [None])
return (rt, st)
rt = ragged_factory_ops.constant([[1, 2], [3]])
st = sparse_tensor.SparseTensor([[0]], [0], [10])
f(rt, st)
@test_util.run_gpu_only
def testFunctionOnDevice(self):
x = constant_op.constant([1.]).gpu()
f = def_function.function(math_ops.add)
y = f(x, x).cpu()
self.assertAllEqual(y, [2.])
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testFunctionWithResourcesOnDifferentDevices(self):
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, gpu_result
defined = function.defun(sum_gather)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
expected = self.evaluate(sum_gather())
self.assertAllEqual(expected, self.evaluate(defined()))
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testOpInFunctionWithConflictingResourceInputs(self):
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='cpu')
v_also_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='also_cpu')
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='gpu')
@def_function.function
def resource_apply_adam():
training_ops.resource_apply_adam(
v_cpu.handle,
v_gpu.handle,
v_also_cpu.handle,
1.0, # beta1_power
1.0, # beta2_power
1.0, # learning_rate
1.0, # beta1
1.0, # beta2
1.0, # epsilon,
[1.0, 1.0, 1.0], # grad
False) # use_locking
return None
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Cannot place the graph because a reference or resource edge connects '
'colocation groups with incompatible assigned devices'):
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(resource_apply_adam())
@test_util.run_gpu_only
def testFunctionHandlesInputsOnDifferentDevices(self):
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = def_function.function(array_ops.reshape)
value = constant_op.constant([1., 2.]).gpu()
shape = constant_op.constant([2, 1])
reshaped = reshape(value, shape).cpu()
self.assertAllEqual(reshaped, [[1], [2]])
@test_util.run_gpu_only
def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self):
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = def_function.function(array_ops.reshape)
value = constant_op.constant([1., 2.])
shape = constant_op.constant([2, 1]).gpu()
reshape(value, shape) # No error is raised
def testNoneOutput(self):
@def_function.function
def my_function(_):
return None
self.assertAllEqual(my_function(1), None)
def testNestedFunctions(self):
# TensorFlow function (which is what would be used in TensorFlow graph
# construction).
@tf_function.Defun(dtypes.int32, dtypes.int32)
def add(a, b):
return math_ops.add(a, b)
@def_function.function
def add_one(x):
return add(x, 1)
self.assertAllEqual(3, add_one(constant_op.constant(2)))
def testVariableCaptureInNestedFunctions(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32)
@def_function.function
def inner_read():
return v.read_value()
@def_function.function
def outer():
return inner_read()
self.assertEqual(1, int(outer()))
def testReturnCapturedEagerTensor(self):
t = constant_op.constant(1)
@def_function.function
def read():
return t
self.assertEqual(1, int(read()))
def testReturnCapturedGraphTensor(self):
with context.graph_mode(), self.cached_session():
t = constant_op.constant(1)
@def_function.function
def read():
return t
self.assertEqual(1, int(self.evaluate(read())))
def testSequenceInputs(self):
clip_by_global_norm = def_function.function(clip_ops.clip_by_global_norm)
t_list = [constant_op.constant(1.0), constant_op.constant(2.0)]
clipped_list, global_norm = clip_by_global_norm(t_list,
constant_op.constant(.2))
for t in clipped_list:
self.assertIsInstance(t, ops.Tensor)
self.assertIsInstance(global_norm, ops.Tensor)
def testNestedSequenceInputs(self):
def my_op(inputs):
a, b, c = inputs
e, f = b
g, h = e
return [a + a, [tuple([f + f, g + g]), h + h], c + c], a + f + g + h + c
my_eager_op = def_function.function(my_op)
ret = my_eager_op([
constant_op.constant(1), [(constant_op.constant(2),
constant_op.constant(3)),
constant_op.constant(4)],
constant_op.constant(5)
])
self.assertLen(ret, 2)
self.assertAllEqual(ret[0][0], 2)
self.assertAllEqual(ret[0][1][0][0], 8)
self.assertAllEqual(ret[0][1][0][1], 4)
self.assertIsInstance(ret[0][1][0], tuple)
self.assertAllEqual(ret[0][1][1], 6)
self.assertAllEqual(ret[0][2], 10)
self.assertAllEqual(ret[1], 15)
def testVariableNamesRespectNameScopesWithDefun(self):
@def_function.function
def create_variable():
with ops.name_scope('foo', skip_on_eager=False):
v = resource_variable_ops.ResourceVariable(0.0, name='bar')
self.assertEqual(v.name, 'foo/bar:0')
create_variable()
def testVariableNamesRespectNameScopesWithDefunInGraph(self):
with context.graph_mode():
@def_function.function
def create_variable():
with ops.name_scope('foo', skip_on_eager=False):
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name='bar')
self.assertEqual(v.name, 'foo/bar:0')
with ops.get_default_graph().as_default():
create_variable()
@test_util.assert_no_new_pyobjects_executing_eagerly
def testCallOptionsMemory(self):
@function.defun
def model(x):
return x + constant_op.constant(1.)
# This happens with a lot of option toggles, e.g. soft device placement
context.context().function_call_options = None
model(constant_op.constant(2.))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testLayerInDefun(self):
conv = convolutional.Conv2D(
filters=1,
kernel_size=2,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.zeros_initializer())
@function.defun
def model(x):
return conv(x)
x = array_ops.ones([1, 2, 2, 1])
y = model(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[[[4.0]]]], self.evaluate(y))
# Variable lifting is somewhat different between defun/tf.function, so testing
# device placement on both makes sense.
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
@test_util.run_in_graph_and_eager_modes
def testVariablesPlacedOnOutsideDevice(self, function_decorator):
class _Obj(object):
def __init__(self):
self.v = None
@function_decorator
def f(self):
if self.v is None:
self.v = variables.Variable(1.)
return self.v + 1.
has_device = _Obj()
with ops.device('cpu:0'):
has_device.f()
self.assertIn('CPU', has_device.v.device)
@test_util.run_in_graph_and_eager_modes
def testMultipleDeviceCheck(self):
def f():
with ops.device('cpu'):
return test_ops.device_placement_op()
func = function.defun(f)
with ops.device('cpu:0'):
output = self.evaluate(func())
self.assertIn(compat.as_bytes('CPU:0'), output)
@test_util.run_in_graph_and_eager_modes
def testDeviceAnnotationsRespected(self):
def multi_device_fn():
with ops.device('/cpu:0'):
s0 = test_ops.device_placement_op()
with ops.device('/cpu:1'):
s1 = test_ops.device_placement_op()
with ops.device('/cpu:2'):
s2 = test_ops.device_placement_op()
s3 = test_ops.device_placement_op()
return s0, s1, s2, s3
defined = function.defun(multi_device_fn)
outputs = self.evaluate(defined())
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
with ops.device('/cpu:3'):
outputs = self.evaluate(defined())
# All function definitions are agnostic to call site devices.
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
self.assertIn(compat.as_bytes('CPU:3'), outputs[3])
with ops.device('/cpu:0'):
outputs = self.evaluate(defined())
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
self.assertIn(compat.as_bytes('CPU:0'), outputs[3])
@test_util.run_in_graph_and_eager_modes
def testCallingGraphFunctionOnDifferentDevice(self):
def func():
return constant_op.constant(0)
defined = def_function.function(func)
with ops.device('cpu:0'):
cpu_graph_function = defined.get_concrete_function()
with ops.device('cpu:0'):
self.assertEqual(
self.evaluate(cpu_graph_function()), self.evaluate(func()))
with ops.device('cpu:1'):
self.assertEqual(0., self.evaluate(cpu_graph_function()))
with ops.device(None):
self.assertEqual(0., self.evaluate(cpu_graph_function()))
default_graph_function = defined.get_concrete_function()
self.assertEqual(
self.evaluate(default_graph_function()), self.evaluate(func()))
with ops.device('cpu:1'):
self.assertEqual(0., self.evaluate(default_graph_function()))
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testColocateWithRespected(self):
# TODO(b/113291792): Use multiple CPUs instead of a GPU.
with ops.device('cpu:0'):
x = array_ops.identity(1.0)
with ops.device('gpu:0'):
y = array_ops.identity(1.0)
@def_function.function
def foo():
return test_ops.device_placement_op()
with ops.colocate_with(x):
self.assertIn(compat.as_bytes('CPU:0'), self.evaluate(foo()))
with ops.colocate_with(y):
self.assertIn(compat.as_bytes('GPU:0'), self.evaluate(foo()))
def testVariablesAreTracked(self):
v = resource_variable_ops.ResourceVariable(1.0)
def foo(x):
return v * x
defined = def_function.function(foo)
x = constant_op.constant([1.0])
self.assertEqual(1., self.evaluate(defined(x)))
v.assign(2.)
x = constant_op.constant([1.0, 2.0])
self.assertAllEqual([2., 4.], self.evaluate(defined(x)))
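  # Distinct Python objects whose __hash__ values collide must still get
  # separate cache entries.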
def testCacheObjectHashCollisions(self):
class Foo(object):
def __hash__(self):
return 42
def func(foo):
return constant_op.constant([id(foo)])
defined = function.defun(func)
foo_1 = Foo()
defined(foo_1)
self.assertLen(total_function_cache(defined), 1)
foo_2 = Foo()
defined(foo_2)
self.assertLen(total_function_cache(defined), 2)
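  # The next three tests check that tensors differing in dtype, shape, or both
  # get separate cache entries.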
def testCacheTensorDtypeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([[1.0]], dtype=dtypes.complex128)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorShapeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([1.0], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorShapeDtypeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([1.0], dtype=dtypes.complex128)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorUnknownShapesCollisionRelaxedShapes(self):
def func(t):
return t + t
with context.graph_mode(), self.cached_session():
defined = function.defun(func, experimental_relax_shapes=True)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[])
defined(p)
self.assertLen(total_function_cache(defined), 1)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
defined(p)
self.assertLen(total_function_cache(defined), 2)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[2])
defined(p)
      # Gradual shape relaxation is performed: the common shape between [1] and
      # [2] is one containing unknown dimensions.
self.assertLen(total_function_cache(defined), 2)
# pylint: disable=protected-access
self.assertLen(defined._function_cache.arg_relaxed_specs, 1)
relaxed_specs = (
list(defined._function_cache.arg_relaxed_specs.values())[0])
self.assertLen(relaxed_specs, 1)
relaxed_shape = relaxed_specs[0].shape
# pylint: enable=protected-access
self.assertEqual(relaxed_shape.rank, 1)
self.assertEqual(tensor_shape.dimension_value(relaxed_shape[0]), None)
t = constant_op.constant([1.0, 1.0, 1.0], dtype=dtypes.float32)
defined(t)
# Shape (3,) matches the relaxed shape TensorShape([None])
self.assertLen(total_function_cache(defined), 2)
def testPythonFunctionWithDefaultArgs(self):
def func(foo, bar=1, baz=2):
del foo
del bar
del baz
return
defined = function.defun(func)
defined(0, baz=20)
self.assertLen(total_function_cache(defined), 1)
defined(1) # bar=1, baz=2
self.assertLen(total_function_cache(defined), 2)
# This matches the previous call.
defined(foo=1)
self.assertLen(total_function_cache(defined), 2)
defined(1, 2, 3)
self.assertLen(total_function_cache(defined), 3)
# This matches the previous call.
defined(1, bar=2, baz=3)
self.assertLen(total_function_cache(defined), 3)
# This matches the previous call.
defined(1, baz=3, bar=2)
self.assertLen(total_function_cache(defined), 3)
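  # Passing two distinct iterators and passing the same iterator twice are
  # traced separately; repeating either pattern does not retrace.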
def testDatasetIteratorCaching(self):
def func(it1, it2):
next(it1)
next(it2)
return 0
defined = function.defun(func)
d = dataset_ops.DatasetV2.from_tensor_slices([1, 2, 3])
it1 = iter(d)
it2 = iter(d)
_ = defined(it1, it2) # The two iterators are different
self.assertLen(total_function_cache(defined), 1)
it3 = iter(d)
it4 = iter(d)
_ = defined(it3, it4) # The two iterators are different, should not retrace
self.assertLen(total_function_cache(defined), 1)
it5 = iter(d)
_ = defined(it5, it5) # The two iterators are the same, should retrace
self.assertLen(total_function_cache(defined), 2)
it6 = iter(d)
_ = defined(it6, it6) # The two iterators are the same, should not retrace
self.assertLen(total_function_cache(defined), 2)
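  # defun applied to a functools.partial should match calling the partial
  # directly.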
def testFunctoolsPartialUnwrappedCorrectly(self):
def full_function(a, b, c=3):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
a, b, c = partial(2)
defined = function.defun(partial)
func_a, func_b, func_c = defined(2)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
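  # Inputs matching the declared input_signature, including different sizes for
  # unspecified dimensions, reuse a single traced function.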
def testInputSignatureWithMatchingInputs(self):
def foo(a):
self.assertEqual(a.shape, (2,))
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([2])
self.assertAllEqual(a, defined(a))
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(a, defined.get_concrete_function()(a))
self.assertAllEqual(a, defined.get_concrete_function(a)(a))
self.assertAllEqual(a, defined.get_concrete_function(
tensor_spec.TensorSpec((2,), dtype=dtypes.float32))(a))
self.assertLen(total_function_cache(defined), 1)
def bar(a):
self.assertEqual(a._shape_tuple(), (2, None))
return a
signature = [tensor_spec.TensorSpec((2, None), dtypes.float32)]
defined = function.defun(bar, input_signature=signature)
a = array_ops.ones([2, 1])
out = defined(a)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out, a)
# Changing the second dimension shouldn't create a new function.
b = array_ops.ones([2, 3])
out = defined(b)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out, b)
def testInputSignatureWithDictInPositionalArgs(self):
@function.defun
def f(*_args, **_kwargs):
return None
f(1, x=2)
self.assertLen(total_function_cache(f), 1)
f(1, x=2)
self.assertLen(total_function_cache(f), 1)
f(1, {'x': 2})
self.assertLen(total_function_cache(f), 2)
def testInputSignatureWithCompatibleInputs(self):
rank2_spec = tensor_spec.TensorSpec(shape=(None, None),
dtype=dtypes.float32)
@function.defun(input_signature=[rank2_spec])
def func(a):
self.assertEqual([None, None], a.shape.as_list())
return array_ops.shape(a)
self.assertAllEqual([3, 1], func([[0], [1.0], [1]]))
self.assertAllEqual([2, 2], func(numpy.array([[1, 1], [2, 2]])))
with self.assertRaisesRegex(ValueError, 'incompatible'):
func([0.0, 1.0, 2.0]) # Wrong shape.
with self.assertRaisesRegex(ValueError, 'incompatible'):
func([['wrong dtype']])
def testNestedInputSignatures(self):
def expected_foo(a, b):
return [a, b]
@function.defun(input_signature=[
[tensor_spec.TensorSpec((2, None), dtypes.float32)] * 2,
tensor_spec.TensorSpec((1,), dtypes.float32),
])
def foo(a, b):
self.assertEqual(a[0]._shape_tuple(), (2, None))
self.assertEqual(a[1]._shape_tuple(), (2, None))
self.assertEqual(b._shape_tuple(), (1,))
return [a, b]
a = array_ops.ones([2, 1])
b = array_ops.ones([1])
expected = expected_foo([a, a], b)
out = foo([a, a], b)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], a)
self.assertAllEqual(out[1], b)
# Changing the unspecified dimensions shouldn't create a new function.
a = array_ops.ones([2, 3])
b = array_ops.ones([2, 5])
c = array_ops.ones([1])
expected = expected_foo([a, b], c)
out = foo([a, b], c)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
# Passing compatible inputs should work.
a = a.numpy().tolist()
b = b.numpy().tolist()
c = c.numpy().tolist()
out = foo([a, b], c)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
def testNestedInputSignaturesWithDict(self):
def expected_bar(a):
return a
@function.defun(input_signature=[{
'a': tensor_spec.TensorSpec((2, None), dtypes.float32),
'b': tensor_spec.TensorSpec((2, None), dtypes.float32),
'c': tensor_spec.TensorSpec((1,), dtypes.float32)}])
def bar(a):
self.assertEqual(a['a']._shape_tuple(), (2, None))
self.assertEqual(a['b']._shape_tuple(), (2, None))
self.assertEqual(a['c']._shape_tuple(), (1,))
return a
a = array_ops.ones([2, 3])
b = array_ops.ones([1])
inputs = {'a': a, 'b': a, 'c': b}
expected = expected_bar(inputs)
out = bar(inputs)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out['a'], expected['a'])
self.assertAllEqual(out['b'], expected['b'])
self.assertAllEqual(out['c'], expected['c'])
# Passing compatible inputs should work.
a = a.numpy().tolist()
b = b.numpy().tolist()
inputs = {'a': a, 'b': a, 'c': b}
out = bar(inputs)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out['a'], expected['a'])
self.assertAllEqual(out['b'], expected['b'])
self.assertAllEqual(out['c'], expected['c'])
def testInputSignatureMustBeSequenceOfTensorSpecs(self):
def foo(a, b):
del a
del b
# Signatures must consist exclusively of `TensorSpec` objects.
signature = [(2, 3), tensor_spec.TensorSpec([2, 3], dtypes.float32)]
with self.assertRaisesRegex(TypeError, 'input_signature.*nested sequence'):
def_function.function(foo, input_signature=signature)
# Signatures must be either lists or tuples on their outermost levels.
signature = {'t1': tensor_spec.TensorSpec([], dtypes.float32)}
with self.assertRaisesRegex(
TypeError, 'input_signature must be either a '
'tuple or a list.*'):
function.defun(foo, input_signature=signature)
@test_util.run_in_graph_and_eager_modes
def testInputsIncompatibleWithSignatureRaisesError(self):
def foo(a):
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = def_function.function(foo, input_signature=signature)
# Invalid shapes.
with self.assertRaisesRegex(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([3]))
with self.assertRaisesRegex(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([2, 1]))
# Wrong number of arguments.
with self.assertRaisesRegex(TypeError, 'specifies 1 .* got 2'):
defined(array_ops.ones([2]), array_ops.ones([2]))
with self.assertRaisesRegex(ValueError,
'Structure of Python function inputs.*'):
defined()
with self.assertRaisesRegex(ValueError,
'inputs incompatible with input_signature'):
defined.get_concrete_function(
tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.float32))
def testMismatchedConcreteSignatureRaisesError(self):
@def_function.function
def run_test():
@def_function.function
def f(x):
return x
with self.assertRaisesRegex(
TypeError, 'ConcreteFunction .* was constructed .* but was called'):
f.get_concrete_function(1)(constant_op.constant(1))
with self.assertRaisesRegex(TypeError, r'f\(x\) expected .* but got .*'):
f.get_concrete_function(constant_op.constant(1))(1)
with self.assertRaisesRegex(
TypeError, 'ConcreteFunction .* was constructed .* but was called'):
f.get_concrete_function(1)(2)
run_test()
def testInputsIncompatibleWithNestedSignatureRaisesError(self):
def foo(a, b):
return [a, b]
signature = [[tensor_spec.TensorSpec((1,), dtypes.float32)] * 2,
[tensor_spec.TensorSpec((1,), dtypes.float32)] * 2]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([1])
with self.assertRaisesRegex(ValueError,
'Structure of Python function inputs.*'):
defined([a, a, a], [a])
with self.assertRaisesRegex(ValueError,
'Structure of Python function inputs.*'):
defined([a], [a, a, a])
defined([a, a], [a, a])
def testUnderspecifiedInputSignature(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
])
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
x = constant_op.constant(1.0)
with self.assertRaisesRegex(
TypeError, 'got keyword argument `training` '
'that was not included in input_signature'):
foo(x, training=True)
with self.assertRaisesRegex(
TypeError, 'got keyword argument `training` '
'that was not included in input_signature'):
foo(x, training=False)
self.assertAllEqual(x.numpy(), foo(x).numpy())
def testInputSignatureWithPartialFunction(self):
def full_function(a, b, c=3.0):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
a, b, c = partial(2.0)
signature = [tensor_spec.TensorSpec([], dtypes.float32)]
defined = function.defun(partial, input_signature=signature)
x = constant_op.constant(2.0)
func_a, func_b, func_c = defined(x)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
def testInputSignatureConversionWithDefaultArg(self):
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
signature = [
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.bool),
]
defined = def_function.function(foo, input_signature=signature)
a = constant_op.constant(1.0)
self.assertAllEqual(a.numpy(), defined(a))
self.assertAllEqual(a.numpy(), defined(a, training=True))
self.assertAllEqual(-a.numpy(), defined(a, training=False))
def testInputSignatureWithKeywordPositionalArgs(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int64)
])
def foo(flt, integer):
return flt, integer
flt = constant_op.constant(1.0)
integer = constant_op.constant(2, dtypes.int64)
out1, out2 = foo(flt, integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt=flt, integer=integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(integer=integer, flt=flt)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt, integer=integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
def testInputSignatureWithKeywordArgs(self):
def foo(a, b, **kwargs):
del kwargs
return a, b
x = function.defun(
foo,
input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int32)
]).get_concrete_function()
result = x(constant_op.constant(5.0), constant_op.constant(5))
self.assertAllEqual(result, [5.0, 5])
def testInputSignatureWithCompositeTensors(self):
def f(rt):
self.assertEqual(rt.values.shape.as_list(), [None])
self.assertEqual(rt.row_splits.shape.as_list(), [4])
return rt
signature = [ragged_tensor.RaggedTensorSpec(
shape=[3, None], dtype=dtypes.int32)]
defined = function.defun(f, input_signature=signature)
rt1 = ragged_factory_ops.constant([[1], [], [2, 3, 4]])
out1 = defined(rt1)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out1.values, rt1.values)
self.assertAllEqual(out1.row_splits, rt1.row_splits)
# Changing the row lengths shouldn't create a new function.
rt2 = ragged_factory_ops.constant([[1, 2], [3, 4], [5]])
out2 = defined(rt2)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out2.values, rt2.values)
self.assertAllEqual(out2.row_splits, rt2.row_splits)
# Different number of rows
rt3 = ragged_factory_ops.constant([[1, 2], [3, 4], [5], [6]])
with self.assertRaisesRegex(ValueError, 'incompatible'):
defined(rt3)
# Different dtype
rt4 = ragged_factory_ops.constant([[1.0, 2.0], [], [3.0]])
with self.assertRaisesRegex(ValueError, 'Structure .* does not match'):
defined(rt4)
# Different rank
rt5 = ragged_factory_ops.constant([[[1]], [[2]], [[3]]])
with self.assertRaisesRegex(ValueError, 'does not match'):
defined(rt5)
def testInputSignatureWithVariableArgs(self):
def f(v):
v.assign_add(1)
signature = [
resource_variable_ops.VariableSpec(shape=[], dtype=dtypes.int32)
]
defined = function.defun(f, input_signature=signature)
v1 = variables.Variable(0)
v2 = variables.Variable(0)
defined(v1)
self.assertEqual(v1.numpy(), 1)
self.assertEqual(v2.numpy(), 0)
defined(v=v2)
self.assertEqual(v1.numpy(), 1)
self.assertEqual(v2.numpy(), 1)
def testInputSignatureWithKeywordOnlyArgs(self):
def f(a, b, c=3, *, d=4):
self.assertIsInstance(a, ops.Tensor)
self.assertIsInstance(b, ops.Tensor)
self.assertIsInstance(c, int)
self.assertIsInstance(d, (int, ops.Tensor))
return a + b + c + d
signature = [
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32),
]
defined = function.defun(f, input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 10)
defined = function.defun(
functools.partial(f, c=4), input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 11)
defined = function.defun(
functools.partial(f, d=5), input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 11)
defined = function.defun(
functools.partial(f, d=array_ops.constant(5)),
input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 11)
mod = module.Module()
save(mod, '/tmp/kwonlyf', defined.get_concrete_function(*signature))
loaded = load('/tmp/kwonlyf')
result = loaded.signatures['serving_default'](
a=array_ops.constant(1), b=array_ops.constant(2))
self.assertEqual(result['output_0'].numpy(), 11)
def testInputSignatureWithKeywordOnlyArgsNoDefaults(self):
signature = [
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32),
]
def test_func(a, *, b):
return a + b
with self.assertRaisesRegex(
ValueError, "keyword-only arguments must have default values.*'b'"):
function.defun(test_func, input_signature=signature)
test_func_lambda = lambda a, *, b: a + b
with self.assertRaisesRegex(
ValueError, "keyword-only arguments must have default values.*'b'"):
function.defun(test_func_lambda, input_signature=signature)
def testTensorKeywordArguments(self):
def foo(a, b):
del a
return b
defined = function.defun(foo)
a = constant_op.constant(2.0)
b = constant_op.constant([1.0, 2.0])
one = defined(a, b)
self.assertLen(total_function_cache(defined), 1)
two = defined(a=a, b=b)
self.assertLen(total_function_cache(defined), 1)
three = defined(b=b, a=a)
self.assertLen(total_function_cache(defined), 1)
four = defined(a, b=b)
self.assertLen(total_function_cache(defined), 1)
# The next call corresponds to a new input signature, hence
# we expect another function to be defined.
five = defined(b, a)
self.assertLen(total_function_cache(defined), 2)
six = defined(a=b, b=a)
self.assertLen(total_function_cache(defined), 2)
seven = defined(b=a, a=b)
self.assertLen(total_function_cache(defined), 2)
self.assertAllEqual(one, [1.0, 2.0])
self.assertAllEqual(two, [1.0, 2.0])
self.assertAllEqual(three, [1.0, 2.0])
self.assertAllEqual(four, [1.0, 2.0])
self.assertAllEqual(five, 2.0)
self.assertAllEqual(six, 2.0)
self.assertAllEqual(seven, 2.0)
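  # def_function.function works on instance methods, including methods that
  # call other, undecorated methods on self.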
def testDefuningInstanceMethod(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
def one(self, tensor):
return tensor
@def_function.function
def two(self, tensor, other=integer):
return self.one(tensor), other
foo = Foo()
t = constant_op.constant(1.0)
one, two = foo.two(t)
self.assertEqual(one.numpy(), 1.0)
self.assertEqual(two.numpy(), 2)
def testDefuningInstanceMethodWithDefaultArgument(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
@def_function.function
def func(self, other=integer):
return other
foo = Foo()
self.assertEqual(foo.func().numpy(), int(integer))
def testPythonCallWithSideEffects(self):
state = []
@def_function.function
def side_effecting_function():
state.append(0)
side_effecting_function()
self.assertAllEqual(state, [0])
# The second invocation should call the graph function, which shouldn't
# trigger the list append.
side_effecting_function()
self.assertAllEqual(state, [0])
# Whereas calling the python function directly should create a side-effect.
side_effecting_function.python_function()
self.assertAllEqual(state, [0, 0])
def testFunctionWithNestedFunctionCallAndSideEffects(self):
v1 = variables.Variable(1.0)
v2 = variables.Variable(1.0)
@def_function.function
def add_one(a):
a.assign_add(1.0)
    # Grappler will inline calls to `add_one` into the function body; we check
    # that all side-effects were executed.
@def_function.function
def side_effecting_function(a, b):
add_one(a)
add_one(b)
return a + b
result = side_effecting_function(v1, v2)
self.assertEqual(result.numpy(), 4.0)
def testFunctionWithExtraAttributes(self):
@function.defun_with_attributes(attributes={'experimental_1': 'value1',
'experimental_2': 2})
def matmul(x, y):
return math_ops.matmul(x, y)
def add(x, y):
return math_ops.add(x, y)
defun_add = function.defun_with_attributes(
add, attributes={'experimental_3': True, 'experimental_4': 1.0})
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t)
double = defun_add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 2)
functions = list(graph._functions.values())
self.assertRegex(functions[0].definition.signature.name, '.*matmul.*')
attrs = functions[0].definition.attr
self.assertLen(attrs, 2)
self.assertEqual(attrs['experimental_1'].s, b'value1')
self.assertEqual(attrs['experimental_2'].i, 2)
self.assertRegex(functions[1].definition.signature.name, '.*add.*')
attrs = functions[1].definition.attr
self.assertLen(attrs, 2)
self.assertEqual(attrs['experimental_3'].b, True)
self.assertEqual(attrs['experimental_4'].f, 1.0)
# pylint: enable=protected-access
def testFunctionWithInvalidAttribute(self):
@function.defun_with_attributes(attributes={'experimental_1': ['value1']})
def add(x, y):
return math_ops.add(x, y)
with self.assertRaisesRegex(ValueError,
'Attribute experimental_1 must be .* Got .*'):
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
add(t, t)
def testRegisterFunction(self):
@function.defun
def add(x, y):
return math_ops.add(x, y)
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
function.register(add, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 6)
        # Two sets of functions; each set is (inference, forward, backward).
functions = list(graph._functions.values())
captured_function_names = [
f.definition.signature.name for f in functions
]
expected_func_name_regex = [
'.*inference.*matmul.*',
'.*forward.*matmul.*',
'.*inference.*backward.*matmul.*',
'.*inference.*add.*',
'.*forward.*add.*',
'.*inference.*backward.*add.*',
]
for i in range(len(functions)):
self.assertRegex(captured_function_names[i],
expected_func_name_regex[i])
        # Check that the forward and backward functions have the correct attributes.
self.assertEqual(
functions[1].definition.attr['backward_function_name'].s,
functions[2].name)
self.assertEqual(
functions[2].definition.attr['forward_function_name'].s,
functions[1].name)
self.assertEqual(
functions[4].definition.attr['backward_function_name'].s,
functions[5].name)
self.assertEqual(
functions[5].definition.attr['forward_function_name'].s,
functions[4].name)
sq = defun_matmul(t, t)
double = add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
        # Make sure the pre-registered function is used, and no other function
        # is added.
self.assertLen(graph._functions, 6)
functions = list(graph._functions.values())
for i in range(len(functions)):
self.assertEqual(captured_function_names[i],
functions[i].definition.signature.name)
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
def testRegisterConcreteFunction(self, function_decorator):
@function_decorator
def py_add(x, y):
return math_ops.add(x, y)
py_add(array_ops.ones([]), array_ops.ones([]))
add = py_add.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
@function_decorator
def py_composite(x, y):
return x, add(x, y)
py_composite(array_ops.ones([]), array_ops.ones([]))
composite = py_composite.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
composite.add_to_graph()
composite.add_gradient_functions_to_graph()
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 6)
        # Two sets of functions; each set is (inference, forward, backward).
functions = list(graph._functions.values())
captured_function_names = [
f.definition.signature.name for f in functions
]
expected_func_name_regex = [
'.*inference.*py_composite.*',
'.*inference.*py_add.*',
'.*forward.*py_composite.*',
'.*forward.*py_add.*',
'.*inference.*backward.*py_composite.*',
'.*inference.*backward.*py_add.*',
]
for expected, found in zip(
expected_func_name_regex,
captured_function_names):
self.assertRegex(found, expected)
composite_t, composite_double = composite(t, t)
double = add(t, t)
self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(double))
self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(composite_double))
self.assertAllEqual([[1, 2], [3, 4]], self.evaluate(composite_t))
      # Make sure the pre-registered function is used, and no other function
      # is added.
self.assertLen(graph._functions, 6)
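  # Captured eager tensors larger than _EAGER_CONST_THRESHOLD are captured as
  # placeholders, smaller ones are inlined as constants, and variables are
  # always captured as placeholders.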
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
def testEagerCaptures(self, function_decorator):
with context.eager_mode():
large_tensor = array_ops.ones(shape=(256,))
self.assertGreater(256, func_graph._EAGER_CONST_THRESHOLD)
small_tensor = array_ops.ones(shape=(4,))
self.assertLessEqual(4, func_graph._EAGER_CONST_THRESHOLD)
v = resource_variable_ops.ResourceVariable(0.0)
for captured, op_type in [(large_tensor, 'Placeholder'),
(small_tensor, 'Const'), (v, 'Placeholder')]:
@function_decorator
def test_fn():
return captured + 1 # pylint: disable=cell-var-from-loop
g = test_fn.get_concrete_function().graph
internal_captures = g.internal_captures
self.assertLen(internal_captures, 1)
self.assertEqual(internal_captures[0].op.type, op_type)
def testRegisterFunctionWithInputSignature(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(
matmul,
input_signature=[
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
])
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 3)
        # Test registering the function again with the cache; note the inputs
        # are ignored.
function.register(defun_matmul)
graph = ops.get_default_graph()
self.assertLen(graph._functions, 3)
def testRegisterFunctionWithCache(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[2.0, 3.0], [4.0, 5.0]])
function.register(defun_matmul, t, t)
function.register(defun_matmul, t2, t2)
graph = ops.get_default_graph()
        # Only one function is registered since the input params are of the same type.
# pylint: disable=protected-access
self.assertLen(graph._functions, 3)
def testCallingFunctionWithDifferentVariables(self):
@function.defun
def foo(v):
v.assign_add(1.0)
return v.read_value()
v = resource_variable_ops.ResourceVariable(0.0)
graph_function = foo.get_concrete_function(v)
self.assertLen(graph_function.inputs, 1)
self.assertEmpty(graph_function.captured_inputs)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(v)), 2.0)
w = resource_variable_ops.ResourceVariable(0.0)
@function.defun
def bar(v):
del v
return constant_op.constant(1.0)
graph_function = bar.get_concrete_function(v)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(w)), 1.0)
def testCallingFunctionWithNonTensorsFails(self):
@function.defun
def foo(x):
return x
graph_function = foo.get_concrete_function(constant_op.constant(1.0))
with self.assertRaises((TypeError, ValueError)):
graph_function('Not a Tensor.')
def testSwapImplementationWithGrapplerPlugin(self):
# Set the min_graph_nodes to -1 since the graph in this test is too small,
# and will be ignored by grappler if we don't set this.
rewrites = rewriter_config_pb2.RewriterConfig()
rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
rewrites.min_graph_nodes = -1
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrites, build_cost_model=1)
config_proto = config_pb2.ConfigProto(graph_options=graph_options)
with context.graph_mode(), self.cached_session(
config=config_proto, graph=ops.Graph(), use_gpu=True):
@function.defun_with_attributes(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'CPU'
})
def cpu_boost(x):
return math_ops.add(x, 2.0)
@function.defun_with_attributes(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'GPU'
})
def gpu_boost(x):
return math_ops.add(x, 4.0)
x = constant_op.constant(1.0)
function.register(cpu_boost, x)
y = gpu_boost(x)
y_value = self.evaluate(y)
if test.is_gpu_available():
self.assertEqual(y_value, 5.0)
else:
# Grappler fallback to use the CPU impl even called with GPU function.
self.assertEqual(y_value, 3.0)
@test_util.disable_tfrt('b/174712583: TFRT doesn\'t support behavior '
'equivalent to implementation_selector for function')
def testSwapImplementationInEager(self):
if not context.executing_eagerly():
self.skipTest('eager only')
# testSharedRendezvous sets the disable_meta_optimizer flag to True.
# If that subtest runs before this one, having the flag set to True
# will cause this subtest to fail. To avoid that scenario, explicitly
# set the disable_meta_optimizer flag to False here.
context.context().set_optimizer_experimental_options({
'min_graph_nodes': -1,
'implementation_selector': True,
'disable_meta_optimizer': False
})
@function.defun_with_attributes(
attributes={'api_implements': 'foo',
'api_preferred_device': 'CPU'})
def on_cpu(x):
return x + 2
@function.defun_with_attributes(
attributes={'api_implements': 'foo',
'api_preferred_device': 'GPU'})
def on_gpu(x):
return x + 4
@function.defun
def run_on_cpu(t):
function.register(on_cpu, t)
with ops.device('CPU:0'):
return on_gpu(t)
# Expect to run the on_cpu branch, regardless whether gpu is available.
self.assertEqual(run_on_cpu(constant_op.constant(1)).numpy(), 3)
def testDefunFunctionSeparateGraphs(self):
with context.graph_mode():
@function.defun
def add(x):
return x + 5
@function.defun
def maybe_add(x, should_add):
if should_add:
return add(x)
else:
return x
with ops.Graph().as_default():
x = constant_op.constant(11)
maybe_add(x, True)
self.assertLen(total_function_cache(maybe_add), 1)
self.assertLen(total_function_cache(add), 1)
maybe_add(x, False)
self.assertLen(total_function_cache(maybe_add), 2)
self.assertLen(total_function_cache(add), 1)
with ops.Graph().as_default():
x = constant_op.constant(11)
maybe_add(x, True)
self.assertLen(total_function_cache(maybe_add), 3)
self.assertLen(total_function_cache(add), 2)
def testCacheKeyOverlappingShapes(self):
@function.defun
def defined(t):
return t
defined(array_ops.zeros([12, 1]))
self.assertLen(total_function_cache(defined), 1)
defined(array_ops.zeros([1, 21]))
self.assertLen(total_function_cache(defined), 2)
@function.defun
def defined_again(t):
return defined(t)
defined_again.get_concrete_function(array_ops.zeros([12, 1]))
self.assertLen(total_function_cache(defined_again), 1)
defined_again.get_concrete_function(array_ops.zeros([1, 21]))
self.assertLen(total_function_cache(defined_again), 2)
def testCacheTensorSpecIdenticalToTensor(self):
@function.defun
def defined(t):
return t
z = array_ops.zeros([2, 2])
z_spec = tensor_spec.TensorSpec.from_tensor(z)
self.assertIs(
defined.get_concrete_function(z_spec), defined.get_concrete_function(z))
def testCacheKeyNestedLists(self):
@function.defun
def defined(l):
return l
a = constant_op.constant(1.)
b = constant_op.constant(2.)
c = constant_op.constant(3.)
defined([[a], b, c])
self.assertLen(total_function_cache(defined), 1)
defined([[a, b], c])
self.assertLen(total_function_cache(defined), 2)
def testCacheKeyAttrsClass(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class TestClass(object):
a = attr.ib()
b = attr.ib()
@function.defun
def defined(l):
return l
defined(
TestClass(
constant_op.constant(1.),
[constant_op.constant(2.),
constant_op.constant(3.)]))
self.assertLen(total_function_cache(defined), 1)
defined(
TestClass(
constant_op.constant(1.),
[constant_op.constant(2.),
constant_op.constant(3.)]))
self.assertLen(total_function_cache(defined), 1)
defined(
TestClass([constant_op.constant(1.),
constant_op.constant(2.)], constant_op.constant(3.)))
self.assertLen(total_function_cache(defined), 2)
def testDistinctVariablesNoRetracing(self):
@function.defun
def defined(a, b, c):
return a + b + c
x = resource_variable_ops.ResourceVariable(0.0)
y = resource_variable_ops.ResourceVariable(0.0)
z = resource_variable_ops.ResourceVariable(0.0)
# We generate cache keys based on unique combinations of resource ids.
defined(x, y, z)
self.assertLen(total_function_cache(defined), 1)
# Re-arranging arguments should not cause a cache miss
# because the three inputs are still distinct.
defined(z, y, x)
self.assertLen(total_function_cache(defined), 1)
def testRetracingOnDifferentVariableCombinationPatterns(self):
@function.defun
def defined(a, b, c):
return a + b + c
x = resource_variable_ops.ResourceVariable(0.0)
y = resource_variable_ops.ResourceVariable(0.0)
z = resource_variable_ops.ResourceVariable(0.0)
defined(x, y, z)
self.assertLen(total_function_cache(defined), 1)
# Retracing because the first two arguments are the same
defined(x, x, z)
self.assertLen(total_function_cache(defined), 2)
# Replacing x with y does not cause a cache miss
# because the combination stays the same as (x, x, z).
defined(y, y, z)
self.assertLen(total_function_cache(defined), 2)
# A different combination pattern causes a cache miss.
defined(z, y, y)
self.assertLen(total_function_cache(defined), 3)
defined(z, y, y)
self.assertLen(total_function_cache(defined), 3)
def testDeepcopyVariableNoRetracing(self):
@function.defun
def defined(a, b, c):
return a + b + c
x = resource_variable_ops.ResourceVariable(0.0)
y = resource_variable_ops.ResourceVariable(0.0)
z = resource_variable_ops.ResourceVariable(0.0)
defined(x, y, z)
self.assertLen(total_function_cache(defined), 1)
x_copy = copy.deepcopy(x)
defined(x_copy, y, z)
self.assertLen(total_function_cache(defined), 1)
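# Helper for the retracing tests below: returns every concrete function
# traced so far for the given def_function.function.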
def _total_function_cache_def_func(self, defined):
return defined._list_all_concrete_functions() # pylint: disable=protected-access
def testVariableRetracingOnDtypeChanges(self):
@def_function.function
def defined(a, b):
return a + b
x1 = resource_variable_ops.ResourceVariable(0.0)
x2 = resource_variable_ops.ResourceVariable(0.0)
defined(x1, x2)
self.assertLen(self._total_function_cache_def_func(defined), 1)
# Should expect retracing for new dtypes
y1 = resource_variable_ops.ResourceVariable(0)
y2 = resource_variable_ops.ResourceVariable(1)
defined(y1, y2)
self.assertLen(self._total_function_cache_def_func(defined), 2)
def testVariableRetracingDtypeShape(self):
@def_function.function
def defined(a, b):
return a + b
x1 = resource_variable_ops.ResourceVariable(0.0)
x2 = resource_variable_ops.ResourceVariable(0.0)
defined(x1, x2)
self.assertLen(self._total_function_cache_def_func(defined), 1)
y1 = resource_variable_ops.ResourceVariable([0.0, 1.0])
y2 = resource_variable_ops.ResourceVariable([0.0, 1.0])
defined(y1, y2)
self.assertLen(self._total_function_cache_def_func(defined), 2)
z1 = resource_variable_ops.ResourceVariable([[0.0, 1.0]])
z2 = resource_variable_ops.ResourceVariable([[0.0, 1.0]])
defined(z1, z2)
self.assertLen(self._total_function_cache_def_func(defined), 3)
def testDecoratedMethodInspect(self):
class DefunnedMiniModel(object):
@function.defun
def call(self, inputs, training=True):
pass
m = DefunnedMiniModel()
fullargspec = tf_inspect.getfullargspec(m.call)
self.assertIn('training', fullargspec.args)
def testFunctionModifiesInputList(self):
# Tests on `list` methods that do in-place modification, except `list.sort`,
# since it cannot even be "defunned" in the first place.
def get_list():
return [constant_op.constant(0.), constant_op.constant(1.)]
expected_msg = '.*() should not modify'
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def append(l):
l.append(constant_op.constant(0.))
append(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def extend(l):
l.extend([constant_op.constant(0.)])
extend(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def insert(l):
l.insert(0, constant_op.constant(0.))
insert(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def pop(l):
l.pop()
pop(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def reverse(l):
l.reverse()
reverse(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def remove(l):
l.remove(l[0])
remove(get_list())
# `list.clear` is a method that is in Py3 but not Py2
if sys.version.startswith('3'):
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def clear(l):
l.clear()
clear(get_list())
# One last test for keyword arguments
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def kwdappend(**kwargs):
l = kwargs['l']
l.append(constant_op.constant(0.))
kwdappend(l=get_list())
def testFunctionModifiesInputDict(self):
def get_dict():
return {'t1': constant_op.constant(0.), 't2': constant_op.constant(1.)}
expected_msg = '.* should not modify'
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def clear(m):
m.clear()
clear(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def pop(m):
m.pop('t1')
pop(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def popitem(m):
m.popitem()
popitem(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def update(m):
m.update({'t1': constant_op.constant(3.)})
update(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def setdefault(m):
m.setdefault('t3', constant_op.constant(3.))
setdefault(get_dict())
def testFunctionModifiesInputNest(self):
with self.assertRaisesRegex(ValueError, 'modify.* should not modify'):
@def_function.function
def modify(n):
n[0]['t1'].append(constant_op.constant(1.))
nested_input = [{
't1': [constant_op.constant(0.),
constant_op.constant(1.)],
},
constant_op.constant(2.)]
modify(nested_input)
with self.assertRaisesRegex(ValueError,
'modify_same_flat.* should not modify'):
# The flat list doesn't change whereas the true structure changes
@def_function.function
def modify_same_flat(n):
n[0].append(n[1].pop(0))
nested_input = [[constant_op.constant(0.)],
[constant_op.constant(1.),
constant_op.constant(2.)]]
modify_same_flat(nested_input)
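# context.function_executor_type selects which executor runs the traced
# function; an unknown executor name raises NotFoundError, while '',
# 'DEFAULT', and None fall back to the default executor.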
@test_util.disable_tfrt('b/173429686')
def testExecutorType(self):
@function.defun
def add_five(x):
return x + 5
self.assertEqual(
5,
add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())
with self.assertRaisesRegex(errors.NotFoundError, 'NON_EXISTENT_EXECUTOR'):
with context.function_executor_type('NON_EXISTENT_EXECUTOR'):
add_five(constant_op.constant(0, dtype=dtypes.int32))
for executor_type in ('', 'DEFAULT', None):
with context.function_executor_type(executor_type):
self.assertAllEqual(
5,
add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())
@test_util.assert_no_garbage_created
def testReferenceCycles(self):
fn = function.defun(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
def testFunctionStackInErrorMessage(self):
if context.executing_eagerly():
# TODO(b/122736651): Remove this skipTest once fixed.
self.skipTest('Error interpolation is not working when function is '
'invoked without PartitionedCallOp.')
@def_function.function()
def fn3(x):
return x + 2
@def_function.function()
def fn2(x):
check_ops.assert_equal(fn3(x), 3)
return 2
@def_function.function()
def fn(x):
return fn2(x)
with self.assertRaises(errors.InvalidArgumentError) as cm:
fn(2)
e = cm.exception
self.assertIn('fn -> fn2', e.message)
self.assertIn('node assert_equal/Assert/Assert (defined at', e.message)
self.assertNotIn('fn3', e.message)
@test_util.run_gpu_only
def testFunctionIsNotPinned(self):
"""Tests that functions aren't pinned to the CPU by the eager runtime."""
seed1, seed2 = 79, 25
shape = constant_op.constant([4, 7])
dtype = dtypes.float32
@def_function.function
def func():
with ops.device('GPU:0'):
return gen_random_ops.random_standard_normal(
shape, dtype=dtype, seed=seed1, seed2=seed2)
with ops.device('GPU:0'):
x = func()
self.assertRegex(x.device, 'GPU')
@test_util.run_in_graph_and_eager_modes
def testShapeCaching(self):
@function.defun
def func(x):
return array_ops.shape(x)
@function.defun(
input_signature=[tensor_spec.TensorSpec([None, None], dtypes.float32)])
def calls_func(x):
return func(x)
self.assertAllEqual([1, 1], self.evaluate(func(array_ops.zeros([1, 1]))))
self.assertAllEqual([2, 2], self.evaluate(func(array_ops.zeros([2, 2]))))
self.assertAllEqual(
[3, 3],
self.evaluate(calls_func(array_ops.zeros([3, 3]))))
def testLimitedRetracing(self):
trace_count = [0]
@function.defun
def func(x):
trace_count[0] += 1
return x
for _ in range(50):
func(constant_op.constant(3.))
func(constant_op.constant(4.))
func(constant_op.constant([[1., 2.]]))
func(constant_op.constant([[]]))
func(constant_op.constant([[3., 4.], [5., 6.]]))
func(constant_op.constant([[3., 4.], [5., 6.], [7., 8.]]))
# Tracing more than twice per input doesn't make sense.
self.assertLess(trace_count[0], 13)
def testLimitedRetracingWithCompositeTensors(self):
trace_count = [0]
@def_function.function
def f(x):
trace_count[0] += 1
return x
for i in range(10):
f(ragged_factory_ops.constant([[1, 2], [i]]))
f(ragged_factory_ops.constant([[1, 2], [], [3, 4, 5]]))
f(ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]]))
self.assertEqual(trace_count[0], 3)
def test_concrete_function_shape_mismatch(self):
@def_function.function
def f(argument_name):
return argument_name + 1.
f_concrete = f.get_concrete_function(constant_op.constant([1.]))
# Calling a function from eager doesn't do any shape checking above what
# kernels do while executing.
self.assertAllEqual(
[2., 3.],
f_concrete(constant_op.constant([1., 2.])).numpy())
@def_function.function
def g():
f_concrete(constant_op.constant([1., 2.]))
with self.assertRaisesRegex(ValueError, 'argument_name'):
g()
@test_util.run_in_graph_and_eager_modes
def test_shape_inference_with_symbolic_shapes(self):
@def_function.function
def _uses_symbolic_shapes(w, x, y):
x = array_ops.identity(x, name='name_collision')
x = array_ops.transpose(x, [1, 0, 2])
x_batch = array_ops.shape(x)[0]
y_batch = array_ops.shape(y)[0]
y *= w
n = y_batch // x_batch
return array_ops.reshape(y, [n, x_batch, -1])
conc = _uses_symbolic_shapes.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
@def_function.function
def _call_concrete():
c = constant_op.constant(1.)
array_ops.identity(c, name='name_collision')
output1 = conc(array_ops.ones([2]),
array_ops.ones([5, 4, 2]),
array_ops.ones([20, 2]))
self.assertEqual([5, 4, 2], output1.shape)
output2 = conc(array_ops.ones([3]),
array_ops.ones([5, 4, 3]),
array_ops.ones([40, 3]))
self.assertEqual([10, 4, 3], output2.shape)
return output1, output2
output1, output2 = _call_concrete()
self.assertEqual((5, 4, 2), self.evaluate(output1).shape)
self.assertEqual((10, 4, 3), self.evaluate(output2).shape)
def testAutoGraphContext(self):
@def_function.function
def test_fn():
self.assertEqual(
ag_ctx.control_status_ctx().status, ag_ctx.Status.ENABLED)
prev_status = ag_ctx.control_status_ctx().status
test_fn()
self.assertEqual(ag_ctx.control_status_ctx().status, prev_status)
@test_util.disable_tfrt('b/170435618')
def testCancelBeforeFunctionExecution(self):
if not context.executing_eagerly():
self.skipTest('eager only')
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
@def_function.function
def f():
return q.dequeue()
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
c_mgr.start_cancel()
with self.assertRaises(errors.CancelledError):
cancelable_func()
@test_util.disable_tfrt('b/170435618')
def testCancelBlockedFunctionExecution(self):
if not context.executing_eagerly():
self.skipTest('eager only')
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
@def_function.function
def f():
return q.dequeue()
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
def cancel_thread():
time.sleep(0.5)
c_mgr.start_cancel()
t = self.checkedThread(cancel_thread)
t.start()
with self.assertRaises(errors.CancelledError):
cancelable_func()
t.join()
@test_util.disable_tfrt('b/170435618')
def testCancelAfterFunctionExecution(self):
if not context.executing_eagerly():
self.skipTest('eager only')
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
q.enqueue(37)
@def_function.function
def f():
return q.dequeue()
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
self.assertAllEqual(37, cancelable_func().numpy())
# Cancellation after the function executes is a no-op.
c_mgr.start_cancel()
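# Function callbacks are invoked once for each newly traced concrete
# function; the tests below add, exercise, remove, and clear them.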
def testAddFunctionCallback(self):
functions = []
def function_callback(f, name, graph, inputs, outputs):
del name, graph, inputs, outputs
functions.append(f)
@def_function.function
def plus_one(x):
return x + 1
try:
function.add_function_callback(function_callback)
x_float32 = numpy.array(3.0, dtype=numpy.float32)
self.assertAllClose(plus_one(x_float32), 4.0)
self.assertLen(functions, 1)
# Function is already created. Executing it again should not invoke the
# function callback.
self.assertAllClose(plus_one(x_float32), 4.0)
self.assertLen(functions, 1)
# Signature change leads to a new Function being built.
x_float64 = numpy.array(3.0, dtype=numpy.float64)
self.assertAllClose(plus_one(x_float64), 4.0)
self.assertLen(functions, 2)
finally:
function.clear_function_callbacks()
def testFunctionCallbackAddOps(self):
file_name = os.path.join(self.get_temp_dir(), 'test')
def function_callback(f, name, graph, inputs, outputs):
del f, name, inputs
with graph.as_default():
printer = logging_ops.print_v2(
'hello',
output_stream='file://' + file_name
)
outputs[0].op._add_control_input(printer)
@def_function.function
def plus_one(x):
return x + 1
self.addCleanup(function.clear_function_callbacks)
function.add_function_callback(function_callback)
x_float32 = numpy.array(3.0, dtype=numpy.float32)
self.assertAllClose(plus_one(x_float32), 4.0)
with open(file_name, 'r') as f:
self.assertEqual(f.read().strip(), 'hello')
def testRemoveFunctionCallback(self):
functions_1 = []
def function_callback_1(f, name, graph, inputs, outputs):
del name, graph, inputs, outputs
functions_1.append(f)
functions_2 = []
def function_callback_2(f, name, graph, inputs, outputs):
del name, graph, inputs, outputs
functions_2.append(f)
@def_function.function
def plus_one(x):
return x + 1
try:
function.add_function_callback(function_callback_1)
function.add_function_callback(function_callback_2)
self.assertAllClose(plus_one(numpy.array(3.0, dtype=numpy.float32)), 4.0)
self.assertLen(functions_1, 1)
self.assertLen(functions_2, 1)
function.remove_function_callback(function_callback_1)
# The 1st callback should not be invoked after remove_function_callback()
# is called.
self.assertAllClose(plus_one(numpy.array(3.0, dtype=numpy.float64)), 4.0)
self.assertLen(functions_1, 1)
self.assertLen(functions_2, 2)
finally:
function.clear_function_callbacks()
def testClearFunctionCallbacks(self):
function.add_function_callback(lambda f: None)
function.add_function_callback(lambda f: None)
self.assertLen(function._function_callbacks, 2)
function.clear_function_callbacks()
self.assertEmpty(function._function_callbacks) # pylint:disable=protected-access
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNestedTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = constant_op.constant(1000)
b = constant_op.constant(200)
c = constant_op.constant(30)
d = {'a': a, 'b': b}
e = (c, 4)
# Test different argument signatures when constructing the concrete func.
for cf in [
f.get_concrete_function(d, e),
f.get_concrete_function(d, y=e),
f.get_concrete_function(y=e, x=d),
f.get_concrete_function(_spec_for_value(d), _spec_for_value(e)),
f.get_concrete_function(_spec_for_value(d), y=_spec_for_value(e)),
f.get_concrete_function(y=_spec_for_value(e), x=_spec_for_value(d))
]:
# Test different calling conventions when calling the concrete func.
for output in [
cf(d, e), # structured signature
cf(d, y=e), # structured signature w/ kwarg
cf(y=e, x=d), # structured signature w/ 2 kwargs
cf(a, b, c), # flat signature
cf(x=a, x_1=b, y=c) # flat signature w/ kwargs
]:
self.assertIsInstance(output, tuple)
self.assertLen(output, 2)
self.assertAllEqual(output[0], 1200)
self.assertAllEqual(output[1], 34)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNestedNonTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = {'a': constant_op.constant(1000), 'b': constant_op.constant(200)}
b = (50, 3)
for cf in [ # argument y is bound to non-Tensor value (50, 3).
f.get_concrete_function(a, b),
f.get_concrete_function(a, y=b),
f.get_concrete_function(x=a, y=b)
]:
for output in [cf(a), cf(x=a), cf(a, b), cf(x=a, y=b)]:
self.assertAllEqual(output[0] + output[1], 1253)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNonTensorStringInputs(self):
@def_function.function
def f(x, y):
return string_ops.string_join([x, y])
a = constant_op.constant('a')
b = 'b'
cf = f.get_concrete_function(a, b)
for output in [cf(a), cf(x=a), cf(a, b), cf(x=a, y=b)]:
self.assertAllEqual(output, b'ab')
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithBoundNestedNonTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = {'a': 3000, 'b': 200, 'c': 9000}
b = (constant_op.constant(30), 4)
for cf in [ # argument x is bound to non-tensor value `a`
f.get_concrete_function(a, b),
f.get_concrete_function(a, y=b),
f.get_concrete_function(x=a, y=b)
]:
for output in [cf(a, b), cf(a, y=b), cf(y=b), cf(x=a, y=b)]:
self.assertAllEqual(output[0] + output[1], 3234)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithAllBoundNestedNonTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = {'a': 5000, 'b': 500}
b = (50, 5)
cf = f.get_concrete_function(a, b)
for output in [cf(), cf(a), cf(y=b)]:
self.assertAllEqual(output[0] + output[1], 5555)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionMethodWithVarargs(self):
float32_scalar = tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)
class MyModel(module.Module):
@def_function.function(input_signature=[float32_scalar, float32_scalar])
def add(self, *arg):
return math_ops.add(*arg)
m = MyModel()
cf = m.add.get_concrete_function()
cf(-12.0, 3.0)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionStructuredSignatureKeywordOrder(self):
# Check that keyword-only arguments are sorted appropriately, so that they
# feed the right tensor into each input.
@def_function.function
def g(**kwargs):
return string_ops.reduce_join(
string_ops.reduce_join(
ops.convert_to_tensor(sorted(kwargs.items())),
axis=1,
separator='='),
axis=0,
separator=', ')
s = constant_op.constant('s')
g.get_concrete_function(q=s, a=s, p=s, r=s, v=s, m=s, l=s)
self.assertAllEqual(
g(m='a', r='b', v='c', q='d', l='e', a='f', p='g'),
b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
self.assertAllEqual(
g(q='d', a='f', p='g', r='b', v='c', m='a', l='e'),
b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
self.assertAllEqual(
g(a='f', l='e', m='a', p='g', q='d', r='b', v='c'),
b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
dict(
testcase_name='MissingArg',
conc_args=lambda: (1, constant_op.constant(2)),
call_args=lambda: (1,),
error=r'func\(x, y\) missing required arguments: y'),
dict(
testcase_name='MissingVararg',
conc_args=lambda: (1, 2, constant_op.constant(1.0)),
call_args=lambda: (1, 2),
error=r'func\(x, y, <arg3>\) missing required arguments: <arg3>'),
dict(
testcase_name='ExtraPositionalArg',
conc_args=lambda: (1, 2),
call_args=lambda: (1, 2, 3),
error=r'func\(x, y\) takes 2 .* got 3'),
dict(
testcase_name='MissingKeywordOnlyArg',
conc_args=lambda: (1, 2),
conc_kwargs=lambda: {'c': constant_op.constant(1.0)},
call_args=lambda: (1, 2),
error=r'func\(x, y, \*, c\) missing required arguments: c'),
dict(
testcase_name='ExtraKeywordArg',
conc_args=lambda: (1, 2),
call_args=lambda: (1, 2),
call_kwargs=lambda: {'c': constant_op.constant(1.0)},
error=r'func\(x, y\) got unexpected keyword arguments: c'),
dict(
testcase_name='ExpectedRaggedGotNest',
conc_args=lambda: (ragged_factory_ops.constant([[1, 2], [3]]),),
call_args=lambda: ({
'a': constant_op.constant([1, 2, 3])
},),
error=r'func\(x, y\): argument x had incorrect type\n'
r' expected: RaggedTensor\n'
r" got: {'a': (Eager)?Tensor}"),
dict(
testcase_name='WrongRaggedRank',
conc_args=lambda: (ragged_factory_ops.constant([[1, 2], [3]]),),
call_args=lambda: (ragged_factory_ops.constant([[[1]]]),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='WrongRaggedDType',
conc_args=lambda: (ragged_factory_ops.constant([[1]]),),
call_args=lambda: (ragged_factory_ops.constant([[1.0]]),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='ExpectedDictGotTensor',
conc_args=lambda: ({
'a': constant_op.constant(1),
'b': constant_op.constant(1)
},),
call_args=lambda: (constant_op.constant(1),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='ExpectedTupleGotTensor',
conc_args=lambda:
((constant_op.constant(1), constant_op.constant(2)),),
call_args=lambda: (constant_op.constant(1),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='WrongDType',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (constant_op.constant(1.0),),
exception=(ValueError, errors.InvalidArgumentError,
# on xla_gpu, we get InternalError instead.
errors.InternalError)),
dict(
testcase_name='ExpectedTensorGotInt',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (5,),
error=r'func\(x, y\) expected a Tensor in x, but got int value 5'),
dict(
testcase_name='ExpectedIntGotDifferentInt',
conc_args=lambda: (5,),
call_args=lambda: (8,),
error=r'ConcreteFunction func\(x, y\) was constructed with int '
r'value 5 in x, but was called with int value 8'),
dict(
testcase_name='ExpectedIntGotTensor',
conc_args=lambda: (5,),
call_args=lambda: (constant_op.constant(6),),
error=r'ConcreteFunction func\(x, y\) was constructed with int '
'value 5 in x, but was called with (Eager)?Tensor value .*'),
dict(
testcase_name='TwoValuesForArgument',
conc_args=lambda: (1, 2),
call_args=lambda: (1, 2),
call_kwargs=lambda: {'x': 3},
error=r"func\(x, y\) got two values for 'x'"),
])
# pylint: enable=g-long-lambda
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionStructuredSignatureError(self,
conc_args=(),
conc_kwargs=None,
call_args=(),
call_kwargs=None,
error='.*',
exception=TypeError):
"""Tests for errors in the structrued signature.
Args:
conc_args: Positional arguments used for get_concrete_function.
conc_kwargs: Keyword arguments used for get_concrete_function.
call_args: Positional arguments used to call the function.
call_kwargs: Keyword arguments used to call the function.
error: Expected exception message.
exception: Expected exception type.
"""
conc_args = conc_args() if callable(conc_args) else conc_args
conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}
call_args = call_args() if callable(call_args) else call_args
call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}
self.assertIsInstance(conc_args, tuple)
self.assertIsInstance(call_args, tuple)
self.assertIsInstance(conc_kwargs, dict)
self.assertIsInstance(call_kwargs, dict)
@def_function.function
def func(x, y=5, *varargs, **kwargs): # pylint: disable=keyword-arg-before-vararg
del y, varargs, kwargs
return x
conc = func.get_concrete_function(*conc_args, **conc_kwargs)
with self.assertRaisesRegex(exception, error):
self.evaluate(conc(*call_args, **call_kwargs))
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
dict(
testcase_name='MissingArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (constant_op.constant(1),),
error=r'func\(x, y\) missing required arguments: y'),
dict(
testcase_name='TwoValuesForArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (constant_op.constant(1),),
call_kwargs=lambda: {
'x': constant_op.constant(1),
'y': constant_op.constant(1)
},
error=r"func\(x, y\) got two values for 'x'"),
dict(
testcase_name='ExtraPositionalArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (constant_op.constant(1), constant_op.constant(2),
constant_op.constant(3)),
error=r'func\(x, y\) takes 2 .* got 3'),
dict(
testcase_name='UnexpectedKeywordArg',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (constant_op.constant(1),),
call_kwargs=lambda: {'c': constant_op.constant(1)},
error=r'func\(x\) got unexpected keyword arguments: c'),
dict(
testcase_name='MissingVararg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2),
constant_op.constant(3)),
call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
error=r'func\(x, y, varargs_0\) missing required '
r'arguments: varargs_0'),
dict(
testcase_name='MissingKeywordArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
conc_kwargs=lambda: {'c': constant_op.constant(1)},
call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
error=r'func\(x, y, c\) missing required arguments: c'),
dict(
testcase_name='ExpectedTensorGotInt',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (5, constant_op.constant(2)),
error=r'func\(x, y\): expected argument #0\(zero-based\) to be '
r'a Tensor; got int \(5\)'),
dict(
testcase_name='WrongDType',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (constant_op.constant(1.0),),
exception=(ValueError, errors.InvalidArgumentError,
# on xla_gpu, we get InternalError instead.
errors.InternalError)),
dict(
testcase_name='MissingKeywordArgNestPiece',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
conc_kwargs=lambda: {'c': ragged_factory_ops.constant([[1]])},
call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_kwargs=lambda: {'c': constant_op.constant(1)},
error=r'func\(x, y, c, c_1\) missing required arguments: c_1'),
])
# pylint: enable=g-long-lambda
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionFlatSignatureError(self,
conc_args=(),
conc_kwargs=None,
call_args=(),
call_kwargs=None,
error='.*',
exception=TypeError):
"""Tests for errors in the flat signature.
Args:
conc_args: Positional arguments used for get_concrete_function.
conc_kwargs: Keyword arguments used for get_concrete_function.
call_args: Positional arguments used to call the function.
call_kwargs: Keyword arguments used to call the function.
error: Expected exception message.
exception: Expected exception type.
"""
conc_args = conc_args() if callable(conc_args) else conc_args
conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}
call_args = call_args() if callable(call_args) else call_args
call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}
self.assertIsInstance(conc_args, tuple)
self.assertIsInstance(call_args, tuple)
self.assertIsInstance(conc_kwargs, dict)
self.assertIsInstance(call_kwargs, dict)
@def_function.function
def func(x, y=5, *varargs, **kwargs): # pylint: disable=keyword-arg-before-vararg
del y, varargs, kwargs
return x
conc = func.get_concrete_function(*conc_args, **conc_kwargs)
# Remove _function_spec, to disable the structured signature.
conc._set_function_spec(None) # pylint: disable=protected-access
with self.assertRaisesRegex(exception, error):
self.evaluate(conc(*call_args, **call_kwargs))
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionAmbiguousSignature(self):
# When both the flat & structured signatures are applicable but they
# give different results, we use the structured signature. Note: we expect
# this to be extremely rare.
@def_function.function
def f(x, y):
return x * 10 + y
conc = f.get_concrete_function(
x=tensor_spec.TensorSpec(None, dtypes.int32, name='y'),
y=tensor_spec.TensorSpec(None, dtypes.int32, name='x'))
result = conc(x=constant_op.constant(5), y=constant_op.constant(6))
self.assertAllEqual(result, 56)
def testPrettyPrintedSignature(self):
@def_function.function
def func(x, kangaroo=None, octopus=7):
del octopus, kangaroo
return x
scalar = constant_op.constant(5)
vector = constant_op.constant([10, 10, 20])
ragged = ragged_factory_ops.constant([[10, 20], [40]])
c1 = func.get_concrete_function(scalar, vector)
c1_summary = r'func\(x, kangaroo, octopus=7\)'
c1_details = (r' Args:\n'
r' x: int32 Tensor, shape=\(\)\n'
r' kangaroo: int32 Tensor, shape=\(3,\)\n'
r' Returns:\n'
r' int32 Tensor, shape=\(\)')
self.assertRegex(c1.pretty_printed_signature(verbose=False), c1_summary)
self.assertRegex(
c1.pretty_printed_signature(verbose=True),
c1_summary + '\n' + c1_details)
self.assertRegex(
repr(c1), r'<ConcreteFunction func\(x, kangaroo, octopus=7\) at .*>')
self.assertRegex(
str(c1), 'ConcreteFunction {}\n{}'.format(c1_summary, c1_details))
c2 = func.get_concrete_function(scalar, ragged, 3)
c2_summary = r'func\(x, kangaroo, octopus=3\)'
c2_details = (r' Args:\n'
r' x: int32 Tensor, shape=\(\)\n'
r' kangaroo: RaggedTensorSpec\(.*\)\n'
r' Returns:\n'
r' int32 Tensor, shape=\(\)')
self.assertRegex(c2.pretty_printed_signature(),
c2_summary + '\n' + c2_details)
c3 = func.get_concrete_function({'a': scalar, 'b': [ragged, ragged]})
c3_summary = r'func\(x, kangaroo=None, octopus=7\)'
c3_details = (r' Args:\n'
r" x: {'a': <1>, 'b': \[<2>, <3>\]}\n"
r' <1>: int32 Tensor, shape=\(\)\n'
r' <2>: RaggedTensorSpec\(.*\)\n'
r' <3>: RaggedTensorSpec\(.*\)\n'
r' Returns:\n'
r" {'a': <1>, 'b': \[<2>, <3>\]}\n"
r' <1>: int32 Tensor, shape=\(\)\n'
r' <2>: RaggedTensorSpec\(.*\)\n'
r' <3>: RaggedTensorSpec\(.*\)')
# Python 3.5 does not guarantee deterministic iteration of dict contents,
# which can lead to a mismatch in the pretty_printed_signature output for "Args".
if sys.version_info >= (3, 6):
self.assertRegex(c3.pretty_printed_signature(),
c3_summary + '\n' + c3_details)
# pylint: disable=keyword-arg-before-vararg
@def_function.function
def func2(x, y=3, *args, **kwargs):
return (x, y, args, kwargs)
c4 = func2.get_concrete_function(scalar, 4, 5, a=scalar)
c4_summary = 'func2(x, y=4, <arg3>=5, *, a)'
self.assertEqual(c4.pretty_printed_signature(verbose=False), c4_summary)
c5 = func2.get_concrete_function(8, vector)
c5_summary = 'func2(x=8, y)'
self.assertEqual(c5.pretty_printed_signature(verbose=False), c5_summary)
def testPrettyPrintedExplicitSignatureWithKeywordArg(self): # b/159639913
@def_function.function(input_signature=[tensor_spec.TensorSpec(None)])
def fn(a, b=1):
return a + b
concrete_fn = fn.get_concrete_function()
self.assertEqual(concrete_fn.pretty_printed_signature(False), 'fn(a)')
self.assertEqual(
concrete_fn.pretty_printed_signature(True), 'fn(a)\n'
' Args:\n'
' a: float32 Tensor, shape=<unknown>\n'
' Returns:\n'
' float32 Tensor, shape=<unknown>')
def testPrettyPrintedSignatureLoadedNamedTuple(self):
Point = collections.namedtuple('Point', ['x', 'y'])
@def_function.function
def fn(b, a): # pylint: disable=unused-argument
return 1.
b = Point(
x=constant_op.constant(1., dtype=dtypes.float32),
y=constant_op.constant(1., dtype=dtypes.float32))
a = Point(
x=constant_op.constant(1, dtype=dtypes.int32),
y=constant_op.constant(1, dtype=dtypes.int32))
mod = module.Module()
f = fn.get_concrete_function(b, a)
save(mod, '/tmp/f', signatures=f)
loaded = load('/tmp/f')
printed = loaded.signatures['serving_default'].pretty_printed_signature()
self.assertIn('a: int32 Tensor, shape=()', printed)
self.assertIn('a_1: int32 Tensor, shape=()', printed)
self.assertIn('b: float32 Tensor, shape=()', printed)
self.assertIn('b_1: float32 Tensor, shape=()', printed)
@test_util.run_in_graph_and_eager_modes
def testIndexedSlicesAsGradientsForConcreteFunctions(self):
@def_function.function
def summing_rnn(inputs):
return math_ops.reduce_sum(inputs, axis=1)
@def_function.function
def gradients(inputs):
with backprop.GradientTape() as tape:
tape.watch(inputs)
hidden = summing_rnn(inputs)
hidden = array_ops.gather(hidden, constant_op.constant([0]))
loss = math_ops.reduce_mean(hidden)
return tape.gradient(loss, inputs)
gradients(constant_op.constant([[[1.0], [2.0]]])) # No error is raised
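# The tests below exercise experimental_follow_type_hints: with it enabled,
# arguments annotated as ops.Tensor are treated as tensors for tracing, so
# changing only their Python values does not trigger a retrace.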
def testFollowTypeHintsTraceBasic(self):
trace_count = [0]
def func(x: ops.Tensor):
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
enabled(1) # Initial call gets traced
enabled(2)
enabled(3)
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(1)
disabled(2) # Retrace
disabled(3) # Retrace
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsTraceWithArgs(self):
trace_count = [0]
def func(*args: ops.Tensor):
trace_count[0] += 1
return args
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
args = (
'abc',
'def',
) * 20
args2 = (
'def',
'abc',
) * 20
enabled(args)
enabled(args2)
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(args)
disabled(args2) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithKwargs(self):
trace_count = [0]
def func(t: ops.Tensor, **kwargs: ops.Tensor):
del kwargs
trace_count[0] += 1
return t
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
enabled(1, x=1, y=1.0, z='one')
enabled(2, x=2, y=2.0, z='two')
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(1, x=1, y=1.0, z='one')
disabled(2, x=2, y=2.0, z='two') # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithMultipleInputTypes(self):
trace_count = [0]
def func(t: ops.Tensor, *args: ops.Tensor, **kwargs: ops.Tensor):
del args, kwargs
trace_count[0] += 1
return t
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
enabled(1, constant_op.constant(1), 'str', x=4.0)
enabled(2, constant_op.constant(2), 'str2', x=5.0)
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(1, constant_op.constant(1), 'str', x=4.0)
disabled(2, constant_op.constant(2), 'str2', x=5.0) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithOnlyArgNamed(self):
trace_count = [0]
def func(t: ops.Tensor, i: int = 1, **kwargs): # pylint: disable=bad-whitespace
del i, kwargs
trace_count[0] += 1
return t
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 3, x=4.0, y='str')
enabled(2, 4, x=4.0, y='str') # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithNotAllNamed(self):
trace_count = [0]
def func(x, y: ops.Tensor, z: int):
del y, z
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3)
enabled(1, 20, 3) # No retrace - change in ops.Tensor typed arg
enabled(2, 2, 3) # Retrace - change in untyped arg
enabled(2, 2, 4) # Retrace - change in typed arg
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsTraceWithOnlyArgsNamed(self):
trace_count = [0]
def func(x, y, *args: ops.Tensor):
del y, args
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 20, 3, 4, 5, 6)
enabled(1, 20, 3, 4, 5, 60) # No retrace - change in *args
enabled(1, 30, 7, 8, 9, 10) # Retrace - change in args
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithOnlyKwargsNamed(self):
trace_count = [0]
def func(x, y, *args, **kwargs: ops.Tensor):
del y, args, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, 5, 6, a=1.0, b=2.0, c=3.0)
enabled(
1, 2, 3, 4, 5, 6, a=1.5, b=2.5,
c=3.5) # No retrace - change in **kwargs
enabled(100, 2, 3, 4, 5, 6, a=1.0, b=2.0, c=3.0) # Retrace - change in args
enabled(
1, 2, 3, 4, 5, 100, a=1.0, b=2.0, c=3.0) # Retrace - change in *args
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsTraceWithArgsEquals(self):
trace_count = [0]
def func(
x: ops.Tensor = 0, # pylint:disable=bad-whitespace
y: int = 1, # pylint:disable=bad-whitespace
**kwargs: ops.Tensor):
del y, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(x=1, y=2, z=3)
enabled(x=1, y=3, z=3) # Retrace - change in args
enabled(x=2, y=2, z=4) # No retrace - change in args and **kwargs
enabled(x=2, y=2, z=4, u=5) # Retrace - change in **kwargs
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsWithTensorSpec(self):
def func(x: ops.Tensor, y):
return x + y
v = def_function.function(experimental_follow_type_hints=True)(func)
v = v.get_concrete_function(
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32), 3)
x = v(constant_op.constant(1.), 3)
self.assertEqual(x.numpy(), 4.)
def testFollowTypeHintsTraceWithKwArgsAndNoVarKws(self):
trace_count = [0]
def func(a: int, b: ops.Tensor,
x: ops.Tensor = 0, y: int = 1):
del a, b, y
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(0, 0, x=1, y=2)
enabled(0, 0, x=2, y=2,) # No retrace, since only tensor changed
self.assertEqual(trace_count[0], 1)
# Pass args as keyword args.
enabled(a=0, b=0, x=2, y=2,) # No retrace, args are the same
self.assertEqual(trace_count[0], 1)
enabled(a=1, b=0, x=2, y=2,) # Retrace, since non-tensor arg changed
self.assertEqual(trace_count[0], 2)
enabled(a=1, b=2, x=2, y=2) # No retrace, since only tensor changed
self.assertEqual(trace_count[0], 2)
trace_count[0] = 0
disabled = def_function.function(func, experimental_follow_type_hints=False)
disabled(0, 0, x=1, y=2)
disabled(0, 0, x=2, y=2,) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithArgsEqualsTypedKwargs(self):
trace_count = [0]
def func(x, y, **kwargs: ops.Tensor):
del y, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(x=1, y=2, z=3)
enabled(x=1, y=3, z=3) # Retrace
enabled(x=1, y=2, z=4) # No retrace
enabled(x=2, y=2, z=4) # Retrace
enabled(x=2, y=2, z=4, u=5) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsEqualsTypedArgs(self):
trace_count = [0]
def func(x: ops.Tensor, y: int, **kwargs):
del y, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(x=1, y=2, z=3)
enabled(x=1, y=3, z=3) # Retrace
enabled(x=1, y=2, z=4) # Retrace
enabled(x=2, y=2, z=3) # No retrace
enabled(x=2, y=2, z=4, u=5) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithKwOnlyArgsBasic(self):
trace_count = [0]
def func(*, a: ops.Tensor = None, b=1): # pylint: disable=bad-whitespace
del b
trace_count[0] += 1
return a
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(a=1, b=2)
enabled(a=2, b=2) # No retrace
enabled(a=1, b=1) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedArg(self):
trace_count = [0]
def func(arg: ops.Tensor, *args, kwonly, **kwargs):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1000, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedArgs(self):
trace_count = [0]
def func(arg, *args: ops.Tensor, kwonly, **kwargs):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 200, 300, 400, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedKwOnlyArg(self):
trace_count = [0]
def func(arg, *args, kwonly: ops.Tensor, **kwargs):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 2, 3, 4, kwonly=500, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedKwargs(self):
trace_count = [0]
def func(arg, *args, kwonly, **kwargs: ops.Tensor):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # No retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=600, kwarg2=700) # No retrace
self.assertEqual(trace_count[0], 4)
def testWithExtraWrapper(self):
class Foo(module.Module):
def __init__(self):
super().__init__()
self.var = None
@def_function.function
@dummy_tf_decorator
def add(self, x, y, z=1):
if self.var is None:
return x + y + z
foo = Foo()
self.assertEqual(foo.add(2, 3).numpy(), 6)
@parameterized.parameters([(def_function.function, dummy_tf_decorator),
(dummy_tf_decorator, def_function.function),
(def_function.function, def_function.function)])
def testWithExtraWrapperRedundantArgs(self, decorator1, decorator2):
class Foo(module.Module):
def __init__(self):
super().__init__()
self.var = None
@decorator1
@decorator2
def add1(self, x, y):
if self.var is None:
return x + y
foo = Foo()
with self.assertRaisesRegex(TypeError, 'got two values'):
foo.add1(2, x=3) # pylint: disable=redundant-keyword-arg,no-value-for-parameter
def testWithExtraWrapperMissingArgs(self):
class Foo(module.Module):
def __init__(self):
super().__init__()
self.var = None
@def_function.function
@dummy_tf_decorator
def add1(self, x, y):
if self.var is None:
return x + y
@def_function.function
@dummy_tf_decorator
def add2(self, x, y):
if self.var is None:
return x + y
@def_function.function
@def_function.function
def add3(self, x, y):
if self.var is None:
return x + y
foo = Foo()
with self.assertRaisesRegex(
TypeError, 'missing 1 required positional argument: \'y\''):
foo.add1(2) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(TypeError, 'missing 1 required argument: x'):
foo.add1(y=2) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(
TypeError, 'missing 1 required positional argument: \'y\''):
foo.add2(2) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(TypeError, 'missing 1 required argument: x'):
foo.add2(y=2) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(
TypeError, 'missing 1 required positional argument: \'y\''):
foo.add3(2) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(TypeError, 'missing 1 required argument: x'):
foo.add3(y=2) # pylint: disable=no-value-for-parameter
def testMissingArgsTfFunctionedMethod(self):
class A(object):
def func(self, position_arg1, position_arg2):
return position_arg1, position_arg2
@def_function.function
def decorated_method(self, position_arg1, position_arg2):
return position_arg1, position_arg2
a_instance = A()
tf_method_pos = def_function.function(a_instance.func)
with self.assertRaisesRegex(
TypeError, '.* missing 1 required argument: position_arg1'):
tf_method_pos(position_arg2='foo')
# tf.function-decorated instance methods need to be tested because of
# the __get__ method implementation.
tf_func_decorated_method = def_function.function(
a_instance.decorated_method)
tf_func_decorated_method(position_arg1='foo', position_arg2='bar')
with self.assertRaisesRegex(
TypeError, '.* missing 1 required argument: position_arg1'):
tf_func_decorated_method(position_arg2='bar')
def testMissingArgsTfFunctionedObject(self):
class A(object):
def __call__(self, position_arg1, position_arg2):
return position_arg1, position_arg2
a_instance = A()
# A tf.function-decorated callable object needs to be tested because of
# the special inspect results.
tf_func_obj = def_function.function(a_instance)
tf_func_obj(position_arg1=1, position_arg2=2)
with self.assertRaisesRegex(
TypeError, '.* missing 1 required argument: position_arg1'):
tf_func_obj(position_arg2='bar')
def testMissingArgsTfFunctionedFunctions(self):
def func_pos(position_arg1, position_arg2):
return position_arg1, position_arg2
def func_with_default(position_arg, named_arg=None):
return position_arg, named_arg
def func_pos_3args(position_arg1, position_arg2, position_arg3):
return position_arg1, position_arg2, position_arg3
tf_func_pos = def_function.function(func_pos)
with self.assertRaisesRegex(
TypeError, '.* missing 1 required argument: position_arg1'):
tf_func_pos(position_arg2='foo')
tf_func_with_default = def_function.function(func_with_default)
tf_func_with_default(position_arg='bar')
with self.assertRaisesRegex(TypeError,
'.* missing 1 required argument: position_arg'):
tf_func_with_default(named_arg='foo')
tf_func_pos_3args = def_function.function(func_pos_3args)
with self.assertRaisesRegex(
TypeError,
'.* missing required arguments: position_arg1, position_arg3'):
tf_func_pos_3args(position_arg2='foo')
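# The shape-inference tests below check that constant values passed into
# nested functions propagate through stack/unstack/concat, so the output
# shapes are fully known when the outer function is traced.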
def testShapeInferencePropagateConstNestedStack(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((None, None), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(x, s):
old_shape = array_ops.shape(x)
new_shape = array_ops.stack([old_shape[0], s], axis=0)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(3, 6), dtype=dtypes.int32)
])
def g(x):
y = f(x, s=5)
assert y.shape.as_list() == [3, 5], y.shape.as_list()
return y
self.assertAllEqual(
g(array_ops.zeros([3, 6], dtype=dtypes.int32)), array_ops.ones([3, 5]))
def testShapeInferencePropagateConstNestedUnstackStack(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((None, None), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(x, s):
s0, _ = array_ops.unstack(array_ops.shape(x), axis=0)
new_shape = array_ops.stack([s0, s], axis=0)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(3, 6), dtype=dtypes.int32)
])
def g(x):
y = f(x, s=5)
assert y.shape.as_list() == [3, 5], y.shape.as_list()
return y
self.assertAllEqual(
g(array_ops.zeros([3, 6], dtype=dtypes.int32)), array_ops.ones([3, 5]))
def testShapeInferencePropagateConstNestedConcat(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(d1, d2, d3):
new_shape = array_ops.concat([[d1], [d2], [d3]], axis=-1)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function()
def g():
y = f(1, 2, 3)
assert y.shape.as_list() == [1, 2, 3], y.shape.as_list()
return y
self.assertAllEqual(g(), array_ops.ones([1, 2, 3]))
def testShapeInferencePropagateConstDoubleNested(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(d1, d2, d3):
new_shape = array_ops.concat([[d1], [d2], [d3]], axis=-1)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function()
def g():
y = def_function.function(f)(1, 2, 3)
assert y.shape.as_list() == [1, 2, 3], y.shape.as_list()
return y
self.assertAllEqual(g(), array_ops.ones([1, 2, 3]))
@test_util.run_v2_only
def testControlDependencyAfterInline(self):
v = variables.Variable(0.)
@def_function.function
def assign():
return v.assign(1.)
@def_function.function
def assign_add():
return v.assign_add(1.)
@def_function.function
def f():
check_ops.assert_equal_v2(assign(), 1.)
check_ops.assert_equal_v2(assign_add(), 2.)
# We don't have a way to inspect the inlined graph in Python, so we run it
# multiple times to have more confidence the dependency is correct.
for _ in range(30):
f()
@test_util.run_v2_only
def testReadInFuncWriteOutside(self):
# Run many times since we are testing for a potential race condition.
for _ in range(30):
# pylint: disable=cell-var-from-loop
v = variables.Variable(1.)
@def_function.function
def add_one():
return v + 1.
@def_function.function
def get_v_plus_one():
v_plus_one = add_one()
v.assign_add(2.0)
return v_plus_one
self.assertAllEqual(get_v_plus_one(), 2.0)
def testOpExpandErrorMessage(self):
@def_function.function
def test_fn():
if array_ops.constant(False):
return array_ops.constant(1)
else:
return script_ops.eager_py_func(
func=lambda: array_ops.constant([2.]), inp=(), Tout=dtypes.int32)
error_pattern = re.compile(r'Graph execution error.*func=lambda', re.DOTALL)
with self.assertRaisesRegex(errors.InvalidArgumentError, error_pattern):
test_fn()
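# Tests for functions whose ops are placed across multiple devices,
# covering output placement, colocation, resources, and int32 handling.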
class MultiDeviceTest(test.TestCase, parameterized.TestCase):
@test_util.run_gpu_only
def testMultiDeviceOutput(self):
"""Tests that functions can produce outputs on multiple devices."""
@function.defun
def func(a, b, transpose_a):
with ops.device('/device:CPU:0'):
m1 = math_ops.matmul(a, b, transpose_a=transpose_a)
with ops.device('/device:GPU:0'):
m2 = math_ops.matmul(a, b, transpose_a=transpose_a)
return m1, m2
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
m1, m2 = func(t, t, transpose_a=True)
self.assertAllEqual(m1.numpy(), [[10, 14], [14, 20]])
self.assertRegex(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), [[10, 14], [14, 20]])
self.assertRegex(m2.backing_device, 'GPU')
@test_util.run_gpu_only
def testEmptyBody(self):
@function.defun
def func(a, b):
return b, a
with ops.device('/device:CPU:0'):
a = array_ops.identity(3.0)
with ops.device('/device:GPU:0'):
b = array_ops.identity(5.0)
m1, m2 = func(a, b)
self.assertAllEqual(m1.numpy(), 5.0)
self.assertRegex(m1.backing_device, 'GPU')
self.assertAllEqual(m2.numpy(), 3.0)
self.assertRegex(m2.backing_device, 'CPU')
@test_util.run_gpu_only
def testMultiDeviceInt32(self):
"""Tests that multi-device functions can take and output INT32s.
When an INT32 device tensor is fed into a function, it is copied to CPU
by the eager runtime. The function sees all INT32 inputs on CPU.
We set allocator attribute 'on_host' for INT32 outputs. They can be
partitioned into the GPU component function, but will be allocated on
CPU nevertheless.
There is experimental support for `ints_on_device` in
FunctionLibraryRuntime now. We can try that.
"""
with ops.device('/device:CPU:0'):
int_cpu = constant_op.constant(3, dtype=dtypes.int32)
resource = resource_variable_ops.ResourceVariable(5, dtype=dtypes.int32)
with ops.device('/device:GPU:0'):
int_gpu = constant_op.constant(7, dtype=dtypes.int32)
@function.defun
def func(int_cpu, resource, int_gpu):
with ops.device('/device:CPU:0'):
m1 = int_cpu * resource + int_gpu
with ops.device('/device:GPU:0'):
# This computation will happen on GPU but m2 will be copied to CPU.
m2 = int_gpu * resource + int_cpu + 1
return m1, m2
m1, m2 = func(int_cpu, resource, int_gpu)
self.assertAllEqual(m1.numpy(), 22)
self.assertRegex(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), 39)
self.assertRegex(m2.backing_device, 'CPU')
# flip arguments
m1, m2 = func(int_gpu, resource, int_cpu)
self.assertAllEqual(m1.numpy(), 38)
self.assertRegex(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), 23)
self.assertRegex(m2.backing_device, 'CPU')
@test_util.run_gpu_only
def testMultiDeviceColocateWith(self):
"""Tests that function's outputs respect colocation constraints."""
@function.defun
def func(a, b):
with ops.colocate_with(a):
ra = 2 * a
with ops.colocate_with(b):
rb = 3 * b
return ra, rb
devices = ['/device:CPU:0', '/device:GPU:0']
for dev1, dev2 in itertools.product(devices, devices):
with ops.device(dev1):
a = array_ops.identity(1.0)
with ops.device(dev2):
b = array_ops.identity(10.0)
ra, rb = func(a, b)
self.assertEqual(ra.numpy(), 2.0)
self.assertRegex(ra.backing_device, dev1)
self.assertEqual(rb.numpy(), 30.0)
self.assertRegex(rb.backing_device, dev2)
@test_util.run_gpu_only
def testMultiDeviceResources(self):
with ops.device('/device:CPU:0'):
c1 = resource_variable_ops.ResourceVariable(2.0)
c2 = resource_variable_ops.ResourceVariable(7.0)
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
g2 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def func(resource1, resource2):
with ops.device('/device:CPU:0'):
result1 = resource1 * g2
with ops.device('/device:GPU:0'):
result2 = resource2 * c2
return result1, result2
r1, r2 = func(c1, g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 21.0)
self.assertRegex(r2.backing_device, 'GPU')
# Call with flipped inputs. Check that we look at resource's
# device and reinstantiate the function when inputs' devices change.
r1, r2 = func(g1, c1)
self.assertEqual(r1.numpy(), 15.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 14.0)
self.assertRegex(r2.backing_device, 'GPU')
@test_util.run_gpu_only
def testOutputResources(self):
with ops.device('/device:CPU:0'):
c1 = resource_variable_ops.ResourceVariable(2.0)
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun
def func(resource1, resource2):
with ops.device('/device:CPU:0'):
result1 = resource1 * 5
with ops.device('/device:GPU:0'):
result2 = resource2 * 7
return result1, resource1.handle, result2, resource2.handle
r1, res1, r2, res2 = func(c1, g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 21.0)
self.assertRegex(r2.backing_device, 'GPU')
def check_handle(handle, expected_value):
self.assertRegex(handle.backing_device, 'CPU')
tensor = gen_resource_variable_ops.read_variable_op(
handle, dtypes.float32)
self.assertEqual(tensor.numpy(), expected_value)
# Check that handles returned from functions are on CPU and an op using
# the resource handle is correctly placed on the device backing the
# resource.
check_handle(res1, 2.0)
check_handle(res2, 3.0)
# Call with flipped inputs to make sure the function is
# reinstantiated and eager runtime does not mess up the device assignment
# for ops consuming handles returned from defuns.
r1, res1, r2, res2 = func(g1, c1)
self.assertEqual(r1.numpy(), 15.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 14.0)
self.assertRegex(r2.backing_device, 'GPU')
check_handle(res1, 3.0)
check_handle(res2, 2.0)
@test_util.run_gpu_only
def testPassResourceThroughNestedFunctionCall(self):
"""Test passing GPU resource to noinline function call placed on CPU.
PartitionedCallOp must not enforce any particular device assignment for the
resource output. Inner function marked as `_nospecialize`, so Grappler would
not prune unused function output.
"""
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun_with_attributes(attributes={
'_noinline': True,
'_nospecialize': True
})
def inner(resource1):
return resource1 * 2, resource1.handle
@function.defun
def outer(resource1):
with ops.device('/device:CPU:0'):
r1, _ = inner(resource1)
return r1
r1 = outer(g1)
self.assertEqual(r1.numpy(), 6.0)
self.assertRegex(r1.backing_device, 'CPU')
@test_util.run_gpu_only
def testReturnResourceFromNestedFunctionCall(self):
"""Test returning GPU resource from noinline function call placed on CPU.
When inferring output devices for the return value, do not set a device for
returns of DT_RESOURCE data type based on the device assignment of the node
that produced that resource. As an example, a function call placed on CPU can
return resources on GPU.
"""
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun_with_attributes(attributes={
'_noinline': True
})
def inner(resource1):
resource1.assign_add(2.0)
return resource1 * 2, resource1.handle
@function.defun
def outer(resource1):
with ops.device('/device:CPU:0'):
r1, res1 = inner(resource1)
return r1, res1
r1, res1 = outer(g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegex(r1.backing_device, 'CPU')
def check_handle(handle, expected_value):
self.assertRegex(handle.backing_device, 'CPU')
tensor = gen_resource_variable_ops.read_variable_op(
handle, dtypes.float32)
self.assertEqual(tensor.numpy(), expected_value)
# Check that handles returned from functions are on CPU and an op using
# the resource handle is correctly placed on the device backing the
# resource.
check_handle(res1, 5.0)
@test_util.run_gpu_only
def testComplexInputOutputDevicePattern(self):
"""Tests input/output mapping logic in partitioning."""
with ops.device('/device:CPU:0'):
rc0 = resource_variable_ops.ResourceVariable(2.0)
rc1 = resource_variable_ops.ResourceVariable(3.0)
cc0 = array_ops.identity(5.0)
cc1 = array_ops.identity(7.0)
with ops.device('/device:GPU:0'):
rg0 = resource_variable_ops.ResourceVariable(11.0)
rg1 = resource_variable_ops.ResourceVariable(13.0)
cg0 = array_ops.identity(17.0)
cg1 = array_ops.identity(19.0)
# Make sure tensors are on expected devices.
for tensor in [cc0, cc1]:
self.assertRegex(tensor.backing_device, 'CPU:0')
for tensor in [cg0, cg1]:
self.assertRegex(tensor.backing_device, 'GPU:0')
@function.defun
def func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1):
with ops.device('/device:CPU:0'):
m1 = rc0 * cg0
with ops.device('/device:GPU:0'):
m2 = rg0 * cc0
with ops.device('/device:CPU:0'):
r1 = 1000.0 * m2 + rc1 * cg1
with ops.device('/device:GPU:0'):
r2 = 1000.0 * m1 + rg1 * cc1
return r1, r2, m2, m1
r1, r2, m2, m1 = func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1)
self.assertRegex(m1.backing_device, 'CPU')
self.assertRegex(r1.backing_device, 'CPU')
self.assertRegex(m2.backing_device, 'GPU')
self.assertRegex(r2.backing_device, 'GPU')
self.assertEqual(m1.numpy(), 34.0)
self.assertEqual(r1.numpy(), 55000.0 + 3.0 * 19.0)
self.assertEqual(m2.numpy(), 55.0)
self.assertEqual(r2.numpy(), 34000.0 + 13.0 * 7.0)
@test_util.run_gpu_only
def testArgumentPruning(self):
"""Tests functions taking unnecessary arguments."""
with ops.device('/device:CPU:0'):
c1 = constant_op.constant(5.0)
c2 = constant_op.constant(7.0)
with ops.device('/device:GPU:0'):
g1 = constant_op.constant(11.0)
g2 = constant_op.constant(13.0)
g3 = constant_op.constant(17.0)
@function.defun
def func(g1, g2, c1, g3, c2): # pylint: disable=unused-argument
# arguments g1 and g2 are unused and can be pruned by grappler.
return c1 * g3 * c2
result = func(g1, g2, c1, g3, c2)
self.assertEqual(result.numpy(), 5.0 * 7.0 * 17.0)
def testNestedCallWatchedVariables(self):
v = variables.Variable(4.)
@def_function.function
def f():
return v ** 2.
with backprop.GradientTape() as tape:
f()
self.assertEqual((v,), tape.watched_variables())
@def_function.function
def g():
return f()
with backprop.GradientTape() as tape:
g()
self.assertEqual((v,), tape.watched_variables())
# f() can rely on the variable being read during its trace. g() checks that
# variables from a function which knows about them are recorded on the
# tape. h() tests that functions forward knowledge of variables to callers.
@def_function.function
def h():
return g()
with backprop.GradientTape() as tape:
h()
self.assertEqual((v,), tape.watched_variables())
def testReplaceCaptureWithDeferred(self):
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
z = constant_op.constant(3.0)
@def_function.function
def fn():
a = x + y
b = a + z
return b
concrete_fn = fn.get_concrete_function()
self.assertAllEqual(concrete_fn(), 6.0)
value = constant_op.constant(4.0)
def closure():
return value
concrete_fn.replace_capture_with_deferred_capture(
concrete_fn.captured_inputs[1],
closure,
spec=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
placeholder=concrete_fn.inputs[1])
self.assertAllEqual(concrete_fn(), 8.0)
value = constant_op.constant(5.0)
self.assertAllEqual(concrete_fn(), 9.0)
def testRaiseReplaceCaptureWithDeferredTypeSpecMismatch(self):
bool_captured_tensor = constant_op.constant(True)
float_captured_tensor = constant_op.constant([3.], dtype=dtypes.float32)
value = constant_op.constant([2.], dtype=dtypes.float32)
@def_function.function
def fn():
deferred_tensor = ops.get_default_graph().capture_call_time_value(
lambda: value,
tensor_spec.TensorSpec(shape=(1,), dtype=dtypes.float32))
if bool_captured_tensor:
return deferred_tensor
else:
return deferred_tensor + float_captured_tensor
concrete_fn = fn.get_concrete_function()
self.assertAllEqual(concrete_fn(), [2.])
new_bool_captured_tensor = constant_op.constant(False)
def bool_closure():
return new_bool_captured_tensor
# Test that an error is raised when replacing a bool capture with a closure
# whose output type is float32
new_float_captured_tensor = constant_op.constant([3.], dtype=dtypes.float32)
def float_closure():
return new_float_captured_tensor
with self.assertRaisesRegex(ValueError,
'Attempting to substitute closure with spec*'):
concrete_fn.replace_capture_with_deferred_capture(
bool_captured_tensor,
float_closure,
spec=tensor_spec.TensorSpec(shape=(1,), dtype=dtypes.float32))
# Test replace without a placeholder
concrete_fn.replace_capture_with_deferred_capture(
bool_captured_tensor,
bool_closure,
spec=tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool))
self.assertAllEqual(concrete_fn(), [5.])
def testConcreteFunctionSetExternalCapture(self):
captured_tensor = constant_op.constant([1.])
value = constant_op.constant([2.])
@def_function.function
def fn():
deferred_tensor = ops.get_default_graph().capture_call_time_value(
lambda: value,
tensor_spec.TensorSpec(shape=(1,), dtype=dtypes.float32))
return deferred_tensor + captured_tensor
cf = fn.get_concrete_function()
self.assertLen(cf._captured_inputs, 2)
self.assertEqual(list(map(callable, cf._captured_inputs)), [False, True])
self.assertAllEqual(cf(), [3.])
# Reset capture to a deferred one, reset deferred capture to a capture.
cf.set_external_captures([cf._captured_inputs[1], cf._captured_inputs[0]])
value = constant_op.constant([3.])
self.assertAllEqual(cf(), [4.])
def testGraphReplaceCaptureAndSetExternalCapture(self):
bool_captured_tensor = constant_op.constant(True)
float_captured_tensor = constant_op.constant([3.], dtype=dtypes.float32)
value = constant_op.constant([2.], dtype=dtypes.float32)
@def_function.function
def fn():
deferred_tensor = ops.get_default_graph().capture_call_time_value(
lambda: value,
tensor_spec.TensorSpec(shape=(1,), dtype=dtypes.float32))
if bool_captured_tensor:
return deferred_tensor
else:
return deferred_tensor + float_captured_tensor
concrete_fn = fn.get_concrete_function()
self.assertAllEqual(concrete_fn(), [2.])
new_bool_captured_tensor = constant_op.constant(False)
def closure():
return new_bool_captured_tensor
concrete_fn.graph.replace_capture_with_deferred_capture(
concrete_fn.captured_inputs[0],
closure,
spec=tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool),
placeholder=concrete_fn.inputs[1])
concrete_fn.set_external_captures([
closure, concrete_fn._captured_inputs[1],
concrete_fn._captured_inputs[2]
])
self.assertAllEqual(concrete_fn(), [5.])
def testDeferredCapture(self):
value = 1.0
@def_function.function
def lazy_capture(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(None))
return x + y
self.assertAllEqual(lazy_capture(2.0), 3.0)
# After changing the value of `value` the function call should return a
# different result.
value = 2.0
self.assertAllEqual(lazy_capture(2.0), 4.0)
def testNestedDeferredCapture(self):
value = 1.0
@def_function.function
def inner(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(None))
return x + y
@def_function.function
def outer(x):
return inner(x)
self.assertAllEqual(outer(2.0), 3.0)
# After changing the value of `value` the function call should return a
# different result.
value = 2.0
self.assertAllEqual(outer(2.0), 4.0)
def testNestedDeferredCaptureInTFWhileLoop(self):
value = 1.
@def_function.function
def inner(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(None))
return x + y
@def_function.function
def outer():
dummy = constant_op.constant(True)
sums = constant_op.constant(0.)
while dummy:
directives.set_loop_options(
shape_invariants=[(sums, tensor_shape.TensorShape(None))])
sums += inner(2.)
dummy = constant_op.constant(False)
return sums
self.assertAllEqual(outer(), 3.)
value = constant_op.constant(2.)
self.assertAllEqual(outer(), 4.)
value = constant_op.constant(3.)
self.assertAllEqual(outer(), 5.)
def testDeferredCaptureWithKey(self):
value0 = 1.0
value1 = 2.0
@def_function.function
def lazy_capture(x):
w = ops.get_default_graph().capture_call_time_value(
lambda: value0, tensor_spec.TensorSpec(None), key=0)
y = ops.get_default_graph().capture_call_time_value(
lambda: value1, tensor_spec.TensorSpec(None), key=1)
def bad_closure():
raise ValueError('Should not run')
z = ops.get_default_graph().capture_call_time_value(
bad_closure, tensor_spec.TensorSpec(None), key=1)
return x + y + w + z
self.assertAllEqual(lazy_capture(2.0), 7.0)
value0 = 2.0
value1 = 3.0
self.assertAllEqual(lazy_capture(2.0), 10.0)
def testDeferredCaptureTypeError(self):
value = constant_op.constant(1.0)
@def_function.function
def lazy_capture(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(()))
return x + y
self.assertAllEqual(lazy_capture(2.0), 3.0)
# dtype mismatch
value = constant_op.constant(1)
with self.assertRaisesRegex(ValueError, 'Value .* to a tensor with dtype'):
lazy_capture(2.0)
# shape mismatch
value = constant_op.constant([1.0])
with self.assertRaisesRegex(ValueError, 'Value .* shape'):
lazy_capture(2.0)
def testDeferredCaptureReturnNestWithCompositeTensor(self):
i_s = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64),
constant_op.constant([2]))
r_t = ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]])
s_t = sparse_tensor.SparseTensor(
values=[1, 2, 3], indices=[[0], [8], [10]], dense_shape=[20])
@def_function.function
def lazy_capture():
y = ops.get_default_graph().capture_call_time_value(
lambda: {'i': i_s, 't': (r_t, s_t)},
{'i': indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int32),
't': (ragged_tensor.RaggedTensorSpec([2, None, None], dtypes.int32),
sparse_tensor.SparseTensorSpec([None], dtypes.int32))})
return y['i'], y['t']
i, (r, s) = lazy_capture()
self.assertAllEqual(i_s.values, i.values)
self.assertAllEqual(i_s.indices, i.indices)
self.assertAllEqual(i_s.dense_shape, i.dense_shape)
self.assertAllEqual(r_t, r)
self.assertAllEqual(s_t.indices, s.indices)
self.assertAllEqual(s_t.values, s.values)
self.assertAllEqual(s_t.dense_shape, s.dense_shape)
def testDeferredCaptureCompositeTensorSpecTypeMismatch(self):
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64))
@def_function.function
def lazy_capture():
return ops.get_default_graph().capture_call_time_value(
lambda: value,
indexed_slices.IndexedSlicesSpec(dtype=dtypes.int32))
# Type matches spec.
lazy_capture()
# Extra dense shape component.
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64),
constant_op.constant([2]))
with self.assertRaises(ValueError):
lazy_capture()
# Index dtype mismatch int32 vs. int64.
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1]))
with self.assertRaises(ValueError):
lazy_capture()
def testFunctoolsLruCache(self):
self.skipTest(
"b/194845243: inspect.getfullargspec doesn't unwrap Python decorators.")
@def_function.function
@functools.lru_cache(maxsize=2)
def f(a):
return 2 * a
self.assertAllEqual(f(1), array_ops.constant(2))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
py | 1a382fd2cf667fb4b4de45d7cd6da7c28a33adeb | import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
import time
import ot
from scipy import linalg
from scipy import sparse
import gromovWassersteinAveraging as gwa
import spectralGW as sgw
from geodesicVisualization import *
from GromovWassersteinGraphToolkit import *
import json
# Import Graph Partitioning Packages
from infomap import Infomap
# Load the S-GWL code
import DataIO as DataIO
import EvaluationMeasure as Eval
import GromovWassersteinGraphToolkit as GwGt
import pickle
import warnings
# Load modules for network partitioning experiments
from networkx.algorithms.community import greedy_modularity_communities
from networkx.algorithms.community.asyn_fluid import asyn_fluidc
from networkx.algorithms.community.quality import performance, coverage, modularity
from sklearn import metrics
from scipy.cluster.hierarchy import dendrogram, linkage, cut_tree
from scipy.signal import find_peaks
"""
Define some helper functions
"""
def graph_partition_gd2(cost_s, p_s, p_t,idx2node, ot_hyperpara, trans0=None):
"""
** May 19, 2020: Gradient descent version of graph_partition
Achieves a single graph partition by calculating the Gromov-Wasserstein discrepancy
between the target graph and the proposed one
Args:
cost_s: (n_s, n_s) adjacency matrix of source graph
p_s: (n_s, 1) the distribution of source nodes
p_t: (n_t, 1) the distribution of target nodes
idx2node: a dictionary {key = idx of row in cost, value = name of node}
ot_hyperpara: a dictionary of hyperparameters
Returns:
sub_costs: a dictionary {key: cluster idx,
value: sub cost matrices}
sub_probs: a dictionary {key: cluster idx,
value: sub distribution of nodes}
sub_idx2nodes: a dictionary {key: cluster idx,
value: a dictionary mapping indices to nodes' names}
trans: (n_s, n_t) the optimal transport
d_gw: the Gromov-Wasserstein discrepancy of the returned partition
"""
cost_t = np.diag(p_t[:, 0])
cost_s = np.asarray(cost_s)
# cost_t = 1 / (1 + cost_t)
trans, log = gwa.gromov_wasserstein_asym_fixed_initialization(cost_s, cost_t, p_s.flatten(), p_t.flatten(), trans0)
d_gw = log['gw_dist']
sub_costs, sub_probs, sub_idx2nodes = node_cluster_assignment(cost_s, trans, p_s, p_t, idx2node)
return sub_costs, sub_probs, sub_idx2nodes, trans, d_gw
def get_partition(coup):
est_idx = np.argmax(coup, axis=1)
num_clusters = np.max(est_idx)
partition = []
for j in range(num_clusters+1):
partition.append(set(np.argwhere(est_idx == j).T[0]))
return partition
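# Note: get_partition turns the soft GW coupling into a hard clustering by
# assigning each source node to the target column that receives the most
# transport mass (argmax over the rows of the coupling matrix).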
"""
Main Experiment
"""
num_trials = 10
num_nodes = 1000
clique_size = 150
p_in = 0.5
ps_out = [0.08, 0.10, 0.12, 0.15]
ot_dict = {'loss_type': 'L2', # the key hyperparameters of GW distance
'ot_method': 'proximal',
'beta': 0.15,
'outer_iteration': 2 * num_nodes, # outer, inner iterations and error bound of optimal transport
'iter_bound': 1e-30,
'inner_iteration': 5,
'sk_bound': 1e-30,
'node_prior': 0.0001,
               'max_iter': 1,  # iteration and error bound for calculating barycenter
'cost_bound': 1e-16,
'update_p': False, # optional updates of source distribution
'lr': 0,
'alpha': 0}
# Range to search for optimal number of clusters over
num_clusts = list(range(3,10))
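# Model selection below: the cluster count and the heat-kernel scale t are each
# chosen by maximizing the modularity of the specGW partition on a training
# graph drawn from the same random-partition model.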
train_times = []
specGW_avg_amis = []
specGW_avg_times = []
GWL_avg_amis = []
GWL_avg_times = []
infoMap_avg_amis = []
infoMap_avg_times = []
for pn in range(len(ps_out)):
print('Starting p_out index = ',pn)
##############################################
# Training specGW
##############################################
G = nx.gaussian_random_partition_graph(n=num_nodes, s=clique_size, v=8,
p_in=p_in, p_out=ps_out[pn], directed=True)
p_s, cost_s, idx2node = DataIO.extract_graph_info(G)
p_s = (p_s + 1) ** 0.01
p_s /= np.sum(p_s)
start = time.time()
t = 10
cost = sgw.directed_heat_kernel(G,t)
modularities = []
for j in num_clusts:
p_t = GwGt.estimate_target_distribution({0: p_s}, dim_t=j)
sub_costs, sub_probs, sub_idx2nodes, coup, d_gw = graph_partition_gd2(cost,
p_s,
p_t,
idx2node,
ot_dict)
partition = get_partition(coup)
modularities.append(modularity(G,partition))
est_num_clust = num_clusts[np.argmax(modularities)]
ts = np.linspace(5,15,10)
modularities = []
for t in ts:
cost = sgw.directed_heat_kernel(G,t)
p_t = GwGt.estimate_target_distribution({0: p_s}, dim_t=est_num_clust)
sub_costs, sub_probs, sub_idx2nodes, coup, d_gw = graph_partition_gd2(cost,
p_s,
p_t,
idx2node,
ot_dict)
partition = get_partition(coup)
modularities.append(modularity(G,partition))
est_t_value = ts[np.argmax(modularities)]
end = time.time()
training_time = end - start
train_times.append(training_time)
print('Time to Train:', training_time)
print('Estimated Clusters:', est_num_clust)
print('Estimated t value:', est_t_value)
##############################################
# Main Experiment
##############################################
gwl_amis = []
gwl_times = []
specGW_amis = []
specGW_times = []
infoMap_amis = []
infoMap_times = []
for j in range(num_trials):
# Create Graph
G = nx.gaussian_random_partition_graph(n=num_nodes, s=clique_size, v=5,
p_in=p_in, p_out=ps_out[pn], directed=True)
gt = np.zeros((num_nodes,))
for i in range(len(G.nodes)):
gt[i] = G.nodes[i]['block']
num_partitions = int(np.max(gt) + 1)
p_s, cost_s, idx2node = DataIO.extract_graph_info(G)
p_s = (p_s + 1) ** 0.01
p_s /= np.sum(p_s)
# Run SpecGW
start = time.time()
cost = sgw.directed_heat_kernel(G,est_t_value)
p_t = GwGt.estimate_target_distribution({0: p_s}, dim_t=est_num_clust)
sub_costs, sub_probs, sub_idx2nodes, coup, d_gw = graph_partition_gd2(cost,
p_s,
p_t,
idx2node,
ot_dict)
est_idx = np.argmax(coup, axis=1)
ami = metrics.adjusted_mutual_info_score(gt, est_idx, average_method='max')
end = time.time()
specGW_amis.append(ami)
specGW_times.append(end - start)
# print('SpecGW AMI:',ami,' Time:',end -start)
# Run GWL
start = time.time()
sub_costs, sub_probs, sub_idx2nodes = GwGt.recursive_graph_partition(cost_s,
p_s,
idx2node,
ot_dict,
max_node_num=300)
est_idx = np.zeros((num_nodes,))
for n_cluster in range(len(sub_idx2nodes)):
for key in sub_idx2nodes[n_cluster].keys():
idx = sub_idx2nodes[n_cluster][key]
est_idx[idx] = n_cluster
ami = metrics.adjusted_mutual_info_score(gt, est_idx, average_method='max')
end = time.time()
gwl_amis.append(ami)
gwl_times.append(end-start)
# print('GWL AMI:',ami,' Time:',end -start)
# Run InfoMap
start = time.time()
im = Infomap()
for edge in G.edges:
im.add_link(edge[0], edge[1])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
if node.is_leaf:
est_idx[node.node_id] = node.module_id
ami = metrics.adjusted_mutual_info_score(gt, est_idx, average_method='max')
end = time.time()
infoMap_amis.append(ami)
infoMap_times.append(end-start)
# print('InfoMap AMI:',ami,' Time:',end -start)
specGW_avg_amis.append(np.mean(specGW_amis))
specGW_avg_times.append(np.mean(specGW_times))
GWL_avg_amis.append(np.mean(gwl_amis))
GWL_avg_times.append(np.mean(gwl_times))
infoMap_avg_amis.append(np.mean(infoMap_amis))
infoMap_avg_times.append(np.mean(infoMap_times))
print('Average AMIs:')
print('p_out','specGW','GWL','Infomap')
for j in range(len(ps_out)):
print(ps_out[j],np.round(specGW_avg_amis,3)[j],np.round(GWL_avg_amis,3)[j],np.round(infoMap_avg_amis,3)[j])
print('Average times:')
print('p_out','specGW','GWL','Infomap')
for j in range(len(ps_out)):
print(ps_out[j],np.round(specGW_avg_times,2)[j],np.round(GWL_avg_times,2)[j],np.round(infoMap_avg_times,2)[j])
## Store results
ami_p_out = []
ami_specGW = []
ami_GWL = []
ami_Infomap = []
times_p_out = []
times_specGW = []
times_GWL = []
times_Infomap = []
for j in range(len(ps_out)):
ami_p_out.append(ps_out[j])
ami_specGW.append(np.round(specGW_avg_amis,3)[j])
ami_GWL.append(np.round(GWL_avg_amis,3)[j])
ami_Infomap.append(np.round(infoMap_avg_amis,3)[j])
times_p_out.append(ps_out[j])
times_specGW.append(np.round(specGW_avg_times,2)[j])
times_GWL.append(np.round(GWL_avg_times,2)[j])
times_Infomap.append(np.round(infoMap_avg_times,2)[j])
res_ami = {}#pd.DataFrame()
res_ami['p_out'] = ami_p_out
res_ami['specGW'] = ami_specGW
res_ami['GWL'] = ami_GWL
res_ami['Infomap'] = ami_Infomap
res_times = {}#pd.DataFrame()
res_times['p_out'] = times_p_out
res_times['specGW'] = times_specGW
res_times['GWL'] = times_GWL
res_times['Infomap'] = times_Infomap
with open('res_randomGraphPartitioning.txt', 'w') as outfile:
json.dump(['Average AMIs',
res_ami,
'Average times',
res_times], outfile,indent=0) |
py | 1a383005131ca9f629fd89021825b33e01db05a8 | """Miscellaneous utilities for images"""
from functools import partial
import mimetypes
from wildebeest.util import find_files_with_extensions
find_image_files = partial(
find_files_with_extensions,
extensions=[k for k, v in mimetypes.types_map.items() if v.startswith("image/")],
)
"""Find all image files in a directory"""
|
py | 1a38304c354e5961c64df760461dd2bf8913cdae | #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "portia_server.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | 1a383090f9461be888866fb8a91ba8d464a436c3 | import gzip
import os
import struct
import tempfile
import unittest
from io import BytesIO, StringIO, TextIOWrapper
from unittest import mock
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.move import file_move_safe
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, UploadedFile,
)
try:
from PIL import Image
except ImportError:
Image = None
else:
from django.core.files import images
class FileTests(unittest.TestCase):
def test_unicode_uploadedfile_name(self):
uf = UploadedFile(name='¿Cómo?', content_type='text')
self.assertIs(type(repr(uf)), str)
def test_unicode_file_name(self):
f = File(None, 'djángö')
self.assertIs(type(repr(f)), str)
def test_context_manager(self):
orig_file = tempfile.TemporaryFile()
base_file = File(orig_file)
with base_file as f:
self.assertIs(base_file, f)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
self.assertTrue(orig_file.closed)
def test_open_resets_opened_file_to_start_and_returns_context_manager(self):
file = File(BytesIO(b'content'))
file.read()
with file.open() as f:
self.assertEqual(f.read(), b'content')
def test_open_reopens_closed_file_and_returns_context_manager(self):
temporary_file = tempfile.NamedTemporaryFile(delete=False)
file = File(temporary_file)
try:
file.close()
with file.open() as f:
self.assertFalse(f.closed)
finally:
# remove temporary file
os.unlink(file.name)
def test_namedtemporaryfile_closes(self):
"""
The symbol django.core.files.NamedTemporaryFile is assigned as
a different class on different operating systems. In
any case, the result should minimally mock some of the API of
tempfile.NamedTemporaryFile from the Python standard library.
"""
tempfile = NamedTemporaryFile()
self.assertTrue(hasattr(tempfile, "closed"))
self.assertFalse(tempfile.closed)
tempfile.close()
self.assertTrue(tempfile.closed)
def test_file_mode(self):
# Should not set mode to None if it is not present.
# See #14681, stdlib gzip module crashes if mode is set to None
file = SimpleUploadedFile("mode_test.txt", b"content")
self.assertFalse(hasattr(file, 'mode'))
gzip.GzipFile(fileobj=file)
def test_file_iteration(self):
"""
File objects should yield lines when iterated over.
Refs #22107.
"""
file = File(BytesIO(b'one\ntwo\nthree'))
self.assertEqual(list(file), [b'one\n', b'two\n', b'three'])
def test_file_iteration_windows_newlines(self):
"""
#8149 - File objects with \r\n line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_mac_newlines(self):
"""
#8149 - File objects with \r line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\rtwo\rthree'))
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_mixed_newlines(self):
f = File(BytesIO(b'one\rtwo\nthree\r\nfour'))
self.assertEqual(list(f), [b'one\r', b'two\n', b'three\r\n', b'four'])
def test_file_iteration_with_unix_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\ntwo\nthree'))
# Set chunk size to create a boundary after \n:
# b'one\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\n', b'two\n', b'three'])
def test_file_iteration_with_windows_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
# Set chunk size to create a boundary between \r and \n:
# b'one\r\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_with_mac_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\rtwo\rthree'))
# Set chunk size to create a boundary after \r:
# b'one\r...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_with_text(self):
f = File(StringIO('one\ntwo\nthree'))
self.assertEqual(list(f), ['one\n', 'two\n', 'three'])
def test_readable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.readable())
self.assertFalse(test_file.readable())
def test_writable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.writable())
self.assertFalse(test_file.writable())
with tempfile.TemporaryFile('rb') as temp, File(temp, name='something.txt') as test_file:
self.assertFalse(test_file.writable())
def test_seekable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.seekable())
self.assertFalse(test_file.seekable())
def test_io_wrapper(self):
content = "vive l'été\n"
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
test_file.write(content.encode())
test_file.seek(0)
wrapper = TextIOWrapper(test_file, 'utf-8', newline='\n')
self.assertEqual(wrapper.read(), content)
wrapper.write(content)
wrapper.seek(0)
self.assertEqual(wrapper.read(), content * 2)
test_file = wrapper.detach()
test_file.seek(0)
self.assertEqual(test_file.read(), (content * 2).encode())
class NoNameFileTestCase(unittest.TestCase):
"""
Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
urllib.urlopen()
"""
def test_noname_file_default_name(self):
self.assertIsNone(File(BytesIO(b'A file with no name')).name)
def test_noname_file_get_size(self):
self.assertEqual(File(BytesIO(b'A file with no name')).size, 19)
class ContentFileTestCase(unittest.TestCase):
def test_content_file_default_name(self):
self.assertIsNone(ContentFile(b"content").name)
def test_content_file_custom_name(self):
"""
The constructor of ContentFile accepts 'name' (#16590).
"""
name = "I can have a name too!"
self.assertEqual(ContentFile(b"content", name=name).name, name)
def test_content_file_input_type(self):
"""
ContentFile can accept both bytes and strings and the retrieved content
is of the same type.
"""
self.assertIsInstance(ContentFile(b"content").read(), bytes)
self.assertIsInstance(ContentFile("español").read(), str)
def test_open_resets_file_to_start_and_returns_context_manager(self):
file = ContentFile(b'content')
with file.open() as f:
self.assertEqual(f.read(), b'content')
with file.open() as f:
self.assertEqual(f.read(), b'content')
class InMemoryUploadedFileTests(unittest.TestCase):
def test_open_resets_file_to_start_and_returns_context_manager(self):
uf = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
uf.read()
with uf.open() as f:
self.assertEqual(f.read(), '1')
class DimensionClosingBug(unittest.TestCase):
"""
get_image_dimensions() properly closes files (#8817)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay open.
"""
empty_io = BytesIO()
try:
images.get_image_dimensions(empty_io)
finally:
self.assertTrue(not empty_io.closed)
@unittest.skipUnless(Image, "Pillow not installed")
def test_closing_of_filenames(self):
"""
get_image_dimensions() called with a filename should close the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks if the file was closed properly if the function is
# called with a filename instead of a file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper:
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
images.open = catching_open
try:
images.get_image_dimensions(os.path.join(os.path.dirname(__file__), "test1.png"))
finally:
del images.open
self.assertTrue(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
"""
get_image_dimensions() works properly after various calls
using a file handler (#11158)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_multiple_calls(self):
"""
Multiple calls of get_image_dimensions() should return the same size.
"""
img_path = os.path.join(os.path.dirname(__file__), "test.png")
with open(img_path, 'rb') as fh:
image = images.ImageFile(fh)
image_pil = Image.open(fh)
size_1 = images.get_image_dimensions(image)
size_2 = images.get_image_dimensions(image)
self.assertEqual(image_pil.size, size_1)
self.assertEqual(size_1, size_2)
@unittest.skipUnless(Image, "Pillow not installed")
def test_bug_19457(self):
"""
Regression test for #19457
get_image_dimensions() fails on some PNGs, while Image.size works correctly on them
"""
img_path = os.path.join(os.path.dirname(__file__), "magic.png")
size = images.get_image_dimensions(img_path)
with open(img_path, 'rb') as fh:
self.assertEqual(size, Image.open(fh).size)
@unittest.skipUnless(Image, "Pillow not installed")
class GetImageDimensionsTests(unittest.TestCase):
def test_invalid_image(self):
"""
get_image_dimensions() should return (None, None) for the dimensions of
invalid images (#24441).
brokenimg.png is not a valid image and it has been generated by:
$ echo "123" > brokenimg.png
"""
img_path = os.path.join(os.path.dirname(__file__), "brokenimg.png")
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
def test_valid_image(self):
"""
get_image_dimensions() should catch struct.error while feeding the PIL
Image parser (#24544).
Emulates the Parser feed error. Since the error is raised on every feed
attempt, the resulting image size should be invalid: (None, None).
"""
img_path = os.path.join(os.path.dirname(__file__), "test.png")
with mock.patch('PIL.ImageFile.Parser.feed', side_effect=struct.error):
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
class FileMoveSafeTests(unittest.TestCase):
def test_file_move_overwrite(self):
handle_a, self.file_a = tempfile.mkstemp()
handle_b, self.file_b = tempfile.mkstemp()
# file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False
with self.assertRaises(IOError):
file_move_safe(self.file_a, self.file_b, allow_overwrite=False)
# should allow it and continue on if allow_overwrite is True
self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True))
os.close(handle_a)
os.close(handle_b)
class SpooledTempTests(unittest.TestCase):
def test_in_memory_spooled_temp(self):
with tempfile.SpooledTemporaryFile() as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
def test_written_spooled_temp(self):
with tempfile.SpooledTemporaryFile(max_size=4) as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
|
py | 1a3830c5ff3d92b8f7658e4c9f21e7b31bd38dba | from commands import commands
from socket import socket
import json
# All these "magic" variables come from reverse engineering the HS100 app, Kasa
# We decompiled it and found their encryption function, then wrote this to try
# to connect and manipulate a HS100, which it does! It appears they use no form
# of authentication or fancy crypto algorithms for encryption
# -85 in the Kasa app, but bytes are unsigned,
# so 256 - 85 = 171
STARTING_BYTE = 171
# 4 hard coded null characters pad each string sent and received.
STARTING_PAD = b"\0\0\0\0"
# Revealed via netcat
PORT = 9999
def encrypt(string):
"""Encrypts a string for transferring to an HS100, they use a simple
autokey cipher padded by 4 null characters
Args:
string: a json string the HS100 should understand
Returns:
bytearray: a bytearray of encrypted bytes using the reverse-engineered
autokey cipher
"""
byte = STARTING_BYTE
encrypted = bytearray(STARTING_PAD)
for char in string:
byte = byte ^ ord(char)
encrypted.append(byte)
return encrypted
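# Worked example (illustrative): encrypting "ab" starts from key 171;
# 171 ^ ord('a') = 202, then 202 ^ ord('b') = 168, so the result is
# b"\x00\x00\x00\x00" + bytes([202, 168]).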
def decrypt(bytes):
"""Decrypts a bytes sent from an HS100 response
Args:
bytes: the raw bytes sent back from an HS100 to decrypt
Returns:
str: should be a JSON string if a valid command was sent, but may
also be an empty string if there was no response. Either way the
returned value has been decrypted
"""
# chop off the 4 padded null bytes at the beginning
bytes = bytes[len(STARTING_PAD):]
key = STARTING_BYTE
decrypted = ""
for byte in bytes:
decrypted += chr(key ^ byte)
key = byte
return decrypted
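# Because decrypt() strips the same 4-byte pad and reverses the XOR chaining,
# decrypt(bytes(encrypt(s))) == s for any string s (round-trip sanity check).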
def query(host, command):
"""Simply given a host an a shorthand command alias, runs that command and
returns the response from the HS100
Args:
host: string of the valid hostname that is the location of the HS100
command: string that is a valid command to run, from commands.py
Returns:
str: the returned str from the HS100, empty string means an error
"""
if command not in commands:
# make sure it is valid json
try:
json.loads(command)
command_string = command
except ValueError:
raise Exception(
"Command {} not known and is not valid JSON".format(command)
)
else:
# the command is a shorthand name, so look it up
command_string = commands[command]
tcp = socket()
tcp.connect((host, PORT))
send = encrypt(command_string)
tcp.send(send)
# 4KB of data should be enough for any response
data = tcp.recv(4096)
# we are done with the query, now we need to parse it
tcp.close()
response = decrypt(data)
return response
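# Example usage (illustrative; assumes the plug is reachable at this address and
# that the raw JSON below is a query the device understands):
#   query("192.168.0.50", '{"system":{"get_sysinfo":{}}}')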
|
py | 1a3830f06fd11d43b5c683b0f9a95332a16a4801 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Alert.text'
db.alter_column('vaxapp_alert', 'text', self.gf('django.db.models.fields.CharField')(max_length=2, null=True))
def backwards(self, orm):
# Changing field 'Alert.text'
db.alter_column('vaxapp_alert', 'text', self.gf('django.db.models.fields.CharField')(max_length=160, null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'vaxapp.alert': {
'Meta': {'object_name': 'Alert'},
'analyzed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'countrystock': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.CountryStock']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reference_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'risk': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '2', 'null': 'True', 'blank': 'True'})
},
'vaxapp.country': {
'Meta': {'object_name': 'Country'},
'iso2_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso3_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
'numerical_code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'vaxapp.countrystock': {
'Meta': {'object_name': 'CountryStock'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'vaccine': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.Vaccine']"})
},
'vaxapp.countrystockstats': {
'Meta': {'object_name': 'CountryStockStats'},
'actual_cons_rate': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'actual_cons_rate'", 'null': 'True', 'to': "orm['vaxapp.Dicty']"}),
'analyzed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'annual_demand': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'annual_demand'", 'null': 'True', 'to': "orm['vaxapp.Dicty']"}),
'consumed_in_year': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumed_in_year'", 'null': 'True', 'to': "orm['vaxapp.Dicty']"}),
'countrystock': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.CountryStock']"}),
'days_of_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'demand_for_period': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'doses_delivered_this_year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'doses_on_orders': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'est_daily_cons': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nine_by_year': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'nine_by_year'", 'null': 'True', 'to': "orm['vaxapp.Dicty']"}),
'percent_coverage': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'reference_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'three_by_year': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'three_by_year'", 'null': 'True', 'to': "orm['vaxapp.Dicty']"})
},
'vaxapp.dicty': {
'Meta': {'object_name': 'Dicty'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '160'})
},
'vaxapp.document': {
'Meta': {'object_name': 'Document'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'date_exception': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_process_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_process_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_queued': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_stored': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_uploaded': ('django.db.models.fields.DateTimeField', [], {}),
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_document': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'remote_document': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'vaxapp.keyval': {
'Meta': {'object_name': 'KeyVal'},
'dicty': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.Dicty']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'val': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'})
},
'vaxapp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.Country']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'vaxapp.vaccine': {
'Meta': {'object_name': 'Vaccine'},
'abbr_en': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'abbr_en_alt': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'abbr_fr': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'abbr_fr_alt': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.VaccineGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'})
},
'vaxapp.vaccinegroup': {
'Meta': {'object_name': 'VaccineGroup'},
'abbr_en': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
'abbr_fr': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['vaxapp']
|
py | 1a383131e423d8d993b27c8bfcf0de4c37f147d1 | # coding: utf-8
"""
Feed API
<p>The <strong>Feed API</strong> lets sellers upload input files, download reports and files including their status, filter reports using URI parameters, and retrieve customer service metrics task details.</p> # noqa: E501
OpenAPI spec version: v1.3.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InventoryFilterCriteria(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'creation_date_range': 'DateRange',
'modified_date_range': 'DateRange',
'listing_format': 'str',
'listing_status': 'str'
}
attribute_map = {
'creation_date_range': 'creationDateRange',
'modified_date_range': 'modifiedDateRange',
'listing_format': 'listingFormat',
'listing_status': 'listingStatus'
}
def __init__(self, creation_date_range=None, modified_date_range=None, listing_format=None, listing_status=None): # noqa: E501
"""InventoryFilterCriteria - a model defined in Swagger""" # noqa: E501
self._creation_date_range = None
self._modified_date_range = None
self._listing_format = None
self._listing_status = None
self.discriminator = None
if creation_date_range is not None:
self.creation_date_range = creation_date_range
if modified_date_range is not None:
self.modified_date_range = modified_date_range
if listing_format is not None:
self.listing_format = listing_format
if listing_status is not None:
self.listing_status = listing_status
@property
def creation_date_range(self):
"""Gets the creation_date_range of this InventoryFilterCriteria. # noqa: E501
:return: The creation_date_range of this InventoryFilterCriteria. # noqa: E501
:rtype: DateRange
"""
return self._creation_date_range
@creation_date_range.setter
def creation_date_range(self, creation_date_range):
"""Sets the creation_date_range of this InventoryFilterCriteria.
:param creation_date_range: The creation_date_range of this InventoryFilterCriteria. # noqa: E501
:type: DateRange
"""
self._creation_date_range = creation_date_range
@property
def modified_date_range(self):
"""Gets the modified_date_range of this InventoryFilterCriteria. # noqa: E501
:return: The modified_date_range of this InventoryFilterCriteria. # noqa: E501
:rtype: DateRange
"""
return self._modified_date_range
@modified_date_range.setter
def modified_date_range(self, modified_date_range):
"""Sets the modified_date_range of this InventoryFilterCriteria.
:param modified_date_range: The modified_date_range of this InventoryFilterCriteria. # noqa: E501
:type: DateRange
"""
self._modified_date_range = modified_date_range
@property
def listing_format(self):
"""Gets the listing_format of this InventoryFilterCriteria. # noqa: E501
The type of buying option for the order. Supports LMS_ACTIVE_INVENTORY_REPORT. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/feed/types/api:ListingFormatEnum'>eBay API documentation</a> # noqa: E501
:return: The listing_format of this InventoryFilterCriteria. # noqa: E501
:rtype: str
"""
return self._listing_format
@listing_format.setter
def listing_format(self, listing_format):
"""Sets the listing_format of this InventoryFilterCriteria.
The type of buying option for the order. Supports LMS_ACTIVE_INVENTORY_REPORT. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/feed/types/api:ListingFormatEnum'>eBay API documentation</a> # noqa: E501
:param listing_format: The listing_format of this InventoryFilterCriteria. # noqa: E501
:type: str
"""
self._listing_format = listing_format
@property
def listing_status(self):
"""Gets the listing_status of this InventoryFilterCriteria. # noqa: E501
The status of the listing (whether the listing was unsold or is active). The UNSOLD value does not apply to LMS_ACTIVE_INVENTORY_REPORT feed types. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/feed/types/api:ListingStatusEnum'>eBay API documentation</a> # noqa: E501
:return: The listing_status of this InventoryFilterCriteria. # noqa: E501
:rtype: str
"""
return self._listing_status
@listing_status.setter
def listing_status(self, listing_status):
"""Sets the listing_status of this InventoryFilterCriteria.
The status of the listing (whether the listing was unsold or is active). The UNSOLD value does not apply to LMS_ACTIVE_INVENTORY_REPORT feed types. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/feed/types/api:ListingStatusEnum'>eBay API documentation</a> # noqa: E501
:param listing_status: The listing_status of this InventoryFilterCriteria. # noqa: E501
:type: str
"""
self._listing_status = listing_status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InventoryFilterCriteria, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InventoryFilterCriteria):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
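# Usage sketch (assumes the keyword-argument constructor generated earlier in this
# model; the field values below are illustrative only):
#   criteria = InventoryFilterCriteria(listing_format='AUCTION', listing_status='ACTIVE')
#   payload = criteria.to_dict()   # plain dict suitable for the feed request body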
|
py | 1a38319f65043d7368c65c2d0d57a99a0256710b | from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from cassandra.query import SimpleStatement
import sys
# initializing variable defaults
hostname="127.0.0.1"
username="cassandra"
password="cassandra"
#check arguments for overrides; all five positional arguments are required
if len(sys.argv) < 6:
    sys.exit(f"usage: {sys.argv[0]} <hostname> <username> <password> <keyspace> <table>")
hostname = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
keyspace = sys.argv[4]
table = sys.argv[5]
#key=sys.argv[6]
#value=sys.argv[7]
nodes = []
nodes.append(hostname)
auth_provider = PlainTextAuthProvider(username=username, password=password)
cluster = Cluster(nodes,auth_provider=auth_provider)
session = cluster.connect()
#strCQL = f"SELECT * FROM {keyspace}.{table} WHERE {key} = ?"
strCQL = f"SELECT * FROM {keyspace}.{table}"
print(strCQL)
#statement = session.prepare(strCQL)
statement = SimpleStatement(strCQL,fetch_size=100)
#rows = session.execute(statement,value)
rows = session.execute(statement)
for row in rows:
print(row)
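# Sketch of the commented-out key/value filtering above (assumption: `key` names a
# partition-key column passed as sys.argv[6] and `value` as sys.argv[7]):
#   strCQL = f"SELECT * FROM {keyspace}.{table} WHERE {key} = ?"
#   prepared = session.prepare(strCQL)
#   rows = session.execute(prepared, [value])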
|
py | 1a3831e7d2d0887ef329235b18383f24fc6ddee3 | # _*_ coding: utf-8 _*_
"""
parse.py by xianhu
"""
import logging
import multiprocessing
from .base import TPEnum, BaseThread
from ...utilities import CONFIG_ERROR_MESSAGE, check_url_legal, get_dict_buildin
class ParseThread(BaseThread):
"""
class of ParseThread, as the subclass of BaseThread
"""
def __init__(self, name, worker, pool):
"""
constructor
"""
BaseThread.__init__(self, name, worker, pool)
        self._pool_multiprocessing = multiprocessing.Pool()
return
def working(self):
"""
procedure of parsing, auto running, and return True
"""
# ----1----
task_list = [self._pool.get_a_task(TPEnum.HTM_PARSE) for _ in range(max(1, self._pool.get_number_dict(TPEnum.HTM_PARSE_NOT)))]
# ----2----
        result_list = [self._pool_multiprocessing.apply_async(self._worker.working, args=task) for task in task_list]
for index in range(len(task_list)):
priority, url, keys, deep, content = task_list[index]
parse_state, url_list, item = result_list[index].get(timeout=None)
# ----3----
self._pool.accept_state_from_task(TPEnum.HTM_PARSE, parse_state, (priority, url, keys, deep, content))
# ----4----
if parse_state > 0:
self._pool.update_number_dict(TPEnum.HTM_PARSE_SUCC, +1)
for _url, _keys, _priority in filter(lambda x: check_url_legal(x[0]), url_list):
self._pool.add_a_task(TPEnum.URL_FETCH, (_priority, _url, _keys, deep+1, 0))
if item:
self._pool.add_a_task(TPEnum.ITEM_SAVE, (priority, url, keys, deep, item))
else:
self._pool.update_number_dict(TPEnum.HTM_PARSE_FAIL, +1)
logging.error("%s error: %s, %s", url_list[0], url_list[1], CONFIG_ERROR_MESSAGE % (priority, get_dict_buildin(keys), deep, url))
# ----5----
self._pool.finish_a_task(TPEnum.HTM_PARSE)
# ----6----
return True
|
py | 1a383365dde1b8508f3ab8dce672db29d3ae7c0c | from deco.sources import Dataset
class Constant(Dataset):
def __init__(self, parent):
self.parent = parent
def __iter__(self):
yield self.parent
def constant(parent):
return Constant(parent)
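# Usage sketch (assumption about the surrounding `deco` API): the assignment below
# attaches `constant` to `Dataset`, so calling it on a dataset instance wraps that
# dataset in a one-element stream that yields the parent object itself, e.g.
#   wrapped = some_dataset.constant()
#   for item in wrapped:
#       pass  # `item` is `some_dataset`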
Dataset.constant = constant |
py | 1a38336bf99000d87a8cddf08eaef0f437909da9 | from flask import Flask
app = Flask(__name__)
app.config.from_object('myproject.config')
from myproject.views import index
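# Importing the views after `app` is created is the usual Flask pattern for avoiding
# a circular import, since `myproject.views` typically imports `app` back from here.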
|
py | 1a3833ae0991b36ec1a15b80a97f3d29862392ae | X = 99
def f1():
global X
X += 1
def f2():
print(X)
f2()   # prints 99: f1 has not run yet, so the global X is unchanged
f1()   # the `global X` declaration lets f1 increment the module-level X to 100
print(X) |
py | 1a383662aee7141c9b3c07f0cdf7733e37133331 | r"""
Genomics operations
"""
import collections
import os
import re
from functools import reduce
from itertools import chain
from operator import add
from typing import Any, Callable, List, Mapping, Optional, Union
import anndata
import networkx as nx
import numpy as np
import pandas as pd
import pybedtools
from pybedtools import BedTool
from pybedtools.cbedtools import Interval
from .check import check_deps
from .graph import compose_multigraph, reachable_vertices
from .typehint import RandomState
from .utils import ConstrainedDataFrame, logged, smart_tqdm, get_rs
class Bed(ConstrainedDataFrame):
r"""
BED format data frame
"""
COLUMNS = pd.Index([
"chrom", "chromStart", "chromEnd", "name", "score",
"strand", "thickStart", "thickEnd", "itemRgb",
"blockCount", "blockSizes", "blockStarts"
])
@classmethod
def rectify(cls, df: pd.DataFrame) -> pd.DataFrame:
df = super(Bed, cls).rectify(df)
COLUMNS = cls.COLUMNS.copy(deep=True)
for item in COLUMNS:
if item in df:
if item in ("chromStart", "chromEnd"):
df[item] = df[item].astype(int)
else:
df[item] = df[item].astype(str)
elif item not in ("chrom", "chromStart", "chromEnd"):
df[item] = "."
else:
raise ValueError(f"Required column {item} is missing!")
return df.loc[:, COLUMNS]
@classmethod
def verify(cls, df: pd.DataFrame) -> None:
super(Bed, cls).verify(df)
if len(df.columns) != len(cls.COLUMNS) or np.any(df.columns != cls.COLUMNS):
raise ValueError("Invalid BED format!")
@classmethod
def read_bed(cls, fname: os.PathLike) -> "Bed":
r"""
Read BED file
Parameters
----------
fname
BED file
Returns
-------
bed
Loaded :class:`Bed` object
"""
COLUMNS = cls.COLUMNS.copy(deep=True)
loaded = pd.read_csv(fname, sep="\t", header=None, comment="#")
loaded.columns = COLUMNS[:loaded.shape[1]]
return cls(loaded)
def write_bed(self, fname: os.PathLike, ncols: Optional[int] = None) -> None:
r"""
Write BED file
Parameters
----------
fname
BED file
ncols
Number of columns to write (by default write all columns)
"""
if ncols and ncols < 3:
raise ValueError("`ncols` must be larger than 3!")
df = self.df.iloc[:, :ncols] if ncols else self
df.to_csv(fname, sep="\t", header=False, index=False)
def to_bedtool(self) -> pybedtools.BedTool:
r"""
Convert to a :class:`pybedtools.BedTool` object
Returns
-------
bedtool
Converted :class:`pybedtools.BedTool` object
"""
return BedTool(Interval(
row["chrom"], row["chromStart"], row["chromEnd"],
name=row["name"], score=row["score"], strand=row["strand"]
) for _, row in self.iterrows())
def nucleotide_content(self, fasta: os.PathLike) -> pd.DataFrame:
r"""
Compute nucleotide content in the BED regions
Parameters
----------
fasta
Genomic sequence file in FASTA format
Returns
-------
nucleotide_stat
Data frame containing nucleotide content statistics for each region
"""
result = self.to_bedtool().nucleotide_content(fi=os.fspath(fasta), s=True) # pylint: disable=unexpected-keyword-arg
result = pd.DataFrame(
np.stack([interval.fields[6:15] for interval in result]),
columns=[
r"%AT", r"%GC",
r"#A", r"#C", r"#G", r"#T", r"#N",
r"#other", r"length"
]
).astype({
r"%AT": float, r"%GC": float,
r"#A": int, r"#C": int, r"#G": int, r"#T": int, r"#N": int,
r"#other": int, r"length": int
})
pybedtools.cleanup()
return result
def strand_specific_start_site(self) -> "Bed":
r"""
Convert to strand-specific start sites of genomic features
Returns
-------
start_site_bed
A new :class:`Bed` object, containing strand-specific start sites
of the current :class:`Bed` object
"""
if set(self["strand"]) != set(["+", "-"]):
raise ValueError("Not all features are strand specific!")
df = pd.DataFrame(self, copy=True)
pos_strand = df.query("strand == '+'").index
neg_strand = df.query("strand == '-'").index
df.loc[pos_strand, "chromEnd"] = df.loc[pos_strand, "chromStart"] + 1
df.loc[neg_strand, "chromStart"] = df.loc[neg_strand, "chromEnd"] - 1
return type(self)(df)
def strand_specific_end_site(self) -> "Bed":
r"""
Convert to strand-specific end sites of genomic features
Returns
-------
end_site_bed
A new :class:`Bed` object, containing strand-specific end sites
of the current :class:`Bed` object
"""
if set(self["strand"]) != set(["+", "-"]):
raise ValueError("Not all features are strand specific!")
df = pd.DataFrame(self, copy=True)
pos_strand = df.query("strand == '+'").index
neg_strand = df.query("strand == '-'").index
df.loc[pos_strand, "chromStart"] = df.loc[pos_strand, "chromEnd"] - 1
df.loc[neg_strand, "chromEnd"] = df.loc[neg_strand, "chromStart"] + 1
return type(self)(df)
def expand(
self, upstream: int, downstream: int,
chr_len: Optional[Mapping[str, int]] = None
) -> "Bed":
r"""
Expand genomic features towards upstream and downstream
Parameters
----------
upstream
Number of bps to expand in the upstream direction
downstream
Number of bps to expand in the downstream direction
chr_len
Length of each chromosome
Returns
-------
expanded_bed
A new :class:`Bed` object, containing expanded features
of the current :class:`Bed` object
Note
----
Starting position < 0 after expansion is always trimmed.
        Ending position exceeding chromosome length is trimmed only if
``chr_len`` is specified.
"""
if upstream == downstream == 0:
return self
df = pd.DataFrame(self, copy=True)
if upstream == downstream: # symmetric
df["chromStart"] -= upstream
df["chromEnd"] += downstream
else: # asymmetric
if set(df["strand"]) != set(["+", "-"]):
raise ValueError("Not all features are strand specific!")
pos_strand = df.query("strand == '+'").index
neg_strand = df.query("strand == '-'").index
if upstream:
df.loc[pos_strand, "chromStart"] -= upstream
df.loc[neg_strand, "chromEnd"] += upstream
if downstream:
df.loc[pos_strand, "chromEnd"] += downstream
df.loc[neg_strand, "chromStart"] -= downstream
df["chromStart"] = np.maximum(df["chromStart"], 0)
if chr_len:
chr_len = df["chrom"].map(chr_len)
df["chromEnd"] = np.minimum(df["chromEnd"], chr_len)
return type(self)(df)
class Gtf(ConstrainedDataFrame): # gffutils is too slow
r"""
GTF format data frame
"""
COLUMNS = pd.Index([
"seqname", "source", "feature", "start", "end",
"score", "strand", "frame", "attribute"
]) # Additional columns after "attribute" is allowed
@classmethod
def rectify(cls, df: pd.DataFrame) -> pd.DataFrame:
df = super(Gtf, cls).rectify(df)
COLUMNS = cls.COLUMNS.copy(deep=True)
for item in COLUMNS:
if item in df:
if item in ("start", "end"):
df[item] = df[item].astype(int)
else:
df[item] = df[item].astype(str)
elif item not in ("seqname", "start", "end"):
df[item] = "."
else:
raise ValueError(f"Required column {item} is missing!")
return df.sort_index(axis=1, key=cls._column_key)
@classmethod
def _column_key(cls, x: pd.Index) -> np.ndarray:
x = cls.COLUMNS.get_indexer(x)
x[x < 0] = x.max() + 1 # Put additional columns after "attribute"
return x
@classmethod
def verify(cls, df: pd.DataFrame) -> None:
super(Gtf, cls).verify(df)
if len(df.columns) < len(cls.COLUMNS) or \
np.any(df.columns[:len(cls.COLUMNS)] != cls.COLUMNS):
raise ValueError("Invalid GTF format!")
@classmethod
def read_gtf(cls, fname: os.PathLike) -> "Gtf":
r"""
Read GTF file
Parameters
----------
fname
GTF file
Returns
-------
gtf
Loaded :class:`Gtf` object
"""
COLUMNS = cls.COLUMNS.copy(deep=True)
loaded = pd.read_csv(fname, sep="\t", header=None, comment="#")
loaded.columns = COLUMNS[:loaded.shape[1]]
return cls(loaded)
def split_attribute(self) -> "Gtf":
r"""
Extract all attributes from the "attribute" column
and append them to existing columns
Returns
-------
splitted
Gtf with splitted attribute columns appended
"""
pattern = re.compile(r'([^\s]+) "([^"]+)";')
splitted = pd.DataFrame.from_records(np.vectorize(lambda x: {
key: val for key, val in pattern.findall(x)
})(self["attribute"]), index=self.index)
if set(self.COLUMNS).intersection(splitted.columns):
self.logger.warning(
"Splitted attribute names overlap standard GTF fields! "
"The standard fields are overwritten!"
)
return self.assign(**splitted)
def to_bed(self, name: Optional[str] = None) -> Bed:
r"""
Convert GTF to BED format
Parameters
----------
name
Specify a column to be converted to the "name" column in bed format,
otherwise the "name" column would be filled with "."
Returns
-------
bed
Converted :class:`Bed` object
"""
bed_df = pd.DataFrame(self, copy=True).loc[
:, ("seqname", "start", "end", "score", "strand")
]
bed_df.insert(3, "name", np.repeat(
".", len(bed_df)
) if name is None else self[name])
bed_df["start"] -= 1 # Convert to zero-based
bed_df.columns = (
"chrom", "chromStart", "chromEnd", "name", "score", "strand"
)
return Bed(bed_df)
def interval_dist(x: Interval, y: Interval) -> int:
r"""
Compute distance and relative position between two bed intervals
Parameters
----------
x
First interval
y
Second interval
Returns
-------
dist
Signed distance between ``x`` and ``y``
"""
if x.chrom != y.chrom:
return np.inf * (-1 if x.chrom < y.chrom else 1)
if x.start < y.stop and y.start < x.stop:
return 0
if x.stop <= y.start:
return x.stop - y.start - 1
if y.stop <= x.start:
return x.start - y.stop + 1
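# Behaviour sketch of `interval_dist` (values follow directly from the code above):
#   - overlapping intervals on the same chromosome          -> 0
#   - x = chr1:100-200, y = chr1:300-400 (x upstream of y)  -> 200 - 300 - 1 = -101
#   - x = chr1:300-400, y = chr1:100-200 (x downstream of y)-> 300 - 200 + 1 = 101
#   - different chromosomes                                 -> -inf or +inf by chromosome order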
def window_graph(
left: Union[Bed, str], right: Union[Bed, str], window_size: int,
left_sorted: bool = False, right_sorted: bool = False,
attr_fn: Optional[Callable[[Interval, Interval, float], Mapping[str, Any]]] = None
) -> nx.MultiDiGraph:
r"""
Construct a window graph between two sets of genomic features, where
features pairs within a window size are connected.
Parameters
----------
left
First feature set, either a :class:`Bed` object or path to a bed file
right
Second feature set, either a :class:`Bed` object or path to a bed file
window_size
Window size (in bp)
left_sorted
Whether ``left`` is already sorted
right_sorted
Whether ``right`` is already sorted
attr_fn
Function to compute edge attributes for connected features,
should accept the following three positional arguments:
- l: left interval
- r: right interval
- d: signed distance between the intervals
By default no edge attribute is created.
Returns
-------
graph
Window graph
"""
check_deps("bedtools")
if isinstance(left, Bed):
pbar_total = len(left)
left = left.to_bedtool()
else:
pbar_total = None
left = pybedtools.BedTool(left)
if not left_sorted:
left = left.sort(stream=True)
left = iter(left) # Resumable iterator
if isinstance(right, Bed):
right = right.to_bedtool()
else:
right = pybedtools.BedTool(right)
if not right_sorted:
right = right.sort(stream=True)
right = iter(right) # Resumable iterator
attr_fn = attr_fn or (lambda l, r, d: {})
if pbar_total is not None:
left = smart_tqdm(left, total=pbar_total)
graph = nx.MultiDiGraph()
window = collections.OrderedDict() # Used as ordered set
for l in left:
for r in list(window.keys()): # Allow remove during iteration
d = interval_dist(l, r)
if -window_size <= d <= window_size:
graph.add_edge(l.name, r.name, **attr_fn(l, r, d))
elif d > window_size:
del window[r]
else: # dist < -window_size
break # No need to expand window
else:
for r in right: # Resume from last break
d = interval_dist(l, r)
if -window_size <= d <= window_size:
graph.add_edge(l.name, r.name, **attr_fn(l, r, d))
elif d > window_size:
continue
window[r] = None # Placeholder
if d < -window_size:
break
pybedtools.cleanup()
return graph
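# Usage sketch (mirrors how `rna_anchored_prior_graph` below calls this function;
# "genes.bed" and "peaks.bed" are hypothetical input files):
#   graph = window_graph(
#       "genes.bed", "peaks.bed", window_size=150000,
#       attr_fn=lambda l, r, d: {"dist": abs(d), "weight": dist_power_decay(abs(d))}
#   )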
def dist_power_decay(x: int) -> float:
r"""
Distance-based power decay weight, computed as
:math:`w = {\left( \frac {d + 1000} {1000} \right)} ^ {-0.75}`
Parameters
----------
x
Distance (in bp)
Returns
-------
weight
Decaying weight
"""
return ((x + 1000) / 1000) ** (-0.75)
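# Example values (computed from the formula above):
#   dist_power_decay(0)    == 1.0
#   dist_power_decay(1000) == 2 ** -0.75   (~0.595)
#   dist_power_decay(9000) == 10 ** -0.75  (~0.178)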
@logged
def rna_anchored_prior_graph(
rna: anndata.AnnData, *others: anndata.AnnData,
gene_region: str = "combined", promoter_len: int = 2000,
extend_range: int = 0, extend_fn: Callable[[int], float] = dist_power_decay,
signs: Optional[List[int]] = None, propagate_highly_variable: bool = True,
corrupt_rate: float = 0.0, random_state: RandomState = None
) -> nx.MultiDiGraph:
r"""
Build prior regulatory graph anchored on RNA genes
Parameters
----------
rna
Anchor RNA dataset
*others
Other datasets
gene_region
Defines the genomic region of genes, must be one of
``{"gene_body", "promoter", "combined"}``.
promoter_len
Defines the length of gene promoters (bp upstream of TSS)
extend_range
Maximal extend distance beyond gene regions
extend_fn
Distance-decreasing weight function for the extended regions
(by default :func:`dist_power_decay`)
signs
Sign of edges between RNA genes and features in each ``*others``
dataset, must have the same length as ``*others``. Signs must be
one of ``{-1, 1}``. By default, all edges have positive signs of ``1``.
propagate_highly_variable
Whether to propagate highly variable genes to other datasets,
datasets in ``*others`` would be modified in place.
corrupt_rate
**CAUTION: DO NOT USE**, only for evaluation purpose
random_state
**CAUTION: DO NOT USE**, only for evaluation purpose
Returns
-------
graph
Prior regulatory graph
Note
----
In this function, features in the same dataset can only connect to
anchor genes via the same edge sign. For more flexibility, please
construct the prior graph manually.
"""
signs = signs or [1] * len(others)
if len(others) != len(signs):
raise RuntimeError("Length of ``others`` and ``signs`` must match!")
if set(signs).difference({-1, 1}):
raise RuntimeError("``signs`` can only contain {-1, 1}!")
rna_bed = Bed(rna.var.assign(name=rna.var_names))
other_beds = [Bed(other.var.assign(name=other.var_names)) for other in others]
if gene_region == "promoter":
rna_bed = rna_bed.strand_specific_start_site().expand(promoter_len, 0)
elif gene_region == "combined":
rna_bed = rna_bed.expand(promoter_len, 0)
elif gene_region != "gene_body":
raise ValueError("Unrecognized `gene_range`!")
graphs = [window_graph(
rna_bed, other_bed, window_size=extend_range,
attr_fn=lambda l, r, d, s=sign: {
"dist": abs(d), "weight": extend_fn(abs(d)), "sign": s
}
) for other_bed, sign in zip(other_beds, signs)]
graph = compose_multigraph(*graphs)
corrupt_num = round(corrupt_rate * graph.number_of_edges())
if corrupt_num:
rna_anchored_prior_graph.logger.warning("Corrupting prior graph!")
rs = get_rs(random_state)
rna_var_names = rna.var_names.tolist()
other_var_names = reduce(add, [other.var_names.tolist() for other in others])
corrupt_remove = set(rs.choice(graph.number_of_edges(), corrupt_num, replace=False))
corrupt_remove = set(edge for i, edge in enumerate(graph.edges) if i in corrupt_remove)
corrupt_add = []
while len(corrupt_add) < corrupt_num:
corrupt_add += [
(u, v) for u, v in zip(
rs.choice(rna_var_names, corrupt_num - len(corrupt_add)),
rs.choice(other_var_names, corrupt_num - len(corrupt_add))
) if not graph.has_edge(u, v)
]
graph.add_edges_from([
(add[0], add[1], graph.edges[remove])
for add, remove in zip(corrupt_add, corrupt_remove)
])
graph.remove_edges_from(corrupt_remove)
if propagate_highly_variable:
hvg_reachable = reachable_vertices(graph, rna.var.query("highly_variable").index)
for other in others:
other.var["highly_variable"] = [
item in hvg_reachable for item in other.var_names
]
graph = compose_multigraph(graph, graph.reverse())
all_features = set(chain.from_iterable(
map(lambda x: x.var_names, [rna, *others])
))
for item in all_features:
graph.add_edge(item, item, weight=1.0, sign=1)
return graph
def get_chr_len_from_fai(fai: os.PathLike) -> Mapping[str, int]:
r"""
Get chromosome length information from fasta index file
Parameters
----------
fai
Fasta index file
Returns
-------
chr_len
Length of each chromosome
"""
return pd.read_table(fai, header=None, index_col=0)[1].to_dict()
def ens_trim_version(x: str) -> str:
r"""
Trim version suffix from Ensembl ID
Parameters
----------
x
Ensembl ID
Returns
-------
trimmed
Ensembl ID with version suffix trimmed
"""
return re.sub(r"\.[0-9_-]+$", "", x)
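# Example (follows from the regex above):
#   ens_trim_version("ENSG00000139618.15") -> "ENSG00000139618"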
# Aliases
read_bed = Bed.read_bed
read_gtf = Gtf.read_gtf
|
py | 1a3836f7ebb6f8b648208725427588f29d772829 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_global_forwarding_rule_info
description:
- Gather info for GCP GlobalForwardingRule
short_description: Gather info for GCP GlobalForwardingRule
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
    - Each additional filter in the list will be added as an AND condition (filter1
      and filter2).
type: list
elements: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
elements: str
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a global forwarding rule
gcp_compute_global_forwarding_rule_info:
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
IPAddress:
description:
- The IP address that this forwarding rule is serving on behalf of.
- Addresses are restricted based on the forwarding rule's load balancing scheme
(EXTERNAL or INTERNAL) and scope (global or regional).
- When the load balancing scheme is EXTERNAL, for global forwarding rules, the
address must be a global IP, and for regional forwarding rules, the address
must live in the same region as the forwarding rule. If this field is empty,
an ephemeral IPv4 address from the same scope (global or regional) will be
assigned. A regional forwarding rule supports IPv4 only. A global forwarding
rule supports either IPv4 or IPv6.
- When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP
address belonging to the network/subnet configured for the forwarding rule.
By default, if this field is empty, an ephemeral internal IP address will
be automatically allocated from the IP range of the subnet or network configured
for this forwarding rule.
- 'An address can be specified either by a literal IP address or a URL reference
to an existing Address resource. The following examples are all valid: * 100.1.2.3
* U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address)
* projects/project/regions/region/addresses/address * regions/region/addresses/address
* global/addresses/address * address .'
returned: success
type: str
IPProtocol:
description:
- The IP protocol to which this rule applies. When the load balancing scheme
is INTERNAL_SELF_MANAGED, only TCP is valid.
returned: success
type: str
ipVersion:
description:
- The IP Version that will be used by this global forwarding rule.
returned: success
type: str
loadBalancingScheme:
description:
- This signifies what the GlobalForwardingRule will be used for.
- 'The value of INTERNAL_SELF_MANAGED means that this will be used for Internal
Global HTTP(S) LB. The value of EXTERNAL means that this will be used for
External Global Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)
NOTE: Currently global forwarding rules cannot be used for INTERNAL load balancing.'
returned: success
type: str
metadataFilters:
description:
- Opaque filter criteria used by Loadbalancer to restrict routing configuration
to a limited set xDS compliant clients. In their xDS requests to Loadbalancer,
xDS clients present node metadata. If a match takes place, the relevant routing
configuration is made available to those proxies.
- For each metadataFilter in this list, if its filterMatchCriteria is set to
MATCH_ANY, at least one of the filterLabels must match the corresponding label
provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL,
then all of its filterLabels must match with corresponding labels in the provided
metadata.
- metadataFilters specified here can be overridden by those specified in the
UrlMap that this ForwardingRule references.
- metadataFilters only applies to Loadbalancers that have their loadBalancingScheme
set to INTERNAL_SELF_MANAGED.
returned: success
type: complex
contains:
filterMatchCriteria:
description:
- Specifies how individual filterLabel matches within the list of filterLabels
contribute towards the overall metadataFilter match.
- MATCH_ANY - At least one of the filterLabels must have a matching label
in the provided metadata.
- MATCH_ALL - All filterLabels must have matching labels in the provided
metadata.
returned: success
type: str
filterLabels:
description:
- The list of label value pairs that must match labels in the provided metadata
based on filterMatchCriteria This list must not be empty and can have
at the most 64 entries.
returned: success
type: complex
contains:
name:
description:
- Name of the metadata label. The length must be between 1 and 1024
characters, inclusive.
returned: success
type: str
value:
description:
- The value that the label must match. The value has a maximum length
of 1024 characters.
returned: success
type: str
name:
description:
- Name of the resource; provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
network:
description:
- This field is not used for external load balancing.
- For INTERNAL_SELF_MANAGED load balancing, this field identifies the network
that the load balanced IP should belong to for this global forwarding rule.
If this field is not specified, the default network will be used.
returned: success
type: dict
portRange:
description:
- This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy,
TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.
- Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed
to ports in the specified range will be forwarded to target.
- Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint
port ranges.
- 'Some types of forwarding target have constraints on the acceptable ports:
* TargetHttpProxy: 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25,
43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy:
25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway:
500, 4500 .'
returned: success
type: str
target:
description:
- The URL of the target resource to receive the matched traffic.
- The forwarded traffic must be of a type appropriate to the target object.
- For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are
valid.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
module.exit_json(**return_value)
def collection(module):
return "https://compute.googleapis.com/compute/v1/projects/{project}/global/forwardingRules".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
return auth.list(link, return_if_object, array_name='items', params={'filter': query})
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
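# Example (derived from the logic above): filters ['name = test', 'zone = us-central1-a']
# become the query string "(name = test) (zone = us-central1-a)", which the GCP
# filter syntax treats as an AND of both conditions.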
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
|
py | 1a383827cc1d135c5917e605f2f2dd512dfd5fda | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Any, Dict, Optional
from flask import Request
from marshmallow import ValidationError
from superset import cache
from superset.charts.commands.exceptions import (
ChartDataCacheLoadError,
ChartDataQueryFailedError,
)
from superset.charts.schemas import ChartDataQueryContextSchema
from superset.commands.base import BaseCommand
from superset.common.query_context import QueryContext
from superset.exceptions import CacheLoadError
from superset.extensions import async_query_manager
from superset.tasks.async_queries import load_chart_data_into_cache
logger = logging.getLogger(__name__)
class ChartDataCommand(BaseCommand):
def __init__(self) -> None:
self._form_data: Dict[str, Any]
self._query_context: QueryContext
self._async_channel_id: str
def run(self, **kwargs: Any) -> Dict[str, Any]:
# caching is handled in query_context.get_df_payload
# (also evals `force` property)
cache_query_context = kwargs.get("cache", False)
force_cached = kwargs.get("force_cached", False)
try:
payload = self._query_context.get_payload(
cache_query_context=cache_query_context, force_cached=force_cached
)
except CacheLoadError as exc:
raise ChartDataCacheLoadError(exc.message)
# TODO: QueryContext should support SIP-40 style errors
for query in payload["queries"]:
if query.get("error"):
raise ChartDataQueryFailedError(f"Error: {query['error']}")
return_value = {
"query_context": self._query_context,
"queries": payload["queries"],
}
if cache_query_context:
return_value.update(cache_key=payload["cache_key"])
return return_value
def run_async(self, user_id: Optional[str]) -> Dict[str, Any]:
job_metadata = async_query_manager.init_job(self._async_channel_id, user_id)
load_chart_data_into_cache.delay(job_metadata, self._form_data)
return job_metadata
def set_query_context(self, form_data: Dict[str, Any]) -> QueryContext:
self._form_data = form_data
try:
self._query_context = ChartDataQueryContextSchema().load(self._form_data)
except KeyError:
raise ValidationError("Request is incorrect")
except ValidationError as error:
raise error
return self._query_context
def validate(self) -> None:
self._query_context.raise_for_access()
def validate_async_request(self, request: Request) -> None:
jwt_data = async_query_manager.parse_jwt_from_request(request)
self._async_channel_id = jwt_data["channel"]
def load_query_context_from_cache( # pylint: disable=no-self-use
self, cache_key: str
) -> Dict[str, Any]:
cache_value = cache.get(cache_key)
if not cache_value:
raise ChartDataCacheLoadError("Cached data not found")
return cache_value["data"]
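# Typical call sequence (sketch, based only on the methods defined above):
#   command = ChartDataCommand()
#   query_context = command.set_query_context(form_data)
#   command.validate()                # raises if the user lacks access
#   result = command.run(cache=True)  # {"query_context": ..., "queries": [...], "cache_key": ...}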
|
py | 1a38386d1155afe493637581ee27a5dbaba80e2a | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default value constants exposed by core utilities."""
DEFAULT_REGISTRY = 'gcr.io'
REGIONAL_REGISTRIES = ['us.gcr.io', 'eu.gcr.io', 'asia.gcr.io']
BUCKET_REGISTRIES = ['b.gcr.io', 'bucket.gcr.io']
APPENGINE_REGISTRY = 'appengine.gcr.io'
SPECIALTY_REGISTRIES = BUCKET_REGISTRIES + [APPENGINE_REGISTRY]
ALL_SUPPORTED_REGISTRIES = ([DEFAULT_REGISTRY] + REGIONAL_REGISTRIES
+ SPECIALTY_REGISTRIES)
DEFAULT_DEVSHELL_IMAGE = (DEFAULT_REGISTRY +
'/dev_con/cloud-dev-common:prod')
METADATA_IMAGE = DEFAULT_REGISTRY + '/google_appengine/faux-metadata:latest'
|
py | 1a383a7356061a22f0947aff00da811598148748 | from os import path
from splashgen import MetaTags, SplashSite, launch
from splashgen.integrations import MailchimpSignup
site = SplashSite(title="ZenWeb – Python Internal Web Apps",
logo=path.join(path.dirname(__file__), "zenweb-logo.png"),
theme="dark")
site.headline = "Effortless internal tools for your backend services"
site.subtext = """
Write simple code that plugs directly into your infrastructure, and let ZenWeb
turn it into a web app that anyone on your team can use.
Stop getting pinged every time an on-call engineer needs a script run,
and start automating your domain expertise.
"""
site.meta = MetaTags(title=site.headline,
description="Automate your domain expertise. Sign up to join our pilot program!",
image="https://t3dmedia.s3.amazonaws.com/_notvideos/zwbg.png",
canonical_url="https://zenweb.dev")
site.call_to_action = MailchimpSignup(
"http://eepurl.com/hw4od9", button_text="Join our pilot")
launch(site)
|