ext | sha | content
---|---|---|
py | 1a42f9c9c3d0c6764d238cbf79068d075f73fbd6 | import codecs
import binascii

def xor(bytes, value):
    result = []
    for i in range(len(bytes)):
        result.append(bytes[i] ^ value)
    return bytearray(result)

# ascii a-z characters are 97-122, or 0x61-0x7a
# capitals, spaces
# if many characters outside that range, consider it garbage
def englishness(bytes):
    count = 0
    for i in bytes:
        if is_garbage(i):
            count += 1
    return len(bytes) - count

def is_garbage(byte):
    if byte == 32: return False  # space
    if byte >= 65 and byte <= 90: return False  # uppercase
    if byte >= 97 and byte <= 122: return False  # lowercase
    return True

input = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"
print(input)
bytes = codecs.decode(input, 'hex')
candidates = []
for key in range(256):  # try every possible single-byte key
    decoded = xor(bytes, key)
    score = englishness(decoded)
    if score > 20:
        decoded_string = ''.join(chr(c) for c in decoded)
        candidates.append((score, decoded_string))
candidates.sort(key=lambda tup: tup[0])
for i in range(len(candidates)):
    print(candidates[i][1])  # last one printed is most likely
"""
bytes = xor(bytes, 3)
print(binascii.hexlify(bytearray(bytes)))
bytes = xor(bytes, 3)
print(binascii.hexlify(bytearray(bytes)))
"""
|
py | 1a42f9f29a4e0cbef4a36c30b8a2f41bba37c1c9 | # -*- coding: utf-8 -*-
"""
overholt.api.products
~~~~~~~~~~~~~~~~~~~~~
Product endpoints
"""
from flask import Blueprint, request
from ..forms import NewProductForm, UpdateProductForm
from ..services import products
from . import OverholtFormError, route
bp = Blueprint('products', __name__, url_prefix='/products')
@route(bp, '/')
def list():
"""Returns a list of product instances."""
return products.all()
@route(bp, '/', methods=['POST'])
def create():
"""Creates a new product. Returns the new product instance."""
form = NewProductForm()
if form.validate_on_submit():
return products.create(**request.json)
raise OverholtFormError(form.errors)
@route(bp, '/<product_id>')
def show(product_id):
"""Returns a product instance."""
return products.get_or_404(product_id)
@route(bp, '/<product_id>', methods=['PUT'])
def update(product_id):
"""Updates a product. Returns the updated product instance."""
form = UpdateProductForm()
if form.validate_on_submit():
return products.update(products.get_or_404(product_id), **request.json)
raise OverholtFormError(form.errors)
@route(bp, '/<product_id>', methods=['DELETE'])
def delete(product_id):
"""Deletes a product. Returns a 204 response."""
products.delete(products.get_or_404(product_id))
return None, 204
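# Illustrative wiring sketch (an assumption, not part of this module): the
# blueprint is expected to be registered on the Flask application by an app
# factory, roughly:
#
#     from flask import Flask
#     from overholt.api import products
#
#     app = Flask(__name__)
#     app.register_blueprint(products.bp)
#
# The custom `route` decorator imported above is assumed to wrap Flask routing
# and serialize the returned model instances into JSON responses.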
|
py | 1a42fa24a056e56b37b79837f74152bb5c134327 | from ryu.tus.const import const
from random import shuffle, random
import time
def intersect_set(set1, set2):
# TODO
# complete __eq__ for OFPMatch
set1_flat = [y for x in set1 for y in x]
set2_flat = [y for x in set2 for y in x]
res = []
for s1 in set1_flat:
for s2 in set2_flat:
if s1['dp'].id == s2['dp'].id:
#if s1 == s2:
res.append(s1)
return res
class Transaction(object):
# TODO
# everything...
def __init__(self, tx_id):
self.tx_id = tx_id
self.state = const.READ
self.read_set = {}
self.write_set = [[]]
self.barrier_set = []
self.conflict = []
def read(self, key, value):
self.read_set[key] = value
def write(self, dp, match, action):
self.write_set[len(self.barrier_set)].append(
{
'dp': dp,
'match': match,
'action': action,
}
)
def barrier(self, dp):
self.write_set.append([])
self.barrier_set.append(dp)
def execute(self):
print('Execute!')
print(self.write_set)
print(self.barrier_set)
for phase in range(len(self.barrier_set) + 1):
shuffle(self.write_set[phase])
for ac_write in self.write_set[phase]:
print('Do: ', ac_write)
# TODO
# OFP v1.2
time.sleep(random() / 100.0)
dp = ac_write['dp']
ofp_parser = dp.ofproto_parser
if ac_write['action']['name'] == 'OFPFlowMod':
req = ofp_parser.OFPFlowMod(
dp,
match=ac_write['match'],
**ac_write['action']['kwargs'],
)
dp.send_msg(req)
elif ac_write['action']['name'] == 'OFPPortMod':
req = ofp_parser.OFPPortMod(
dp,
**ac_write['action']['kwargs'],
)
dp.send_msg(req)
elif ac_write['action']['name'] == 'OFPPacketOut':
req = ofp_parser.OFPPacketOut(
dp,
**ac_write['action']['kwargs']
)
dp.send_msg(req)
if phase != len(self.barrier_set):
dp = self.barrier_set[phase]
ofp_parser = dp.ofproto_parser
req = ofp_parser.OFPBarrierRequest(dp)
dp.send_msg(req)
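# Illustrative usage sketch (an assumption, not taken from this module): a
# controller application would queue writes, insert barriers between phases and
# then execute the whole transaction, roughly:
#
#     tx = Transaction(tx_id=1)
#     tx.write(dp, match, {'name': 'OFPFlowMod', 'kwargs': flow_mod_kwargs})
#     tx.barrier(dp)
#     tx.write(dp, match, {'name': 'OFPPacketOut', 'kwargs': packet_out_kwargs})
#     tx.execute()
#
# where `dp` is a Ryu datapath and the kwargs dicts are whatever the matching
# OFP parser constructors expect.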
'''
Todo:
1. OFPBarrierRequest semantics, references
2. Merge NIB
3. Action, implement as you need
4. Why does RYU not support write-then-read? Reads are asynchronous, so the implementation is hard.
5. More scenario: reproduce existing ones, read references and find more.
''' |
py | 1a42fb81e2ee8bbb150bd2e0a3ad41fc2bb9c34c | # Copyright 2014 Tata Consultancy Services Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# @author: Carl Baldwin, Hewlett-Packard
import pbr.version
__version__ = pbr.version.VersionInfo('python-vnfsvcclient').version_string()
|
py | 1a42fc0f04d23fc07f3e8068c7f9278faa4e8d46 | import re
import math
from ..exception import PygpException
from .vertex import Vertex
from .halfedge import Halfedge
from .face import Face
from .objmesh import ObjMesh
class TriMesh(object):
def __init__(self, filename=''):
self.vertices = []
self.halfedges = []
self.faces = []
self.indices = []
if filename != '':
self.load(filename)
def load(self, filename):
obj = ObjMesh(filename)
unique_vertices = {}
for i in obj.indices:
vx = obj.vertices[i * 3 + 0]
vy = obj.vertices[i * 3 + 1]
vz = obj.vertices[i * 3 + 2]
v = (vx, vy, vz)
if v not in unique_vertices:
unique_vertices[v] = len(self.vertices)
self.vertices.append(Vertex(v[0], v[1], v[2]))
self.vertices[-1].index = unique_vertices[v]
self.indices.append(unique_vertices[v])
self._make_halfedge()
def save(self, filename):
with open(filename, 'w') as fp:
for v in self.vertices:
fp.write('v {0:.6f} {1:.6f} {2:.6f}\n'.format(v.x, v.y, v.z))
for i in range(0, len(self.indices), 3):
i0 = self.indices[i + 0] + 1
i1 = self.indices[i + 1] + 1
i2 = self.indices[i + 2] + 1
fp.write('f {0} {1} {2}\n'.format(i0, i1, i2))
def n_vertices(self):
return len(self.vertices)
def n_faces(self):
return len(self.faces)
def collapse_halfedge(self, v_from, v_to, update_position=None):
if v_from.degree() <= 3 or v_to.degree() <= 3:
raise PygpException('Invalid collapse operation!')
# Find target halfedge
target_halfedge = None
for he in v_from.halfedges():
if he.vertex_from is v_from and he.vertex_to is v_to:
target_halfedge = he
break
if target_halfedge is None:
raise PygpException('Specified halfedge does not exist!')
reverse_halfedge = target_halfedge.opposite
# Update v_to's halfedge
target_halfedge.vertex_to.halfedge = target_halfedge.next.opposite.next
# Update halfedges of surrounding vertices
if target_halfedge.face is not None:
target_halfedge.next.vertex_to.halfedge = target_halfedge.next.opposite
if reverse_halfedge.face is not None:
reverse_halfedge.next.vertex_to.halfedge = reverse_halfedge.next.opposite
# Update topology
if target_halfedge.face is not None:
he0 = target_halfedge.next.opposite
he1 = target_halfedge.next.next.opposite
he0.opposite, he1.opposite = he1, he0
if reverse_halfedge.face is not None:
he2 = reverse_halfedge.next.opposite
he3 = reverse_halfedge.next.next.opposite
he2.opposite, he3.opposite = he3, he2
# Update topology for boundary vertices
if reverse_halfedge.face is None:
for he in target_halfedge.vertex_to.halfedges():
if he.opposite.next is reverse_halfedge:
he.opposite.next = reverse_halfedge.next
break
if target_halfedge.face is None:
for he in reverse_halfedge.vertex_to.halfedges():
if he.opposite.next is target_halfedge:
he.opposite.next = target_halfedge.next
break
for he in target_halfedge.vertex_to.halfedges():
he.vertex_from = target_halfedge.vertex_to
he.opposite.vertex_to = target_halfedge.vertex_to
# Delete/update vertex
self.vertices[target_halfedge.vertex_from.index] = None
if update_position is not None:
self.vertices[target_halfedge.vertex_to.index].position = update_position
# Remove faces
if target_halfedge.face is not None:
self.faces[target_halfedge.face.index] = None
if reverse_halfedge.face is not None:
self.faces[reverse_halfedge.face.index] = None
# Delete halfedge
self.halfedges[target_halfedge.index] = None
self.halfedges[reverse_halfedge.index] = None
def flip_halfedge(self, he):
rev = he.opposite
if rev.face is None:
raise PygpException('Flip method is called for boundary halfedge!')
# Get surrounding vertices, halfedges and faces
v0 = he.vertex_to
v1 = he.next.vertex_to
v2 = rev.next.vertex_to
v3 = rev.vertex_to
he0 = he.next
he1 = he.next.next
he2 = rev.next.next
he3 = rev.next
f0 = he.face
f1 = rev.face
# Update halfedges of to/from vertices
v0.halfedge = he0
v3.halfedge = he3
# Update halfedge's source and destination
he.vertex_from = v1
he.vertex_to = v2
rev.vertex_from = v2
rev.vertex_to = v1
# Update face circulation
he.next = he2
he2.next = he0
he0.next = he
rev.next = he1
he1.next = he3
he3.next = rev
# Update faces
f0.halfedge = he
he.face = f0
he2.face = f0
he0.face = f0
f1.halfedge = rev
rev.face = f1
he1.face = f1
he3.face = f1
def clean(self):
# Compute new vertex indices
count = 0
new_index_table = [ 0 ] * self.n_vertices()
for i, v in enumerate(self.vertices):
new_index_table[i] = count
if v is not None:
count += 1
# Update vertex array
self.vertices = [ v for v in self.vertices if v is not None ]
for i, v in enumerate(self.vertices):
v.index = i
# Update halfedge array
self.halfedges = [ he for he in self.halfedges if he is not None ]
for i, he in enumerate(self.halfedges):
he.index = i
self.faces = [ f for f in self.faces if f is not None ]
for i, f in enumerate(self.faces):
f.index = i
self.indices = [ -1 ] * (len(self.faces) * 3)
for i, f in enumerate(self.faces):
vs = list(f.vertices())
assert len(vs) == 3
self.indices[i * 3 + 0] = vs[0].index
self.indices[i * 3 + 1] = vs[1].index
self.indices[i * 3 + 2] = vs[2].index
assert vs[0].index < len(self.vertices)
assert vs[1].index < len(self.vertices)
assert vs[2].index < len(self.vertices)
def _make_halfedge(self):
table = [ [] for i in range(len(self.vertices)) ]
self.halfedges.clear()
self.faces.clear()
for i in range(0, len(self.indices), 3):
he0 = Halfedge()
he1 = Halfedge()
he2 = Halfedge()
he0.vertex_from = self.vertices[self.indices[i + 0]]
he1.vertex_from = self.vertices[self.indices[i + 1]]
he2.vertex_from = self.vertices[self.indices[i + 2]]
he0.vertex_to = self.vertices[self.indices[i + 1]]
he1.vertex_to = self.vertices[self.indices[i + 2]]
he2.vertex_to = self.vertices[self.indices[i + 0]]
assert he0.vertex_from.index != he0.vertex_to.index
assert he1.vertex_from.index != he1.vertex_to.index
assert he2.vertex_from.index != he2.vertex_to.index
he0.next = he1
he1.next = he2
he2.next = he0
self.vertices[self.indices[i + 0]].halfedge = he0
self.vertices[self.indices[i + 1]].halfedge = he1
self.vertices[self.indices[i + 2]].halfedge = he2
face = Face()
face.halfedge = he0
he0.face = face
he1.face = face
he2.face = face
self.halfedges.extend([ he0, he1, he2 ])
self.faces.append(face)
table[self.vertices[self.indices[i + 0]].index].append(he0)
table[self.vertices[self.indices[i + 1]].index].append(he1)
table[self.vertices[self.indices[i + 2]].index].append(he2)
# Set opposite halfedges
for he0 in self.halfedges:
for he1 in table[he0.vertex_to.index]:
if he0.vertex_from == he1.vertex_to and \
he1.vertex_from == he0.vertex_to:
he0.opposite = he1
he1.opposite = he0
break
# Opposite halfedge not found
# Mark vertices as border vertices
if he0.opposite is None:
he0.vertex_from.is_boundary = True
he0.vertex_to.is_boundary = True
he1 = Halfedge()
he1.vertex_from = he0.vertex_to
he1.vertex_to = he0.vertex_from
he1.opposite = he0
he0.opposite = he1
he1.vertex_from.halfedge = he1
self.halfedges.append(he1)
# Process border vertices
for v in self.vertices:
if v.is_boundary:
he = v.halfedge
while True:
if he.opposite.next is None:
he.opposite.next = v.halfedge
break
he = he.opposite.next
for i, he in enumerate(self.halfedges):
he.index = i
for i, f in enumerate(self.faces):
f.index = i
def verify(self):
for v in self.vertices:
if v is None:
continue
if v.index < 0:
return False
if v.halfedge is None:
return False
for he in self.halfedges:
if he is None:
continue
if he.index < 0:
return False
if he.vertex_from is None or he.vertex_to is None:
return False
if he.next is None:
return False
if he.opposite is None:
return False
if he.face is None:
return False
for f in self.faces:
if f is None:
continue
if f.index < 0:
return False
if f.halfedge is None:
return False
return True
def clear(self):
self.vertices.clear()
self.halfedges.clear()
self.faces.clear()
self.indices.clear()
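# Minimal usage sketch (illustrative; the OBJ file name is an assumption):
#
#     mesh = TriMesh('bunny.obj')
#     print(mesh.n_vertices(), mesh.n_faces())
#     mesh.flip_halfedge(mesh.halfedges[0])  # raises PygpException on a boundary halfedge
#     mesh.clean()
#     mesh.save('bunny_out.obj')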
|
py | 1a42fc992249696b451487e46cd040bb3a01ec9f | # From https://github.com/yunjey/stargan-v2-demo/tree/master/core
import torch
import torch.nn as nn
class MappingNetwork(nn.Module):
"""Mapping network: (latent z, domain y) -> (style s)."""
def __init__(self, latent_dim=64, style_dim=64, num_domains=2):
super(MappingNetwork, self).__init__()
self.num_domains = num_domains
hidden_dim = 512
self.shared = nn.Sequential(
nn.Linear(latent_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU())
self.unshared = nn.ModuleList()
for _ in range(num_domains):
self.unshared.append(
nn.Linear(hidden_dim, style_dim))
def forward(self, z, y):
"""
Inputs:
- z: latent vectors of shape (batch, latent_dim).
- y: domain labels of shape (batch).
Output:
- s: style vectors of shape (batch, style_dim).
"""
#z = z / torch.norm(z, p=2, dim=1, keepdim=True)
#z = z / (torch.sqrt(torch.mean(z**2, dim=1, keepdim=True)) + 1e-8)
h = self.shared(z)
outs = []
for i in range(self.num_domains):
out = self.unshared[i](h) # (batch, style_dim)
outs.append(out)
out = torch.stack(outs, dim=1) # (batch, num_domains, style_dim)
idx = torch.LongTensor(range(y.size(0))).to(y.device)
s = out[idx, y] # (batch, style_dim)
#print('F_s: ', torch.mean(torch.var(s, dim=0, unbiased=False)))
return s |
py | 1a42fd23178de8659a3f0fbef6ca2f5dd147c7f5 | import numpy as np
np.random.seed(0)
class Initialization:
zeros_initialization = None
def _zeros_initialization(n_units: int, n_in: int):
W = np.zeros((n_units, n_in))
b = np.zeros((n_units, 1))
return W, b
def _weights_initialization(n_units, n_in):
# multiplying W by a small number makes the learning fast
# however, in practice, multiplying by 0.01 with more than two layers (l > 2) keeps the NN from converging
# because it runs into the vanishing gradients problem
W = np.random.randn(n_units, n_in) * 0.01
b = np.zeros((n_units, 1))
return W, b
def _He_initialization(n_units, n_in):
""" Goes better with ReLU (a generalization this initializer is called variance_scaling_initializer)
:param n_units:
:param n_in:
:return:
"""
W = np.random.randn(n_units, n_in) * np.sqrt(2 / n_in)
b = np.zeros((n_units, 1))
return W, b
def _Xavier_initialization(n_units, n_in):
"""Initialize weight W using Xavier Initialization (also known as Glorot Initialization)
If the input features or activations have roughly mean 0 and variance 1, then this keeps z on a similar
scale. It does not solve the vanishing/exploding gradients problem, but it definitely helps reduce it,
because it tries to set each weight matrix W so that it is neither much bigger than 1 nor much smaller
than 1, so the activations do not explode or vanish too quickly.
P.S. Works better with sigmoid, softmax and tanh
"""
W = np.random.randn(n_units, n_in) * np.sqrt(1 / n_in)
b = np.zeros((n_units, 1))
return W, b
def _Benjio_initialization(n_units, n_in):
W = np.random.randn(n_units, n_in) * np.sqrt(2 / (n_in + n_units))
b = np.zeros((n_units, 1))
return W, b
if __name__ == '__main__':
pass
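# Small illustrative check (an addition, not part of the original module): with
# He initialization the empirical variance of W should be close to 2 / n_in.
# The layer sizes below are arbitrary example values.
#
#     w, _ = Initialization._He_initialization(256, 512)
#     print(np.var(w), 2 / 512)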
|
py | 1a42fdfe57f4a136473a699346d339a7b774fe22 | from cryptography.exceptions import InvalidSignature, InvalidKey
from django.core.handlers.wsgi import WSGIRequest
from django.http import JsonResponse
from pyattest.exceptions import PyAttestException, InvalidNonceException, InvalidCertificateChainException, \
InvalidKeyIdException, ExtensionNotFoundException
from dreiattest.exceptions import DreiAttestException, UnsupportedEncryptionException, NoKeyForSessionException
relevant_base = (PyAttestException, DreiAttestException, InvalidSignature, InvalidKey)
nonce_mismatch = (InvalidNonceException,)
invalid_key = (InvalidCertificateChainException, InvalidKeyIdException, UnsupportedEncryptionException,
ExtensionNotFoundException, InvalidSignature, InvalidKey, NoKeyForSessionException)
class HandleDreiattestExceptionsMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
return self.get_response(request)
def process_exception(self, request: WSGIRequest, exception: Exception):
if isinstance(exception, relevant_base):
return self.handle(request, exception)
def handle(self, request: WSGIRequest, exception: Exception):
code = exception.__class__.__name__
if code.endswith('Exception'):
code = code[:-9]
response = JsonResponse(data={'code': code}, status=403)
response['Dreiattest-error'] = self.get_header(exception)
return response
def get_header(self, exception: Exception) -> str:
""" Set some custom headers for the mobile clients. """
if isinstance(exception, nonce_mismatch):
return 'dreiAttest_nonce_mismatch'
if isinstance(exception, invalid_key):
return 'dreiAttest_invalid_key'
return 'dreiAttest_policy_violation'
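# Wiring sketch (an assumption, not part of this file): the middleware would be
# enabled in the Django settings, e.g.
#
#     MIDDLEWARE = [
#         # ...
#         'dreiattest.middleware.HandleDreiattestExceptionsMiddleware',
#     ]
#
# The exact dotted path depends on where this module lives in the package.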
|
py | 1a42feee48a55ef8b8565124b41aa5409c551df4 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
# TODO: improve compliance with handlebars spec and split in separate module (initially based on pybars, but need to change)
from functools import partial
import re
from netforce.pymeta.grammar import OMeta
import collections
from netforce import database
from netforce.model import get_model, fields, BrowseList
from netforce.locale import _
import datetime
import time
from netforce import access
import json
import tempfile
try:
import barcode
from barcode.writer import ImageWriter
except:
barcode = None
print("WARNING: pyBarcode not installed")
import math
import os
from pprint import pprint
from xml.sax import saxutils
handlebars_grammar = r"""
template ::= (<text> | <templatecommand>)*:body => ['template'] + body
text ::= (~(<start>) <anything>)+:text => ('literal', ''.join(text))
other ::= <anything>:char => ('literal', char)
templatecommand ::= <blockrule>
| <comment>
| <escapedexpression>
| <expression>
| <partial>
start ::= '{' '{'
finish ::= '}' '}'
comment ::= <start> '!' (~(<finish>) <anything>)* <finish> => ('comment', )
space ::= ' '|'\t'|'\r'|'\n'
arguments ::= (<space>+ (<kwliteral>|<literal>|<path>))*:arguments => arguments
expression_inner ::= <spaces> <path>:p <arguments>:arguments <spaces> <finish> => (p, arguments)
expression ::= <start> '{' <expression_inner>:e '}' => ('expand', ) + e
| <start> '&' <expression_inner>:e => ('expand', ) + e
escapedexpression ::= <start> <expression_inner>:e => ('escapedexpand', ) + e
block_inner ::= <spaces> <symbol>:s <arguments>:args <spaces> <finish>
=> (''.join(s), args)
alt_inner ::= <spaces> ('^' | 'e' 'l' 's' 'e') <spaces> <finish>
partial ::= <start> '>' <block_inner>:i => ('partial',) + i
path ::= ~('/') <pathseg>+:segments => ('path', segments)
kwliteral ::= <symbol>:s '=' (<literal>|<path>):v => ('kwparam', s, v)
literal ::= (<string>|<integer>|<boolean>):thing => ('literalparam', thing)
string ::= '"' <notquote>*:ls '"' => '"' + ''.join(ls) + '"'
integer ::= <digit>+:ds => int(''.join(ds))
boolean ::= <false>|<true>
false ::= 'f' 'a' 'l' 's' 'e' => False
true ::= 't' 'r' 'u' 'e' => True
notquote ::= <escapedquote> | (~('"') <anything>)
escapedquote ::= '\\' '"' => '\\"'
symbol ::= ~<alt_inner> '['? (<letterOrDigit>|'-'|'@')+:symbol ']'? => ''.join(symbol)
pathseg ::= <symbol>
| '/' => ''
| ('.' '.' '/') => '__parent'
| '.' => ''
pathfinish :expected ::= <start> '/' <path>:found ?(found == expected) <finish>
symbolfinish :expected ::= <start> '/' <symbol>:found ?(found == expected) <finish>
blockrule ::= <start> '#' <block_inner>:i
<template>:t <alttemplate>:alt_t <symbolfinish i[0]> => ('block',) + i + (t, alt_t)
| <start> '^' <block_inner>:i
<template>:t <symbolfinish i[0]> => ('invertedblock',) + i + (t,)
alttemplate ::= (<start> <alt_inner> <template>)?:alt_t => alt_t or []
"""
compile_grammar = """
compile ::= <prolog> <rule>* => builder.finish()
prolog ::= "template" => builder.start()
compile_block ::= <prolog_block> <rule>* => builder.finish_block()
prolog_block ::= "template" => builder.start_block()
rule ::= <literal>
| <expand>
| <escapedexpand>
| <comment>
| <block>
| <invertedblock>
| <partial>
block ::= [ "block" <anything>:symbol [<arg>*:arguments] [<compile_block>:t] [<compile_block>?:alt_t] ] => builder.add_block(symbol, arguments, t, alt_t)
comment ::= [ "comment" ]
literal ::= [ "literal" :value ] => builder.add_literal(value)
expand ::= [ "expand" <path>:value [<arg>*:arguments]] => builder.add_expand(value, arguments)
escapedexpand ::= [ "escapedexpand" <path>:value [<arg>*:arguments]] => builder.add_escaped_expand(value, arguments)
invertedblock ::= [ "invertedblock" <anything>:symbol [<arg>*:arguments] [<compile>:t] ] => builder.add_invertedblock(symbol, arguments, t)
partial ::= ["partial" <anything>:symbol [<arg>*:arguments]] => builder.add_partial(symbol, arguments)
path ::= [ "path" [<pathseg>:segment]] => ("simple", segment)
| [ "path" [<pathseg>+:segments] ] => ("complex", 'resolve(context, "' + '","'.join(segments) + '")' )
simplearg ::= [ "path" [<pathseg>+:segments] ] => 'resolve(context, "' + '","'.join(segments) + '")'
| [ "literalparam" <anything>:value ] => str(value)
arg ::= [ "kwparam" <anything>:symbol <simplearg>:a ] => str(symbol) + '=' + a
| <simplearg>
pathseg ::= "/" => ''
| "." => ''
| "" => ''
| "this" => ''
pathseg ::= <anything>:symbol => ''.join(symbol)
"""
compile_grammar = compile_grammar.format()
class strlist(list):
def __str__(self):
return ''.join(self)
def grow(self, thing):
if type(thing) == str:
self.append(thing)
else:
for element in thing:
self.grow(element)
_map = {
'&': '&amp;',
'"': '&quot;',
"'": '&#x27;',
'`': '&#x60;',
'<': '&lt;',
'>': '&gt;',
}
def substitute(match, _map=_map):
return _map[match.group(0)]
_escape_re = re.compile(r"&|\"|'|`|<|>")
def escape(something, _escape_re=_escape_re, substitute=substitute):
return _escape_re.sub(substitute, something)
class Scope:
def __init__(self, context, parent, data=None):
self.context = context
self.parent = parent
if parent and isinstance(parent,Scope):
self.data=parent.data
else:
self.data={}
if data:
self.data.update(data)
def get(self, name, default=None):
if name == '__parent':
return self.parent
elif name == 'this':
return self.context
elif name.startswith("@"):
return self.data.get(name[1:])
result = self.context.get(name, self)
if result is not self:
return result
return default
__getitem__ = get
def __str__(self):
return str(self.context)
def resolve(context, *segments):
# print("resolve",segments)
for segment in segments:
if context is None:
return None
if segment in (None, ""):
continue
if type(context) in (list, tuple):
offset = int(segment)
try:
context = context[offset]
except:
context = None
else:
if isinstance(segment, str) and segment.isdigit():
segment = int(segment)
context = context.get(segment)
return context
def _paginate(this, options, data, limit=None, offset=None, url=None):
if not data:
return options['inverse'](this)
if limit is None:
limit = 10
if offset is None:
offset = 0
count = len(data)
page_no = math.floor(offset / limit) + 1
num_pages = math.floor((count + limit - 1) / limit)
paginate = {
"data": data[offset:offset + limit],
"limit": limit,
"offset": offset,
"count": count,
"item_first": offset + 1,
"item_last": min(offset + limit, count),
"page_no": page_no,
"num_pages": num_pages,
"parts": [],
}
if url:
base_url = re.sub(r"&offset=\d+", "", url) # XXX
else:
base_url = ""
if base_url.find("?")==-1: # XXX
base_url+="?"
if page_no > 1:
p = page_no - 1
o = (p - 1) * limit
paginate["previous"] = {
"page_no": p,
"url": base_url + "&offset=%d" % o if base_url else None,
}
if page_no < num_pages:
p = page_no + 1
o = (p - 1) * limit
paginate["next"] = {
"page_no": p,
"url": base_url + "&offset=%d" % o if base_url else None,
}
if num_pages > 1:
first_part_page_no = max(1, page_no - 2)
last_part_page_no = min(num_pages, page_no + 1)
for p in range(first_part_page_no, last_part_page_no + 1):
o = (p - 1) * limit
part = {
"page_no": p,
"active": p == page_no,
"url": base_url + "&offset=%d" % o if base_url else None,
}
paginate["parts"].append(part)
scope = Scope({"paginate": paginate}, this)
return options['fn'](scope)
def _each(this, options, context, order=None, offset=None, limit=None):
if not context:
return None
result = strlist()
i = 0
if order:
if len(order.split(" ")) == 2:
if order.split(" ")[1] == "desc":
context2 = sorted(context, key=lambda x: x[order.split(" ")[0]])[::-1]
else:
context2 = sorted(context, key=lambda x: x[order])
else:
context2 = context
if offset:
context2=context2[offset:]
if limit:
context2=context2[:limit]
for ctx in context2:
data={}
if isinstance(context2, (list, BrowseList)):
data['index'] = i
data['item_no'] = i+1
data['is_last'] = i == len(context2) - 1
if isinstance(context2, dict):
data['key'] = ctx
scope = Scope(ctx, this, data=data)
result.grow(options['fn'](scope))
i += 1
return result
def _if(this, options, context):
if isinstance(context, collections.Callable):
context = context(this)
if context:
return options['fn'](this)
else:
return options['inverse'](this)
def _log(this, context):
log(context)
def _unless(this, options, context):
if not context:
return options['fn'](this)
def _blockHelperMissing(this, options, context):
if isinstance(context, collections.Callable):
context = context(this)
if context != "" and not context:
return options['inverse'](this)
if type(context) in (list, strlist, tuple):
return _each(this, options, context)
if context is True:
callwith = this
else:
callwith = context
return options['fn'](callwith)
def _helperMissing(scope, name, *args):
if not args:
return None
raise Exception("Could not find property %s" % (name,))
def _with(this, options, context):
if context:
scope = Scope(context, this)
return options['fn'](scope)
else:
return options['inverse'](this)
def _file_path(this, context, thumbnail=None):
if context is None:
return ""
try:
dbname = database.get_active_db()
if thumbnail:
basename, ext = os.path.splitext(context)
basename2, _, rand = basename.rpartition(",")
fname = basename2 + "-resize-256," + rand + ext
else:
fname = context
return "/static/db/" + dbname + "/files/" + fname
except:
return ""
def _currency(this, context, nogroup=False, zero=None):
if context is None:
return ""
try:
val = float(context) # in case string
if zero is not None and abs(val) < 0.0001:
return zero
val = "{:0,.2f}".format(val)
if nogroup:
val = val.replace(",", "")
return val
except:
return ""
def _compare(this, options, val1, val2, operator="="):
if operator == "=":
res = val1 == val2
elif operator == "!=":
res = val1 != val2
elif operator == "<=":
res = val1 <= val2
elif operator == ">=":
res = val1 >= val2
elif operator == "<":
res = val1 < val2
elif operator == ">":
res = val1 > val2
elif operator == "in":
res = val1 in val2
elif operator == "not in":
res = val1 not in val2
else:
raise Exception("Invalid operator: '%s'" % operator)
if res:
return options['fn'](this)
else:
return options['inverse'](this)
def _ifeq(this, options, val1, val2):
if val1 == val2:
return options['fn'](this)
else:
return options['inverse'](this)
def _change_lang_url(this, lang): # FIXME
return "/ecom_index?set_lang=%s" % lang
def _if_match(this, options, val, pattern):
if not val:
val = ""
exp = pattern.replace("%", ".*")
if re.match(exp, val):
return options['fn'](this)
else:
return options['inverse'](this)
def _first(this, options, items):
if not items:
return ""
item = items[0]
return options['fn'](item)
def _after_first(this, options, items):
html = strlist()
for item in items[1:]:
html.grow(options["fn"](item))
return html
def _translate(this, val):
return _(val)
def _padding(this, val):
if not val:
return ""
return "-" + " " * int(val / 10) # XXX
def remove_zeros(s):
z = 0
while s[-1 - z] == "0":
z += 1
if s[-1 - z] == ".":
z += 1
if z:
s = s[:-z]
return s
def _fmt_ths_qty(this, val):
if val is None:
return ""
return "{:0,.0f}".format(val)
def _fmt_qty(this, val):
if val is None:
return ""
try:
val = float(val) # in case string
return remove_zeros("%.6f" % val)
except:
return "ERR"
def _fmt_number(this, val):
if val is None:
return ""
try:
val = float(val) # in case string
return remove_zeros("%.6f" % val)
except:
return "ERR"
def _filename(this, val):
if val is None:
return ""
try:
name, ext = os.path.splitext(val)
name2 = name.rsplit(",")[0]
return name2 + ext
except:
return val
def _lookup(this, o, *inds):
v = resolve(o, *inds)
if not v:
return ""
return str(v)
def _if_lookup(this, options, o, *inds):
v = resolve(o, *inds)
if v:
return options['fn'](this)
else:
return options['inverse'](this)
def _unless_lookup(this, options, o, *inds):
v = resolve(o, *inds)
if not v:
return options['fn'](this)
def _length(this, val):
if val is None:
return ""
return len(val)
def _unless_eq(this, options, val1, val2):
if val1 != val2:
return options['fn'](this)
def _ldelim(this):
return "{{"
def _rdelim(this):
return "}}"
def _fmt_date(this, val, fmt=None):
if not val:
return None
try:
d = datetime.datetime.strptime(val[:10], "%Y-%m-%d")
settings = get_model("settings").browse(1) # FIXME: speed
if not fmt:
fmt = settings.date_format
if fmt:
fmt = fmt.replace("YYYY", "%Y").replace("MM", "%m").replace("DD", "%d")
else:
fmt = "%Y-%m-%d"
s = d.strftime(fmt)
except:
print("Cannot convert date format for %s" % val)
s = val
return s
def _fmt_datetime(this, val, fmt=None):
if not val:
return None
try:
d = datetime.datetime.strptime(val, "%Y-%m-%d %H:%M:%S")
settings = get_model("settings").browse(1) # FIXME: speed
if not fmt:
fmt = settings.date_format
if fmt:
fmt = fmt.replace("YYYY", "%Y").replace("MM", "%m").replace("DD", "%d")
else:
fmt = "%Y-%m-%d"
fmt+=" %H:%M:%S"
s = d.strftime(fmt)
except:
print("Cannot convert datetime format for %s" % val)
s = val
return s
def _fmt_bool(this, val):
if val:
return "Yes"
return "No"
def _col_if(this, val):
if val:
return ""
else:
return "[[HIDE_COL]]"
if barcode:
class NFBarWriter(ImageWriter):
def calculate_size(self, *args, **kw):
self.text = "" # XXX
if self.custom_module_height:
self.module_height = self.custom_module_height
return ImageWriter.calculate_size(self, *args, **kw)
def _barcode(this, val, height=None, type="code128", add_checksum=False):
if not barcode:
return ""
if not val:
return ""
try:
bar_cls = barcode.get_barcode_class(type)
writer = NFBarWriter()
writer.custom_module_height = height
if type == "code39":
bar = bar_cls(str(val), writer=writer, add_checksum=add_checksum)
else:
bar = bar_cls(str(val), writer=writer)
_, fname = tempfile.mkstemp(suffix=".png", prefix="barcode-")
fullname = bar.save(fname.replace(".png", ""))
return fullname
except Exception as e:
print("WARNING: failed to generate barcode: %s (%s)" % (val, e))
return ""
_acc_bal_cache = {}
def get_all_balances(date_from=None, date_to=None, track1=None, track2=None):
t = time.time()
k = (date_from, date_to, track1, track2)
if k in _acc_bal_cache:
res, res_t = _acc_bal_cache[k]
if t - res_t <= 10:
print("cache hit", k)
return res
print("cache miss", k)
if track1:
res = get_model("account.track.categ").search([["code", "=", track1]])
if not res:
raise Exception("Invalid tracking category: %s" % track1)
track_id = res[0]
else:
track_id = None
if track2:
res = get_model("account.track.categ").search([["code", "=", track2]])
if not res:
raise Exception("Invalid tracking category: %s" % track2)
track2_id = res[0]
else:
track2_id = None
ctx = {
"date_from": date_from,
"date_to": date_to,
"track_id": track_id,
"track2_id": track2_id,
}
res = get_model("account.account").search_read([["type", "!=", "view"]], ["code", "balance"], context=ctx)
_acc_bal_cache[k] = (res, t)
return res
def _acc_balance(this, acc_from=None, acc_to=None, date_from=None, date_to=None, track1=None, track2=None, negate=False):
print("_acc_balance", acc_from, acc_to, date_from, date_to, track1, track2)
res = get_all_balances(date_from=date_from, date_to=date_to, track1=track1, track2=track2)
bal = 0
for r in res:
if r["code"] >= acc_from and r["code"] <= acc_to:
bal += r["balance"]
if negate:
return -bal
return bal
def _editable_field(this, name, text_only=False):
obj = this.context
model = obj["_model"]
m = get_model(model)
f = m._fields[name]
val = obj[name] # XXX
if isinstance(f, fields.Char):
field_type = "char"
elif isinstance(f, fields.Text):
field_type = "text"
elif isinstance(f, fields.Float):
field_type = "float"
else:
raise Exception("Unsupported editable field: %s.%s" % (model, name))
html = '<div class="nf-editable" data-model="%s" data-field="%s" data-type="%s" data-id="%s"' % (
model, name, field_type, obj["id"])
if text_only:
html += ' data-text-only="1"'
html += '>%s</div>' % val
return html
def _editable_block(this, options, name, page_id=None, post_id=None):
block = get_model("cms.block").get_block(name, page_id=page_id, post_id=post_id)
if block:
out = '<div class="nf-editable" data-model="cms.block" data-field="html" data-type="text" data-id="%s">%s</div>' % (
block["id"], block["html"])
else:
html = options['fn'](this)
defaults = {
"name": name,
}
if page_id:
defaults["page_id"] = page_id
if post_id:
defaults["post_id"] = post_id
out = '<div class="nf-editable" data-model="cms.block" data-field="html" data-type="text" data-defaults=\'%s\'>%s</div>' % (
json.dumps(defaults), html)
return out
def _if_perm(this, options, perm):
if access.check_permission_other(perm):
return options['fn'](this)
else:
return options['inverse'](this)
def _odt_linebreak(this, val):
if val is None:
return ""
val = str(val)
val = saxutils.escape(val)
return val.replace("\n", "<text:line-break></text:line-break>")
_globals_ = {
'helpers': {
'blockHelperMissing': _blockHelperMissing,
'paginate': _paginate,
'each': _each,
'if': _if,
'helperMissing': _helperMissing,
'log': _log,
'unless': _unless,
'with': _with,
"file_path": _file_path,
"currency": _currency,
"change_lang_url": _change_lang_url,
'compare': _compare,
'ifeq': _ifeq,
'if_match': _if_match,
't': _translate,
'padding': _padding,
'fmt_qty': _fmt_qty,
'fmt_ths_qty': _fmt_ths_qty,
'fmt_number': _fmt_number,
'fmt_date': _fmt_date,
'fmt_datetime': _fmt_datetime,
'fmt_bool': _fmt_bool,
'filename': _filename,
'first': _first,
'after_first': _after_first,
"lookup": _lookup,
"if_lookup": _if_lookup,
"unless_lookup": _unless_lookup,
"length": _length,
"unless_eq": _unless_eq,
"ldelim": _ldelim,
"rdelim": _rdelim,
"col_if": _col_if,
#"acc_balance": _acc_balance, # XXX: move this
"editable_field": _editable_field,
"editable_block": _editable_block,
"if_perm": _if_perm,
"barcode": _barcode,
"odt_linebreak": _odt_linebreak,
},
}
def register_helper(name,func):
_globals_["helpers"][name]=func
class CodeBuilder:
def __init__(self):
self.stack = []
self.blocks = {}
def start(self):
self._result = strlist()
self.stack.append((self._result, "render"))
self._result.grow("def render(context, helpers=None, partials=None):\n")
self._result.grow(" result = strlist()\n")
self._result.grow(" _helpers = dict(_globals_['helpers'])\n")
self._result.grow(" if helpers is not None: _helpers.update(helpers)\n")
self._result.grow(" helpers = _helpers\n")
self._result.grow(" if partials is None: partials = {}\n")
def finish(self):
self._result.grow(" return result\n")
source = "from netforce.hbs_compiler import strlist,escape,Scope,partial,_globals_,resolve\n\n"
for name, lines in reversed(sorted(self.blocks.items())):
source += "".join(lines) + "\n"
lines = self._result
source += "".join(lines)
return source
def start_block(self):
name = "render_block%d" % len(self.blocks)
self._result = strlist()
self.blocks[name] = self._result
self.stack.append((self._result, name))
self._result.grow("def %s(context, helpers=None, partials=None):\n" % name)
self._result.grow(" result = strlist()\n")
self._result.grow(" _helpers = dict(_globals_['helpers'])\n")
self._result.grow(" if helpers is not None: _helpers.update(helpers)\n")
self._result.grow(" helpers = _helpers\n")
self._result.grow(" if partials is None: partials = {}\n")
def finish_block(self):
self._result.grow(" return result\n")
name = self.stack.pop(-1)[1]
self._result = self.stack and self.stack[-1][0]
return name
def add_block(self, symbol, arguments, name, alt_name):
call = self.arguments_to_call(arguments)
self._result.grow([
" options = {'fn': %s}\n" % name,
" options['helpers'] = helpers\n"
" options['partials'] = partials\n"
])
if alt_name:
self._result.grow([" options['inverse'] = %s\n" % alt_name])
else:
self._result.grow([
" options['inverse'] = lambda this: None\n"
])
self._result.grow([
" value = helper = helpers.get('%s')\n" % symbol,
" if value is None:\n"
" value = context.get('%s')\n" % symbol,
" if helper and callable(helper):\n"
" this = Scope(context, context)\n"
" value = value(this, options, %s\n" % call,
" else:\n"
" helper = helpers['blockHelperMissing']\n"
" value = helper(context, options, value)\n"
" if value is None: value = ''\n"
" result.grow(value)\n"
])
def add_literal(self, value):
self._result.grow(" result.append(%r)\n" % value)
def _lookup_arg(self, arg):
if not arg:
return "context"
return arg
def arguments_to_call(self, arguments):
params = list(map(self._lookup_arg, arguments))
return ", ".join(params) + ")"
def find_lookup(self, path, path_type, call):
if path and path_type == "simple": # simple names can reference helpers.
# TODO: compile this whole expression in the grammar; for now,
# fugly but only a compile time overhead.
# XXX: just rm.
realname = path.replace('.get("', '').replace('")', '')
self._result.grow([
" value = helpers.get('%s')\n" % realname,
" if value is None:\n"
" value = resolve(context, '%s')\n" % path,
])
elif path_type == "simple":
realname = None
self._result.grow([
" value = resolve(context, '%s')\n" % path,
])
else:
realname = None
self._result.grow(" value = %s\n" % path)
self._result.grow([
" if callable(value):\n"
" this = Scope(context, context)\n"
" value = value(this, %s\n" % call,
])
if realname:
self._result.grow(
" elif value is None:\n"
" this = Scope(context, context)\n"
" value = helpers.get('helperMissing')(this, '%s', %s\n"
% (realname, call)
)
self._result.grow(" if value is None: value = ''\n")
def add_escaped_expand(self, path_type_path, arguments):
(path_type, path) = path_type_path
call = self.arguments_to_call(arguments)
self.find_lookup(path, path_type, call)
self._result.grow([
" if type(value) is not strlist:\n",
" value = escape(str(value))\n",
" result.grow(value)\n"
])
def add_expand(self, path_type_path, arguments):
(path_type, path) = path_type_path
call = self.arguments_to_call(arguments)
self.find_lookup(path, path_type, call)
self._result.grow([
" if type(value) is not strlist:\n",
" value = str(value)\n",
" result.grow(value)\n"
])
def _debug(self):
self._result.grow(" import pdb;pdb.set_trace()\n")
def add_invertedblock(self, symbol, arguments, name):
self._result.grow([
" value = context.get('%s')\n" % symbol,
" if not value:\n"
" "])
self._invoke_template(name, "context")
def _invoke_template(self, fn_name, this_name):
self._result.grow([
" result.grow(",
fn_name,
"(",
this_name,
", helpers=helpers, partials=partials))\n"
])
def add_partial(self, symbol, arguments):
if arguments:
assert len(arguments) == 1, arguments
arg = arguments[0]
else:
arg = ""
self._result.grow([
" inner = partials['%s']\n" % symbol,
" scope = Scope(%s, context)\n" % self._lookup_arg(arg)])
self._invoke_template("inner", "scope")
class Compiler:
_handlebars = OMeta.makeGrammar(handlebars_grammar, {}, 'handlebars')
_builder = CodeBuilder()
_compiler = OMeta.makeGrammar(compile_grammar, {'builder': _builder})
def __init__(self):
self._helpers = {}
def compile(self, source):
self._builder.stack = []
self._builder.blocks = {}
print("compile step 1...")
tree, err = self._handlebars(source).apply('template')
if err.error:
raise Exception(err.formatError(source))
print("compile step 2...")
code, err = self._compiler(tree).apply('compile')
if err.error:
raise Exception(err.formatError(tree))
return code
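# Usage sketch (illustrative; assumes this module is importable as
# netforce.hbs_compiler, which the generated source imports from):
#
#     src = Compiler().compile('Hello {{name}}!')
#     ns = {}
#     exec(src, ns)
#     print(str(ns['render']({'name': 'World'})))  # Hello World!
#
# compile() returns Python source for a render(context, helpers, partials)
# function rather than a callable, so the caller is expected to exec it.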
|
py | 1a42ff4c4355cd6198fe636c53b4eb608eb87e7f | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Michael Nooner
# Copyright (c) 2018 - 2020, Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""This module lists out all of the tables needed to create a QR code.
This module DOES NOT belong to the public API and may change without further
notice.
"""
from __future__ import division, unicode_literals
ALPHANUMERIC_CHARS = br'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:'
#: This defines the QR Code's 'mode' which sets what
#: type of code it is along with its size.
MODE_NUMERIC = 1
MODE_ALPHANUMERIC = 2
MODE_BYTE = 4
MODE_KANJI = 8
modes = {
'numeric': MODE_NUMERIC,
'alphanumeric': MODE_ALPHANUMERIC,
'binary': MODE_BYTE,
'kanji': MODE_KANJI,
}
#: This defines the amount of error correction. The dictionary
#: allows the user to specify this in several ways.
error_level = {'L': 'L', 'l': 'L', '7%': 'L', .7: 'L',
'M': 'M', 'm': 'M', '15%': 'M', .15: 'M',
'Q': 'Q', 'q': 'Q', '25%': 'Q', .25: 'Q',
'H': 'H', 'h': 'H', '30%': 'H', .30: 'H'}
#: This is a dictionary holds how long the "data length" field is for
#: each version and mode of the QR Code.
data_length_field = {9: {1: 10, 2: 9, 4: 8, 8: 8},
26: {1: 12, 2: 11, 4: 16, 8: 10},
40: {1: 14, 2: 13, 4: 16, 8: 12}}
#: This dictionary lists the data capacity for all possible QR Codes.
#: This dictionary is organized where the first key corresponds to the
#: QR Code version number. The next key corresponds to the error
#: correction level, see error. The final key corresponds to
#: the mode number, see modes. The zero mode number represents the
#: possible "data bits." This table was taken from:
#:
#: http://www.denso-wave.com/qrcode/vertable1-e.html
data_capacity = {
1: {
"L": {0: 152, 1: 41, 2: 25, 4: 17, 8: 10},
"M": {0: 128, 1: 34, 2: 20, 4: 14, 8: 8},
"Q": {0: 104, 1: 27, 2: 16, 4: 11, 8: 7},
"H": {0: 72, 1: 17, 2: 10, 4: 7, 8: 4}},
2: {
"L": {0: 272, 1: 77, 2: 47, 4: 32, 8: 20},
"M": {0: 224, 1: 63, 2: 38, 4: 26, 8: 16},
"Q": {0: 176, 1: 48, 2: 29, 4: 20, 8: 12},
"H": {0: 128, 1: 34, 2: 20, 4: 14, 8: 8}},
3: {
"L": {0: 440, 1: 127, 2: 77, 4: 53, 8: 32},
"M": {0: 352, 1: 101, 2: 61, 4: 42, 8: 26},
"Q": {0: 272, 1: 77, 2: 47, 4: 32, 8: 20},
"H": {0: 208, 1: 58, 2: 35, 4: 24, 8: 15}},
4: {
"L": {0: 640, 1: 187, 2: 114, 4: 78, 8: 48},
"M": {0: 512, 1: 149, 2: 90, 4: 62, 8: 38},
"Q": {0: 384, 1: 111, 2: 67, 4: 46, 8: 28},
"H": {0: 288, 1: 82, 2: 50, 4: 34, 8: 21}},
5: {
"L": {0: 864, 1: 255, 2: 154, 4: 106, 8: 65},
"M": {0: 688, 1: 202, 2: 122, 4: 84, 8: 52},
"Q": {0: 496, 1: 144, 2: 87, 4: 60, 8: 37},
"H": {0: 368, 1: 106, 2: 64, 4: 44, 8: 27}},
6: {
"L": {0: 1088, 1: 322, 2: 195, 4: 134, 8: 82},
"M": {0: 864, 1: 255, 2: 154, 4: 106, 8: 65},
"Q": {0: 608, 1: 178, 2: 108, 4: 74, 8: 45},
"H": {0: 480, 1: 139, 2: 84, 4: 58, 8: 36}},
7: {
"L": {0: 1248, 1: 370, 2: 224, 4: 154, 8: 95},
"M": {0: 992, 1: 293, 2: 178, 4: 122, 8: 75},
"Q": {0: 704, 1: 207, 2: 125, 4: 86, 8: 53},
"H": {0: 528, 1: 154, 2: 93, 4: 64, 8: 39}},
8: {
"L": {0: 1552, 1: 461, 2: 279, 4: 192, 8: 118},
"M": {0: 1232, 1: 365, 2: 221, 4: 152, 8: 93},
"Q": {0: 880, 1: 259, 2: 157, 4: 108, 8: 66},
"H": {0: 688, 1: 202, 2: 122, 4: 84, 8: 52}},
9: {
"L": {0: 1856, 1: 552, 2: 335, 4: 230, 8: 141},
"M": {0: 1456, 1: 432, 2: 262, 4: 180, 8: 111},
"Q": {0: 1056, 1: 312, 2: 189, 4: 130, 8: 80},
"H": {0: 800, 1: 235, 2: 143, 4: 98, 8: 60}},
10: {
"L": {0: 2192, 1: 652, 2: 395, 4: 271, 8: 167},
"M": {0: 1728, 1: 513, 2: 311, 4: 213, 8: 131},
"Q": {0: 1232, 1: 364, 2: 221, 4: 151, 8: 93},
"H": {0: 976, 1: 288, 2: 174, 4: 119, 8: 74}},
11: {
"L": {0: 2592, 1: 772, 2: 468, 4: 321, 8: 198},
"M": {0: 2032, 1: 604, 2: 366, 4: 251, 8: 155},
"Q": {0: 1440, 1: 427, 2: 259, 4: 177, 8: 109},
"H": {0: 1120, 1: 331, 2: 200, 4: 137, 8: 85}},
12: {
"L": {0: 2960, 1: 883, 2: 535, 4: 367, 8: 226},
"M": {0: 2320, 1: 691, 2: 419, 4: 287, 8: 177},
"Q": {0: 1648, 1: 489, 2: 296, 4: 203, 8: 125},
"H": {0: 1264, 1: 374, 2: 227, 4: 155, 8: 96}},
13: {
"L": {0: 3424, 1: 1022, 2: 619, 4: 425, 8: 262},
"M": {0: 2672, 1: 796, 2: 483, 4: 331, 8: 204},
"Q": {0: 1952, 1: 580, 2: 352, 4: 241, 8: 149},
"H": {0: 1440, 1: 427, 2: 259, 4: 177, 8: 109}},
14: {
"L": {0: 3688, 1: 1101, 2: 667, 4: 458, 8: 282},
"M": {0: 2920, 1: 871, 2: 528, 4: 362, 8: 223},
"Q": {0: 2088, 1: 621, 2: 376, 4: 258, 8: 159},
"H": {0: 1576, 1: 468, 2: 283, 4: 194, 8: 120}},
15: {
"L": {0: 4184, 1: 1250, 2: 758, 4: 520, 8: 320},
"M": {0: 3320, 1: 991, 2: 600, 4: 412, 8: 254},
"Q": {0: 2360, 1: 703, 2: 426, 4: 292, 8: 180},
"H": {0: 1784, 1: 530, 2: 321, 4: 220, 8: 136}},
16: {
"L": {0: 4712, 1: 1408, 2: 854, 4: 586, 8: 361},
"M": {0: 3624, 1: 1082, 2: 656, 4: 450, 8: 277},
"Q": {0: 2600, 1: 775, 2: 470, 4: 322, 8: 198},
"H": {0: 2024, 1: 602, 2: 365, 4: 250, 8: 154}},
17: {
"L": {0: 5176, 1: 1548, 2: 938, 4: 644, 8: 397},
"M": {0: 4056, 1: 1212, 2: 734, 4: 504, 8: 310},
"Q": {0: 2936, 1: 876, 2: 531, 4: 364, 8: 224},
"H": {0: 2264, 1: 674, 2: 408, 4: 280, 8: 173}},
18: {
"L": {0: 5768, 1: 1725, 2: 1046, 4: 718, 8: 442},
"M": {0: 4504, 1: 1346, 2: 816, 4: 560, 8: 345},
"Q": {0: 3176, 1: 948, 2: 574, 4: 394, 8: 243},
"H": {0: 2504, 1: 746, 2: 452, 4: 310, 8: 191}},
19: {
"L": {0: 6360, 1: 1903, 2: 1153, 4: 792, 8: 488},
"M": {0: 5016, 1: 1500, 2: 909, 4: 624, 8: 384},
"Q": {0: 3560, 1: 1063, 2: 644, 4: 442, 8: 272},
"H": {0: 2728, 1: 813, 2: 493, 4: 338, 8: 208}},
20: {
"L": {0: 6888, 1: 2061, 2: 1249, 4: 858, 8: 528},
"M": {0: 5352, 1: 1600, 2: 970, 4: 666, 8: 410},
"Q": {0: 3880, 1: 1159, 2: 702, 4: 482, 8: 297},
"H": {0: 3080, 1: 919, 2: 557, 4: 382, 8: 235}},
21: {
"L": {0: 7456, 1: 2232, 2: 1352, 4: 929, 8: 572},
"M": {0: 5712, 1: 1708, 2: 1035, 4: 711, 8: 438},
"Q": {0: 4096, 1: 1224, 2: 742, 4: 509, 8: 314},
"H": {0: 3248, 1: 969, 2: 587, 4: 403, 8: 248}},
22: {
"L": {0: 8048, 1: 2409, 2: 1460, 4: 1003, 8: 618},
"M": {0: 6256, 1: 1872, 2: 1134, 4: 779, 8: 480},
"Q": {0: 4544, 1: 1358, 2: 823, 4: 565, 8: 348},
"H": {0: 3536, 1: 1056, 2: 640, 4: 439, 8: 270}},
23: {
"L": {0: 8752, 1: 2620, 2: 1588, 4: 1091, 8: 672},
"M": {0: 6880, 1: 2059, 2: 1248, 4: 857, 8: 528},
"Q": {0: 4912, 1: 1468, 2: 890, 4: 611, 8: 376},
"H": {0: 3712, 1: 1108, 2: 672, 4: 461, 8: 284}},
24: {
"L": {0: 9392, 1: 2812, 2: 1704, 4: 1171, 8: 721},
"M": {0: 7312, 1: 2188, 2: 1326, 4: 911, 8: 561},
"Q": {0: 5312, 1: 1588, 2: 963, 4: 661, 8: 407},
"H": {0: 4112, 1: 1228, 2: 744, 4: 511, 8: 315}},
25: {
"L": {0: 10208, 1: 3057, 2: 1853, 4: 1273, 8: 784},
"M": {0: 8000, 1: 2395, 2: 1451, 4: 997, 8: 614},
"Q": {0: 5744, 1: 1718, 2: 1041, 4: 715, 8: 440},
"H": {0: 4304, 1: 1286, 2: 779, 4: 535, 8: 330}},
26: {
"L": {0: 10960, 1: 3283, 2: 1990, 4: 1367, 8: 842},
"M": {0: 8496, 1: 2544, 2: 1542, 4: 1059, 8: 652},
"Q": {0: 6032, 1: 1804, 2: 1094, 4: 751, 8: 462},
"H": {0: 4768, 1: 1425, 2: 864, 4: 593, 8: 365}},
27: {
"L": {0: 11744, 1: 3514, 2: 2132, 4: 1465, 8: 902},
"M": {0: 9024, 1: 2701, 2: 1637, 4: 1125, 8: 692},
"Q": {0: 6464, 1: 1933, 2: 1172, 4: 805, 8: 496},
"H": {0: 5024, 1: 1501, 2: 910, 4: 625, 8: 385}},
28: {
"L": {0: 12248, 1: 3669, 2: 2223, 4: 1528, 8: 940},
"M": {0: 9544, 1: 2857, 2: 1732, 4: 1190, 8: 732},
"Q": {0: 6968, 1: 2085, 2: 1263, 4: 868, 8: 534},
"H": {0: 5288, 1: 1581, 2: 958, 4: 658, 8: 405}},
29: {
"L": {0: 13048, 1: 3909, 2: 2369, 4: 1628, 8: 1002},
"M": {0: 10136, 1: 3035, 2: 1839, 4: 1264, 8: 778},
"Q": {0: 7288, 1: 2181, 2: 1322, 4: 908, 8: 559},
"H": {0: 5608, 1: 1677, 2: 1016, 4: 698, 8: 430}},
30: {
"L": {0: 13880, 1: 4158, 2: 2520, 4: 1732, 8: 1066},
"M": {0: 10984, 1: 3289, 2: 1994, 4: 1370, 8: 843},
"Q": {0: 7880, 1: 2358, 2: 1429, 4: 982, 8: 604},
"H": {0: 5960, 1: 1782, 2: 1080, 4: 742, 8: 457}},
31: {
"L": {0: 14744, 1: 4417, 2: 2677, 4: 1840, 8: 1132},
"M": {0: 11640, 1: 3486, 2: 2113, 4: 1452, 8: 894},
"Q": {0: 8264, 1: 2473, 2: 1499, 4: 1030, 8: 634},
"H": {0: 6344, 1: 1897, 2: 1150, 4: 790, 8: 486}},
32: {
"L": {0: 15640, 1: 4686, 2: 2840, 4: 1952, 8: 1201},
"M": {0: 12328, 1: 3693, 2: 2238, 4: 1538, 8: 947},
"Q": {0: 8920, 1: 2670, 2: 1618, 4: 1112, 8: 684},
"H": {0: 6760, 1: 2022, 2: 1226, 4: 842, 8: 518}},
33: {
"L": {0: 16568, 1: 4965, 2: 3009, 4: 2068, 8: 1273},
"M": {0: 13048, 1: 3909, 2: 2369, 4: 1628, 8: 1002},
"Q": {0: 9368, 1: 2805, 2: 1700, 4: 1168, 8: 719},
"H": {0: 7208, 1: 2157, 2: 1307, 4: 898, 8: 553}},
34: {
"L": {0: 17528, 1: 5253, 2: 3183, 4: 2188, 8: 1347},
"M": {0: 13800, 1: 4134, 2: 2506, 4: 1722, 8: 1060},
"Q": {0: 9848, 1: 2949, 2: 1787, 4: 1228, 8: 756},
"H": {0: 7688, 1: 2301, 2: 1394, 4: 958, 8: 590}},
35: {
"L": {0: 18448, 1: 5529, 2: 3351, 4: 2303, 8: 1417},
"M": {0: 14496, 1: 4343, 2: 2632, 4: 1809, 8: 1113},
"Q": {0: 10288, 1: 3081, 2: 1867, 4: 1283, 8: 790},
"H": {0: 7888, 1: 2361, 2: 1431, 4: 983, 8: 605}},
36: {
"L": {0: 19472, 1: 5836, 2: 3537, 4: 2431, 8: 1496},
"M": {0: 15312, 1: 4588, 2: 2780, 4: 1911, 8: 1176},
"Q": {0: 10832, 1: 3244, 2: 1966, 4: 1351, 8: 832},
"H": {0: 8432, 1: 2524, 2: 1530, 4: 1051, 8: 647}},
37: {
"L": {0: 20528, 1: 6153, 2: 3729, 4: 2563, 8: 1577},
"M": {0: 15936, 1: 4775, 2: 2894, 4: 1989, 8: 1224},
"Q": {0: 11408, 1: 3417, 2: 2071, 4: 1423, 8: 876},
"H": {0: 8768, 1: 2625, 2: 1591, 4: 1093, 8: 673}},
38: {
"L": {0: 21616, 1: 6479, 2: 3927, 4: 2699, 8: 1661},
"M": {0: 16816, 1: 5039, 2: 3054, 4: 2099, 8: 1292},
"Q": {0: 12016, 1: 3599, 2: 2181, 4: 1499, 8: 923},
"H": {0: 9136, 1: 2735, 2: 1658, 4: 1139, 8: 701}},
39: {
"L": {0: 22496, 1: 6743, 2: 4087, 4: 2809, 8: 1729},
"M": {0: 17728, 1: 5313, 2: 3220, 4: 2213, 8: 1362},
"Q": {0: 12656, 1: 3791, 2: 2298, 4: 1579, 8: 972},
"H": {0: 9776, 1: 2927, 2: 1774, 4: 1219, 8: 750}},
40: {
"L": {0: 23648, 1: 7089, 2: 4296, 4: 2953, 8: 1817},
"M": {0: 18672, 1: 5596, 2: 3391, 4: 2331, 8: 1435},
"Q": {0: 13328, 1: 3993, 2: 2420, 4: 1663, 8: 1024},
"H": {0: 10208, 1: 3057, 2: 1852, 4: 1273, 8: 784}}
}
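#: Example lookup (illustrative): a version 1 symbol at error level 'L' holds up
#: to 25 alphanumeric characters, i.e. data_capacity[1]['L'][MODE_ALPHANUMERIC] == 25,
#: and 152 raw data bits (the 0 key).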
#: This table defines the "Error Correction Code Words and Block Information."
#: The table lists the number of error correction words that are required
#: to be generated for each version and error correction level. The table
#: is accessed by first using the version number as a key and then the
#: error level. The array values correspond to these columns from the source
#: table:
#:
#: +----------------------------+
#: |0 | EC Code Words Per Block |
#: +----------------------------+
#: |1 | Block 1 Count |
#: +----------------------------+
#: |2 | Block 1 Data Code Words |
#: +----------------------------+
#: |3 | Block 2 Count |
#: +----------------------------+
#: |4 | Block 2 Data Code Words |
#: +----------------------------+
#:
#: This table was taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/error-correction-table/
eccwbi = {
1: {
'L': (7, 1, 19, 0, 0),
'M': (10, 1, 16, 0, 0),
'Q': (13, 1, 13, 0, 0),
'H': (17, 1, 9, 0, 0),
},
2: {
'L': (10, 1, 34, 0, 0),
'M': (16, 1, 28, 0, 0),
'Q': (22, 1, 22, 0, 0),
'H': (28, 1, 16, 0, 0),
},
3: {
'L': (15, 1, 55, 0, 0),
'M': (26, 1, 44, 0, 0),
'Q': (18, 2, 17, 0, 0),
'H': (22, 2, 13, 0, 0),
},
4: {
'L': (20, 1, 80, 0, 0),
'M': (18, 2, 32, 0, 0),
'Q': (26, 2, 24, 0, 0),
'H': (16, 4, 9, 0, 0),
},
5: {
'L': (26, 1, 108, 0, 0),
'M': (24, 2, 43, 0, 0),
'Q': (18, 2, 15, 2, 16),
'H': (22, 2, 11, 2, 12),
},
6: {
'L': (18, 2, 68, 0, 0),
'M': (16, 4, 27, 0, 0),
'Q': (24, 4, 19, 0, 0),
'H': (28, 4, 15, 0, 0),
},
7: {
'L': (20, 2, 78, 0, 0),
'M': (18, 4, 31, 0, 0),
'Q': (18, 2, 14, 4, 15),
'H': (26, 4, 13, 1, 14),
},
8: {
'L': (24, 2, 97, 0, 0),
'M': (22, 2, 38, 2, 39),
'Q': (22, 4, 18, 2, 19),
'H': (26, 4, 14, 2, 15),
},
9: {
'L': (30, 2, 116, 0, 0),
'M': (22, 3, 36, 2, 37),
'Q': (20, 4, 16, 4, 17),
'H': (24, 4, 12, 4, 13),
},
10: {
'L': (18, 2, 68, 2, 69),
'M': (26, 4, 43, 1, 44),
'Q': (24, 6, 19, 2, 20),
'H': (28, 6, 15, 2, 16),
},
11: {
'L': (20, 4, 81, 0, 0),
'M': (30, 1, 50, 4, 51),
'Q': (28, 4, 22, 4, 23),
'H': (24, 3, 12, 8, 13),
},
12: {
'L': (24, 2, 92, 2, 93),
'M': (22, 6, 36, 2, 37),
'Q': (26, 4, 20, 6, 21),
'H': (28, 7, 14, 4, 15),
},
13: {
'L': (26, 4, 107, 0, 0),
'M': (22, 8, 37, 1, 38),
'Q': (24, 8, 20, 4, 21),
'H': (22, 12, 11, 4, 12),
},
14: {
'L': (30, 3, 115, 1, 116),
'M': (24, 4, 40, 5, 41),
'Q': (20, 11, 16, 5, 17),
'H': (24, 11, 12, 5, 13),
},
15: {
'L': (22, 5, 87, 1, 88),
'M': (24, 5, 41, 5, 42),
'Q': (30, 5, 24, 7, 25),
'H': (24, 11, 12, 7, 13),
},
16: {
'L': (24, 5, 98, 1, 99),
'M': (28, 7, 45, 3, 46),
'Q': (24, 15, 19, 2, 20),
'H': (30, 3, 15, 13, 16),
},
17: {
'L': (28, 1, 107, 5, 108),
'M': (28, 10, 46, 1, 47),
'Q': (28, 1, 22, 15, 23),
'H': (28, 2, 14, 17, 15),
},
18: {
'L': (30, 5, 120, 1, 121),
'M': (26, 9, 43, 4, 44),
'Q': (28, 17, 22, 1, 23),
'H': (28, 2, 14, 19, 15),
},
19: {
'L': (28, 3, 113, 4, 114),
'M': (26, 3, 44, 11, 45),
'Q': (26, 17, 21, 4, 22),
'H': (26, 9, 13, 16, 14),
},
20: {
'L': (28, 3, 107, 5, 108),
'M': (26, 3, 41, 13, 42),
'Q': (30, 15, 24, 5, 25),
'H': (28, 15, 15, 10, 16),
},
21: {
'L': (28, 4, 116, 4, 117),
'M': (26, 17, 42, 0, 0),
'Q': (28, 17, 22, 6, 23),
'H': (30, 19, 16, 6, 17),
},
22: {
'L': (28, 2, 111, 7, 112),
'M': (28, 17, 46, 0, 0),
'Q': (30, 7, 24, 16, 25),
'H': (24, 34, 13, 0, 0),
},
23: {
'L': (30, 4, 121, 5, 122),
'M': (28, 4, 47, 14, 48),
'Q': (30, 11, 24, 14, 25),
'H': (30, 16, 15, 14, 16),
},
24: {
'L': (30, 6, 117, 4, 118),
'M': (28, 6, 45, 14, 46),
'Q': (30, 11, 24, 16, 25),
'H': (30, 30, 16, 2, 17),
},
25: {
'L': (26, 8, 106, 4, 107),
'M': (28, 8, 47, 13, 48),
'Q': (30, 7, 24, 22, 25),
'H': (30, 22, 15, 13, 16),
},
26: {
'L': (28, 10, 114, 2, 115),
'M': (28, 19, 46, 4, 47),
'Q': (28, 28, 22, 6, 23),
'H': (30, 33, 16, 4, 17),
},
27: {
'L': (30, 8, 122, 4, 123),
'M': (28, 22, 45, 3, 46),
'Q': (30, 8, 23, 26, 24),
'H': (30, 12, 15, 28, 16),
},
28: {
'L': (30, 3, 117, 10, 118),
'M': (28, 3, 45, 23, 46),
'Q': (30, 4, 24, 31, 25),
'H': (30, 11, 15, 31, 16),
},
29: {
'L': (30, 7, 116, 7, 117),
'M': (28, 21, 45, 7, 46),
'Q': (30, 1, 23, 37, 24),
'H': (30, 19, 15, 26, 16),
},
30: {
'L': (30, 5, 115, 10, 116),
'M': (28, 19, 47, 10, 48),
'Q': (30, 15, 24, 25, 25),
'H': (30, 23, 15, 25, 16),
},
31: {
'L': (30, 13, 115, 3, 116),
'M': (28, 2, 46, 29, 47),
'Q': (30, 42, 24, 1, 25),
'H': (30, 23, 15, 28, 16),
},
32: {
'L': (30, 17, 115, 0, 0),
'M': (28, 10, 46, 23, 47),
'Q': (30, 10, 24, 35, 25),
'H': (30, 19, 15, 35, 16),
},
33: {
'L': (30, 17, 115, 1, 116),
'M': (28, 14, 46, 21, 47),
'Q': (30, 29, 24, 19, 25),
'H': (30, 11, 15, 46, 16),
},
34: {
'L': (30, 13, 115, 6, 116),
'M': (28, 14, 46, 23, 47),
'Q': (30, 44, 24, 7, 25),
'H': (30, 59, 16, 1, 17),
},
35: {
'L': (30, 12, 121, 7, 122),
'M': (28, 12, 47, 26, 48),
'Q': (30, 39, 24, 14, 25),
'H': (30, 22, 15, 41, 16),
},
36: {
'L': (30, 6, 121, 14, 122),
'M': (28, 6, 47, 34, 48),
'Q': (30, 46, 24, 10, 25),
'H': (30, 2, 15, 64, 16),
},
37: {
'L': (30, 17, 122, 4, 123),
'M': (28, 29, 46, 14, 47),
'Q': (30, 49, 24, 10, 25),
'H': (30, 24, 15, 46, 16),
},
38: {
'L': (30, 4, 122, 18, 123),
'M': (28, 13, 46, 32, 47),
'Q': (30, 48, 24, 14, 25),
'H': (30, 42, 15, 32, 16),
},
39: {
'L': (30, 20, 117, 4, 118),
'M': (28, 40, 47, 7, 48),
'Q': (30, 43, 24, 22, 25),
'H': (30, 10, 15, 67, 16),
},
40: {
'L': (30, 19, 118, 6, 119),
'M': (28, 18, 47, 31, 48),
'Q': (30, 34, 24, 34, 25),
'H': (30, 20, 15, 61, 16),
},
}
#: This table lists all of the generator polynomials used by QR Codes.
#: They are indexed by the number of "ECC Code Words" (see table above).
#: This table is taken from:
#:
#: http://www.matchadesign.com/blog/qr-code-demystified-part-4/
generator_polynomials = {
7: (87, 229, 146, 149, 238, 102, 21),
10: (251, 67, 46, 61, 118, 70, 64, 94, 32, 45),
13: (74, 152, 176, 100, 86, 100, 106, 104, 130, 218, 206, 140, 78),
15: (8, 183, 61, 91, 202, 37, 51, 58, 58, 237, 140, 124, 5, 99, 105),
16: (120, 104, 107, 109, 102, 161, 76, 3, 91, 191, 147, 169, 182, 194,
225, 120),
17: (43, 139, 206, 78, 43, 239, 123, 206, 214, 147, 24, 99, 150, 39,
243, 163, 136),
18: (215, 234, 158, 94, 184, 97, 118, 170, 79, 187, 152, 148, 252, 179,
5, 98, 96, 153),
20: (17, 60, 79, 50, 61, 163, 26, 187, 202, 180, 221, 225, 83, 239, 156,
164, 212, 212, 188, 190),
22: (210, 171, 247, 242, 93, 230, 14, 109, 221, 53, 200, 74, 8, 172, 98,
80, 219, 134, 160, 105, 165, 231),
24: (229, 121, 135, 48, 211, 117, 251, 126, 159, 180, 169, 152, 192, 226,
228, 218, 111, 0, 117, 232, 87, 96, 227, 21),
26: (173, 125, 158, 2, 103, 182, 118, 17, 145, 201, 111, 28, 165, 53, 161,
21, 245, 142, 13, 102, 48, 227, 153, 145, 218, 70),
28: (168, 223, 200, 104, 224, 234, 108, 180, 110, 190, 195, 147, 205, 27,
232, 201, 21, 43, 245, 87, 42, 195, 212, 119, 242, 37, 9, 123),
30: (41, 173, 145, 152, 216, 31, 179, 182, 50, 48, 110, 86, 239, 96, 222,
125, 42, 173, 226, 193, 224, 130, 156, 37, 251, 216, 238, 40, 192,
180)
}
#: This table contains the log values used in GF(256) arithmetic.
#: They are used to generate error correction codes for QR Codes.
#: This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/log-antilog-table/
galois_log = (
1, 2, 4, 8, 16, 32, 64, 128, 29, 58, 116, 232, 205, 135, 19, 38, 76, 152,
45, 90, 180, 117, 234, 201, 143, 3, 6, 12, 24, 48, 96, 192, 157, 39, 78,
156, 37, 74, 148, 53, 106, 212, 181, 119, 238, 193, 159, 35, 70, 140, 5,
10, 20, 40, 80, 160, 93, 186, 105, 210, 185, 111, 222, 161, 95, 190, 97,
194, 153, 47, 94, 188, 101, 202, 137, 15, 30, 60, 120, 240, 253, 231, 211,
187, 107, 214, 177, 127, 254, 225, 223, 163, 91, 182, 113, 226, 217, 175,
67, 134, 17, 34, 68, 136, 13, 26, 52, 104, 208, 189, 103, 206, 129, 31,
62, 124, 248, 237, 199, 147, 59, 118, 236, 197, 151, 51, 102, 204, 133,
23, 46, 92, 184, 109, 218, 169, 79, 158, 33, 66, 132, 21, 42, 84, 168, 77,
154, 41, 82, 164, 85, 170, 73, 146, 57, 114, 228, 213, 183, 115, 230, 209,
191, 99, 198, 145, 63, 126, 252, 229, 215, 179, 123, 246, 241, 255, 227,
219, 171, 75, 150, 49, 98, 196, 149, 55, 110, 220, 165, 87, 174, 65, 130,
25, 50, 100, 200, 141, 7, 14, 28, 56, 112, 224, 221, 167, 83, 166, 81,
162, 89, 178, 121, 242, 249, 239, 195, 155, 43, 86, 172, 69, 138, 9, 18,
36, 72, 144, 61, 122, 244, 245, 247, 243, 251, 235, 203, 139, 11, 22, 44,
88, 176, 125, 250, 233, 207, 131, 27, 54, 108, 216, 173, 71, 142, 1)
#: This table contains the antilog values used in GF(256) arithmetic.
#: They are used to generate error correction codes for QR Codes.
#: This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/log-antilog-table/
galois_antilog = (
None, 0, 1, 25, 2, 50, 26, 198, 3, 223, 51, 238, 27, 104, 199, 75, 4, 100,
224, 14, 52, 141, 239, 129, 28, 193, 105, 248, 200, 8, 76, 113, 5, 138,
101, 47, 225, 36, 15, 33, 53, 147, 142, 218, 240, 18, 130, 69, 29, 181,
194, 125, 106, 39, 249, 185, 201, 154, 9, 120, 77, 228, 114, 166, 6, 191,
139, 98, 102, 221, 48, 253, 226, 152, 37, 179, 16, 145, 34, 136, 54, 208,
148, 206, 143, 150, 219, 189, 241, 210, 19, 92, 131, 56, 70, 64, 30, 66,
182, 163, 195, 72, 126, 110, 107, 58, 40, 84, 250, 133, 186, 61, 202, 94,
155, 159, 10, 21, 121, 43, 78, 212, 229, 172, 115, 243, 167, 87, 7, 112,
192, 247, 140, 128, 99, 13, 103, 74, 222, 237, 49, 197, 254, 24, 227, 165,
153, 119, 38, 184, 180, 124, 17, 68, 146, 217, 35, 32, 137, 46, 55, 63,
209, 91, 149, 188, 207, 205, 144, 135, 151, 178, 220, 252, 190, 97, 242,
86, 211, 171, 20, 42, 93, 158, 132, 60, 57, 83, 71, 109, 65, 162, 31, 45,
67, 216, 183, 123, 164, 118, 196, 23, 73, 236, 127, 12, 111, 246, 108,
161, 59, 82, 41, 157, 85, 170, 251, 96, 134, 177, 187, 204, 62, 90, 203,
89, 95, 176, 156, 169, 160, 81, 11, 245, 22, 235, 122, 117, 44, 215, 79,
174, 213, 233, 230, 231, 173, 232, 116, 214, 244, 234, 168, 80, 88, 175)
#: This table contains the coordinates for the position adjustment patterns.
#: The index of the table corresponds to the QR Code's version number.
#: This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/
position_adjustment = (
None, # There is no version 0
None, # Version 1 does not need adjustment
(6, 18),
(6, 22),
(6, 26),
(6, 30),
(6, 34),
(6, 22, 38),
(6, 24, 42),
(6, 26, 46),
(6, 28, 50),
(6, 30, 54),
(6, 32, 58),
(6, 34, 62),
(6, 26, 46, 66),
(6, 26, 48, 70),
(6, 26, 50, 74),
(6, 30, 54, 78),
(6, 30, 56, 82),
(6, 30, 58, 86),
(6, 34, 62, 90),
(6, 28, 50, 72, 94),
(6, 26, 50, 74, 98),
(6, 30, 54, 78, 102),
(6, 28, 54, 80, 106),
(6, 32, 58, 84, 110),
(6, 30, 58, 86, 114),
(6, 34, 62, 90, 118),
(6, 26, 50, 74, 98, 122),
(6, 30, 54, 78, 102, 126),
(6, 26, 52, 78, 104, 130),
(6, 30, 56, 82, 108, 134),
(6, 34, 60, 86, 112, 138),
(6, 30, 58, 86, 114, 142),
(6, 34, 62, 90, 118, 146),
(6, 30, 54, 78, 102, 126, 150),
(6, 24, 50, 76, 102, 128, 154),
(6, 28, 54, 80, 106, 132, 158),
(6, 32, 58, 84, 110, 136, 162),
(6, 26, 54, 82, 110, 138, 166),
(6, 30, 58, 86, 114, 142, 170),
)
#: This table specifies the bit pattern to be added to a QR Code's
#: image to specify what version the code is. Note, this pattern
#: is not used for versions 1-6. This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/
version_pattern = (None, None, None, None, None, None, None, #0-6
'000111110010010100', '001000010110111100', '001001101010011001',
'001010010011010011', '001011101111110110', '001100011101100010',
'001101100001000111', '001110011000001101', '001111100100101000',
'010000101101111000', '010001010001011101', '010010101000010111',
'010011010100110010', '010100100110100110', '010101011010000011',
'010110100011001001', '010111011111101100', '011000111011000100',
'011001000111100001', '011010111110101011', '011011000010001110',
'011100110000011010', '011101001100111111', '011110110101110101',
'011111001001010000', '100000100111010101', '100001011011110000',
'100010100010111010', '100011011110011111', '100100101100001011',
'100101010000101110', '100110101001100100', '100111010101000001',
'101000110001101001'
)
#: This table contains the bit fields needed to specify the error code level and
#: mask pattern used by a QR Code. This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/
type_bits = {
'L': {
0: '111011111000100',
1: '111001011110011',
2: '111110110101010',
3: '111100010011101',
4: '110011000101111',
5: '110001100011000',
6: '110110001000001',
7: '110100101110110',
},
'M': {
0: '101010000010010',
1: '101000100100101',
2: '101111001111100',
3: '101101101001011',
4: '100010111111001',
5: '100000011001110',
6: '100111110010111',
7: '100101010100000',
},
'Q': {
0: '011010101011111',
1: '011000001101000',
2: '011111100110001',
3: '011101000000110',
4: '010010010110100',
5: '010000110000011',
6: '010111011011010',
7: '010101111101101',
},
'H': {
0: '001011010001001',
1: '001001110111110',
2: '001110011100111',
3: '001100111010000',
4: '000011101100010',
5: '000001001010101',
6: '000110100001100',
7: '000100000111011',
},
}
#: This table contains *functions* to compute whether to change the current bit when
#: creating the masks. All of the functions in the table return a boolean value.
#: A True result means you should add the bit to the QR Code exactly as is. A
#: False result means you should add the opposite bit. This table was taken
#: from:
#:
#: http://www.thonky.com/qr-code-tutorial/mask-patterns/
mask_patterns = (
lambda row, col: (row + col) % 2 == 0,
lambda row, col: row % 2 == 0,
lambda row, col: col % 3 == 0,
lambda row, col: (row + col) % 3 == 0,
lambda row, col: ((row // 2) + (col // 3)) % 2 == 0,
lambda row, col: ((row * col) % 2) + ((row * col) % 3) == 0,
lambda row, col: (((row * col) % 2) + ((row * col) % 3)) % 2 == 0,
lambda row, col: (((row + col) % 2) + ((row * col) % 3)) % 2 == 0)
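# Added illustration (not part of the original tables): the two Galois tables
# above turn GF(256) multiplication into integer addition. galois_antilog[x]
# gives the exponent i with galois_log[i] == x, so multiplying two elements
# reduces to adding their exponents modulo 255 -- the operation used when
# dividing a message by the generator polynomials to produce error correction
# code words.
def _gf256_multiply(a, b):
    """Multiply two GF(256) elements via the log/antilog tables (sketch)."""
    if a == 0 or b == 0:
        return 0
    return galois_log[(galois_antilog[a] + galois_antilog[b]) % 255]
# For example, _gf256_multiply(2, 4) == 8 and _gf256_multiply(128, 2) == 29,
# matching arithmetic modulo the QR Code field polynomial x^8 + x^4 + x^3 + x^2 + 1.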
|
py | 1a42ff56ef6a3eabfb4e54823f30e728ab4bc49d | x = int(input("Enter a number"))
y = int(input("Enter another number"))
z = input("Choose\n 1- +\n2- -\n3- x\n4- /(divide)")
if(z == "1"):
    print(x+y,"Result")
elif(z == "2"):
    print(x-y,"Result")
elif(z == "3"):
    print(x*y,"Result")
elif(z == "4"):
    print(x/y,"Result")
else:
    print("Please review your choices") |
py | 1a430028cee27d9c88bae163f79a204e65e08529 | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
"""Clas to run tests on admin application"""
def setUp(self): # setup fucntion is used to do some pre-req taks
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(email='[email protected]', password='vivek')
self.client.force_login(self.admin_user, backend=None)
self.user = get_user_model().objects.create_user(email='[email protected]', password='password1234')
def test_users_listed(self):
"""Test that users are listed on users page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
print(res)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the use edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# /admin/core/user/1
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
    def test_create_user_page(self):
"""Test create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200) |
py | 1a4300380abf7f37393f8861b231adfb7a916047 | from mie2c.e2c import Encoder, Decoder, Transition, LinearTransition, PWATransition
import torch
from torch import nn
def get_bounce_encoder(dim_in, dim_z):
channels_enc = [6, 32, 32, 16, 16]
ff_shape = [128, 128, 128]
conv_activation = torch.nn.ReLU()
ff_activation = torch.nn.ReLU()
n_channels = len(channels_enc) - 1
kernel_enc = [5, 3, 5, 3, 5]
    stride = [2, 1, 2, 1, 2]
    padding = [2, 1, 2, 1, 2]
pool = [None, 2, None, 2, 2]
return Encoder(dim_in, dim_z, channels_enc, ff_shape, kernel_enc, stride, padding, pool, conv_activation=conv_activation, ff_activation=ff_activation)
def get_bounce_decoder(dim_in, dim_out):
channels_dec = [6, 32, 32, 16, dim_out[0]]
ff_shape = [128, 128, 128]
conv_activation = torch.nn.ReLU()
ff_activation = torch.nn.ReLU()
n_channels = len(channels_dec) - 1
kernel_dec = [5, 3, 5, 3, 5]
stride = [1, 1, 1, 1, 2]
padding = [2, 1, 2, 1, 2]
return Decoder(dim_in, dim_out, channels_dec, ff_shape, kernel_dec, stride, padding, ff_activation=ff_activation, conv_activation=conv_activation)
def get_bounce_transition(dim_z, dim_u):
nn_width = 32
trans = nn.Sequential(
nn.Linear(dim_z, nn_width),
nn.BatchNorm1d(nn_width),
nn.ReLU(),
nn.Linear(nn_width, nn_width),
nn.BatchNorm1d(nn_width),
nn.ReLU(),
nn.Linear(nn_width, dim_z*2)
)
return Transition(trans, dim_z, dim_u)
def get_bounce_linear_transition(dim_z, dim_u, low_rank=True):
A = torch.nn.Parameter(2. * (torch.randn(dim_z, dim_z) - .5))
r = torch.nn.Parameter(2. * (torch.randn(dim_z) - .5))
v = torch.nn.Parameter(2. * (torch.randn(dim_z) - .5))
B = torch.nn.Parameter(2. * (torch.randn(dim_z, dim_u) - .5))
o = torch.nn.Parameter(2. * (torch.randn(dim_z, 1) - .5))
return LinearTransition(dim_z, dim_u, r, v, A, B, o, low_rank=low_rank)
def get_bounce_pwa_transition(num_modes, dim_z, dim_u, low_rank=True):
mode_classifier = nn.Linear(dim_z, num_modes)
As = torch.nn.ParameterList()
rs = torch.nn.ParameterList()
vs = torch.nn.ParameterList()
Bs = torch.nn.ParameterList()
os = torch.nn.ParameterList()
for mode in range(num_modes):
As.append(torch.nn.Parameter(2. * (torch.randn(dim_z, dim_z) - .5)))
rs.append(torch.nn.Parameter(2. * (torch.randn(dim_z) - .5)))
vs.append(torch.nn.Parameter(2. * (torch.randn(dim_z) - .5)))
Bs.append(torch.nn.Parameter(2. * (torch.randn(dim_z, dim_u) - .5)))
os.append(torch.nn.Parameter(2. * (torch.randn(dim_z, 1) - .5)))
return PWATransition(dim_z, dim_u, mode_classifier, rs, vs, As, Bs, os, low_rank=low_rank)
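# Usage sketch (added; the shapes below are assumptions, not values taken from
# this repository): dim_in/dim_out are channel-first image shapes and dim_z /
# dim_u are the latent and control dimensions expected by the builders above.
def _build_bounce_modules(dim_in=(6, 64, 64), dim_z=8, dim_u=2, num_modes=3):
    encoder = get_bounce_encoder(dim_in, dim_z)
    decoder = get_bounce_decoder(dim_z, dim_in)
    transition = get_bounce_pwa_transition(num_modes, dim_z, dim_u)
    return encoder, decoder, transition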
|
py | 1a4300af60413f6813be0e65dcab0f52a27a067f | # ----------------------------------------------------------------------
# |
# | IntTypeInfo_UnitTest.py
# |
# | David Brownell <[email protected]>
# | 2018-04-23 09:31:34
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Unit test for IntTypeInfo.py."""
import os
import sys
import unittest
import CommonEnvironment
from CommonEnvironment.TypeInfo.FundamentalTypes.IntTypeInfo import IntTypeInfo
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
class StandardSuite(unittest.TestCase):
# ----------------------------------------------------------------------
def test_Default(self):
iti = IntTypeInfo()
self.assertEqual(iti.Desc, "Integer")
self.assertEqual(iti.ConstraintsDesc, '')
self.assertEqual(iti.ExpectedType, int)
self.assertTrue(iti.IsValidItem(20))
self.assertFalse(iti.IsValidItem("20"))
# ----------------------------------------------------------------------
def test_Min(self):
iti = IntTypeInfo(-10)
self.assertEqual(iti.Desc, "Integer")
self.assertEqual(iti.ConstraintsDesc, "Value must be >= -10")
self.assertEqual(iti.ExpectedType, int)
self.assertTrue(iti.IsValidItem(20))
self.assertFalse(iti.IsValidItem(-20))
# ----------------------------------------------------------------------
def test_Max(self):
iti = IntTypeInfo(max=20)
self.assertEqual(iti.Desc, "Integer")
self.assertEqual(iti.ConstraintsDesc, "Value must be <= 20")
self.assertEqual(iti.ExpectedType, int)
self.assertTrue(iti.IsValidItem(20))
self.assertFalse(iti.IsValidItem(21))
# ----------------------------------------------------------------------
def test_MinMax(self):
iti = IntTypeInfo(-10, 20)
self.assertEqual(iti.Desc, "Integer")
self.assertEqual(iti.ConstraintsDesc, "Value must be >= -10, <= 20")
self.assertEqual(iti.ExpectedType, int)
self.assertTrue(iti.IsValidItem(20))
self.assertTrue(iti.IsValidItem(-10))
self.assertFalse(iti.IsValidItem(-20))
self.assertFalse(iti.IsValidItem(21))
# ----------------------------------------------------------------------
def test_Bytes(self):
self.assertEqual(IntTypeInfo(bytes=1).Min, -128)
self.assertEqual(IntTypeInfo(bytes=1).Max, 127)
self.assertEqual(IntTypeInfo(bytes=1, unsigned=True).Min, 0)
self.assertEqual(IntTypeInfo(bytes=1, unsigned=True).Max, 255)
self.assertEqual(IntTypeInfo(bytes=2).Min, -32768)
self.assertEqual(IntTypeInfo(bytes=2).Max, 32767)
self.assertEqual(IntTypeInfo(bytes=2, unsigned=True).Min, 0)
self.assertEqual(IntTypeInfo(bytes=2, unsigned=True).Max, 65535)
self.assertEqual(IntTypeInfo(bytes=4).Min, -2147483648)
self.assertEqual(IntTypeInfo(bytes=4).Max, 2147483647)
self.assertEqual(IntTypeInfo(bytes=4, unsigned=True).Min, 0)
self.assertEqual(IntTypeInfo(bytes=4, unsigned=True).Max, 4294967295)
self.assertEqual(IntTypeInfo(bytes=8).Min, -9223372036854775808)
self.assertEqual(IntTypeInfo(bytes=8).Max, 9223372036854775807)
self.assertEqual(IntTypeInfo(bytes=8, unsigned=True).Min, 0)
self.assertEqual(IntTypeInfo(bytes=8, unsigned=True).Max, 18446744073709551615)
# ----------------------------------------------------------------------
def test_ConstructErrors(self):
self.assertRaises(Exception, lambda: IntTypeInfo(20, 10))
self.assertRaises(Exception, lambda: IntTypeInfo(bytes=3))
self.assertRaises(Exception, lambda: IntTypeInfo(min=0, max=1000, bytes=1))
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
try: sys.exit(unittest.main(verbosity=2))
except KeyboardInterrupt: pass
|
py | 1a430188b6587734a3986106d5ce8ceb4379f42b | import matplotlib.pyplot as plt
from celluloid import Camera
import sys
fig = plt.figure()
camera = Camera(fig)
infile = sys.argv[1]
with open(infile, "r") as f:
for line in f.readlines():
plt.plot([float(i.strip()) for i in line.strip()[1:-1].split(",")], c="b")
camera.snap()
animation = camera.animate()
plt.show()
if len(sys.argv) == 3:
animation.save(sys.argv[2])
print(f"animation saved to {sys.argv[2]}")
|
py | 1a4302d4a12fd2a4545876809a00faf71ed6af0d | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=wildcard-import,unused-wildcard-import
"""Compat shim for backwards compatability with qiskit.util."""
# The 'qiskit._util' module is deprecated and has been renamed
# 'qiskit.util'. Please update your imports as 'qiskit._util'
# will be removed in Qiskit Terra 0.9.
from qiskit.util import *
|
py | 1a43034b9209cf4657333fed990df0923e24922e | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
import torch.nn.functional as F
import time
import numpy as np
from torch import nn
import sys
# This might convert the output of the head to be usable in FPN or further layers
def unmoldDetections(config, camera, detections, detection_masks, depth_np, unmold_masks=True, debug=False):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)]
mrcnn_mask: [N, height, width, num_classes]
image_shape: [height, width, depth] Original size of the image before resizing
window: [y1, x1, y2, x2] Box in the image where the real image is
excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
if config.GLOBAL_MASK:
masks = detection_masks[torch.arange(len(detection_masks)).cuda().long(), 0, :, :]
else:
masks = detection_masks[torch.arange(len(detection_masks)).cuda().long(), detections[:, 4].long(), :, :]
pass
final_masks = []
for detectionIndex in range(len(detections)):
box = detections[detectionIndex][:4].long()
if (box[2] - box[0]) * (box[3] - box[1]) <= 0:
continue
mask = masks[detectionIndex]
mask = mask.unsqueeze(0).unsqueeze(0)
mask = F.upsample(mask, size=(box[2] - box[0], box[3] - box[1]), mode='bilinear')
mask = mask.squeeze(0).squeeze(0)
final_mask = torch.zeros(config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM).cuda()
final_mask[box[0]:box[2], box[1]:box[3]] = mask
final_masks.append(final_mask)
continue
final_masks = torch.stack(final_masks, dim=0)
if config.NUM_PARAMETER_CHANNELS > 0:
## We could potentially predict depth and/or normals for each instance (not being used)
parameters_array = detection_masks[torch.arange(len(detection_masks)).cuda().long(), -config.NUM_PARAMETER_CHANNELS:, :, :]
final_parameters_array = []
for detectionIndex in range(len(detections)):
box = detections[detectionIndex][:4].long()
if (box[2] - box[0]) * (box[3] - box[1]) <= 0:
continue
parameters = F.upsample(parameters_array[detectionIndex].unsqueeze(0), size=(box[2] - box[0], box[3] - box[1]), mode='bilinear').squeeze(0)
final_parameters = torch.zeros(config.NUM_PARAMETER_CHANNELS, config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM).cuda()
final_parameters[:, box[0]:box[2], box[1]:box[3]] = parameters
final_parameters_array.append(final_parameters)
continue
final_parameters = torch.stack(final_parameters_array, dim=0)
final_masks = torch.cat([final_masks.unsqueeze(1), final_parameters], dim=1)
pass
masks = final_masks
if 'normal' in config.ANCHOR_TYPE:
## Compute offset based normal prediction and depthmap prediction
ranges = config.getRanges(camera).transpose(1, 2).transpose(0, 1)
zeros = torch.zeros(3, (config.IMAGE_MAX_DIM - config.IMAGE_MIN_DIM) // 2, config.IMAGE_MAX_DIM).cuda()
ranges = torch.cat([zeros, ranges, zeros], dim=1)
if config.NUM_PARAMETER_CHANNELS == 4:
## If we predict depthmap and normal map for each instance, we compute normals again (not used)
masks_cropped = masks[:, 0:1, 80:560]
mask_sum = masks_cropped.sum(-1).sum(-1)
plane_normals = (masks[:, 2:5, 80:560] * masks_cropped).sum(-1).sum(-1) / mask_sum
plane_normals = plane_normals / torch.clamp(torch.norm(plane_normals, dim=-1, keepdim=True), min=1e-4)
XYZ_np_cropped = (ranges * masks[:, 1:2])[:, :, 80:560]
offsets = ((plane_normals.view(-1, 3, 1, 1) * XYZ_np_cropped).sum(1, keepdim=True) * masks_cropped).sum(-1).sum(-1) / mask_sum
plane_parameters = plane_normals * offsets.view((-1, 1))
masks = masks[:, 0]
else:
if config.NUM_PARAMETER_CHANNELS > 0:
## If we predict depthmap independently for each instance, we use the individual depthmap instead of the global depth map (not used)
if config.OCCLUSION:
XYZ_np = ranges * depth_np
XYZ_np_cropped = XYZ_np[:, 80:560]
masks_cropped = masks[:, 1, 80:560]
masks = masks[:, 0]
else:
XYZ_np_cropped = (ranges * masks[:, 1:2])[:, :, 80:560]
masks = masks[:, 0]
masks_cropped = masks[:, 80:560]
pass
else:
## We use the global depthmap prediction to compute plane offsets
XYZ_np = ranges * depth_np
XYZ_np_cropped = XYZ_np[:, 80:560]
masks_cropped = masks[:, 80:560]
pass
if config.FITTING_TYPE % 2 == 1:
## We fit all plane parameters using depthmap prediction (not used)
A = masks_cropped.unsqueeze(1) * XYZ_np_cropped
b = masks_cropped
Ab = (A * b.unsqueeze(1)).sum(-1).sum(-1)
AA = (A.unsqueeze(2) * A.unsqueeze(1)).sum(-1).sum(-1)
plane_parameters = torch.stack([torch.matmul(torch.inverse(AA[planeIndex]), Ab[planeIndex]) for planeIndex in range(len(AA))], dim=0)
plane_offsets = torch.norm(plane_parameters, dim=-1, keepdim=True)
plane_parameters = plane_parameters / torch.clamp(torch.pow(plane_offsets, 2), 1e-4)
else:
## We compute only plane offset using depthmap prediction
plane_parameters = detections[:, 6:9]
plane_normals = plane_parameters / torch.clamp(torch.norm(plane_parameters, dim=-1, keepdim=True), 1e-4)
offsets = ((plane_normals.view(-1, 3, 1, 1) * XYZ_np_cropped).sum(1) * masks_cropped).sum(-1).sum(-1) / torch.clamp(masks_cropped.sum(-1).sum(-1), min=1e-4)
plane_parameters = plane_normals * offsets.view((-1, 1))
pass
pass
detections = torch.cat([detections[:, :6], plane_parameters], dim=-1)
pass
return detections, masks
def planeXYZModule(ranges, planes, width, height, max_depth=10):
"""Compute plane XYZ from plane parameters
ranges: K^(-1)x
planes: plane parameters
Returns:
plane depthmaps
"""
planeOffsets = torch.norm(planes, dim=-1, keepdim=True)
planeNormals = planes / torch.clamp(planeOffsets, min=1e-4)
normalXYZ = torch.matmul(ranges, planeNormals.transpose(0, 1))
normalXYZ[normalXYZ == 0] = 1e-4
planeDepths = planeOffsets.squeeze(-1) / normalXYZ
planeDepths = torch.clamp(planeDepths, min=0, max=max_depth)
return planeDepths.unsqueeze(-1) * ranges.unsqueeze(2)
def planeDepthsModule(ranges, planes, width, height, max_depth=10):
"""Compute coordinate maps from plane parameters
ranges: K^(-1)x
planes: plane parameters
Returns:
plane coordinate maps
"""
planeOffsets = torch.norm(planes, dim=-1, keepdim=True)
planeNormals = planes / torch.clamp(planeOffsets, min=1e-4)
normalXYZ = torch.matmul(ranges, planeNormals.transpose(0, 1))
normalXYZ[normalXYZ == 0] = 1e-4
planeDepths = planeOffsets.squeeze(-1) / normalXYZ
if max_depth > 0:
planeDepths = torch.clamp(planeDepths, min=0, max=max_depth)
pass
return planeDepths
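# Added illustration: for a plane parameterized as p = n_hat * d (unit normal
# scaled by offset) and a camera ray r = K^-1 x, the depth along the ray solves
# depth * (r . n_hat) = d, i.e. depth = d / (r . n_hat) -- exactly the
# planeOffsets / normalXYZ division above. For example, a plane p = (0, 0.5, 0)
# seen along the ray r = (0, 1, 0) lies at depth 0.5 / 1.0 = 0.5.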
def warpModuleDepth(config, camera, depth_1, features_2, extrinsics_1, extrinsics_2, width, height):
"""Warp one feature map to another view given camera pose and depth"""
padding = (width - height) // 2
XYZ_1 = config.getRanges(camera) * depth_1[padding:-padding].unsqueeze(-1)
warped_features, valid_mask = warpModuleXYZ(config, camera, XYZ_1.unsqueeze(2), features_2, extrinsics_1, extrinsics_2, width, height)
return warped_features.squeeze(0), valid_mask
def warpModuleXYZ(config, camera, XYZ_1, features_2, extrinsics_1, extrinsics_2, width, height):
"""Warp one feature map to another view given camera pose and XYZ"""
XYZ_shape = XYZ_1.shape
numPlanes = int(XYZ_1.shape[2])
XYZ_1 = XYZ_1.view((-1, 3))
XYZ_2 = torch.matmul(torch.matmul(torch.cat([XYZ_1, torch.ones((len(XYZ_1), 1)).cuda()], dim=-1), extrinsics_1.inverse().transpose(0, 1)), extrinsics_2.transpose(0, 1))
validMask = XYZ_2[:, 1] > 1e-4
U = (XYZ_2[:, 0] / torch.clamp(XYZ_2[:, 1], min=1e-4) * camera[0] + camera[2]) / camera[4] * 2 - 1
V = (-XYZ_2[:, 2] / torch.clamp(XYZ_2[:, 1], min=1e-4) * camera[1] + camera[3]) / camera[5] * 2 - 1
padding = (width - height) // 2
grids = torch.stack([U, V], dim=-1)
validMask = (validMask) & (U >= -1) & (U <= 1) & (V >= -1) & (V <= 1)
warped_features = F.grid_sample(features_2[:, :, padding:-padding], grids.unsqueeze(1).unsqueeze(0))
numFeatureChannels = int(features_2.shape[1])
warped_features = warped_features.view((numFeatureChannels, height, width, numPlanes)).transpose(2, 3).transpose(1, 2).transpose(0, 1).contiguous().view((-1, int(features_2.shape[1]), height, width))
zeros = torch.zeros((numPlanes, numFeatureChannels, (width - height) // 2, width)).cuda()
warped_features = torch.cat([zeros, warped_features, zeros], dim=2)
validMask = validMask.view((numPlanes, height, width))
validMask = torch.cat([zeros[:, 1], validMask.float(), zeros[:, 1]], dim=1)
return warped_features, validMask
def calcXYZModule(config, camera, detections, masks, depth_np, return_individual=False, debug_type=0):
"""Compute a global coordinate map from plane detections"""
ranges = config.getRanges(camera)
ranges_ori = ranges
zeros = torch.zeros(3, (config.IMAGE_MAX_DIM - config.IMAGE_MIN_DIM) // 2, config.IMAGE_MAX_DIM).cuda()
ranges = torch.cat([zeros, ranges.transpose(1, 2).transpose(0, 1), zeros], dim=1)
XYZ_np = ranges * depth_np
if len(detections) == 0:
detection_mask = torch.zeros((config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM)).cuda()
if return_individual:
return XYZ_np, detection_mask, []
else:
return XYZ_np, detection_mask
pass
plane_parameters = detections[:, 6:9]
XYZ = torch.ones((3, config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM)).cuda() * 10
depthMask = torch.zeros((config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM)).cuda()
planeXYZ = planeXYZModule(ranges_ori, plane_parameters, width=config.IMAGE_MAX_DIM, height=config.IMAGE_MIN_DIM)
planeXYZ = planeXYZ.transpose(2, 3).transpose(1, 2).transpose(0, 1)
zeros = torch.zeros(3, (config.IMAGE_MAX_DIM - config.IMAGE_MIN_DIM) // 2, config.IMAGE_MAX_DIM, int(planeXYZ.shape[-1])).cuda()
planeXYZ = torch.cat([zeros, planeXYZ, zeros], dim=1)
one_hot = True
if one_hot:
for detectionIndex in range(len(detections)):
mask = masks[detectionIndex]
with torch.no_grad():
mask_binary = torch.round(mask)
pass
if config.FITTING_TYPE >= 2:
if (torch.norm(planeXYZ[:, :, :, detectionIndex] - XYZ_np, dim=0) * mask_binary).sum() / torch.clamp(mask_binary.sum(), min=1e-4) > 0.5:
mask_binary = torch.zeros(mask_binary.shape).cuda()
pass
pass
mask_binary = mask_binary * (planeXYZ[1, :, :, detectionIndex] < XYZ[1]).float()
XYZ = planeXYZ[:, :, :, detectionIndex] * mask_binary + XYZ * (1 - mask_binary)
depthMask = torch.max(depthMask, mask)
continue
XYZ = XYZ * torch.round(depthMask) + XYZ_np * (1 - torch.round(depthMask))
else:
background_mask = torch.clamp(1 - masks.sum(0, keepdim=True), min=0)
all_masks = torch.cat([background_mask, masks], dim=0)
all_XYZ = torch.cat([XYZ_np.unsqueeze(-1), planeXYZ], dim=-1)
XYZ = (all_XYZ.transpose(2, 3).transpose(1, 2) * all_masks).sum(1)
depthMask = torch.ones(depthMask.shape).cuda()
pass
if debug_type == 2:
XYZ = XYZ_np
pass
if return_individual:
return XYZ, depthMask, planeXYZ.transpose(2, 3).transpose(1, 2).transpose(0, 1)
return XYZ, depthMask
class ConvBlock(torch.nn.Module):
"""The block consists of a convolution layer, an optional batch normalization layer, and a ReLU layer"""
def __init__(self, in_planes, out_planes, kernel_size=1, stride=1, padding=0, output_padding=0, mode='conv', use_bn=True):
super(ConvBlock, self).__init__()
self.use_bn = use_bn
if mode == 'conv':
self.conv = torch.nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=not self.use_bn)
elif mode == 'deconv':
self.conv = torch.nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, bias=not self.use_bn)
elif mode == 'upsample':
self.conv = torch.nn.Sequential(torch.nn.Upsample(scale_factor=stride, mode='nearest'), torch.nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=1, padding=padding, bias=not self.use_bn))
elif mode == 'conv_3d':
self.conv = torch.nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=not self.use_bn)
elif mode == 'deconv_3d':
self.conv = torch.nn.ConvTranspose3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, bias=not self.use_bn)
else:
print('conv mode not supported', mode)
exit(1)
pass
if '3d' not in mode:
self.bn = torch.nn.BatchNorm2d(out_planes)
else:
self.bn = torch.nn.BatchNorm3d(out_planes)
pass
self.relu = torch.nn.ReLU(inplace=True)
return
def forward(self, inp):
if self.use_bn:
return self.relu(self.bn(self.conv(inp)))
else:
return self.relu(self.conv(inp))
class LinearBlock(torch.nn.Module):
"""The block consists of a linear layer and a ReLU layer"""
def __init__(self, in_planes, out_planes):
super(LinearBlock, self).__init__()
self.linear = torch.nn.Linear(in_planes, out_planes)
self.relu = torch.nn.ReLU(inplace=True)
return
def forward(self, inp):
return self.relu(self.linear(inp))
def l2NormLossMask(pred, gt, mask, dim):
"""L2 loss with a mask"""
return torch.sum(torch.norm(pred - gt, dim=dim) * mask) / torch.clamp(mask.sum(), min=1)
def l2LossMask(pred, gt, mask):
"""MSE with a mask"""
return torch.sum(torch.pow(pred - gt, 2) * mask) / torch.clamp(mask.sum(), min=1)
def l1LossMask(pred, gt, mask):
"""L1 loss with a mask"""
return torch.sum(torch.abs(pred - gt) * mask) / torch.clamp(mask.sum(), min=1)
def invertDepth(depth, inverse=False):
"""Invert depth or not"""
if inverse:
valid_mask = (depth > 1e-4).float()
depth_inv = 1.0 / torch.clamp(depth, min=1e-4)
return depth_inv * valid_mask
else:
return depth
class PlaneToDepth(torch.nn.Module):
def __init__(self, normalized_K = True, normalized_flow = True, inverse_depth = True, W = 64, H = 48):
super(PlaneToDepth, self).__init__()
self.normalized_K = normalized_K
self.normalized_flow = normalized_flow
self.inverse_depth = inverse_depth
with torch.no_grad():
self.URANGE = ((torch.arange(W).float() + 0.5) / W).cuda().view((1, -1)).repeat(H, 1)
self.VRANGE = ((torch.arange(H).float() + 0.5) / H).cuda().view((-1, 1)).repeat(1, W)
self.ONES = torch.ones((H, W)).cuda()
pass
def forward(self, intrinsics, plane, return_XYZ=False):
"""
        :param intrinsics: camera intrinsics (fx, fy, cx, cy, width, height)
        :param plane: plane parameters (normals scaled by plane offsets)
        :param return_XYZ: if True, also return the per-pixel XYZ coordinate map
        :return: plane depth maps, num_planes x height x width
"""
with torch.no_grad():
urange = (self.URANGE * intrinsics[4] - intrinsics[2]) / intrinsics[0]
vrange = (self.VRANGE * intrinsics[5] - intrinsics[3]) / intrinsics[1]
ranges = torch.stack([urange,
self.ONES,
-vrange], -1)
pass
planeOffset = torch.norm(plane, dim=-1)
planeNormal = plane / torch.clamp(planeOffset.unsqueeze(-1), min=1e-4)
depth = planeOffset / torch.clamp(torch.sum(ranges.unsqueeze(-2) * planeNormal, dim=-1), min=1e-4)
depth = torch.clamp(depth, min=0, max=10)
if self.inverse_depth:
depth = invertDepth(depth)
depth = depth.transpose(1, 2).transpose(0, 1)
if return_XYZ:
return depth, depth.unsqueeze(-1) * ranges
return depth
class PlaneToDepthLayer(torch.nn.Module):
def __init__(self, normalized_K = False, normalized_flow = True, inverse_depth = True):
super(PlaneToDepthLayer, self).__init__()
self.plane_to_depth = PlaneToDepth(normalized_K = normalized_K,
normalized_flow = normalized_flow,
inverse_depth = inverse_depth)
def forward(self, intrinsics, plane, mask):
"""
        :param intrinsics: batch of camera intrinsics, n x 6
        :param plane: batch of plane parameters
        :param mask: batch of plane masks (not used by the depth computation)
        :return: batch of plane depth maps
"""
batch_size = plane.size(0)
depths = ()
for i in range(batch_size):
            depth = self.plane_to_depth(intrinsics[i], plane[i])  # mask[i] is not an argument of PlaneToDepth.forward; passing it would be misread as return_XYZ
depths += (depth, )
depth = torch.stack(depths, 0)
return depth
|
py | 1a4303d00d17573c5e1afcbfe763902d17153727 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Deferred initialization of tf.Modules (distributions, bijectors, etc.)."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.util import special_methods
class DeferredModule(tf.Module, special_methods.SpecialMethods):
"""Wrapper to defer initialization of a `tf.Module` instance.
`DeferredModule` is a general-purpose mechanism for creating objects that are
'tape safe', meaning that computation occurs only when an instance
method is called, not at construction. This ensures that method calls inside
of a `tf.GradientTape` context will produce gradients to any underlying
`tf.Variable`s.
### Examples
TFP's built-in Distributions and Bijectors are tape-safe by contract, but
this does not extend to cases where computation is required
to construct an object's parameters prior to initialization.
For example, suppose we want to construct a Gamma
distribution with a given mean and variance. In a naive implementation,
we would convert these to the Gamma's native `concentration` and
`rate` parameters when the distribution is constructed. Any future method
calls would produce gradients to `concentration` and `rate`, but not to the
underlying mean and variance:
```python
mean, variance = tf.Variable(3.2), tf.Variable(9.1)
dist = tfd.Gamma(concentration=mean**2 / variance,
rate=mean / variance)
with tf.GradientTape() as tape:
lp = dist.log_prob(5.0)
grads = tape.gradient(lp, [mean, variance])
# ==> `grads` are `[None, None]` !! :-(
```
To preserve the gradients, we can defer the parameter transformation using
`DeferredModule`. The resulting object behaves just like a
`tfd.Gamma` instance, however, instead of running the `Gamma` constructor just
once, it internally applies the parameter transformation and constructs a
new, temporary instance of `tfd.Gamma` on *every method invocation*.
This ensures that all operations needed to compute a method's return value
from any underlying variables are performed every time the method is invoked.
A surrounding `GradientTape` context will therefore be able to trace the full
computation.
```python
def gamma_from_mean_and_variance(mean, variance, **kwargs):
rate = mean / variance
return tfd.Gamma(concentration=mean * rate, rate=rate, **kwargs)
mean, variance = tf.Variable(3.2), tf.Variable(9.1)
deferred_dist = tfp.experimental.util.DeferredModule(
build_fn=gamma_from_mean_and_variance,
mean=mean, # May be passed by position or by name.
variance=variance)
with tf.GradientTape() as tape:
lp = deferred_dist.log_prob(5.0)
grads = tape.gradient(lp, [mean, variance])
# ==> `grads` are defined!
```
Note that we could have achieved a similar effect by using
`tfp.util.DeferredTensor` to individually defer the `concentration` and `rate`
parameters. However, this would have been significantly more verbose, and
would not share any computation between the two parameter transformations.
In general, `DeferredTensor` is often idiomatic for simple transformations of
a single value, while `DeferredModule` may be preferred for transformations
that operate on multiple values and/or contain multiple steps.
### Caveats
Objects derived from a `DeferredModule` are no longer deferred, so
they will not preserve gradients. For example, slicing into a deferred
Distribution yields a new, concrete Distribution instance:
```python
def normal_from_log_scale(scaled_loc, log_scale):
return tfd.Normal(loc=5 * scaled_loc, scale=tf.exp(log_scale))
dist = tfp.experimental.util.DeferredModule(
build_fn=normal_from_log_scale,
scaled_loc=tf.Variable([1., 2., 3.]),
log_scale=tf.Variable([1., 1., 1.]))
dist.batch_shape # ==> [3]
len(dist.trainable_variables) # ==> 2
slice = dist[:2] # Instantiates a new, non-deferred Distribution.
slice.batch_shape # ==> [2]
len(slice.trainable_variables) # ==> 0 (!)
# If needed, we could defer the slice with another layer of wrapping.
deferred_slice = tfp.experimental.util.DeferredModule(
build_fn=lambda d: d[:2],
d=dist)
len(deferred_slice.trainable_variables) # ==> 2
```
"""
def __init__(self, build_fn, *args, also_track=None, **kwargs):
"""Defers initialization of an object with transformed arguments.
Args:
build_fn: Python callable specifying a deferred transformation of the
provided arguments. This must have signature
`module = build_fn(*args, **kwargs)`. The return value `module` is an
instance of `tf.Module`.
*args: Optional positional arguments to `build_fn`.
also_track: Optional instance or structure of instances of `tf.Variable`
and/or `tf.Module`, containing any additional trainable variables that
the `build_fn` may access beyond the given `args` and `kwargs`. This
ensures that such variables will be correctly tracked in
`self.trainable_variables`.
Default value: `None`.
**kwargs: Optional keyword arguments to `build_fn`.
"""
self._build_fn = build_fn
self._param_args = args
self._param_kwargs = kwargs
self._deferred_module_also_track = also_track
# In order for DeferredModule to work as a tf.Module, we need to ensure that
# attrs used by tf.Module are handled directly, rather than being forwarded
# to the inner class.
self._module_attrs = set(dir(tf.Module()))
super(DeferredModule, self).__init__()
def __action__(self, fn, *args, **kwargs):
kwargs.pop('_action_name', None)
return fn(self._build_module(), *args, **kwargs)
def _build_module(self):
return self._build_fn(*self._param_args, **self._param_kwargs)
def __getattr__(self, attr, **kwargs):
if attr in ('_build_fn',
'_param_args',
'_param_kwargs',
'_module_attrs',
'_deferred_module_also_track'):
raise AttributeError()
if attr in self._module_attrs:
raise AttributeError()
return super(DeferredModule, self).__getattr__(attr, **kwargs)
|
py | 1a4305e3b1feb05a4d0df9fa99f3a1346efefc9f | import itertools
import os
import re
import subprocess
import sys
import requests
import shutil
from shutil import make_archive
from zipfile import ZipFile
# helper functions
def replace(string, substitutions):
substrings = sorted(substitutions, key=len, reverse=True)
regex = re.compile('|'.join(map(re.escape, substrings)))
    return regex.sub(lambda match: substitutions[match.group(0)], string)
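# Example (added illustration):
#   replace("HOST:PORT", {"HOST": "vpn1.example.com", "PORT": "443"})
#   -> "vpn1.example.com:443"
# Longer placeholders are sorted first so overlapping keys such as "HOST" and
# "HOSTNAME" are substituted predictably.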
def upload_file(file, connection):
ssh_user, host, path = connection
destination = f'{ssh_user}@{host}:{path}'
p = subprocess.Popen(["scp", f'{str(file)}', destination])
sts = os.waitpid(p.pid, 0)
print(sts)
def write_config_to_file(content, filepath):
# Writing to file
with open(filepath, "w") as file:
# Writing data to a file
file.write(content)
def make_archive(source, destination, format='zip'):
base, name = os.path.split(destination)
archive_from = os.path.dirname(source)
archive_to = os.path.basename(source.strip(os.sep))
print(f'Source: {source}\nDestination: {destination}\nArchive From: {archive_from}\nArchive To: {archive_to}\n')
shutil.make_archive(name, format, archive_from, archive_to)
shutil.move('%s.%s' % (name, format), destination)
############### Begin Config ###############################
vpn_servers_dict = dict(
softy1={"hostname": "softy1.example.com", "ip": "", "country": "NL", "city": "AMSTERDAM"},
softy3={"hostname": "softy3.example.com", "ip": "", "country": "US", "city": "MIAMI"},
softy5={"hostname": "softy5.example.com", "ip": "", "country": "US", "city": "New Jersey"},
softy15={"hostname": "softy15.example.com", "ip": "", "country": "NL", "city": "AMSTERDAM"},
softy16={"hostname": "softy16.example.com", "ip": "", "country": "CA", "city": "Toronto"})
output_folder_name = os.path.expanduser("~/Desktop/softy_test_generation")
viscosity_profiles_folder_name = 'vpn.wts.com.config_files_visc'
openvpn_profiles_folder_name = 'vpn.wts.com.config_files'
softethervpn_profiles_folder_name = 'SoftEtherVPN_Profiles'
# VPN protocols
protocols = ['udp', 'tcp']
# VPN listener ports
listener_ports = ['443', '992', '1194']
# Typically the signed cert for the VPN server, or the CA cert if self-signed
# ca_crt = """
# -----BEGIN CERTIFICATE-----
# SOME BIGASS CERT WOULD HERE
# -----END CERTIFICATE-----
# """
ca_crt = """
"""
# Dummy client cert and key for compatibility
client_cert = """
-----BEGIN CERTIFICATE-----
MIIF1jCCA76gAwIBAgIBADANBgkqhkiG9w0BAQsFADBqMR0wGwYDVQQDDBQxNzUx
NjgxNzkxNzcyNTg2MzMxMDEdMBsGA1UECgwUMTc1MTY4MTc5MTc3MjU4NjMzMTAx
HTAbBgNVBAsMFDE3NTE2ODE3OTE3NzI1ODYzMzEwMQswCQYDVQQGEwJVUzAeFw0x
ODA5MDExNDMxMDdaFw0zNzEyMzExNDMxMDdaMGoxHTAbBgNVBAMMFDE3NTE2ODE3
OTE3NzI1ODYzMzEwMR0wGwYDVQQKDBQxNzUxNjgxNzkxNzcyNTg2MzMxMDEdMBsG
A1UECwwUMTc1MTY4MTc5MTc3MjU4NjMzMTAxCzAJBgNVBAYTAlVTMIICIjANBgkq
hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0uqW569ugET53flkstDkumb2HG1yqRQk
NbUr8pbGHn46mEtnqPZd/UJJ7ykjfKxcnG/emfInbyfonrIYjY94lWgij6ZhaGaT
8CKhoiSmAXKMlrlP+fqj+Z4K4caknJqRiFMa1eC/3Nu/jfv8upo1ixo1HFTZ7T4G
P3Oh2Rqd05Zz+ZsgrEWPzBVMuJxDMrIZ1WXFJrGJf8vhRIZtl5+TYDi3x8G5VmqA
k8LtBRAZPHMnEpp5s2Zh1dl8SuwdpU7PD33yM9T39Lyy12MNOEGgZw/BdpFuLAk1
EwBHV6xS8MMibD3mXxd4CZuhDqvBJn9QgT6gjllpPWJ0Wi69qtHnU87Cw44lPBSz
pmWcKlip6AgNJBQIASA4kGZpir6opFSW0bi5xN1UQO68F3kZ7GLyClJVJo/xBU7G
M/2XmYjnqrRgDfsotPnzURPpgrC3oVHjP9jGmLj32sFuAYBXA15aljeISsmkSThQ
fcyXdGo+BT5KGVhFBr+2rkNOqNiWeINzE24JBlH/pJCEg3xjEG1kAbqJqeerl+kM
tNgCKZhARL4XFEl9j0FP87eV75N/dqPvm9R+jgvVLPDk94aktmoalWHjIwkWXMDY
0ClmawsGpgEoyJdQfqwoi8sDpMnC/kokgEwoDT2QbPzYIMnzIqKBlY8CWLy/qbIo
h3KOzcY9XbECAwEAAaOBhjCBgzAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB
9jBjBgNVHSUEXDBaBggrBgEFBQcDAQYIKwYBBQUHAwIGCCsGAQUFBwMDBggrBgEF
BQcDBAYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEFBQcDBwYIKwYBBQUHAwgGCCsG
AQUFBwMJMA0GCSqGSIb3DQEBCwUAA4ICAQAHSfAzAbOE8+1fz7o5mSIozmQbzKbg
z2nXKZTimhtVv91Npmdlchin3+t+OM+5CmyEBKJGO0JtMR2s4vGQ23OXgWU1lDeX
W9b9d+0oBGrhl/dQRvlKrCFUH6SoRJ9Go2AWtt91eUaK2Wb/39Ek4hcr+5U26H5u
erzHl1UGyGk34GCiUUK25IjiTk0YyHrcGn9mk9ZsX8pmgcgsOUxwnC66J+vEPl4q
mWqL29hhHSPL/y/ta1hg/DtGGIZJrCHLOFDB/kHXVbOoKsPVOwR5MKKeBtyX3so6
wAerrgnzfsZ52OhZTVet+82nN9pCNASEZVqCZHqS7QPtqUYQYMMpXNWdIOpFmgH9
bDGI6LeEApaxlODv7u0s61DugTbkNco/Yi+YpwkxnqXQCEG7jJbq+LKVSG7z9FIG
fAxT2jIiMy4a7fKH+n4hldiTbdtI4042WFE1LBbEABRu59gsy0r0kIHFTU+npjDO
AbSogPIze/jYtDXu8RpukujcF5abqCMxogiibil/xFq3wVujhe9/nRCioo7k29Ge
B1zmbU0PJQQk70Dtor7LyEnhlsQEjMR8Usod/GKyi2dUT9/osCAYPyvK3SaCWUUv
s1e/Ua39l57406nV/3p32c0GAhnL3cujl2tO/Y4eBUYt2X9Sk8BsH+Huo6+X6pIR
WUlv/XLFpC359g==
-----END CERTIFICATE-----
"""
client_key = """
-----BEGIN PRIVATE KEY-----
MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQDS6pbnr26ARPnd
+WSy0OS6ZvYcbXKpFCQ1tSvylsYefjqYS2eo9l39QknvKSN8rFycb96Z8idvJ+ie
shiNj3iVaCKPpmFoZpPwIqGiJKYBcoyWuU/5+qP5ngrhxqScmpGIUxrV4L/c27+N
+/y6mjWLGjUcVNntPgY/c6HZGp3TlnP5myCsRY/MFUy4nEMyshnVZcUmsYl/y+FE
hm2Xn5NgOLfHwblWaoCTwu0FEBk8cycSmnmzZmHV2XxK7B2lTs8PffIz1Pf0vLLX
Yw04QaBnD8F2kW4sCTUTAEdXrFLwwyJsPeZfF3gJm6EOq8Emf1CBPqCOWWk9YnRa
Lr2q0edTzsLDjiU8FLOmZZwqWKnoCA0kFAgBIDiQZmmKvqikVJbRuLnE3VRA7rwX
eRnsYvIKUlUmj/EFTsYz/ZeZiOeqtGAN+yi0+fNRE+mCsLehUeM/2MaYuPfawW4B
gFcDXlqWN4hKyaRJOFB9zJd0aj4FPkoZWEUGv7auQ06o2JZ4g3MTbgkGUf+kkISD
fGMQbWQBuomp56uX6Qy02AIpmEBEvhcUSX2PQU/zt5Xvk392o++b1H6OC9Us8OT3
hqS2ahqVYeMjCRZcwNjQKWZrCwamASjIl1B+rCiLywOkycL+SiSATCgNPZBs/Ngg
yfMiooGVjwJYvL+psiiHco7Nxj1dsQIDAQABAoICAFXgZmkXKOrZKtOSg8m6/CZZ
XcPdXF4zcTrc9XPkp+4qfzkbGq3VAhfoMapLGcPdeifH9N7BlgTQPwq+gPjCfdp3
d/r9R5P2kC2qLB2UxnK4bT3BXiruPm2YR939v3B0DuVu0PJcfEI0xx3Mh+6Cc2Kb
3RwYAFN0eZ7EOhXnnHNWQwpSe08pU49I7OAN3954XcRhl5BVoSKDpMj94wllU+BN
t6aB3jCtVITVTSROlbfjOvl8JClDenpT/yOSV8/C8tPf+AnaoMrpOfgwwGUzc1gD
NkZMmIsdhJqj9mhgJbUZ/p4L5gy5xYpD76PFkvNVyzWUhlLvXMY9nBpMBbXTmVfs
/mSUm4pj7t8E2GM3coZCspGAXMG+5oo7EsYX1tGEe6a0EI5Z7TLI9IJ018R2HlTf
Y0tToZx8wcqfxwCmazRo2wUq5G2cOGk0mBlkqqTvu7VIKfI9LFU6vC68KNDLvq2T
9WXjRHNAUlMZyTd2Q8fj6IOB9s8PaYnn/cjaz6icNgD8JqjznffREUU3WCTQ0b6N
cdfnn/+V1KotUvVnPZy+7m+2y3vsNjKvx913zNgaoHKyzj5z2zREWBR+wPWG1Mdj
ybcZHDwiHD9Pj5jFwVAGlAo4U4VtBZmVK3//j67pHul5IIsMjfoUJpvHaTRZAUyj
Z4rH05Btlz8Dg5LR0RgBAoIBAQDvIRzW5LRXyVOIbET+1mRr/3ak7bk6FE/KHDb4
5EJ7kuJX7iI2km+TsPYNkw9ncUbTEjE0D1g9CikaBdZVCIQJcG0LNcP4ZJTWdst2
0F6nyzNiIim/nAIt9KQaMzg4up8T6Rj3dl9gEQlO4ABtsIzR/1M2oeWFkNsPr0xM
SXv7c14lQnZYGUVEHknTPFvFHatIsBeRbw8zHNuplCOtixE7/N56W6LZNTXr3O7s
gw6puLkHiDS6qkQJKl1UfuCECFhELUIWTAbUm/zzV+LLzlcqNciTWeIuY38EvloB
y/ZUYcxN7NH7OiLzkYObPlOHkMpc8D/up84FQOOaH1SaVH4RAoIBAQDhy+zp1Ein
hz/VLCJyEAPiu3noE9ocelrrnRkIm5OWp1gHlJ7nuvru/HBEjgbeseS2opn1t9ia
LKQRLoVAa/XapEdtpn5Vz06N8ksxbA+kI75LcH0uaCQvEkZ8XQBn+vpz1eZUtXfm
2ojAhkhb7AGAb2Os9G/MRjNufiiymBeMbN+rBOlKnO2GkMLiUSWfzsRb0Z1zmCSS
l1EpPP3zvEMo9wiDxYdUZdBGCbF83cimNMM5oaSgjLoD5JObSBA7FpeZWCOd/Obr
bZYLDH7BXj5qif+NLQKj9lY8dJjr9r0e9TfZct+lt/eCa1bvzNWH8sYCGaLE+c7Y
2O/DYY8lp8WhAoIBADOawr2BR4X2VYeITe9s1ukTjUgUYTeucHWeVyKpJ8vBLAVr
x4hdW6TUuJS/WsCpCm68a0/fy9wIWExcXB+noc7jqzSTGsJ8+j26DziJyROO33zb
AIVwJmxCcjORQB3F8FR8pj2pFvYFVyvlXVJOmmUrI4sTrxN+6jddTircZNwjznpd
+GoUsgT8QFRMn0VPnMon7j4daHtQS/sxk+18qHB4po5jPiZ/vC41VH5H3h80VQHS
HAb4fYw4z0FPzCXSIzI7Thq/t22kaBcfrTrsQZVDXBCogg4evKeKaKQgnv0gydU7
OSltJ2PG0E7tSVtlHLanxjQ7lFM/6J43CqvvdfECggEASUIPgKIGXIxOEoy6NEWr
7REkcT6Xomu0OgODTr6jONrcfcEjeU26AnXWuvdVlUUkTnkc+JMIbKUVKhns08Tz
RFfOcO32yUJ0WyuEZ+mGfZu3LuS1SKwzKS6Fve2ypwnP3mtEyrEB0N2QRt6KdYBx
0EjTjxbTevQ/1ZaK/77GzSG5w9PZGQMnMWSgRitLyLieDqhIrGttWj5L79RBFKY9
J/pWQeKBkMljtIWKl1ehtQMjX/xo3EosQ/0SQuwzj+g5kV/+VlDqXvH0H2uTaIt6
NrjFN/mlhKr0ubKettgb7gJjd2KE21B/tkm7MBxGH1COG7pTjBL8oHBqAfsSJYZ0
YQKCAQB9KPikirwQdk/9ErcBoAC8LdwdEH3fA+uZaOPt9Xlwbg5c3zKezlfWq8Xh
sJpPym1a8BQGGeR/g7SLcEuJ8Bjqd4x+XbvFm21wI2VFJQ1qc9zvdkWB2T7iOuXD
idHjaW1WqcXy7RCzl18nvcglQ6UzLO5XOTAEKHqQ5euXNgoOtRWXUZ+F6aCVtfO3
+pMvFSRqMhOPxI0T7N1BUlPIm3TNKdYIlsjnffti5uxEDW/7nv6mx8L+9bromkd7
6dgQVe8brlZg7wkF3z03oY7asVvk9uYEuswh3ypz83NJNy6EK+qv5jQieOKqIoNR
bXQXJZcDGE13qAJownesrS5a6L/r
-----END PRIVATE KEY-----
"""
openvpn_config_template = f"""###############################################################################
# OpenVPN 2.0 Sample Configuration File
# for PacketiX VPN / SoftEther VPN Server
#
# !!! AUTO-GENERATED BY SOFTETHER VPN SERVER MANAGEMENT TOOL !!!
#
# !!! YOU HAVE TO REVIEW IT BEFORE USE AND MODIFY IT AS NECESSARY !!!
#
# This configuration file is auto-generated. You might use this config file
# in order to connect to the PacketiX VPN / SoftEther VPN Server.
# However, before you try it, you should review the descriptions of the file
# to determine the necessity to modify to suitable for your real environment.
# If necessary, you have to modify a little adequately on the file.
# For example, the IP address or the hostname as a destination VPN Server
# should be confirmed.
#
# Note that to use OpenVPN 2.0, you have to put the certification file of
# the destination VPN Server on the OpenVPN Client computer when you use this
# config file. Please refer the below descriptions carefully.
###############################################################################
# Specify the type of the layer of the VPN connection.
#
# To connect to the VPN Server as a "Remote-Access VPN Client PC",
# specify 'dev tun'. (Layer-3 IP Routing Mode)
#
# To connect to the VPN Server as a bridging equipment of "Site-to-Site VPN",
# specify 'dev tap'. (Layer-2 Ethernet Bridging Mode)
dev tun
###############################################################################
# Specify the underlying protocol beyond the Internet.
# Note that this setting must be correspond with the listening setting on
# the VPN Server.
#
# Specify either 'proto tcp' or 'proto udp'.
proto udp
###############################################################################
# The destination hostname / IP address, and port number of
# the target VPN Server.
#
# You have to specify as 'remote <HOSTNAME> <PORT>'. You can also
# specify the IP address instead of the hostname.
#
# Note that the auto-generated below hostname are a "auto-detected
# IP address" of the VPN Server. You have to confirm the correctness
# beforehand.
#
# When you want to connect to the VPN Server by using TCP protocol,
# the port number of the destination TCP port should be the same as one of
# the available TCP listeners on the VPN Server.
#
# When you use UDP protocol, the port number must same as the configuration
# setting of "OpenVPN Server Compatible Function" on the VPN Server.
# Note: The below hostname comes from the Dynamic DNS Client function
# which is running on the VPN Server. If you don't want to use
# the Dynamic DNS hostname, replace it to either IP address or
# other domain's hostname.
VPN_SERVER_REMOTES
###############################################################################
# The HTTP/HTTPS proxy setting.
#
# Only if you have to use the Internet via a proxy, uncomment the below
# two lines and specify the proxy address and the port number.
# In the case of using proxy-authentication, refer the OpenVPN manual.
;http-proxy-retry
;http-proxy [proxy server] [proxy port]
###############################################################################
# The encryption and authentication algorithm.
#
# Default setting is good. Modify it as you prefer.
# When you specify an unsupported algorithm, the error will occur.
#
# The supported algorithms are as follows:
# cipher: [NULL-CIPHER] NULL AES-128-CBC AES-192-CBC AES-256-CBC BF-CBC
# CAST-CBC CAST5-CBC DES-CBC DES-EDE-CBC DES-EDE3-CBC DESX-CBC
# RC2-40-CBC RC2-64-CBC RC2-CBC CAMELLIA-128-CBC CAMELLIA-192-CBC CAMELLIA-256-CBC
# auth: SHA SHA1 SHA256 SHA384 SHA512 MD5 MD4 RMD160
cipher AES-256-CBC
auth SHA384
###############################################################################
# Other parameters necessary to connect to the VPN Server.
#
# It is not recommended to modify it unless you have a particular need.
#block-outside-dns
resolv-retry infinite
nobind
persist-key
persist-tun
client
verb 3
auth-user-pass
###############################################################################
# The certificate file of the destination VPN Server.
#
# The CA certificate file is embedded in the inline format.
# You can replace this CA contents if necessary.
# Please note that if the server certificate is not a self-signed, you have to
# specify the signer's root certificate (CA) here.
<ca>{ca_crt}
</ca>
###############################################################################
# The client certificate file (dummy).
#
# In some implementations of OpenVPN Client software
# (for example: OpenVPN Client for iOS),
# a pair of client certificate and private key must be included on the
# configuration file due to the limitation of the client.
# So this sample configuration file has a dummy pair of client certificate
# and private key as follows.
<cert>{client_cert}
</cert>
<key>{client_key}
</key>
"""
viscosity_template = """#-- Config Auto Generated By Viscosity --#
#viscosity name VPN_CONNECTION_NAME
#viscosity autoreconnect true
#viscosity dns automatic
#viscosity usepeerdns true
#viscosity manageadapter true
#viscosity startonopen false
VPN_SERVER_REMOTES
dev tun
nobind
persist-key
persist-tun
pull
tls-client
auth-user-pass
ca ca.crt
cert cert.crt
key key.key
cipher AES-256-CBC
auth SHA384
resolv-retry infinite
dev-node {8FA040D2-FDB9-48ED-8C14-FD257DCD22E6}
"""
# Special note: AuthType for LDAP/RADIUS is "2"; if you're using a basic username and password you may need to change that
# in the template.
softethervpn_template = """# VPN Client VPN Connection Setting File
#
# This file is exported using the VPN Client Manager.
# The contents of this file can be edited using a text editor.
#
# When this file is imported to the Client Connection Manager
# it can be used immediately.
declare root
{
bool CheckServerCert false
uint64 CreateDateTime 0
uint64 LastConnectDateTime 0
bool StartupAccount false
uint64 UpdateDateTime 0
declare ClientAuth
{
uint AuthType 2
byte EncryptedPassword $
string Username $
}
declare ClientOption
{
string AccountName CONNECTION_NAME
uint AdditionalConnectionInterval 1
uint ConnectionDisconnectSpan 0
string DeviceName VPN
bool DisableQoS false
bool HalfConnection false
bool HideNicInfoWindow true
bool HideStatusWindow false
string Hostname HOSTNAME_PLACEHOLDER/tcp
string HubName VPN
uint MaxConnection 8
bool NoRoutingTracking false
bool NoTls1 false
bool NoUdpAcceleration false
uint NumRetry 4294967295
uint Port 443
uint PortUDP 0
string ProxyName $
byte ProxyPassword $
uint ProxyPort 0
uint ProxyType 0
string ProxyUsername $
bool RequireBridgeRoutingMode false
bool RequireMonitorMode false
uint RetryInterval 5
bool UseCompress false
bool UseEncrypt true
}
}
"""
############### END Config ###############################
def generate_multiline_str_from_list(items):
    return "\n".join(items)
def generate_openvpn_remote_combos(server_hosts, listener_ports, protocols):
# build a list of all remotes for hostname/ip and port/protocol combinations
    options = [server_hosts, listener_ports, protocols]
    combination = [p for p in itertools.product(*options)]
# print(combination)
server_remotes = []
for item in combination:
host, port, protocol = item
remote = f"remote {host} {port} {protocol}"
server_remotes.append(remote)
return server_remotes
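# Example (added illustration): with hosts ["vpn1.example.com", "203.0.113.10"],
# a single port ["443"] and protocols ["udp", "tcp"], the product above expands to
# four remotes: "remote vpn1.example.com 443 udp", "remote vpn1.example.com 443 tcp",
# "remote 203.0.113.10 443 udp" and "remote 203.0.113.10 443 tcp".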
def generate_openvpn_configs(compress=True):
for server in vpn_servers_dict:
hostname, ip, country, city = vpn_servers_dict[server].values()
# print(vpn_servers_dict[server])
filename = f"{hostname}_{country}_{city}.ovpn"
print(filename)
server_hosts = [hostname, ip]
server_remotes = generate_openvpn_remote_combos(server_hosts, listener_ports, protocols)
vpn_server_remotes = generate_multiline_str_from_list(server_remotes)
substitutions = {"VPN_SERVER_REMOTES": vpn_server_remotes}
content = replace(openvpn_config_template, substitutions)
ovpn_folder = os.path.join(output_folder_name, openvpn_profiles_folder_name)
path = os.path.join(ovpn_folder, filename)
if not os.path.exists(output_folder_name):
os.makedirs(output_folder_name, exist_ok=True)
if not os.path.exists(ovpn_folder):
os.makedirs(ovpn_folder, exist_ok=True)
write_config_to_file(content, path)
print(f'VPN Config written: {path}')
print()
if compress:
# Individually ZIP each connection into its own archive
make_archive(ovpn_folder, str(os.path.join(output_folder_name, f'{ovpn_folder}.zip')))
generate_openvpn_configs()
def generate_viscosity_openvpn_configs(compress=None):
for server in vpn_servers_dict:
hostname, ip, country, city = vpn_servers_dict[server].values()
connection_name = f"{hostname}_{country}_{city}"
print(connection_name)
server_hosts = [hostname, ip]
server_remotes = generate_openvpn_remote_combos(server_hosts, listener_ports, protocols)
vpn_server_remotes = generate_multiline_str_from_list(server_remotes)
substitutions = {"VPN_SERVER_REMOTES": vpn_server_remotes, "VPN_CONNECTION_NAME": connection_name}
content = replace(viscosity_template, substitutions)
visc_folder = os.path.join(output_folder_name, viscosity_profiles_folder_name)
config_folder_path = os.path.join(visc_folder, f'{connection_name}.visc')
paths = [output_folder_name, visc_folder, config_folder_path]
for path in paths:
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
# connection_name/config.conf
path = os.path.join(config_folder_path, 'config.conf')
write_config_to_file(content, path)
# connection_name/ca.crt
ca_crt_path = os.path.join(config_folder_path, 'ca.crt')
write_config_to_file(ca_crt, ca_crt_path)
# connection_name/cert.crt
        client_cert_path = os.path.join(config_folder_path, 'cert.crt')
        # NOTE: client_cert and client_key are assumed to be defined in the config section
        # alongside ca_crt, holding the client certificate and private key contents
        write_config_to_file(client_cert, client_cert_path)
        # connection_name/key.key
        client_key_path = os.path.join(config_folder_path, 'key.key')
        write_config_to_file(client_key, client_key_path)
print(f'VPN Config written: {config_folder_path}')
print()
if compress == 'single':
# Individually ZIP each connection into its own archive
make_archive(config_folder_path, str(f'{config_folder_path}.zip'))
if compress == 'all':
# Zip all Viscosity folders into a single ZIP archive
softethervpn_all_profiles = os.path.join(output_folder_name, viscosity_profiles_folder_name)
make_archive(softethervpn_all_profiles,
str(os.path.join(output_folder_name, f'{viscosity_profiles_folder_name}.zip')))
generate_viscosity_openvpn_configs('all')
def generate_softethervpn_configs(compress=True):
for server in vpn_servers_dict:
hostname, ip, country, city = vpn_servers_dict[server].values()
filename = f"{hostname}_{country}_{city}.vpn"
print(filename)
substitutions = {"CONNECTION_NAME": str(filename).replace('.vpn', ''), "HOSTNAME_PLACEHOLDER": hostname}
content = replace(softethervpn_template, substitutions)
folder = os.path.join(output_folder_name, softethervpn_profiles_folder_name)
path = os.path.join(folder, filename)
if not os.path.exists(output_folder_name):
os.makedirs(output_folder_name, exist_ok=True)
if not os.path.exists(folder):
os.makedirs(folder, exist_ok=True)
write_config_to_file(content, path)
print(f'VPN Config written: {path}')
print()
    if compress:
        # Zip all SoftEther profiles into a single archive
        make_archive(folder, str(os.path.join(output_folder_name, f'{softethervpn_profiles_folder_name}.zip')))
generate_softethervpn_configs()
|
py | 1a43062e126707af011cba372e1aec94dc192980 | #
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import libqtile.config
from libqtile import layout
from libqtile.confreader import Config
from test.conftest import no_xinerama
from test.layouts.layout_utils import assert_focus_path, assert_focused
class ColumnsConfig(Config):
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
layout.Columns(num_columns=3),
]
floating_layout = libqtile.resources.default_config.floating_layout
keys = []
mouse = []
screens = []
follow_mouse_focus = False
def columns_config(x):
return no_xinerama(pytest.mark.parametrize("manager", [ColumnsConfig], indirect=True)(x))
# These tests cover the window focus cycle and column swapping
@columns_config
def test_columns_window_focus_cycle(manager):
# setup 3 tiled and two floating clients
manager.test_window("one")
manager.test_window("two")
manager.test_window("three")
manager.test_window("float1")
manager.c.window.toggle_floating()
manager.test_window("float2")
manager.c.window.toggle_floating()
manager.test_window("four")
# test preconditions, columns adds clients at pos after current, in two stacks
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['one']
assert columns[1]['clients'] == ['two']
assert columns[2]['clients'] == ['four', 'three']
# last added window has focus
assert_focused(manager, "four")
# assert window focus cycle, according to order in layout
assert_focus_path(manager, 'three', 'float1', 'float2', 'one', 'two', 'four')
@columns_config
def test_columns_swap_column_left(manager):
manager.test_window("1")
manager.test_window("2")
manager.test_window("3")
manager.test_window("4")
# test preconditions
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['1']
assert columns[1]['clients'] == ['2']
assert columns[2]['clients'] == ['4', '3']
assert_focused(manager, "4")
# assert columns are swapped left
manager.c.layout.swap_column_left()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['1']
assert columns[1]['clients'] == ['4', '3']
assert columns[2]['clients'] == ['2']
manager.c.layout.swap_column_left()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['4', '3']
assert columns[1]['clients'] == ['1']
assert columns[2]['clients'] == ['2']
manager.c.layout.swap_column_left()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['2']
assert columns[1]['clients'] == ['1']
assert columns[2]['clients'] == ['4', '3']
@columns_config
def test_columns_swap_column_right(manager):
manager.test_window("1")
manager.test_window("2")
manager.test_window("3")
manager.test_window("4")
# test preconditions
assert manager.c.layout.info()['columns'][0]['clients'] == ['1']
assert manager.c.layout.info()['columns'][1]['clients'] == ['2']
assert manager.c.layout.info()['columns'][2]['clients'] == ['4', '3']
assert_focused(manager, "4")
# assert columns are swapped right
manager.c.layout.swap_column_right()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['4', '3']
assert columns[1]['clients'] == ['2']
assert columns[2]['clients'] == ['1']
manager.c.layout.swap_column_right()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['2']
assert columns[1]['clients'] == ['4', '3']
assert columns[2]['clients'] == ['1']
manager.c.layout.swap_column_right()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['2']
assert columns[1]['clients'] == ['1']
assert columns[2]['clients'] == ['4', '3']
|
py | 1a43067be215f7e1ee9c3963da0cfae66c30a905 | from __future__ import print_function, division, absolute_import
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[:2] < (3, 4):
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
import imgaug.augmenters.size as iaa_size
from imgaug.testutils import array_equal_lists, keypoints_equal, reseed
from imgaug.augmentables.heatmaps import HeatmapsOnImage
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
class Test__handle_position_parameter(unittest.TestCase):
def setUp(self):
reseed()
def test_string_uniform(self):
observed = iaa_size._handle_position_parameter("uniform")
assert isinstance(observed, tuple)
assert len(observed) == 2
for i in range(2):
assert isinstance(observed[i], iap.Uniform)
assert isinstance(observed[i].a, iap.Deterministic)
assert isinstance(observed[i].b, iap.Deterministic)
assert 0.0 - 1e-4 < observed[i].a.value < 0.0 + 1e-4
assert 1.0 - 1e-4 < observed[i].b.value < 1.0 + 1e-4
def test_string_center(self):
observed = iaa_size._handle_position_parameter("center")
assert isinstance(observed, tuple)
assert len(observed) == 2
for i in range(2):
assert isinstance(observed[i], iap.Deterministic)
assert 0.5 - 1e-4 < observed[i].value < 0.5 + 1e-4
def test_string_normal(self):
observed = iaa_size._handle_position_parameter("normal")
assert isinstance(observed, tuple)
assert len(observed) == 2
for i in range(2):
assert isinstance(observed[i], iap.Clip)
assert isinstance(observed[i].other_param, iap.Normal)
assert isinstance(observed[i].other_param.loc, iap.Deterministic)
assert isinstance(observed[i].other_param.scale, iap.Deterministic)
assert 0.5 - 1e-4 < observed[i].other_param.loc.value < 0.5 + 1e-4
assert 0.35/2 - 1e-4 < observed[i].other_param.scale.value < 0.35/2 + 1e-4
def test_xy_axis_combined_strings(self):
pos_x = [
("left", 0.0),
("center", 0.5),
("right", 1.0)
]
pos_y = [
("top", 0.0),
("center", 0.5),
("bottom", 1.0)
]
for x_str, x_val in pos_x:
for y_str, y_val in pos_y:
position = "%s-%s" % (x_str, y_str)
with self.subTest(position=position):
observed = iaa_size._handle_position_parameter(position)
assert isinstance(observed[0], iap.Deterministic)
assert x_val - 1e-4 < observed[0].value < x_val + 1e-4
assert isinstance(observed[1], iap.Deterministic)
assert y_val - 1e-4 < observed[1].value < y_val + 1e-4
def test_stochastic_parameter(self):
observed = iaa_size._handle_position_parameter(iap.Poisson(2))
assert isinstance(observed, iap.Poisson)
def test_tuple_of_floats(self):
observed = iaa_size._handle_position_parameter((0.4, 0.6))
assert isinstance(observed, tuple)
assert len(observed) == 2
assert isinstance(observed[0], iap.Deterministic)
assert 0.4 - 1e-4 < observed[0].value < 0.4 + 1e-4
assert isinstance(observed[1], iap.Deterministic)
assert 0.6 - 1e-4 < observed[1].value < 0.6 + 1e-4
def test_tuple_of_floats_outside_value_range_leads_to_failure(self):
got_exception = False
try:
_ = iaa_size._handle_position_parameter((1.2, 0.6))
except Exception as e:
assert "must be within the value range" in str(e)
got_exception = True
assert got_exception
def test_tuple_of_stochastic_parameters(self):
observed = iaa_size._handle_position_parameter(
(iap.Poisson(2), iap.Poisson(3)))
assert isinstance(observed[0], iap.Poisson)
assert isinstance(observed[0].lam, iap.Deterministic)
assert 2 - 1e-4 < observed[0].lam.value < 2 + 1e-4
assert isinstance(observed[1], iap.Poisson)
assert isinstance(observed[1].lam, iap.Deterministic)
assert 3 - 1e-4 < observed[1].lam.value < 3 + 1e-4
    def test_tuple_of_float_and_stochastic_parameter(self):
observed = iaa_size._handle_position_parameter((0.4, iap.Poisson(3)))
assert isinstance(observed, tuple)
assert len(observed) == 2
assert isinstance(observed[0], iap.Deterministic)
assert 0.4 - 1e-4 < observed[0].value < 0.4 + 1e-4
assert isinstance(observed[1], iap.Poisson)
assert isinstance(observed[1].lam, iap.Deterministic)
assert 3 - 1e-4 < observed[1].lam.value < 3 + 1e-4
def test_bad_datatype_leads_to_failure(self):
got_exception = False
try:
_ = iaa_size._handle_position_parameter(False)
except Exception as e:
assert "Expected one of the following as position parameter" in str(e)
got_exception = True
assert got_exception
class TestResize(unittest.TestCase):
def setUp(self):
reseed()
@property
def image2d(self):
# 4x8
base_img2d = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 255, 255, 255, 255, 255, 255, 0],
[0, 255, 255, 255, 255, 255, 255, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
]
base_img2d = np.array(base_img2d, dtype=np.uint8)
return base_img2d
@property
def image3d(self):
base_img3d = np.tile(self.image2d[..., np.newaxis], (1, 1, 3))
return base_img3d
@property
def kpsoi2d(self):
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=4, y=1)]
return ia.KeypointsOnImage(kps, shape=self.image2d.shape)
@property
def kpsoi3d(self):
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=4, y=1)]
return ia.KeypointsOnImage(kps, shape=self.image3d.shape)
@property
def psoi2d(self):
polygons = [
ia.Polygon([(0, 0), (8, 0), (8, 4)]),
ia.Polygon([(1, 1), (7, 1), (7, 3), (1, 3)]),
]
return ia.PolygonsOnImage(polygons, shape=self.image2d.shape)
@property
def psoi3d(self):
polygons = [
ia.Polygon([(0, 0), (8, 0), (8, 4)]),
ia.Polygon([(1, 1), (7, 1), (7, 3), (1, 3)]),
]
return ia.PolygonsOnImage(polygons, shape=self.image3d.shape)
@classmethod
def _aspect_ratio(cls, image):
return image.shape[1] / image.shape[0]
def test_resize_to_fixed_int(self):
aug = iaa.Resize(12)
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape == (12, 12)
assert observed3d.shape == (12, 12, 3)
assert 50 < np.average(observed2d) < 205
assert 50 < np.average(observed3d) < 205
def test_resize_to_fixed_float(self):
aug = iaa.Resize(0.5)
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape == (2, 4)
assert observed3d.shape == (2, 4, 3)
assert 50 < np.average(observed2d) < 205
assert 50 < np.average(observed3d) < 205
def test_heatmaps_with_width_int_and_height_int(self):
aug = iaa.Resize({"height": 8, "width": 12})
heatmaps_arr = (self.image2d / 255.0).astype(np.float32)
heatmaps_aug = aug.augment_heatmaps([
HeatmapsOnImage(heatmaps_arr, shape=self.image3d.shape)])[0]
assert heatmaps_aug.shape == (8, 12, 3)
assert 0 - 1e-6 < heatmaps_aug.min_value < 0 + 1e-6
assert 1 - 1e-6 < heatmaps_aug.max_value < 1 + 1e-6
assert np.average(heatmaps_aug.get_arr()[0, :]) < 0.05
assert np.average(heatmaps_aug.get_arr()[-1, :]) < 0.05
assert np.average(heatmaps_aug.get_arr()[:, 0]) < 0.05
assert 0.8 < np.average(heatmaps_aug.get_arr()[2:6, 2:10]) < 1 + 1e-6
def test_segmaps_with_width_int_and_height_int(self):
for nb_channels in [None, 1, 10]:
aug = iaa.Resize({"height": 8, "width": 12})
segmaps_arr = (self.image2d > 0).astype(np.int32)
if nb_channels is not None:
segmaps_arr = np.tile(
segmaps_arr[..., np.newaxis], (1, 1, nb_channels))
segmaps_aug = aug.augment_segmentation_maps([
SegmentationMapsOnImage(segmaps_arr, shape=self.image3d.shape)])[0]
assert segmaps_aug.shape == (8, 12, 3)
assert segmaps_aug.arr.shape == (8, 12, nb_channels if nb_channels is not None else 1)
assert np.all(segmaps_aug.arr[0, 1:-1, :] == 0)
assert np.all(segmaps_aug.arr[-1, 1:-1, :] == 0)
assert np.all(segmaps_aug.arr[1:-1, 0, :] == 0)
assert np.all(segmaps_aug.arr[1:-1, -1, :] == 0)
assert np.all(segmaps_aug.arr[2:-2, 2:-2, :] == 1)
def test_heatmaps_with_diff_size_than_img_and_width_float_height_int(self):
aug = iaa.Resize({"width": 2.0, "height": 16})
heatmaps_arr = (self.image2d / 255.0).astype(np.float32)
heatmaps = HeatmapsOnImage(
heatmaps_arr,
shape=(2*self.image3d.shape[0], 2*self.image3d.shape[1], 3))
heatmaps_aug = aug.augment_heatmaps([heatmaps])[0]
assert heatmaps_aug.shape == (16, int(self.image3d.shape[1]*2*2), 3)
assert heatmaps_aug.arr_0to1.shape == (8, 16, 1)
assert 0 - 1e-6 < heatmaps_aug.min_value < 0 + 1e-6
assert 1 - 1e-6 < heatmaps_aug.max_value < 1 + 1e-6
assert np.average(heatmaps_aug.get_arr()[0, :]) < 0.05
assert np.average(heatmaps_aug.get_arr()[-1:, :]) < 0.05
assert np.average(heatmaps_aug.get_arr()[:, 0]) < 0.05
assert 0.8 < np.average(heatmaps_aug.get_arr()[2:6, 2:10]) < 1 + 1e-6
def test_segmaps_with_diff_size_than_img_and_width_float_height_int(self):
aug = iaa.Resize({"width": 2.0, "height": 16})
segmaps_arr = (self.image2d > 0).astype(np.int32)
segmaps = SegmentationMapsOnImage(
segmaps_arr,
shape=(2*self.image3d.shape[0], 2*self.image3d.shape[1], 3))
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
assert segmaps_aug.shape == (16, int(self.image3d.shape[1]*2*2), 3)
assert segmaps_aug.arr.shape == (8, 16, 1)
assert np.all(segmaps_aug.arr[0, 1:-1, :] == 0)
assert np.all(segmaps_aug.arr[-1, 1:-1, :] == 0)
assert np.all(segmaps_aug.arr[1:-1, 0, :] == 0)
assert np.all(segmaps_aug.arr[1:-1, -1, :] == 0)
assert np.all(segmaps_aug.arr[2:-2, 2:-2, :] == 1)
def test_keypoints_on_3d_img_and_with_width_int_and_height_int(self):
aug = iaa.Resize({"height": 8, "width": 12})
kpsoi_aug = aug.augment_keypoints([self.kpsoi3d])[0]
assert len(kpsoi_aug.keypoints) == 2
assert kpsoi_aug.shape == (8, 12, 3)
assert np.allclose(kpsoi_aug.keypoints[0].x, 1.5)
assert np.allclose(kpsoi_aug.keypoints[0].y, 4)
assert np.allclose(kpsoi_aug.keypoints[1].x, 6)
assert np.allclose(kpsoi_aug.keypoints[1].y, 2)
def test_polygons_on_3d_img_and_with_width_int_and_height_int(self):
aug = iaa.Resize({"width": 12, "height": 8})
psoi_aug = aug.augment_polygons(self.psoi3d)
assert len(psoi_aug.polygons) == 2
assert psoi_aug.shape == (8, 12, 3)
assert psoi_aug.polygons[0].exterior_almost_equals(
ia.Polygon([(0, 0), (12, 0), (12, 8)])
)
assert psoi_aug.polygons[1].exterior_almost_equals(
ia.Polygon([(1.5, 2), (10.5, 2), (10.5, 6), (1.5, 6)])
)
def test_keypoints_on_2d_img_and_with_width_float_and_height_int(self):
aug = iaa.Resize({"width": 3.0, "height": 8})
kpsoi_aug = aug.augment_keypoints([self.kpsoi2d])[0]
assert len(kpsoi_aug.keypoints) == 2
assert kpsoi_aug.shape == (8, 24)
assert np.allclose(kpsoi_aug.keypoints[0].x, 3)
assert np.allclose(kpsoi_aug.keypoints[0].y, 4)
assert np.allclose(kpsoi_aug.keypoints[1].x, 12)
assert np.allclose(kpsoi_aug.keypoints[1].y, 2)
def test_polygons_on_2d_img_and_with_width_float_and_height_int(self):
aug = iaa.Resize({"width": 3.0, "height": 8})
psoi_aug = aug.augment_polygons(self.psoi2d)
assert len(psoi_aug.polygons) == 2
assert psoi_aug.shape == (8, 24)
assert psoi_aug.polygons[0].exterior_almost_equals(
ia.Polygon([(3*0, 0), (3*8, 0), (3*8, 8)])
)
assert psoi_aug.polygons[1].exterior_almost_equals(
ia.Polygon([(3*1, 2), (3*7, 2), (3*7, 6), (3*1, 6)])
)
def test_empty_keypoints(self):
aug = iaa.Resize({"height": 8, "width": 12})
kpsoi = ia.KeypointsOnImage([], shape=(4, 8, 3))
kpsoi_aug = aug.augment_keypoints(kpsoi)
assert len(kpsoi_aug.keypoints) == 0
assert kpsoi_aug.shape == (8, 12, 3)
def test_empty_polygons(self):
aug = iaa.Resize({"height": 8, "width": 12})
psoi = ia.PolygonsOnImage([], shape=(4, 8, 3))
psoi_aug = aug.augment_polygons(psoi)
assert len(psoi_aug.polygons) == 0
assert psoi_aug.shape == (8, 12, 3)
def test_size_is_list_of_ints(self):
aug = iaa.Resize([12, 14])
seen2d = [False, False]
seen3d = [False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape in [(12, 12), (14, 14)]
assert observed3d.shape in [(12, 12, 3), (14, 14, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
else:
seen3d[1] = True
if all(seen2d) and all(seen3d):
break
assert np.all(seen2d)
assert np.all(seen3d)
def test_size_is_tuple_of_ints(self):
aug = iaa.Resize((12, 14))
seen2d = [False, False, False]
seen3d = [False, False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape in [(12, 12), (13, 13), (14, 14)]
assert observed3d.shape in [(12, 12, 3), (13, 13, 3), (14, 14, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
elif observed2d.shape == (13, 13):
seen2d[1] = True
else:
seen2d[2] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
elif observed3d.shape == (13, 13, 3):
seen3d[1] = True
else:
seen3d[2] = True
if all(seen2d) and all(seen3d):
break
assert np.all(seen2d)
assert np.all(seen3d)
def test_size_is_string_keep(self):
aug = iaa.Resize("keep")
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape == self.image2d.shape
assert observed3d.shape == self.image3d.shape
# TODO shouldn't this be more an error?
def test_size_is_empty_list(self):
aug = iaa.Resize([])
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape == self.image2d.shape
assert observed3d.shape == self.image3d.shape
def test_size_is_empty_dict(self):
aug = iaa.Resize({})
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape == self.image2d.shape
assert observed3d.shape == self.image3d.shape
def test_change_height_to_fixed_int(self):
aug = iaa.Resize({"height": 11})
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape == (11, self.image2d.shape[1])
assert observed3d.shape == (11, self.image3d.shape[1], 3)
def test_change_width_to_fixed_int(self):
aug = iaa.Resize({"width": 13})
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape == (self.image2d.shape[0], 13)
assert observed3d.shape == (self.image3d.shape[0], 13, 3)
def test_change_height_and_width_to_fixed_ints(self):
aug = iaa.Resize({"height": 12, "width": 13})
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape == (12, 13)
assert observed3d.shape == (12, 13, 3)
def test_change_height_to_fixed_int_but_dont_change_width(self):
aug = iaa.Resize({"height": 12, "width": "keep"})
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape == (12, self.image2d.shape[1])
assert observed3d.shape == (12, self.image3d.shape[1], 3)
def test_dont_change_height_but_width_to_fixed_int(self):
aug = iaa.Resize({"height": "keep", "width": 12})
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape == (self.image2d.shape[0], 12)
assert observed3d.shape == (self.image3d.shape[0], 12, 3)
def test_change_height_to_fixed_int_width_keeps_aspect_ratio(self):
aug = iaa.Resize({"height": 12, "width": "keep-aspect-ratio"})
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
aspect_ratio2d = self._aspect_ratio(self.image2d)
aspect_ratio3d = self._aspect_ratio(self.image3d)
assert observed2d.shape == (12, int(12 * aspect_ratio2d))
assert observed3d.shape == (12, int(12 * aspect_ratio3d), 3)
def test_height_keeps_aspect_ratio_width_changed_to_fixed_int(self):
aug = iaa.Resize({"height": "keep-aspect-ratio", "width": 12})
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
aspect_ratio2d = self._aspect_ratio(self.image2d)
aspect_ratio3d = self._aspect_ratio(self.image3d)
assert observed2d.shape == (int(12 * (1/aspect_ratio2d)), 12)
assert observed3d.shape == (int(12 * (1/aspect_ratio3d)), 12, 3)
# TODO add test for shorter side being tuple, list, stochastic parameter
def test_change_shorter_side_by_fixed_int_longer_keeps_aspect_ratio(self):
aug = iaa.Resize({"shorter-side": 6,
"longer-side": "keep-aspect-ratio"})
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
aspect_ratio2d = self._aspect_ratio(self.image2d)
aspect_ratio3d = self._aspect_ratio(self.image3d)
assert observed2d.shape == (6, int(6 * aspect_ratio2d))
assert observed3d.shape == (6, int(6 * aspect_ratio3d), 3)
# TODO add test for longer side being tuple, list, stochastic parameter
def test_change_longer_side_by_fixed_int_shorter_keeps_aspect_ratio(self):
aug = iaa.Resize({"longer-side": 6,
"shorter-side": "keep-aspect-ratio"})
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
aspect_ratio2d = self._aspect_ratio(self.image2d)
aspect_ratio3d = self._aspect_ratio(self.image3d)
assert observed2d.shape == (int(6 * (1/aspect_ratio2d)), 6)
assert observed3d.shape == (int(6 * (1/aspect_ratio3d)), 6, 3)
def test_change_height_by_list_of_ints_width_by_fixed_int(self):
aug = iaa.Resize({"height": [12, 14], "width": 12})
seen2d = [False, False]
seen3d = [False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape in [(12, 12), (14, 12)]
assert observed3d.shape in [(12, 12, 3), (14, 12, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
else:
seen3d[1] = True
if np.all(seen2d) and np.all(seen3d):
break
assert np.all(seen2d)
assert np.all(seen3d)
def test_change_height_by_fixed_int_width_by_list_of_ints(self):
aug = iaa.Resize({"height": 12, "width": [12, 14]})
seen2d = [False, False]
seen3d = [False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape in [(12, 12), (12, 14)]
assert observed3d.shape in [(12, 12, 3), (12, 14, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
else:
seen3d[1] = True
if np.all(seen2d) and np.all(seen3d):
break
assert np.all(seen2d)
assert np.all(seen3d)
def test_change_height_by_fixed_int_width_by_stochastic_parameter(self):
aug = iaa.Resize({"height": 12, "width": iap.Choice([12, 14])})
seen2d = [False, False]
seen3d = [False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape in [(12, 12), (12, 14)]
assert observed3d.shape in [(12, 12, 3), (12, 14, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
else:
seen3d[1] = True
if np.all(seen2d) and np.all(seen3d):
break
assert np.all(seen2d)
assert np.all(seen3d)
def test_change_height_by_tuple_of_ints_width_by_fixed_int(self):
aug = iaa.Resize({"height": (12, 14), "width": 12})
seen2d = [False, False, False]
seen3d = [False, False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape in [(12, 12), (13, 12), (14, 12)]
assert observed3d.shape in [(12, 12, 3), (13, 12, 3), (14, 12, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
elif observed2d.shape == (13, 12):
seen2d[1] = True
else:
seen2d[2] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
elif observed3d.shape == (13, 12, 3):
seen3d[1] = True
else:
seen3d[2] = True
if np.all(seen2d) and np.all(seen3d):
break
assert np.all(seen2d)
assert np.all(seen3d)
def test_size_is_float(self):
aug = iaa.Resize(2.0)
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
intensity_avg = np.average(self.image2d)
intensity_low = intensity_avg - 0.2 * np.abs(intensity_avg - 128)
intensity_high = intensity_avg + 0.2 * np.abs(intensity_avg - 128)
assert observed2d.shape == (self.image2d.shape[0]*2,
self.image2d.shape[1]*2)
assert observed3d.shape == (self.image3d.shape[0]*2,
self.image3d.shape[1]*2,
3)
assert intensity_low < np.average(observed2d) < intensity_high
assert intensity_low < np.average(observed3d) < intensity_high
def test_size_is_list(self):
aug = iaa.Resize([2.0, 4.0])
seen2d = [False, False]
seen3d = [False, False]
expected_shapes_2d = [
(self.image2d.shape[0]*2, self.image2d.shape[1]*2),
(self.image2d.shape[0]*4, self.image2d.shape[1]*4)]
expected_shapes_3d = [
(self.image3d.shape[0]*2, self.image3d.shape[1]*2, 3),
(self.image3d.shape[0]*4, self.image3d.shape[1]*4, 3)]
for _ in sm.xrange(100):
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape in expected_shapes_2d
assert observed3d.shape in expected_shapes_3d
if observed2d.shape == expected_shapes_2d[0]:
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == expected_shapes_3d[0]:
seen3d[0] = True
else:
seen3d[1] = True
if np.all(seen2d) and np.all(seen3d):
break
assert np.all(seen2d)
assert np.all(seen3d)
def test_size_is_stochastic_parameter(self):
aug = iaa.Resize(iap.Choice([2.0, 4.0]))
seen2d = [False, False]
seen3d = [False, False]
expected_shapes_2d = [
(self.image2d.shape[0]*2, self.image2d.shape[1]*2),
(self.image2d.shape[0]*4, self.image2d.shape[1]*4)]
expected_shapes_3d = [
(self.image3d.shape[0]*2, self.image3d.shape[1]*2, 3),
(self.image3d.shape[0]*4, self.image3d.shape[1]*4, 3)]
for _ in sm.xrange(100):
observed2d = aug.augment_image(self.image2d)
observed3d = aug.augment_image(self.image3d)
assert observed2d.shape in expected_shapes_2d
assert observed3d.shape in expected_shapes_3d
if observed2d.shape == expected_shapes_2d[0]:
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == expected_shapes_3d[0]:
seen3d[0] = True
else:
seen3d[1] = True
if all(seen2d) and all(seen3d):
break
assert np.all(seen2d)
assert np.all(seen3d)
def test_decrease_size_by_tuple_of_floats__one_for_both_sides(self):
image2d = self.image2d[0:4, 0:4]
image3d = self.image3d[0:4, 0:4, :]
aug = iaa.Resize((0.76, 1.0))
not_seen2d = set()
not_seen3d = set()
for size in sm.xrange(3, 4+1):
not_seen2d.add((size, size))
for size in sm.xrange(3, 4+1):
not_seen3d.add((size, size, 3))
possible2d = set(list(not_seen2d))
possible3d = set(list(not_seen3d))
for _ in sm.xrange(100):
observed2d = aug.augment_image(image2d)
observed3d = aug.augment_image(image3d)
assert observed2d.shape in possible2d
assert observed3d.shape in possible3d
if observed2d.shape in not_seen2d:
not_seen2d.remove(observed2d.shape)
if observed3d.shape in not_seen3d:
not_seen3d.remove(observed3d.shape)
if not not_seen2d and not not_seen3d:
break
assert not not_seen2d
assert not not_seen3d
def test_decrease_size_by_tuples_of_floats__one_per_side(self):
image2d = self.image2d[0:4, 0:4]
image3d = self.image3d[0:4, 0:4, :]
aug = iaa.Resize({"height": (0.76, 1.0), "width": (0.76, 1.0)})
not_seen2d = set()
not_seen3d = set()
for hsize in sm.xrange(3, 4+1):
for wsize in sm.xrange(3, 4+1):
not_seen2d.add((hsize, wsize))
for hsize in sm.xrange(3, 4+1):
for wsize in sm.xrange(3, 4+1):
not_seen3d.add((hsize, wsize, 3))
possible2d = set(list(not_seen2d))
possible3d = set(list(not_seen3d))
for _ in sm.xrange(100):
observed2d = aug.augment_image(image2d)
observed3d = aug.augment_image(image3d)
assert observed2d.shape in possible2d
assert observed3d.shape in possible3d
if observed2d.shape in not_seen2d:
not_seen2d.remove(observed2d.shape)
if observed3d.shape in not_seen3d:
not_seen3d.remove(observed3d.shape)
if not not_seen2d and not not_seen3d:
break
assert not not_seen2d
assert not not_seen3d
def test_bad_datatype_for_size_leads_to_failure(self):
got_exception = False
try:
aug = iaa.Resize("foo")
_ = aug.augment_image(self.image2d)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_get_parameters(self):
aug = iaa.Resize(size=1, interpolation="nearest")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == "nearest"
class TestPad(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
base_img = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=np.uint8)
return base_img[:, :, np.newaxis]
@property
def images(self):
return np.array([self.image])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
@property
def heatmap(self):
heatmaps_arr = np.float32([[0, 0, 0],
[0, 1.0, 0],
[0, 0, 0]])
return ia.HeatmapsOnImage(heatmaps_arr, shape=self.image.shape)
@property
def segmap(self):
segmaps_arr = np.int32([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
return ia.SegmentationMapsOnImage(segmaps_arr, shape=self.image.shape)
def test___init___pad_mode_is_all(self):
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode=ia.ALL,
pad_cval=0,
keep_size=False)
expected = ["constant", "edge", "linear_ramp", "maximum", "mean",
"median", "minimum", "reflect", "symmetric", "wrap"]
assert isinstance(aug.pad_mode, iap.Choice)
assert len(aug.pad_mode.a) == len(expected)
assert np.all([v in aug.pad_mode.a for v in expected])
def test___init___pad_mode_is_list(self):
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode=["constant", "maximum"],
pad_cval=0,
keep_size=False)
expected = ["constant", "maximum"]
assert isinstance(aug.pad_mode, iap.Choice)
assert len(aug.pad_mode.a) == len(expected)
assert np.all([v in aug.pad_mode.a for v in expected])
def test___init___pad_cval_is_list(self):
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode="constant",
pad_cval=[50, 100],
keep_size=False)
expected = [50, 100]
assert isinstance(aug.pad_cval, iap.Choice)
assert len(aug.pad_cval.a) == len(expected)
assert np.all([v in aug.pad_cval.a for v in expected])
def test_pad_images_by_1px_each_side_on_its_own(self):
# test pad by 1 pixel on each side
pads = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for pad in pads:
with self.subTest(px=pad):
aug = iaa.Pad(px=pad, keep_size=False)
top, right, bottom, left = pad
base_img_padded = np.pad(
self.image,
((top, bottom), (left, right), (0, 0)),
mode="constant",
constant_values=0)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, np.array([base_img_padded]))
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [base_img_padded])
def test_pad_keypoints_by_1px_each_side_on_its_own(self):
pads = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for pad in pads:
with self.subTest(px=pad):
aug = iaa.Pad(px=pad, keep_size=False)
top, right, bottom, left = pad
image_padded_shape = list(self.image.shape)
image_padded_shape[0] += top + bottom
image_padded_shape[1] += left + right
kpsoi = self.kpsoi
expected = [kpsoi[0].shift(x=left, y=top)]
observed = aug.augment_keypoints(kpsoi)
assert observed[0].shape == tuple(image_padded_shape)
assert keypoints_equal(observed, expected)
def test_pad_heatmaps_by_1px_each_side_on_its_own(self):
pads = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for pad in pads:
with self.subTest(px=pad):
aug = iaa.Pad(px=pad, keep_size=False)
top, right, bottom, left = pad
heatmaps_arr = self.heatmap.get_arr()
heatmaps_arr_padded = np.pad(
heatmaps_arr,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
heatmaps = [ia.HeatmapsOnImage(
heatmaps_arr, shape=self.image.shape)]
image_padded_shape = list(self.image.shape)
image_padded_shape[0] += top + bottom
image_padded_shape[1] += left + right
observed = aug.augment_heatmaps(heatmaps)[0]
assert observed.shape == tuple(image_padded_shape)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.array_equal(observed.get_arr(), heatmaps_arr_padded)
def test_pad_segmaps_by_1px_each_side_on_its_own(self):
pads = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for pad in pads:
with self.subTest(px=pad):
aug = iaa.Pad(px=pad, keep_size=False)
top, right, bottom, left = pad
segmaps_arr = self.segmap.get_arr()
segmaps_arr_padded = np.pad(
segmaps_arr,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
segmaps = [SegmentationMapsOnImage(
segmaps_arr, shape=self.image.shape)]
image_padded_shape = list(self.image.shape)
image_padded_shape[0] += top + bottom
image_padded_shape[1] += left + right
observed = aug.augment_segmentation_maps(segmaps)[0]
assert observed.shape == tuple(image_padded_shape)
assert np.array_equal(observed.get_arr(), segmaps_arr_padded)
def test_pad_each_side_on_its_own_by_tuple_of_ints(self):
def _to_range_tuple(val):
return val if isinstance(val, tuple) else (val, val)
pads = [
((0, 2), 0, 0, 0),
(0, (0, 2), 0, 0),
(0, 0, (0, 2), 0),
(0, 0, 0, (0, 2)),
]
for pad in pads:
with self.subTest(px=pad):
aug = iaa.Pad(px=pad, keep_size=False)
aug_det = aug.to_deterministic()
top, right, bottom, left = pad
images_padded = []
keypoints_padded = []
top_range = _to_range_tuple(top)
right_range = _to_range_tuple(right)
bottom_range = _to_range_tuple(bottom)
left_range = _to_range_tuple(left)
top_values = sm.xrange(top_range[0], top_range[1]+1)
right_values = sm.xrange(right_range[0], right_range[1]+1)
bottom_values = sm.xrange(bottom_range[0], bottom_range[1]+1)
left_values = sm.xrange(left_range[0], left_range[1]+1)
for top_val in top_values:
for right_val in right_values:
for bottom_val in bottom_values:
for left_val in left_values:
images_padded.append(
np.pad(
self.image,
((top_val, bottom_val),
(left_val, right_val),
(0, 0)),
mode="constant",
constant_values=0
)
)
keypoints_padded.append(
self.kpsoi[0].shift(x=left_val, y=top_val))
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(self.images)
matches = [
(1 if np.array_equal(observed,
np.array([base_img_padded]))
else 0)
for base_img_padded
in images_padded
]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(self.images)
matches = [
(1 if np.array_equal(observed,
np.array([base_img_padded]))
else 0)
for base_img_padded
in images_padded
]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images([self.image])
assert any([
array_equal_lists(observed, [base_img_padded])
for base_img_padded
in images_padded])
observed = aug.augment_keypoints(self.kpsoi)
assert any([
keypoints_equal(observed, [kp])
for kp
in keypoints_padded])
assert len(set(movements)) == 3
assert len(set(movements_det)) == 1
def test_pad_each_side_on_its_own_by_list_of_ints(self):
# test pad by list of exact pixel values
pads = [
([0, 2], 0, 0, 0),
(0, [0, 2], 0, 0),
(0, 0, [0, 2], 0),
(0, 0, 0, [0, 2]),
]
for pad in pads:
top, right, bottom, left = pad
aug = iaa.Pad(px=pad, keep_size=False)
aug_det = aug.to_deterministic()
images_padded = []
keypoints_padded = []
top_range = top if isinstance(top, list) else [top]
right_range = right if isinstance(right, list) else [right]
bottom_range = bottom if isinstance(bottom, list) else [bottom]
left_range = left if isinstance(left, list) else [left]
for top_val in top_range:
for right_val in right_range:
for bottom_val in bottom_range:
for left_val in left_range:
images_padded.append(
np.pad(
self.image,
((top_val, bottom_val),
(left_val, right_val),
(0, 0)),
mode="constant",
constant_values=0
)
)
keypoints_padded.append(
self.kpsoi[0].shift(x=left_val, y=top_val))
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(self.images)
matches = [
(1 if np.array_equal(observed,
np.array([base_img_padded]))
else 0)
for base_img_padded
in images_padded]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(self.images)
matches = [
(1 if np.array_equal(observed,
np.array([base_img_padded]))
else 0)
for base_img_padded
in images_padded]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images([self.image])
assert any([
array_equal_lists(observed, [base_img_padded])
for base_img_padded
in images_padded])
observed = aug.augment_keypoints(self.kpsoi)
assert any([
keypoints_equal(observed, [kp])
for kp
in keypoints_padded])
assert len(set(movements)) == 2
assert len(set(movements_det)) == 1
def test_pad_heatmaps_smaller_than_img_by_tuple_of_ints_without_ks(self):
# pad smaller heatmaps
# heatmap is (6, 4), image is (6, 16)
# image is padded by (2, 4, 2, 4)
# expected image size: (10, 24)
# expected heatmap size: (10, 6)
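        # (the heatmap is at 1/4 of the image width and at the same height, so the 4px
        # left/right image padding maps to 1px on the heatmap while the 2px top/bottom
        # padding stays 2px, hence the (2, 2, 1, 1) values below)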
aug = iaa.Pad(px=(2, 4, 2, 4), keep_size=False)
heatmaps_arr_small = np.float32([
[0, 0, 0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 0, 0, 0]
])
top, bottom, left, right = 2, 2, 1, 1
heatmaps_arr_small_padded = np.pad(
heatmaps_arr_small,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
heatmaps = [ia.HeatmapsOnImage(heatmaps_arr_small, shape=(6, 16))]
observed = aug.augment_heatmaps(heatmaps)[0]
assert observed.shape == (10, 24)
assert observed.arr_0to1.shape == (10, 6, 1)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.arr_0to1[..., 0], heatmaps_arr_small_padded)
def test_pad_segmaps_smaller_than_img_by_tuple_of_ints_without_ks(self):
# pad smaller segmaps
# same sizes and paddings as above
aug = iaa.Pad(px=(2, 4, 2, 4), keep_size=False)
segmaps_arr_small = np.int32([
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]
])
segmaps = [SegmentationMapsOnImage(segmaps_arr_small, shape=(6, 16))]
top, bottom, left, right = 2, 2, 1, 1
segmaps_arr_small_padded = np.pad(
segmaps_arr_small,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
observed = aug.augment_segmentation_maps(segmaps)[0]
assert observed.shape == (10, 24)
assert observed.arr.shape == (10, 6, 1)
assert np.array_equal(observed.arr[..., 0], segmaps_arr_small_padded)
def test_pad_heatmaps_smaller_than_img_by_tuple_of_ints_with_ks(self):
# pad smaller heatmaps, with keep_size=True
# heatmap is (6, 4), image is (6, 16)
# image is padded by (2, 4, 2, 4)
# expected image size: (10, 24) -> (6, 16) after resize
# expected heatmap size: (10, 6) -> (6, 4) after resize
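        # (the cubic interpolation used for the resize back can overshoot the [0, 1] value
        # range, which is why the reference array below is clipped before comparing)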
aug = iaa.Pad(px=(2, 4, 2, 4), keep_size=True)
heatmaps_arr_small = np.float32([
[0, 0, 0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 0, 0, 0]
])
top, bottom, left, right = 2, 2, 1, 1
heatmaps_arr_small_padded = np.pad(
heatmaps_arr_small,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
heatmaps = [ia.HeatmapsOnImage(heatmaps_arr_small, shape=(6, 16))]
observed = aug.augment_heatmaps(heatmaps)[0]
assert observed.shape == (6, 16)
assert observed.arr_0to1.shape == (6, 4, 1)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(
observed.arr_0to1[..., 0],
np.clip(
ia.imresize_single_image(
heatmaps_arr_small_padded,
(6, 4),
interpolation="cubic"),
0, 1.0
)
)
def test_pad_segmaps_smaller_than_img_by_tuple_of_ints_with_keep_size(self):
# pad smaller segmaps, with keep_size=True
# same sizes and paddings as above
aug = iaa.Pad(px=(2, 4, 2, 4), keep_size=True)
segmaps_arr_small = np.int32([
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]
])
top, bottom, left, right = 2, 2, 1, 1
segmaps_arr_small_padded = np.pad(
segmaps_arr_small,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
segmaps = [SegmentationMapsOnImage(segmaps_arr_small, shape=(6, 16))]
observed = aug.augment_segmentation_maps(segmaps)[0]
assert observed.shape == (6, 16)
assert observed.arr.shape == (6, 4, 1)
assert np.array_equal(
observed.arr[..., 0],
ia.imresize_single_image(
segmaps_arr_small_padded,
(6, 4),
interpolation="nearest"
),
)
def test_pad_keypoints_by_tuple_of_fixed_ints_without_keep_size(self):
aug = iaa.Pad((2, 0, 4, 4), keep_size=False)
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=0)]
kpsoi = ia.KeypointsOnImage(kps, shape=(4, 4, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (10, 8, 3)
assert len(kpsoi_aug.keypoints) == 2
assert np.allclose(kpsoi_aug.keypoints[0].x, 4+1)
assert np.allclose(kpsoi_aug.keypoints[0].y, 2+2)
assert np.allclose(kpsoi_aug.keypoints[1].x, 4+3)
assert np.allclose(kpsoi_aug.keypoints[1].y, 2+0)
def test_pad_keypoints_by_tuple_of_fixed_ints_with_keep_size(self):
aug = iaa.Pad((2, 0, 4, 4), keep_size=True)
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=0)]
kpsoi = ia.KeypointsOnImage(kps, shape=(4, 4, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (4, 4, 3)
assert len(kpsoi_aug.keypoints) == 2
assert np.allclose(kpsoi_aug.keypoints[0].x, ((4+1)/8)*4)
assert np.allclose(kpsoi_aug.keypoints[0].y, ((2+2)/10)*4)
assert np.allclose(kpsoi_aug.keypoints[1].x, ((4+3)/8)*4)
assert np.allclose(kpsoi_aug.keypoints[1].y, ((2+0)/10)*4)
def test_pad_polygons_by_tuple_of_fixed_ints_without_keep_size(self):
aug = iaa.Pad((2, 0, 4, 4), keep_size=False)
polygons = [ia.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
ia.Polygon([(1, 1), (5, 1), (5, 5), (1, 5)])]
psoi = ia.PolygonsOnImage(polygons, shape=(4, 4, 3))
psoi_aug = aug.augment_polygons([psoi, psoi])
assert len(psoi_aug) == 2
for psoi_aug_i in psoi_aug:
assert psoi_aug_i.shape == (10, 8, 3)
assert len(psoi_aug_i.polygons) == 2
assert psoi_aug_i.polygons[0].exterior_almost_equals(
ia.Polygon([(4, 2), (8, 2), (8, 6), (4, 6)])
)
assert psoi_aug_i.polygons[1].exterior_almost_equals(
ia.Polygon([(5, 3), (9, 3), (9, 7), (5, 7)])
)
def test_pad_polygons_by_tuple_of_fixed_ints_with_keep_size(self):
aug = iaa.Pad((2, 0, 4, 4), keep_size=True)
polygons = [ia.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
ia.Polygon([(1, 1), (5, 1), (5, 5), (1, 5)])]
psoi = ia.PolygonsOnImage(polygons, shape=(4, 4, 3))
psoi_aug = aug.augment_polygons([psoi, psoi])
assert len(psoi_aug) == 2
for psoi_aug_i in psoi_aug:
assert psoi_aug_i.shape == (4, 4, 3)
assert len(psoi_aug_i.polygons) == 2
assert psoi_aug_i.polygons[0].exterior_almost_equals(
ia.Polygon([(4*(4/8), 4*(2/10)),
(4*(8/8), 4*(2/10)),
(4*(8/8), 4*(6/10)),
(4*(4/8), 4*(6/10))])
)
assert psoi_aug_i.polygons[1].exterior_almost_equals(
ia.Polygon([(4*(5/8), 4*(3/10)),
(4*(9/8), 4*(3/10)),
(4*(9/8), 4*(7/10)),
(4*(5/8), 4*(7/10))])
)
def test_pad_mode_is_stochastic_parameter(self):
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode=iap.Choice(["constant", "maximum", "edge"]),
pad_cval=0,
keep_size=False)
image = np.zeros((1, 2), dtype=np.uint8)
image[0, 0] = 100
image[0, 1] = 50
seen = [0, 0, 0]
for _ in sm.xrange(300):
observed = aug.augment_image(image)
if observed[0, 2] == 0:
seen[0] += 1
elif observed[0, 2] == 100:
seen[1] += 1
elif observed[0, 2] == 50:
seen[2] += 1
else:
assert False
assert np.all([100 - 50 < v < 100 + 50 for v in seen])
def test_bad_datatype_for_pad_mode_causes_failure(self):
got_exception = False
try:
_aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode=False,
pad_cval=0,
keep_size=False)
except Exception as exc:
assert "Expected pad_mode to be " in str(exc)
got_exception = True
assert got_exception
def test_pad_heatmaps_with_pad_mode_set(self):
# pad modes, heatmaps (always uses constant padding)
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode="edge",
pad_cval=0,
keep_size=False)
heatmaps_arr = np.ones((3, 3, 1), dtype=np.float32)
heatmaps = HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
observed = aug.augment_heatmaps([heatmaps])[0]
assert np.sum(observed.get_arr() <= 1e-4) == 3
def test_pad_segmaps_with_pad_mode_set(self):
# pad modes, segmaps (always uses constant padding)
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode="edge",
pad_cval=0,
keep_size=False)
segmaps_arr = np.ones((3, 3, 1), dtype=np.int32)
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))
observed = aug.augment_segmentation_maps([segmaps])[0]
assert np.sum(observed.get_arr() == 0) == 3
def test_pad_cval_is_int(self):
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode="constant",
pad_cval=100,
keep_size=False)
image = np.zeros((1, 1), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed[0, 0] == 0
assert observed[0, 1] == 100
def test_pad_cval_is_stochastic_parameter(self):
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode="constant",
pad_cval=iap.Choice([50, 100]),
keep_size=False)
image = np.zeros((1, 1), dtype=np.uint8)
seen = [0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(image)
if observed[0, 1] == 50:
seen[0] += 1
elif observed[0, 1] == 100:
seen[1] += 1
else:
assert False
assert np.all([100 - 50 < v < 100 + 50 for v in seen])
def test_pad_cval_is_tuple(self):
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode="constant",
pad_cval=(50, 52),
keep_size=False)
image = np.zeros((1, 1), dtype=np.uint8)
seen = [0, 0, 0]
for _ in sm.xrange(300):
observed = aug.augment_image(image)
if observed[0, 1] == 50:
seen[0] += 1
elif observed[0, 1] == 51:
seen[1] += 1
elif observed[0, 1] == 52:
seen[2] += 1
else:
assert False
assert np.all([100 - 50 < v < 100 + 50 for v in seen])
def test_invalid_pad_cval_datatype_leads_to_failure(self):
got_exception = False
try:
_aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode="constant",
pad_cval="test",
keep_size=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_pad_heatmaps_with_cval_set(self):
# pad cvals, heatmaps (should always use cval 0)
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode="constant",
pad_cval=255,
keep_size=False)
heatmaps_arr = np.zeros((3, 3, 1), dtype=np.float32)
heatmaps = HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
observed = aug.augment_heatmaps([heatmaps])[0]
assert np.sum(observed.get_arr() > 1e-4) == 0
def test_pad_segmaps_with_cval_set(self):
# pad cvals, segmaps (should always use cval 0)
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode="constant",
pad_cval=255,
keep_size=False)
segmaps_arr = np.zeros((3, 3, 1), dtype=np.int32)
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))
observed = aug.augment_segmentation_maps([segmaps])[0]
assert np.sum(observed.get_arr() > 0) == 0
def test_pad_all_sides_by_100_percent_without_keep_size(self):
aug = iaa.Pad(percent=1.0, keep_size=False)
image = np.zeros((4, 4), dtype=np.uint8) + 1
observed = aug.augment_image(image)
assert observed.shape == (4+4+4, 4+4+4)
assert np.sum(observed[4:-4, 4:-4]) == 4*4
assert np.sum(observed) == 4*4
def test_pad_all_sides_by_stochastic_param_without_keep_size(self):
aug = iaa.Pad(percent=iap.Deterministic(1.0), keep_size=False)
image = np.zeros((4, 4), dtype=np.uint8) + 1
observed = aug.augment_image(image)
assert observed.shape == (4+4+4, 4+4+4)
assert np.sum(observed[4:-4, 4:-4]) == 4*4
assert np.sum(observed) == 4*4
def test_pad_by_tuple_of_two_floats_dont_sample_independently_noks(self):
aug = iaa.Pad(percent=(1.0, 2.0),
sample_independently=False,
keep_size=False)
image = np.zeros((4, 4), dtype=np.uint8) + 1
observed = aug.augment_image(image)
assert np.sum(observed) == 4*4
assert (observed.shape[0] - 4) % 2 == 0
assert (observed.shape[1] - 4) % 2 == 0
def test_bad_datatype_for_percent_leads_to_failure_without_keep_size(self):
got_exception = False
try:
_ = iaa.Pad(percent="test", keep_size=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_pad_each_side_by_100_percent_without_keep_size(self):
image = np.zeros((4, 4), dtype=np.uint8)
image[0, 0] = 255
image[3, 0] = 255
image[0, 3] = 255
image[3, 3] = 255
height, width = image.shape[0:2]
pads = [
(1.0, 0, 0, 0),
(0, 1.0, 0, 0),
(0, 0, 1.0, 0),
(0, 0, 0, 1.0),
]
for pad in pads:
with self.subTest(pad=pad):
top, right, bottom, left = pad
top_px = int(top * height)
right_px = int(right * width)
bottom_px = int(bottom * height)
left_px = int(left * width)
aug = iaa.Pad(percent=pad, keep_size=False)
image_padded = np.pad(
image,
((top_px, bottom_px), (left_px, right_px)),
mode="constant",
constant_values=0)
observed = aug.augment_image(image)
assert np.array_equal(observed, image_padded)
def test_pad_keypoints_each_side_by_100_percent_without_keep_size(self):
image = np.zeros((4, 4), dtype=np.uint8)
image[0, 0] = 255
image[3, 0] = 255
image[0, 3] = 255
image[3, 3] = 255
height, width = image.shape[0:2]
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=3),
ia.Keypoint(x=3, y=3)]
keypoints = [ia.KeypointsOnImage(kps, shape=(4, 4))]
pads = [
(1.0, 0, 0, 0),
(0, 1.0, 0, 0),
(0, 0, 1.0, 0),
(0, 0, 0, 1.0),
]
for pad in pads:
with self.subTest(pad=pad):
top, right, bottom, left = pad
top_px = int(top * height)
left_px = int(left * width)
aug = iaa.Pad(percent=pad, keep_size=False)
keypoints_moved = [keypoints[0].shift(x=left_px, y=top_px)]
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_moved)
def test_pad_heatmaps_smaller_than_img_by_floats_without_keep_size(self):
# pad smaller heatmaps
# heatmap is (6, 4), image is (6, 16)
# image is padded by (0.5, 0.25, 0.5, 0.25)
# expected image size: (12, 24)
# expected heatmap size: (12, 6)
aug = iaa.Pad(percent=(0.5, 0.25, 0.5, 0.25), keep_size=False)
heatmaps_arr_small = np.float32([
[0, 0, 0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 0, 0, 0]
])
top, bottom, left, right = 3, 3, 1, 1
heatmaps_arr_small_padded = np.pad(
heatmaps_arr_small,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
heatmaps = [ia.HeatmapsOnImage(heatmaps_arr_small, shape=(6, 16))]
observed = aug.augment_heatmaps(heatmaps)[0]
assert observed.shape == (12, 24)
assert observed.arr_0to1.shape == (12, 6, 1)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.arr_0to1[..., 0], heatmaps_arr_small_padded)
def test_pad_segmaps_smaller_than_img_by_floats_without_keep_size(self):
aug = iaa.Pad(percent=(0.5, 0.25, 0.5, 0.25), keep_size=False)
segmaps_arr_small = np.int32([
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]
])
top, bottom, left, right = 3, 3, 1, 1
segmaps_arr_small_padded = np.pad(
segmaps_arr_small,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
segmaps = [SegmentationMapsOnImage(segmaps_arr_small, shape=(6, 16))]
observed = aug.augment_segmentation_maps(segmaps)[0]
assert observed.shape == (12, 24)
assert observed.arr.shape == (12, 6, 1)
assert np.array_equal(observed.arr[..., 0], segmaps_arr_small_padded)
def test_pad_heatmaps_smaller_than_img_by_floats_with_keep_size(self):
# pad smaller heatmaps, with keep_size=True
# heatmap is (6, 4), image is (6, 16)
# image is padded by (0.5, 0.25, 0.5, 0.25)
# expected image size: (12, 24) -> (6, 16) after resize
# expected heatmap size: (12, 6) -> (6, 4) after resize
aug = iaa.Pad(percent=(0.5, 0.25, 0.5, 0.25), keep_size=True)
heatmaps_arr_small = np.float32([
[0, 0, 0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 1.0, 1.0, 0],
[0, 0, 0, 0]
])
top, bottom, left, right = 3, 3, 1, 1
heatmaps_arr_small_padded = np.pad(
heatmaps_arr_small,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
heatmaps = [ia.HeatmapsOnImage(heatmaps_arr_small, shape=(6, 16))]
observed = aug.augment_heatmaps(heatmaps)[0]
assert observed.shape == (6, 16)
assert observed.arr_0to1.shape == (6, 4, 1)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(
observed.arr_0to1[..., 0],
np.clip(
ia.imresize_single_image(
heatmaps_arr_small_padded, (6, 4), interpolation="cubic"),
0, 1.0
)
)
def test_pad_segmaps_smaller_than_img_by_floats_with_keep_size(self):
aug = iaa.Pad(percent=(0.5, 0.25, 0.5, 0.25), keep_size=True)
segmaps_arr_small = np.int32([
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]
])
top, bottom, left, right = 3, 3, 1, 1
segmaps_arr_small_padded = np.pad(
segmaps_arr_small,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
segmaps = [SegmentationMapsOnImage(segmaps_arr_small, shape=(6, 16))]
observed = aug.augment_segmentation_maps(segmaps)[0]
assert observed.shape == (6, 16)
assert observed.arr.shape == (6, 4, 1)
assert np.array_equal(
observed.arr[..., 0],
ia.imresize_single_image(
segmaps_arr_small_padded, (6, 4), interpolation="nearest")
)
def test_pad_keypoints_by_floats_without_keep_size(self):
aug = iaa.Pad(percent=(0.5, 0, 1.0, 1.0), keep_size=False)
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=0)]
kpsoi = ia.KeypointsOnImage(kps, shape=(4, 4, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (10, 8, 3)
assert len(kpsoi_aug.keypoints) == 2
assert np.allclose(kpsoi_aug.keypoints[0].x, 4+1)
assert np.allclose(kpsoi_aug.keypoints[0].y, 2+2)
assert np.allclose(kpsoi_aug.keypoints[1].x, 4+3)
assert np.allclose(kpsoi_aug.keypoints[1].y, 2+0)
def test_pad_keypoints_by_floats_with_keep_size(self):
aug = iaa.Pad(percent=(0.5, 0, 1.0, 1.0), keep_size=True)
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=0)]
kpsoi = ia.KeypointsOnImage(kps, shape=(4, 4, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (4, 4, 3)
assert len(kpsoi_aug.keypoints) == 2
assert np.allclose(kpsoi_aug.keypoints[0].x, ((4+1)/8)*4)
assert np.allclose(kpsoi_aug.keypoints[0].y, ((2+2)/10)*4)
assert np.allclose(kpsoi_aug.keypoints[1].x, ((4+3)/8)*4)
assert np.allclose(kpsoi_aug.keypoints[1].y, ((2+0)/10)*4)
def test_pad_polygons_by_floats_without_keep_size(self):
aug = iaa.Pad(percent=(0.5, 0, 1.0, 1.0), keep_size=False)
psoi = ia.PolygonsOnImage([
ia.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
ia.Polygon([(1, 1), (5, 1), (5, 5), (1, 5)])
], shape=(4, 4, 3))
psoi_aug = aug.augment_polygons([psoi, psoi])
assert len(psoi_aug) == 2
for psoi_aug_i in psoi_aug:
assert psoi_aug_i.shape == (10, 8, 3)
assert len(psoi_aug_i.polygons) == 2
assert psoi_aug_i.polygons[0].exterior_almost_equals(
ia.Polygon([(4, 2), (8, 2), (8, 6), (4, 6)])
)
assert psoi_aug_i.polygons[1].exterior_almost_equals(
ia.Polygon([(5, 3), (9, 3), (9, 7), (5, 7)])
)
def test_pad_polygons_by_floats_with_keep_size(self):
# polygons, with keep_size=True
aug = iaa.Pad(percent=(0.5, 0, 1.0, 1.0), keep_size=True)
psoi = ia.PolygonsOnImage([
ia.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
ia.Polygon([(1, 1), (5, 1), (5, 5), (1, 5)])
], shape=(4, 4, 3))
psoi_aug = aug.augment_polygons([psoi, psoi])
assert len(psoi_aug) == 2
for psoi_aug_i in psoi_aug:
assert psoi_aug_i.shape == (4, 4, 3)
assert len(psoi_aug_i.polygons) == 2
assert psoi_aug_i.polygons[0].exterior_almost_equals(
ia.Polygon([(4*(4/8), 4*(2/10)),
(4*(8/8), 4*(2/10)),
(4*(8/8), 4*(6/10)),
(4*(4/8), 4*(6/10))])
)
assert psoi_aug_i.polygons[1].exterior_almost_equals(
ia.Polygon([(4*(5/8), 4*(3/10)),
(4*(9/8), 4*(3/10)),
(4*(9/8), 4*(7/10)),
(4*(5/8), 4*(7/10))])
)
def test_pad_by_tuple_of_floats_at_top_side_without_keep_size(self):
# test pad by range of percentages
aug = iaa.Pad(percent=((0, 1.0), 0, 0, 0), keep_size=False)
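        # count how often each possible amount of top padding (0 to 4 px for
        # a 4px high image padded by 0-100%) was sampled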
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(
np.zeros((4, 4), dtype=np.uint8) + 255)
n_padded = 0
while np.all(observed[0, :] == 0):
n_padded += 1
observed = observed[1:, :]
seen[n_padded] += 1
        # note that we can't just check for 100-50 < x < 100+50 here. The
        # first and last value (0px and 4px of padding) have half the
        # probability of occurring compared to the other values. E.g. 0px is
        # padded if the sampled p falls in the range [0, 0.125), while 1px is
        # padded if it falls in [0.125, 0.375).
assert np.all([v > 30 for v in seen])
def test_pad_by_tuple_of_floats_at_right_side_without_keep_size(self):
aug = iaa.Pad(percent=(0, (0, 1.0), 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(np.zeros((4, 4), dtype=np.uint8) + 255)
n_padded = 0
while np.all(observed[:, -1] == 0):
n_padded += 1
observed = observed[:, 0:-1]
seen[n_padded] += 1
assert np.all([v > 30 for v in seen])
def test_pad_by_list_of_floats_at_top_side_without_keep_size(self):
aug = iaa.Pad(percent=([0.0, 1.0], 0, 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(
np.zeros((4, 4), dtype=np.uint8) + 255)
n_padded = 0
while np.all(observed[0, :] == 0):
n_padded += 1
observed = observed[1:, :]
seen[n_padded] += 1
assert 250 - 50 < seen[0] < 250 + 50
assert seen[1] == 0
assert seen[2] == 0
assert seen[3] == 0
assert 250 - 50 < seen[4] < 250 + 50
def test_pad_by_list_of_floats_at_right_side_without_keep_size(self):
aug = iaa.Pad(percent=(0, [0.0, 1.0], 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(
np.zeros((4, 4), dtype=np.uint8) + 255)
n_padded = 0
while np.all(observed[:, -1] == 0):
n_padded += 1
observed = observed[:, 0:-1]
seen[n_padded] += 1
assert 250 - 50 < seen[0] < 250 + 50
assert seen[1] == 0
assert seen[2] == 0
assert seen[3] == 0
assert 250 - 50 < seen[4] < 250 + 50
def test_pad_other_dtypes_bool_by_int_without_keep_size(self):
aug = iaa.Pad(px=(1, 0, 0, 0), keep_size=False)
mask = np.zeros((4, 3), dtype=bool)
mask[2, 1] = True
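        # mask marks where the original True pixel (1, 1) ends up after
        # padding one pixel at the top, i.e. at (2, 1) in the (4, 3) output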
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert image_aug.shape == (4, 3)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == 1)
def test_pad_other_dtypes_uint_int_by_int_without_keep_size(self):
aug = iaa.Pad(px=(1, 0, 0, 0), keep_size=False)
mask = np.zeros((4, 3), dtype=bool)
mask[2, 1] = True
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [
1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [
1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == (4, 3)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == value)
def test_pad_other_dtypes_float_by_int_without_keep_size(self):
aug = iaa.Pad(px=(1, 0, 0, 0), keep_size=False)
mask = np.zeros((4, 3), dtype=bool)
mask[2, 1] = True
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
                    atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert image_aug.shape == (4, 3)
assert np.all(_isclose(image_aug[~mask], 0))
assert np.all(_isclose(image_aug[mask],
np.float128(value)))
class TestCrop(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
base_img = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
return base_img
@property
def images(self):
return np.array([self.image])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
kpsoi = [ia.KeypointsOnImage(kps, shape=self.image.shape)]
return kpsoi
@property
def heatmaps(self):
heatmaps_arr = np.float32([[0, 0, 0],
[0, 1.0, 0],
[0, 0, 0]])
return [ia.HeatmapsOnImage(heatmaps_arr, shape=self.image.shape)]
@property
def segmaps(self):
segmaps_arr = np.int32([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
return [ia.SegmentationMapsOnImage(segmaps_arr, shape=self.image.shape)]
def test_crop_by_fixed_int_on_each_side_on_its_own(self):
# test crop by 1 pixel on each side
crops = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for crop in crops:
with self.subTest(px=crop):
aug = iaa.Crop(px=crop, keep_size=False)
top, right, bottom, left = crop
height, width = self.image.shape[0:2]
base_img_cropped = self.image[top:height-bottom,
left:width-right,
:]
observed = aug.augment_images(self.images)
assert np.array_equal(observed, np.array([base_img_cropped]))
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [base_img_cropped])
keypoints_moved = [self.kpsoi[0].shift(x=-left, y=-top)]
observed = aug.augment_keypoints(self.kpsoi)
assert keypoints_equal(observed, keypoints_moved)
heatmaps_arr = self.heatmaps[0].get_arr()
height, width = heatmaps_arr.shape[0:2]
heatmaps_arr_cropped = heatmaps_arr[top:height-bottom,
left:width-right]
observed = aug.augment_heatmaps(self.heatmaps)[0]
assert observed.shape == base_img_cropped.shape
assert np.array_equal(observed.get_arr(), heatmaps_arr_cropped)
segmaps_arr = self.segmaps[0].get_arr()
height, width = segmaps_arr.shape[0:2]
segmaps_arr_cropped = segmaps_arr[top:height-bottom,
left:width-right]
observed = aug.augment_segmentation_maps(self.segmaps)[0]
assert observed.shape == base_img_cropped.shape
assert np.array_equal(observed.get_arr(), segmaps_arr_cropped)
def test_crop_by_tuple_of_ints_on_each_side_on_its_own(self):
def _to_range_tuple(val):
return val if isinstance(val, tuple) else (val, val)
crops = [
((0, 2), 0, 0, 0),
(0, (0, 2), 0, 0),
(0, 0, (0, 2), 0),
(0, 0, 0, (0, 2)),
]
for crop in crops:
with self.subTest(px=crop):
aug = iaa.Crop(px=crop, keep_size=False)
aug_det = aug.to_deterministic()
top, right, bottom, left = crop
height, width = self.image.shape[0:2]
top_range = _to_range_tuple(top)
right_range = _to_range_tuple(right)
bottom_range = _to_range_tuple(bottom)
left_range = _to_range_tuple(left)
top_values = sm.xrange(top_range[0], top_range[1]+1)
right_values = sm.xrange(right_range[0], right_range[1]+1)
bottom_values = sm.xrange(bottom_range[0], bottom_range[1]+1)
left_values = sm.xrange(left_range[0], left_range[1]+1)
images_cropped = []
keypoints_cropped = []
for top_val in top_values:
for right_val in right_values:
for bottom_val in bottom_values:
for left_val in left_values:
images_cropped.append(
self.image[top_val:height-bottom_val,
left_val:width-right_val,
:]
)
keypoints_cropped.append(
self.kpsoi[0].shift(
x=-left_val, y=-top_val)
)
movements = []
movements_det = []
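                # sample repeatedly: the stochastic augmenter should produce
                # several different crops, while its deterministic copy must
                # always produce the same one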
for i in sm.xrange(100):
observed = aug.augment_images(self.images)
matches = [
(1
if np.array_equal(observed,
np.array([base_img_cropped]))
else 0)
for base_img_cropped
in images_cropped]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(self.images)
matches = [
(1
if np.array_equal(observed,
np.array([base_img_cropped]))
else 0)
for base_img_cropped
in images_cropped]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images([self.image])
assert any([array_equal_lists(observed, [base_img_cropped])
for base_img_cropped
in images_cropped])
observed = aug.augment_keypoints(self.kpsoi)
assert any([keypoints_equal(observed, [kp])
for kp
in keypoints_cropped])
assert len(set(movements)) == 3
assert len(set(movements_det)) == 1
def test_crop_by_list_of_ints_on_each_side_on_its_own(self):
# test crop by list of exact pixel values
crops = [
([0, 2], 0, 0, 0),
(0, [0, 2], 0, 0),
(0, 0, [0, 2], 0),
(0, 0, 0, [0, 2]),
]
for crop in crops:
with self.subTest(px=crop):
aug = iaa.Crop(px=crop, keep_size=False)
aug_det = aug.to_deterministic()
top, right, bottom, left = crop
height, width = self.image.shape[0:2]
top_range = top if isinstance(top, list) else [top]
right_range = right if isinstance(right, list) else [right]
bottom_range = bottom if isinstance(bottom, list) else [bottom]
left_range = left if isinstance(left, list) else [left]
images_cropped = []
keypoints_cropped = []
for top_val in top_range:
for right_val in right_range:
for bottom_val in bottom_range:
for left_val in left_range:
images_cropped.append(
self.image[top_val:height-bottom_val,
left_val:width-right_val,
:]
)
keypoints_cropped.append(
self.kpsoi[0].shift(
x=-left_val, y=-top_val)
)
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(self.images)
matches = [
(1
if np.array_equal(observed,
np.array([base_img_cropped]))
else 0)
for base_img_cropped
in images_cropped]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(self.images)
matches = [
(1
if np.array_equal(observed,
np.array([base_img_cropped]))
else 0)
for base_img_cropped in images_cropped]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images([self.image])
assert any([array_equal_lists(observed, [base_img_cropped])
for base_img_cropped
in images_cropped])
observed = aug.augment_keypoints(self.kpsoi)
assert any([keypoints_equal(observed, [kp])
for kp
in keypoints_cropped])
assert len(set(movements)) == 2
assert len(set(movements_det)) == 1
def test_crop_heatmaps_smaller_than_img_by_fixed_ints_without_ks(self):
# crop smaller heatmaps
# heatmap is (6, 8), image is (6, 16)
# image is cropped by (1, 4, 1, 4)
# expected image size: (4, 8)
# expected heatmap size: (4, 4)
aug = iaa.Crop(px=(1, 4, 1, 4), keep_size=False)
heatmaps_arr_small = np.zeros((6, 8), dtype=np.float32)
heatmaps_arr_small[1:-1, 1:-1] = 1.0
heatmaps = HeatmapsOnImage(heatmaps_arr_small, shape=(6, 16))
top, bottom, left, right = 1, 1, 2, 2
heatmaps_arr_small_cropped = \
heatmaps_arr_small[top:-bottom, left:-right]
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (4, 8)
assert observed.arr_0to1.shape == (4, 4, 1)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.arr_0to1[..., 0],
heatmaps_arr_small_cropped)
def test_crop_segmaps_smaller_than_img_by_fixed_ints_without_ks(self):
aug = iaa.Crop(px=(1, 4, 1, 4), keep_size=False)
segmaps_arr_small = np.zeros((6, 8), dtype=np.int32)
segmaps_arr_small[1:-1, 1:-1] = 1
segmaps = SegmentationMapsOnImage(segmaps_arr_small, shape=(6, 16))
top, bottom, left, right = 1, 1, 2, 2
segmaps_arr_small_cropped = segmaps_arr_small[top:-bottom, left:-right]
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == (4, 8)
assert observed.arr.shape == (4, 4, 1)
assert np.array_equal(observed.arr[..., 0], segmaps_arr_small_cropped)
def test_crop_heatmaps_smaller_than_img_by_fixed_ints_with_ks(self):
# crop smaller heatmaps, with keep_size=True
# heatmap is (6, 8), image is (6, 16)
# image is cropped by (1, 4, 1, 4)
# expected image size: (4, 8) -> (6, 16) after resize
# expected heatmap size: (4, 4) -> (6, 4) after resize
aug = iaa.Crop(px=(1, 4, 1, 4), keep_size=True)
heatmaps_arr_small = np.zeros((6, 8), dtype=np.float32)
heatmaps_arr_small[1:-1, 1:-1] = 1.0
heatmaps = HeatmapsOnImage(heatmaps_arr_small, shape=(6, 16))
top, bottom, left, right = 1, 1, 2, 2
heatmaps_arr_small_cropped = \
heatmaps_arr_small[top:-bottom, left:-right]
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (6, 16)
assert observed.arr_0to1.shape == (6, 8, 1)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(
observed.arr_0to1[..., 0],
np.clip(
ia.imresize_single_image(
heatmaps_arr_small_cropped,
(6, 8),
interpolation="cubic"),
0,
1.0
)
)
def test_crop_segmaps_smaller_than_img_by_fixed_ints_with_ks(self):
aug = iaa.Crop(px=(1, 4, 1, 4), keep_size=True)
segmaps_arr_small = np.zeros((6, 8), dtype=np.int32)
segmaps_arr_small[1:-1, 1:-1] = 1
segmaps = SegmentationMapsOnImage(segmaps_arr_small, shape=(6, 16))
top, bottom, left, right = 1, 1, 2, 2
segmaps_arr_small_cropped = segmaps_arr_small[top:-bottom, left:-right]
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == (6, 16)
assert observed.arr.shape == (6, 8, 1)
assert np.array_equal(
observed.arr[..., 0],
ia.imresize_single_image(
segmaps_arr_small_cropped,
(6, 8),
interpolation="nearest"),
)
def test_crop_keypoints_by_fixed_ints_without_keep_size(self):
aug = iaa.Crop((1, 0, 4, 4), keep_size=False)
kps = [ia.Keypoint(x=3, y=6), ia.Keypoint(x=8, y=5)]
kpsoi = ia.KeypointsOnImage(kps, shape=(14, 14, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (9, 10, 3)
assert len(kpsoi_aug.keypoints) == 2
assert np.allclose(kpsoi_aug.keypoints[0].x, 3-4)
assert np.allclose(kpsoi_aug.keypoints[0].y, 6-1)
assert np.allclose(kpsoi_aug.keypoints[1].x, 8-4)
assert np.allclose(kpsoi_aug.keypoints[1].y, 5-1)
def test_crop_keypoints_by_fixed_ints_with_keep_size(self):
aug = iaa.Crop((1, 0, 4, 4), keep_size=True)
kps = [ia.Keypoint(x=3, y=6), ia.Keypoint(x=8, y=5)]
kpsoi = ia.KeypointsOnImage(kps, shape=(14, 14, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (14, 14, 3)
assert len(kpsoi_aug.keypoints) == 2
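        # the cropped image would be (9, 10): 1px top, 4px bottom, 4px left
        # removed; keep_size=True resizes back to (14, 14), so keypoints are
        # shifted by the left/top crop and then scaled by 14/10 (x), 14/9 (y)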
assert np.allclose(kpsoi_aug.keypoints[0].x, ((3-4)/10)*14)
assert np.allclose(kpsoi_aug.keypoints[0].y, ((6-1)/9)*14)
assert np.allclose(kpsoi_aug.keypoints[1].x, ((8-4)/10)*14)
assert np.allclose(kpsoi_aug.keypoints[1].y, ((5-1)/9)*14)
def test_crop_polygons_by_fixed_ints_without_keep_size(self):
aug = iaa.Crop((1, 0, 4, 4), keep_size=False)
polygons = [ia.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
ia.Polygon([(1, 1), (5, 1), (5, 5), (1, 5)])]
psoi = ia.PolygonsOnImage(polygons, shape=(10, 10, 3))
psoi_aug = aug.augment_polygons([psoi, psoi])
assert len(psoi_aug) == 2
for psoi_aug_i in psoi_aug:
assert psoi_aug_i.shape == (5, 6, 3)
assert len(psoi_aug_i.polygons) == 2
assert psoi_aug_i.polygons[0].exterior_almost_equals(
ia.Polygon([(0-4, 0-1), (4-4, 0-1), (4-4, 4-1), (0-4, 4-1)])
)
assert psoi_aug_i.polygons[1].exterior_almost_equals(
ia.Polygon([(1-4, 1-1), (5-4, 1-1), (5-4, 5-1), (1-4, 5-1)])
)
def test_crop_polygons_by_fixed_ints_with_keep_size(self):
aug = iaa.Crop((1, 0, 4, 4), keep_size=True)
polygons = [ia.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
ia.Polygon([(1, 1), (5, 1), (5, 5), (1, 5)])]
psoi = ia.PolygonsOnImage(polygons, shape=(10, 10, 3))
psoi_aug = aug.augment_polygons([psoi, psoi])
assert len(psoi_aug) == 2
for psoi_aug_i in psoi_aug:
assert psoi_aug_i.shape == (10, 10, 3)
assert len(psoi_aug_i.polygons) == 2
assert psoi_aug_i.polygons[0].exterior_almost_equals(
ia.Polygon([(10*(-4/6), 10*(-1/5)),
(10*(0/6), 10*(-1/5)),
(10*(0/6), 10*(3/5)),
(10*(-4/6), 10*(3/5))])
)
assert psoi_aug_i.polygons[1].exterior_almost_equals(
ia.Polygon([(10*(-3/6), 10*(0/5)),
(10*(1/6), 10*(0/5)),
(10*(1/6), 10*(4/5)),
(10*(-3/6), 10*(4/5))])
)
def test_crop_by_one_fixed_float_without_keep_size(self):
aug = iaa.Crop(percent=0.1, keep_size=False)
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.shape == (40, 40)
assert np.all(observed == image[5:-5, 5:-5])
def test_crop_by_stochastic_parameter_without_keep_size(self):
aug = iaa.Crop(percent=iap.Deterministic(0.1), keep_size=False)
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.shape == (40, 40)
assert np.all(observed == image[5:-5, 5:-5])
def test_crop_by_tuple_of_two_floats_without_keep_size(self):
aug = iaa.Crop(percent=(0.1, 0.2), keep_size=False)
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
observed = aug.augment_image(image)
assert 30 <= observed.shape[0] <= 40
assert 30 <= observed.shape[1] <= 40
def test_invalid_datatype_for_percent_parameter_fails(self):
got_exception = False
try:
_ = iaa.Crop(percent="test", keep_size=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_crop_by_fixed_float_on_each_side_on_its_own(self):
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
height, width = image.shape[0:2]
crops = [
(0.1, 0, 0, 0),
(0, 0.1, 0, 0),
(0, 0, 0.1, 0),
(0, 0, 0, 0.1),
]
for crop in crops:
with self.subTest(percent=crop):
aug = iaa.Crop(percent=crop, keep_size=False)
top, right, bottom, left = crop
top_px = int(round(top * height))
right_px = int(round(right * width))
bottom_px = int(round(bottom * height))
left_px = int(round(left * width))
                # don't use :-bottom_px and :-right_px here, because these
                # values can be 0
image_cropped = image[top_px:50-bottom_px, left_px:50-right_px]
observed = aug.augment_image(image)
assert np.array_equal(observed, image_cropped)
def test_crop_keypoints_by_fixed_float_on_each_side_on_its_own(self):
height, width = (50, 50)
kps = [ia.Keypoint(x=10, y=11), ia.Keypoint(x=20, y=21),
ia.Keypoint(x=30, y=31)]
keypoints = [ia.KeypointsOnImage(kps, shape=(height, width))]
crops = [
(0.1, 0, 0, 0),
(0, 0.1, 0, 0),
(0, 0, 0.1, 0),
(0, 0, 0, 0.1),
]
for crop in crops:
with self.subTest(percent=crop):
aug = iaa.Crop(percent=crop, keep_size=False)
top, right, bottom, left = crop
top_px = int(round(top * height))
left_px = int(round(left * width))
keypoints_moved = [keypoints[0].shift(x=-left_px, y=-top_px)]
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_moved)
def test_crop_heatmaps_smaller_than_img_by_fixed_floats_without_ks(self):
# crop smaller heatmaps
# heatmap is (8, 12), image is (16, 32)
# image is cropped by (0.25, 0.25, 0.25, 0.25)
# expected image size: (8, 16)
# expected heatmap size: (4, 6)
aug = iaa.Crop(percent=(0.25, 0.25, 0.25, 0.25), keep_size=False)
heatmaps_arr_small = np.zeros((8, 12), dtype=np.float32)
heatmaps_arr_small[2:-2, 4:-4] = 1.0
heatmaps = ia.HeatmapsOnImage(heatmaps_arr_small, shape=(16, 32))
top, bottom, left, right = 2, 2, 3, 3
heatmaps_arr_small_cropped = \
heatmaps_arr_small[top:-bottom, left:-right]
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (8, 16)
assert observed.arr_0to1.shape == (4, 6, 1)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.arr_0to1[..., 0], heatmaps_arr_small_cropped)
def test_crop_segmaps_smaller_than_img_by_fixed_floats_without_ks(self):
aug = iaa.Crop(percent=(0.25, 0.25, 0.25, 0.25), keep_size=False)
segmaps_arr_small = np.zeros((8, 12), dtype=np.int32)
segmaps_arr_small[2:-2, 4:-4] = 1
segmaps = SegmentationMapsOnImage(segmaps_arr_small, shape=(16, 32))
top, bottom, left, right = 2, 2, 3, 3
segmaps_arr_small_cropped = segmaps_arr_small[top:-bottom, left:-right]
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == (8, 16)
assert observed.arr.shape == (4, 6, 1)
assert np.array_equal(observed.arr[..., 0], segmaps_arr_small_cropped)
def test_crop_heatmaps_smaller_than_img_by_fixed_floats_with_ks(self):
# crop smaller heatmaps, with keep_size=True
# heatmap is (8, 12), image is (16, 32)
# image is cropped by (0.25, 0.25, 0.25, 0.25)
# expected image size: (8, 16) -> (16, 32) after resize
# expected heatmap size: (4, 6) -> (8, 12) after resize
aug = iaa.Crop(percent=(0.25, 0.25, 0.25, 0.25), keep_size=True)
heatmaps_arr_small = np.zeros((8, 12), dtype=np.float32)
heatmaps_arr_small[2:-2, 4:-4] = 1.0
heatmaps = ia.HeatmapsOnImage(heatmaps_arr_small, shape=(16, 32))
top, bottom, left, right = 2, 2, 3, 3
heatmaps_arr_small_cropped = \
heatmaps_arr_small[top:-bottom, left:-right]
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (16, 32)
assert observed.arr_0to1.shape == (8, 12, 1)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(
observed.arr_0to1[..., 0],
np.clip(
ia.imresize_single_image(
heatmaps_arr_small_cropped,
(8, 12),
interpolation="cubic"),
0,
1.0
)
)
def test_crop_segmaps_smaller_than_img_by_fixed_floats_with_ks(self):
aug = iaa.Crop(percent=(0.25, 0.25, 0.25, 0.25), keep_size=True)
segmaps_arr_small = np.zeros((8, 12), dtype=np.int32)
segmaps_arr_small[2:-2, 4:-4] = 1
segmaps = SegmentationMapsOnImage(segmaps_arr_small, shape=(16, 32))
top, bottom, left, right = 2, 2, 3, 3
segmaps_arr_small_cropped = segmaps_arr_small[top:-bottom, left:-right]
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == (16, 32)
assert observed.arr.shape == (8, 12, 1)
assert np.allclose(
observed.arr[..., 0],
ia.imresize_single_image(
segmaps_arr_small_cropped,
(8, 12),
interpolation="nearest")
)
def test_crop_keypoints_by_fixed_floats_without_keep_size(self):
aug = iaa.Crop(percent=(0.25, 0, 0.5, 0.1), keep_size=False)
kps = [ia.Keypoint(x=12, y=10), ia.Keypoint(x=8, y=12)]
kpsoi = ia.KeypointsOnImage(kps, shape=(16, 20, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (4, 18, 3)
assert len(kpsoi_aug.keypoints) == 2
assert np.allclose(kpsoi_aug.keypoints[0].x, 12-2)
assert np.allclose(kpsoi_aug.keypoints[0].y, 10-4)
assert np.allclose(kpsoi_aug.keypoints[1].x, 8-2)
assert np.allclose(kpsoi_aug.keypoints[1].y, 12-4)
def test_crop_keypoints_by_fixed_floats_with_keep_size(self):
aug = iaa.Crop(percent=(0.25, 0, 0.5, 0.1), keep_size=True)
kps = [ia.Keypoint(x=12, y=10), ia.Keypoint(x=8, y=12)]
kpsoi = ia.KeypointsOnImage(kps, shape=(16, 20, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (16, 20, 3)
assert len(kpsoi_aug.keypoints) == 2
assert np.allclose(kpsoi_aug.keypoints[0].x, ((12-2)/18)*20)
assert np.allclose(kpsoi_aug.keypoints[0].y, ((10-4)/4)*16)
assert np.allclose(kpsoi_aug.keypoints[1].x, ((8-2)/18)*20)
assert np.allclose(kpsoi_aug.keypoints[1].y, ((12-4)/4)*16)
def test_crop_polygons_by_fixed_floats_without_keep_size(self):
aug = iaa.Crop(percent=(0.2, 0, 0.5, 0.1), keep_size=False)
polygons = [ia.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
ia.Polygon([(1, 1), (5, 1), (5, 5), (1, 5)])]
psoi = ia.PolygonsOnImage(polygons, shape=(10, 10, 3))
psoi_aug = aug.augment_polygons([psoi, psoi])
assert len(psoi_aug) == 2
for psoi_aug_i in psoi_aug:
assert psoi_aug_i.shape == (3, 9, 3)
assert len(psoi_aug_i.polygons) == 2
assert psoi_aug_i.polygons[0].exterior_almost_equals(
ia.Polygon([(0-1, 0-2), (4-1, 0-2), (4-1, 4-2), (0-1, 4-2)])
)
assert psoi_aug_i.polygons[1].exterior_almost_equals(
ia.Polygon([(1-1, 1-2), (5-1, 1-2), (5-1, 5-2), (1-1, 5-2)])
)
def test_crop_polygons_by_fixed_floats_with_keep_size(self):
aug = iaa.Crop(percent=(0.2, 0, 0.5, 0.1), keep_size=True)
polygons = [ia.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
ia.Polygon([(1, 1), (5, 1), (5, 5), (1, 5)])]
psoi = ia.PolygonsOnImage(polygons, shape=(10, 10, 3))
psoi_aug = aug.augment_polygons([psoi, psoi])
assert len(psoi_aug) == 2
for psoi_aug_i in psoi_aug:
assert psoi_aug_i.shape == (10, 10, 3)
assert len(psoi_aug_i.polygons) == 2
assert psoi_aug_i.polygons[0].exterior_almost_equals(
ia.Polygon([(10*(-1/9), 10*(-2/3)),
(10*(3/9), 10*(-2/3)),
(10*(3/9), 10*(2/3)),
(10*(-1/9), 10*(2/3))])
)
assert psoi_aug_i.polygons[1].exterior_almost_equals(
ia.Polygon([(10*(0/9), 10*(-1/3)),
(10*(4/9), 10*(-1/3)),
(10*(4/9), 10*(3/3)),
(10*(0/9), 10*(3/3))])
)
def test_crop_by_tuple_of_floats_on_top_side_without_ks(self):
aug = iaa.Crop(percent=((0, 0.1), 0, 0, 0), keep_size=False)
image = np.zeros((40, 40), dtype=np.uint8)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(image)
n_cropped = 40 - observed.shape[0]
seen[n_cropped] += 1
        # note that we can't just check for 100-50 < x < 100+50 here. The
        # first and last value (0px and 4px of cropping) have half the
        # probability of occurring compared to the other values. E.g. 0px is
        # cropped if the sampled p falls in the range [0, 0.125), while 1px
        # is cropped if it falls in [0.125, 0.375).
assert np.all([v > 30 for v in seen])
def test_crop_by_tuple_of_floats_on_right_side_without_ks(self):
aug = iaa.Crop(percent=(0, (0, 0.1), 0, 0), keep_size=False)
image = np.zeros((40, 40), dtype=np.uint8) + 255
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(image)
n_cropped = 40 - observed.shape[1]
seen[n_cropped] += 1
assert np.all([v > 30 for v in seen])
def test_crop_by_list_of_floats_on_top_side_without_ks(self):
aug = iaa.Crop(percent=([0.0, 0.1], 0, 0, 0), keep_size=False)
image = np.zeros((40, 40), dtype=np.uint8) + 255
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(image)
n_cropped = 40 - observed.shape[0]
seen[n_cropped] += 1
assert 250 - 50 < seen[0] < 250 + 50
assert seen[1] == 0
assert seen[2] == 0
assert seen[3] == 0
assert 250 - 50 < seen[4] < 250 + 50
def test_crop_by_list_of_floats_on_right_side_without_ks(self):
aug = iaa.Crop(percent=(0, [0.0, 0.1], 0, 0), keep_size=False)
image = np.zeros((40, 40), dtype=np.uint8) + 255
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(image)
n_cropped = 40 - observed.shape[1]
seen[n_cropped] += 1
assert 250 - 50 < seen[0] < 250 + 50
assert seen[1] == 0
assert seen[2] == 0
assert seen[3] == 0
assert 250 - 50 < seen[4] < 250 + 50
def test_other_dtypes_bool(self):
aug = iaa.Crop(px=(1, 0, 0, 0), keep_size=False)
mask = np.zeros((2, 3), dtype=bool)
mask[0, 1] = True
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert image_aug.shape == (2, 3)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.Crop(px=(1, 0, 0, 0), keep_size=False)
mask = np.zeros((2, 3), dtype=bool)
mask[0, 1] = True
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [
1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [
1, 5, 10, 100, int(center_value), int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == (2, 3)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == value)
def test_other_dtypes_float(self):
aug = iaa.Crop(px=(1, 0, 0, 0), keep_size=False)
mask = np.zeros((2, 3), dtype=bool)
mask[0, 1] = True
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
                    atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert image_aug.shape == (2, 3)
assert np.all(_isclose(image_aug[~mask], 0))
assert np.all(_isclose(image_aug[mask],
np.float128(value)))
class TestPadToFixedSize(unittest.TestCase):
def setUp(self):
reseed()
def test_image2d_that_needs_to_be_padded_on_both_sides(self):
aug = iaa.PadToFixedSize(height=5, width=5)
image = np.uint8([[255]])
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 5)
def test_image3d_that_needs_to_be_padded_on_both_sides(self):
aug = iaa.PadToFixedSize(height=5, width=5)
image3d = np.atleast_3d(np.uint8([[255]]))
observed = aug.augment_image(image3d)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 5, 1)
def test_image3d_rgb_that_needs_to_be_padded_on_both_sides(self):
aug = iaa.PadToFixedSize(height=5, width=5)
image3d_rgb = np.tile(
np.atleast_3d(np.uint8([[255]])),
(1, 1, 3)
)
observed = aug.augment_image(image3d_rgb)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 5, 3)
# why does this exist when there is already a test for other float dtypes?
def test_image2d_with_other_dtypes(self):
aug = iaa.PadToFixedSize(height=5, width=5)
image = np.uint8([[255]])
for dtype in ["float32", "float64", "int32"]:
with self.subTest(dtype=dtype):
observed = aug.augment_image(image.astype(dtype))
assert observed.dtype.name == dtype
assert observed.shape == (5, 5)
def test_image_with_height_being_too_small(self):
aug = iaa.PadToFixedSize(height=5, width=5)
image = np.zeros((1, 5, 3), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 5, 3)
def test_image_with_width_being_too_small(self):
aug = iaa.PadToFixedSize(height=5, width=5)
image = np.zeros((5, 1, 3), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 5, 3)
def test_image_fullfills_exactly_min_shape(self):
# change no side when all sides have exactly desired size
aug = iaa.PadToFixedSize(height=5, width=5)
img5x5 = np.zeros((5, 5, 3), dtype=np.uint8)
img5x5[2, 2, :] = 255
observed = aug.augment_image(img5x5)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 5, 3)
assert np.array_equal(observed, img5x5)
def test_image_that_is_larger_than_min_shape(self):
# change no side when all sides have larger than desired size
aug = iaa.PadToFixedSize(height=5, width=5)
img6x6 = np.zeros((6, 6, 3), dtype=np.uint8)
img6x6[3, 3, :] = 255
observed = aug.augment_image(img6x6)
assert observed.dtype.name == "uint8"
assert observed.shape == (6, 6, 3)
assert np.array_equal(observed, img6x6)
def test_image_pad_mode(self):
# make sure that pad mode is recognized
aug = iaa.PadToFixedSize(height=4, width=4, pad_mode="edge")
aug.position = (iap.Deterministic(0.5), iap.Deterministic(0.5))
img2x2 = np.uint8([
[50, 100],
[150, 200]
])
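        # padding the 2x2 image by one pixel on every side with mode "edge"
        # replicates the nearest image pixel, so each input value fills a
        # 2x2 block of the 4x4 output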
observed = aug.augment_image(img2x2)
expected = np.uint8([
[50, 50, 100, 100],
[50, 50, 100, 100],
[150, 150, 200, 200],
[150, 150, 200, 200]
])
assert observed.dtype.name == "uint8"
assert observed.shape == (4, 4)
assert np.array_equal(observed, expected)
def test_image_pad_at_left_top(self):
# explicit non-center position test
aug = iaa.PadToFixedSize(
height=3, width=3, pad_mode="constant", pad_cval=128,
position="left-top")
img1x1 = np.uint8([[255]])
observed = aug.augment_image(img1x1)
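        # with position="left-top" the padding is added at the top and left,
        # so the original pixel ends up in the bottom-right corner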
expected = np.uint8([
[128, 128, 128],
[128, 128, 128],
[128, 128, 255]
])
assert observed.dtype.name == "uint8"
assert observed.shape == (3, 3)
assert np.array_equal(observed, expected)
def test_image_pad_at_right_bottom(self):
aug = iaa.PadToFixedSize(
height=3, width=3, pad_mode="constant", pad_cval=128,
position="right-bottom")
img1x1 = np.uint8([[255]])
observed = aug.augment_image(img1x1)
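        # with position="right-bottom" the padding is added at the bottom and
        # right, so the original pixel stays in the top-left corner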
expected = np.uint8([
[255, 128, 128],
[128, 128, 128],
[128, 128, 128]
])
assert observed.dtype.name == "uint8"
assert observed.shape == (3, 3)
assert np.array_equal(observed, expected)
def test_image_pad_at_bottom_center_given_as_tuple_of_floats(self):
aug = iaa.PadToFixedSize(
height=3, width=3, pad_mode="constant", pad_cval=128,
position=(0.5, 1.0))
img1x1 = np.uint8([[255]])
observed = aug.augment_image(img1x1)
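        # position=(0.5, 1.0) pads horizontally centered and entirely at the
        # bottom, leaving the original pixel at the top center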
expected = np.uint8([
[128, 255, 128],
[128, 128, 128],
[128, 128, 128]
])
assert observed.dtype.name == "uint8"
assert observed.shape == (3, 3)
assert np.array_equal(observed, expected)
def test_keypoints__image_already_fullfills_min_shape(self):
# keypoint test with shape not being changed
aug = iaa.PadToFixedSize(
height=3, width=3, pad_mode="edge", position="center")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
observed = aug.augment_keypoints([kpsoi])
expected = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
assert observed[0].shape == expected.shape
assert keypoints_equal(observed, [expected])
def test_keypoints_pad_at_center(self):
aug = iaa.PadToFixedSize(
height=4, width=4, pad_mode="edge", position="center")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
observed = aug.augment_keypoints([kpsoi])
expected = ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(4, 4))
assert observed[0].shape == expected.shape
assert keypoints_equal(observed, [expected])
def test_keypoints_pad_at_left_top(self):
# keypoint test with explicit non-center position
aug = iaa.PadToFixedSize(
height=4, width=4, pad_mode="edge", position="left-top")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
observed = aug.augment_keypoints([kpsoi])
expected = ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(4, 4))
assert observed[0].shape == expected.shape
assert keypoints_equal(observed, [expected])
def test_keypoints_pad_at_right_bottom(self):
aug = iaa.PadToFixedSize(
height=4, width=4, pad_mode="edge", position="right-bottom")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
observed = aug.augment_keypoints([kpsoi])
expected = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(4, 4))
assert observed[0].shape == expected.shape
assert keypoints_equal(observed, [expected])
def test_heatmaps__pad_mode_should_be_ignored(self):
# basic heatmaps test
# pad_mode should be ignored for heatmaps
aug = iaa.PadToFixedSize(
height=3, width=3, pad_mode="edge", position="center")
heatmaps_arr = np.zeros((1, 1, 1), dtype=np.float32) + 1.0
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(1, 1, 3))
observed = aug.augment_heatmaps([heatmaps])[0]
expected = np.float32([
[0, 0, 0],
[0, 1.0, 0],
[0, 0, 0]
])
expected = expected[..., np.newaxis]
assert observed.shape == (3, 3, 3)
assert np.allclose(observed.arr_0to1, expected)
def test_heatmaps_smaller_than_image__pad_mode_should_be_ignored(self):
# heatmaps with size unequal to image
# pad_mode should be ignored for heatmaps
aug = iaa.PadToFixedSize(
height=32, width=32, pad_mode="edge", position="left-top")
heatmaps_arr = np.zeros((15, 15, 1), dtype=np.float32) + 1.0
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(30, 30, 3))
observed = aug.augment_heatmaps([heatmaps])[0]
expected = np.zeros((16, 16, 1), dtype=np.float32) + 1.0
expected[:, 0, 0] = 0.0
expected[0, :, 0] = 0.0
assert observed.shape == (32, 32, 3)
assert np.allclose(observed.arr_0to1, expected)
def test_segmaps__pad_mode_should_be_ignored(self):
# basic segmaps test
# pad_mode should be ignored for segmaps
aug = iaa.PadToFixedSize(
height=3, width=3, pad_mode="edge", position="center")
segmaps_arr = np.ones((1, 1, 1), dtype=np.int32)
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(1, 1, 3))
observed = aug.augment_segmentation_maps([segmaps])[0]
expected = np.int32([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
])
expected = expected[..., np.newaxis]
assert observed.shape == (3, 3, 3)
assert np.array_equal(observed.arr, expected)
def test_segmaps_smaller_than_image__pad_mode_should_be_ignored(self):
# segmaps with size unequal to image
# pad_mode should be ignored for segmaps
aug = iaa.PadToFixedSize(
height=32, width=32, pad_mode="edge", position="left-top")
segmaps_arr = np.ones((15, 15, 1), dtype=np.int32)
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(30, 30, 3))
observed = aug.augment_segmentation_maps([segmaps])[0]
expected = np.ones((16, 16, 1), dtype=np.int32)
expected[:, 0, 0] = 0
expected[0, :, 0] = 0
assert observed.shape == (32, 32, 3)
assert np.array_equal(observed.arr, expected)
def test_other_dtypes_bool(self):
aug = iaa.PadToFixedSize(height=4, width=3, position="center-top")
mask = np.zeros((4, 3), dtype=bool)
mask[2, 1] = True
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert image_aug.shape == (4, 3)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.PadToFixedSize(height=4, width=3, position="center-top")
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
mask = np.zeros((4, 3), dtype=bool)
mask[2, 1] = True
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [
1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [
1, 5, 10, 100, int(center_value), int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == (4, 3)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == value)
def test_other_dtypes_float(self):
aug = iaa.PadToFixedSize(height=4, width=3, position="center-top")
dtypes = ["float16", "float32", "float64", "float128"]
mask = np.zeros((4, 3), dtype=bool)
mask[2, 1] = True
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == (4, 3)
assert np.all(_isclose(image_aug[~mask], 0))
assert np.all(_isclose(image_aug[mask],
np.float128(value)))
class TestCropToFixedSize(unittest.TestCase):
def setUp(self):
reseed()
def test_image2d_that_needs_to_be_cropped_on_both_sides(self):
aug = iaa.CropToFixedSize(height=1, width=1)
image = np.uint8([
[128, 129, 130],
[131, 132, 133],
[134, 135, 136]
])
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (1, 1)
def test_image3d_that_needs_to_be_cropped_on_both_sides(self):
aug = iaa.CropToFixedSize(height=1, width=1)
image = np.uint8([
[128, 129, 130],
[131, 132, 133],
[134, 135, 136]
])
image3d = np.atleast_3d(image)
observed = aug.augment_image(image3d)
assert observed.dtype.name == "uint8"
assert observed.shape == (1, 1, 1)
def test_image3d_rgb_that_needs_to_be_cropped_on_both_sides(self):
aug = iaa.CropToFixedSize(height=1, width=1)
image = np.uint8([
[128, 129, 130],
[131, 132, 133],
[134, 135, 136]
])
image3d_rgb = np.tile(
np.atleast_3d(image),
(1, 1, 3)
)
observed = aug.augment_image(image3d_rgb)
assert observed.dtype.name == "uint8"
assert observed.shape == (1, 1, 3)
def test_image2d_with_other_dtypes(self):
aug = iaa.CropToFixedSize(height=1, width=1)
image = np.uint8([
[128, 129, 130],
[131, 132, 133],
[134, 135, 136]
])
for dtype in ["float32", "float64", "int32"]:
with self.subTest(dtype=dtype):
observed = aug.augment_image(image.astype(dtype))
assert observed.dtype.name == dtype
assert observed.shape == (1, 1)
def test_image_with_height_being_too_large(self):
# change only one side when other side has already desired size
aug = iaa.CropToFixedSize(height=1, width=5)
image = np.zeros((3, 5, 3), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (1, 5, 3)
def test_image_with_width_being_too_large(self):
aug = iaa.CropToFixedSize(height=5, width=1)
image = np.zeros((5, 3, 3), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 1, 3)
def test_image_fullfills_exactly_max_shape(self):
# change no side when all sides have exactly desired size
aug = iaa.CropToFixedSize(height=5, width=5)
img5x5 = np.zeros((5, 5, 3), dtype=np.uint8)
img5x5[2, 2, :] = 255
observed = aug.augment_image(img5x5)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 5, 3)
assert np.array_equal(observed, img5x5)
def test_image_that_is_smaller_than_max_shape(self):
# change no side when all sides have smaller than desired size
aug = iaa.CropToFixedSize(height=5, width=5)
img4x4 = np.zeros((4, 4, 3), dtype=np.uint8)
img4x4[2, 2, :] = 255
observed = aug.augment_image(img4x4)
assert observed.dtype.name == "uint8"
assert observed.shape == (4, 4, 3)
assert np.array_equal(observed, img4x4)
def test_image_crop_at_left_top(self):
# explicit non-center position test
aug = iaa.CropToFixedSize(height=3, width=3, position="left-top")
img5x5 = np.arange(25, dtype=np.uint8).reshape((5, 5))
observed = aug.augment_image(img5x5)
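        # position="left-top" removes rows/columns at the top and left, so
        # the bottom-right 3x3 block of the 5x5 image remains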
expected = img5x5[2:, 2:]
assert observed.dtype.name == "uint8"
assert observed.shape == (3, 3)
assert np.array_equal(observed, expected)
def test_image_crop_at_right_bottom(self):
aug = iaa.CropToFixedSize(height=3, width=3, position="right-bottom")
img5x5 = np.arange(25, dtype=np.uint8).reshape((5, 5))
observed = aug.augment_image(img5x5)
expected = img5x5[:3, :3]
assert observed.dtype.name == "uint8"
assert observed.shape == (3, 3)
assert np.array_equal(observed, expected)
def test_image_crop_at_bottom_center_given_as_tuple_of_floats(self):
aug = iaa.CropToFixedSize(height=3, width=3, position=(0.5, 1.0))
img5x5 = np.arange(25, dtype=np.uint8).reshape((5, 5))
observed = aug.augment_image(img5x5)
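        # position=(0.5, 1.0) crops entirely from the bottom and horizontally
        # centered, keeping rows 0-2 and columns 1-3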
expected = img5x5[:3, 1:4]
assert observed.dtype.name == "uint8"
assert observed.shape == (3, 3)
assert np.array_equal(observed, expected)
def test_keypoints__image_already_fullfills_max_shape(self):
# keypoint test with shape not being changed
aug = iaa.CropToFixedSize(height=3, width=3, position="center")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
observed = aug.augment_keypoints([kpsoi])
expected = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
assert observed[0].shape == expected.shape
assert keypoints_equal(observed, [expected])
def test_keypoints_crop_at_center(self):
# basic keypoint test
aug = iaa.CropToFixedSize(height=1, width=1, position="center")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
observed = aug.augment_keypoints([kpsoi])
expected = ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 1))
assert observed[0].shape == expected.shape
assert keypoints_equal(observed, [expected])
def test_keypoints_crop_at_left_top(self):
# keypoint test with explicit non-center position
aug = iaa.CropToFixedSize(height=3, width=3, position="left-top")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(5, 5))
observed = aug.augment_keypoints([kpsoi])
expected = ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(3, 3))
assert observed[0].shape == expected.shape
assert keypoints_equal(observed, [expected])
def test_keypoints_crop_at_right_bottom(self):
aug = iaa.CropToFixedSize(height=3, width=3, position="right-bottom")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(5, 5))
observed = aug.augment_keypoints([kpsoi])
expected = ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(3, 3))
assert observed[0].shape == expected.shape
assert keypoints_equal(observed, [expected])
def test_heatmaps(self):
# basic heatmaps test
aug = iaa.CropToFixedSize(height=3, width=3, position="center")
heatmaps_arr = np.zeros((5, 5, 1), dtype=np.float32) + 1.0
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(5, 5, 3))
observed = aug.augment_heatmaps([heatmaps])[0]
expected = np.zeros((3, 3, 1), dtype=np.float32) + 1.0
assert observed.shape == (3, 3, 3)
assert np.allclose(observed.arr_0to1, expected)
def test_heatmaps_crop_at_left_top(self):
# heatmaps, crop at non-center position
aug = iaa.CropToFixedSize(height=3, width=3, position="left-top")
heatmaps_arr = np.linspace(
0.0, 1.0, 5 * 5 * 1).reshape((5, 5, 1)).astype(np.float32)
heatmaps_oi = ia.HeatmapsOnImage(heatmaps_arr, shape=(5, 5, 3))
observed = aug.augment_heatmaps([heatmaps_oi])[0]
expected = heatmaps_arr[2:, 2:, :]
assert observed.shape == (3, 3, 3)
assert np.allclose(observed.arr_0to1, expected)
def test_heatmaps_crop_at_right_bottom(self):
# heatmaps, crop at non-center position
aug = iaa.CropToFixedSize(height=3, width=3, position="right-bottom")
heatmaps_arr = np.linspace(
0.0, 1.0, 5 * 5 * 1).reshape((5, 5, 1)).astype(np.float32)
heatmaps_oi = ia.HeatmapsOnImage(heatmaps_arr, shape=(5, 5, 3))
observed = aug.augment_heatmaps([heatmaps_oi])[0]
expected = heatmaps_arr[:3, :3, :]
assert observed.shape == (3, 3, 3)
assert np.allclose(observed.arr_0to1, expected)
def test_heatmaps_smaller_than_image(self):
# heatmaps with size unequal to image
aug = iaa.CropToFixedSize(height=32, width=32, position="left-top")
heatmaps_arr = np.zeros((17, 17, 1), dtype=np.float32) + 1.0
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(34, 34, 3))
observed = aug.augment_heatmaps([heatmaps])[0]
expected = np.zeros((16, 16, 1), dtype=np.float32) + 1.0
assert observed.shape == (32, 32, 3)
assert np.allclose(observed.arr_0to1, expected)
def test_segmaps_crop_at_center(self):
# basic segmaps test
aug = iaa.CropToFixedSize(height=3, width=3, position="center")
segmaps_arr = np.ones((5, 5, 1), dtype=np.int32)
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(5, 5, 3))
observed = aug.augment_segmentation_maps([segmaps])[0]
expected = np.ones((3, 3, 1), dtype=np.int32)
assert observed.shape == (3, 3, 3)
assert np.array_equal(observed.arr, expected)
def test_segmaps_crop_at_left_top(self):
# segmaps, crop at non-center position
aug = iaa.CropToFixedSize(height=3, width=3, position="left-top")
segmaps_arr = np.arange(5*5).reshape((5, 5, 1)).astype(np.int32)
segmaps_oi = SegmentationMapsOnImage(segmaps_arr, shape=(5, 5, 3))
observed = aug.augment_segmentation_maps([segmaps_oi])[0]
expected = segmaps_arr[2:, 2:, :]
assert observed.shape == (3, 3, 3)
assert np.array_equal(observed.arr, expected)
def test_segmaps_crop_at_right_bottom(self):
# segmaps, crop at non-center position
aug = iaa.CropToFixedSize(height=3, width=3, position="right-bottom")
segmaps_arr = np.arange(5*5).reshape((5, 5, 1)).astype(np.int32)
segmaps_oi = SegmentationMapsOnImage(segmaps_arr, shape=(5, 5, 3))
observed = aug.augment_segmentation_maps([segmaps_oi])[0]
expected = segmaps_arr[:3, :3, :]
assert observed.shape == (3, 3, 3)
assert np.array_equal(observed.arr, expected)
def test_segmaps_smaller_than_image(self):
# segmaps with size unequal to image
aug = iaa.CropToFixedSize(height=32, width=32, position="left-top")
segmaps_arr = np.ones((17, 17, 1), dtype=np.int32)
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(34, 34, 3))
observed = aug.augment_segmentation_maps([segmaps])[0]
expected = np.ones((16, 16, 1), dtype=np.int32)
assert observed.shape == (32, 32, 3)
assert np.array_equal(observed.arr, expected)
def test_other_dtypes_bool(self):
aug = iaa.CropToFixedSize(height=2, width=3, position="center-top")
mask = np.zeros((2, 3), dtype=bool)
mask[0, 1] = True
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert image_aug.shape == (2, 3)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.CropToFixedSize(height=2, width=3, position="center-top")
mask = np.zeros((2, 3), dtype=bool)
mask[0, 1] = True
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [
1, 5, 10, 100, int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [
1, 5, 10, 100, int(center_value), int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == (2, 3)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == value)
def test_other_dtypes_float(self):
aug = iaa.CropToFixedSize(height=2, width=3, position="center-top")
mask = np.zeros((2, 3), dtype=bool)
mask[0, 1] = True
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == (2, 3)
assert np.all(_isclose(image_aug[~mask], 0))
assert np.all(_isclose(image_aug[mask],
np.float128(value)))
class TestKeepSizeByResize(unittest.TestCase):
def setUp(self):
reseed()
@property
def children(self):
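        # the child augmenter removes the top row without keeping the input
        # size; KeepSizeByResize is expected to resize its output back to the
        # original shape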
return iaa.Crop((1, 0, 0, 0), keep_size=False)
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=3)]
return ia.KeypointsOnImage(kps, shape=(4, 4, 3))
@property
def heatmaps(self):
heatmaps_arr = np.linspace(
0.0, 1.0, 4*4*1).reshape((4, 4, 1)).astype(np.float32)
return HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 1))
@property
def heatmaps_cubic(self):
heatmaps_arr = self.heatmaps.get_arr()
heatmaps_oi_cubic = HeatmapsOnImage(
heatmaps_arr[1:, :, :], shape=(3, 4, 3)
).resize((4, 4), interpolation="cubic")
heatmaps_oi_cubic.shape = (4, 4, 3)
return heatmaps_oi_cubic
@property
def heatmaps_nearest(self):
heatmaps_arr = self.heatmaps.get_arr()
heatmaps_oi_nearest = HeatmapsOnImage(
heatmaps_arr[1:, :, :], shape=(3, 4, 1)
).resize((4, 4), interpolation="nearest")
heatmaps_oi_nearest.shape = (4, 4, 3)
return heatmaps_oi_nearest
@property
def segmaps(self):
segmaps_arr = np.arange(4*4*1).reshape((4, 4, 1)).astype(np.int32)
return SegmentationMapsOnImage(segmaps_arr, shape=(4, 4, 1))
@property
def segmaps_nearest(self):
segmaps_arr = self.segmaps.get_arr()
segmaps_oi_nearest = SegmentationMapsOnImage(
segmaps_arr[1:, :, :], shape=(3, 4, 1)
).resize((4, 4), interpolation="nearest")
segmaps_oi_nearest.shape = (4, 4, 3)
return segmaps_oi_nearest
def test__draw_samples_each_one_interpolation(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation="nearest",
interpolation_heatmaps="linear",
interpolation_segmaps="cubic")
samples, samples_heatmaps, samples_segmaps = aug._draw_samples(
1000, iarandom.RNG(1))
assert "nearest" in samples
assert len(set(samples)) == 1
assert "linear" in samples_heatmaps
assert len(set(samples_heatmaps)) == 1
assert "cubic" in samples_segmaps
assert len(set(samples_segmaps)) == 1
def test__draw_samples_each_one_interpolation_via_cv2_constants(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation=cv2.INTER_LINEAR,
interpolation_heatmaps=cv2.INTER_NEAREST,
interpolation_segmaps=cv2.INTER_CUBIC)
samples, samples_heatmaps, samples_segmaps = aug._draw_samples(
1000, iarandom.RNG(1))
assert cv2.INTER_LINEAR in samples
assert len(set(samples)) == 1
assert cv2.INTER_NEAREST in samples_heatmaps
assert len(set(samples_heatmaps)) == 1
assert cv2.INTER_CUBIC in samples_segmaps
assert len(set(samples_segmaps)) == 1
def test__draw_samples_with_images_no_resize_and_others_same_as_imgs(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation=iaa.KeepSizeByResize.NO_RESIZE,
interpolation_heatmaps=iaa.KeepSizeByResize.SAME_AS_IMAGES,
interpolation_segmaps=iaa.KeepSizeByResize.SAME_AS_IMAGES)
samples, samples_heatmaps, samples_segmaps = aug._draw_samples(
1000, iarandom.RNG(1))
assert iaa.KeepSizeByResize.NO_RESIZE in samples
assert len(set(samples)) == 1
assert iaa.KeepSizeByResize.NO_RESIZE in samples_heatmaps
assert len(set(samples_heatmaps)) == 1
assert iaa.KeepSizeByResize.NO_RESIZE in samples_segmaps
assert len(set(samples_segmaps)) == 1
def test__draw_samples_list_of_interpolations_incl_same_as_images(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation=["cubic", "nearest"],
interpolation_heatmaps=[
"linear", iaa.KeepSizeByResize.SAME_AS_IMAGES],
interpolation_segmaps=[
"linear", iaa.KeepSizeByResize.SAME_AS_IMAGES])
samples, samples_heatmaps, samples_segmaps = aug._draw_samples(
5000, iarandom.RNG(1))
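        # SAME_AS_IMAGES resolves to whatever interpolation was sampled for
        # the images ("cubic" or "nearest"), so together with "linear" three
        # distinct values are expected for heatmaps and segmaps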
assert "cubic" in samples
assert "nearest" in samples
assert len(set(samples)) == 2
assert "linear" in samples_heatmaps
assert "nearest" in samples_heatmaps
assert len(set(samples_heatmaps)) == 3
assert np.isclose(
np.sum(samples == samples_heatmaps) / samples_heatmaps.size,
0.5,
rtol=0, atol=0.1)
assert "linear" in samples_segmaps
assert "nearest" in samples_segmaps
assert len(set(samples_segmaps)) == 3
assert np.isclose(
np.sum(samples == samples_segmaps) / samples_segmaps.size,
0.5,
rtol=0, atol=0.1)
def test__draw_samples_list_of_each_two_interpolations(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation=iap.Choice(["cubic", "linear"]),
interpolation_heatmaps=iap.Choice(["linear", "nearest"]),
interpolation_segmaps=iap.Choice(["linear", "nearest"]))
samples, samples_heatmaps, samples_segmaps = aug._draw_samples(
10000, iarandom.RNG(1))
assert "cubic" in samples
assert "linear" in samples
assert len(set(samples)) == 2
assert "linear" in samples_heatmaps
assert "nearest" in samples_heatmaps
assert len(set(samples_heatmaps)) == 2
assert "linear" in samples_segmaps
assert "nearest" in samples_segmaps
assert len(set(samples_segmaps)) == 2
def test_image_interpolation_is_cubic(self):
aug = iaa.KeepSizeByResize(self.children, interpolation="cubic")
img = np.arange(0, 4*4*3, 1).reshape((4, 4, 3)).astype(np.uint8)
observed = aug.augment_image(img)
assert observed.shape == (4, 4, 3)
assert observed.dtype.type == np.uint8
expected = ia.imresize_single_image(
img[1:, :, :], img.shape[0:2], interpolation="cubic")
assert np.allclose(observed, expected)
def test_image_interpolation_is_no_resize(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation=iaa.KeepSizeByResize.NO_RESIZE)
img = np.arange(0, 4*4*3, 1).reshape((4, 4, 3)).astype(np.uint8)
observed = aug.augment_image(img)
expected = img[1:, :, :]
assert observed.shape == (3, 4, 3)
assert observed.dtype.type == np.uint8
assert np.allclose(observed, expected)
def test_keypoints_interpolation_is_cubic(self):
aug = iaa.KeepSizeByResize(self.children, interpolation="cubic")
kpsoi = self.kpsoi
kpoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpoi_aug.shape == (4, 4, 3)
assert np.isclose(kpoi_aug.keypoints[0].x, 0, rtol=0, atol=1e-4)
assert np.isclose(kpoi_aug.keypoints[0].y,
((1-1)/3)*4,
rtol=0, atol=1e-4)
assert np.isclose(kpoi_aug.keypoints[1].x, 1, rtol=0, atol=1e-4)
assert np.isclose(kpoi_aug.keypoints[1].y,
((1-1)/3)*4,
rtol=0, atol=1e-4)
assert np.isclose(kpoi_aug.keypoints[2].x, 2, rtol=0, atol=1e-4)
assert np.isclose(kpoi_aug.keypoints[2].y,
((3-1)/3)*4,
rtol=0, atol=1e-4)
def test_keypoints_interpolation_is_no_resize(self):
aug = iaa.KeepSizeByResize(
self.children, interpolation=iaa.KeepSizeByResize.NO_RESIZE)
kpsoi = self.kpsoi
kpoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpoi_aug.shape == (3, 4, 3)
assert np.isclose(kpoi_aug.keypoints[0].x, 0, rtol=0, atol=1e-4)
assert np.isclose(kpoi_aug.keypoints[0].y, 0, rtol=0, atol=1e-4)
assert np.isclose(kpoi_aug.keypoints[1].x, 1, rtol=0, atol=1e-4)
assert np.isclose(kpoi_aug.keypoints[1].y, 0, rtol=0, atol=1e-4)
assert np.isclose(kpoi_aug.keypoints[2].x, 2, rtol=0, atol=1e-4)
assert np.isclose(kpoi_aug.keypoints[2].y, 2, rtol=0, atol=1e-4)
def test_heatmaps_specific_interpolation_set_to_no_nearest(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation="cubic",
interpolation_heatmaps="nearest")
heatmaps_oi = self.heatmaps
heatmaps_oi_nearest = self.heatmaps_nearest
heatmaps_oi_aug = aug.augment_heatmaps([heatmaps_oi])[0]
assert heatmaps_oi_aug.arr_0to1.shape == (4, 4, 1)
assert np.allclose(heatmaps_oi_aug.arr_0to1,
heatmaps_oi_nearest.arr_0to1)
def test_heatmaps_specific_interpolation_set_to_list_of_two(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation="cubic",
interpolation_heatmaps=["nearest", "cubic"])
heatmaps_oi = self.heatmaps
heatmaps_oi_cubic = self.heatmaps_cubic
heatmaps_oi_nearest = self.heatmaps_nearest
hmoi_aug = aug.augment_heatmaps([heatmaps_oi])[0]
assert hmoi_aug.arr_0to1.shape == (4, 4, 1)
assert (
np.allclose(hmoi_aug.arr_0to1, heatmaps_oi_nearest.arr_0to1)
or np.allclose(hmoi_aug.arr_0to1, heatmaps_oi_cubic.arr_0to1)
)
def test_heatmaps_specific_interpolation_set_to_no_resize(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation="cubic",
interpolation_heatmaps=iaa.KeepSizeByResize.NO_RESIZE)
heatmaps_oi = self.heatmaps
heatmaps_oi_aug = aug.augment_heatmaps([heatmaps_oi])[0]
assert heatmaps_oi_aug.arr_0to1.shape == (3, 4, 1)
assert np.allclose(
heatmaps_oi_aug.arr_0to1, heatmaps_oi.arr_0to1[1:, :, :])
def test_heatmaps_specific_interpolation_set_to_same_as_images(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation="cubic",
interpolation_heatmaps=iaa.KeepSizeByResize.SAME_AS_IMAGES)
heatmaps_oi = self.heatmaps
heatmaps_oi_cubic = self.heatmaps_cubic
heatmaps_oi_aug = aug.augment_heatmaps([heatmaps_oi])[0]
assert heatmaps_oi_aug.arr_0to1.shape == (4, 4, 1)
assert np.allclose(
heatmaps_oi_aug.arr_0to1, heatmaps_oi_cubic.arr_0to1)
def test_segmaps_general_interpolation_set_to_cubic(self):
aug = iaa.KeepSizeByResize(self.children, interpolation="cubic")
segmaps_oi = self.segmaps
segmaps_oi_nearest = self.segmaps_nearest
segmaps_oi_aug = aug.augment_segmentation_maps([segmaps_oi])[0]
assert segmaps_oi_aug.arr.shape == (4, 4, 1)
assert np.array_equal(segmaps_oi_aug.arr, segmaps_oi_nearest.arr)
def test_segmaps_specific_interpolation_set_to_nearest(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation="cubic",
interpolation_segmaps="nearest")
segmaps_oi = self.segmaps
segmaps_oi_nearest = self.segmaps_nearest
segmaps_oi_aug = aug.augment_segmentation_maps([segmaps_oi])[0]
assert segmaps_oi_aug.arr.shape == (4, 4, 1)
assert np.array_equal(segmaps_oi_aug.arr, segmaps_oi_nearest.arr)
def test_segmaps_specific_interpolation_set_to_no_resize(self):
aug = iaa.KeepSizeByResize(
self.children,
interpolation="cubic",
interpolation_segmaps=iaa.KeepSizeByResize.NO_RESIZE)
segmaps_oi = self.segmaps
segmaps_oi_aug = aug.augment_segmentation_maps([segmaps_oi])[0]
assert segmaps_oi_aug.arr.shape == (3, 4, 1)
assert np.array_equal(segmaps_oi_aug.arr, segmaps_oi.arr[1:, :, :])
|
py | 1a430812c34b11f432023213d5fddb47f5b96991 | """Check the latest version at https://pypi.org/project/slack-sdk/"""
__version__ = "3.9.0rc1"
|
py | 1a43081c9933c94b7298ea80f894fccdcb694ca3 | from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
import warnings
import os
import datetime
from tqdm import tqdm
from zipfile import ZipFile, ZIP_DEFLATED
from scipy.ndimage.morphology import distance_transform_edt, binary_fill_holes
from scipy.ndimage.measurements import find_objects
from scipy.optimize import minimize_scalar
from skimage.measure import regionprops
from csbdeep.utils import _raise
from csbdeep.utils.six import Path
from .matching import matching_dataset
def gputools_available():
try:
import gputools
except:
return False
return True
def path_absolute(path_relative):
""" Get absolute path to resource"""
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, path_relative)
def _is_power_of_2(i):
assert i > 0
e = np.log2(i)
return e == int(e)
def _normalize_grid(grid,n):
try:
grid = tuple(grid)
(len(grid) == n and
all(map(np.isscalar,grid)) and
all(map(_is_power_of_2,grid))) or _raise(TypeError())
return tuple(int(g) for g in grid)
except (TypeError, AssertionError):
raise ValueError("grid = {grid} must be a list/tuple of length {n} with values that are power of 2".format(grid=grid, n=n))
def _edt_dist_func(anisotropy):
try:
from edt import edt as edt_func
# raise ImportError()
dist_func = lambda img: edt_func(np.ascontiguousarray(img>0), anisotropy=anisotropy)
except ImportError:
dist_func = lambda img: distance_transform_edt(img, sampling=anisotropy)
return dist_func
def _edt_prob(lbl_img, anisotropy=None):
constant_img = lbl_img.min() == lbl_img.max() and lbl_img.flat[0] > 0
if constant_img:
lbl_img = np.pad(lbl_img, ((1,1),)*lbl_img.ndim, mode='constant')
warnings.warn("EDT of constant label image is ill-defined. (Assuming background around it.)")
dist_func = _edt_dist_func(anisotropy)
prob = np.zeros(lbl_img.shape,np.float32)
for l in (set(np.unique(lbl_img)) - set([0])):
mask = lbl_img==l
edt = dist_func(mask)[mask]
prob[mask] = edt/(np.max(edt)+1e-10)
if constant_img:
prob = prob[(slice(1,-1),)*lbl_img.ndim].copy()
return prob
def edt_prob(lbl_img, anisotropy=None):
"""Perform EDT on each labeled object and normalize."""
def grow(sl,interior):
return tuple(slice(s.start-int(w[0]),s.stop+int(w[1])) for s,w in zip(sl,interior))
def shrink(interior):
return tuple(slice(int(w[0]),(-1 if w[1] else None)) for w in interior)
constant_img = lbl_img.min() == lbl_img.max() and lbl_img.flat[0] > 0
if constant_img:
lbl_img = np.pad(lbl_img, ((1,1),)*lbl_img.ndim, mode='constant')
warnings.warn("EDT of constant label image is ill-defined. (Assuming background around it.)")
dist_func = _edt_dist_func(anisotropy)
objects = find_objects(lbl_img)
prob = np.zeros(lbl_img.shape,np.float32)
for i,sl in enumerate(objects,1):
# i: object label id, sl: slices of object in lbl_img
if sl is None: continue
interior = [(s.start>0,s.stop<sz) for s,sz in zip(sl,lbl_img.shape)]
# 1. grow object slice by 1 for all interior object bounding boxes
# 2. perform (correct) EDT for object with label id i
# 3. extract EDT for object of original slice and normalize
# 4. store edt for object only for pixels of given label id i
shrink_slice = shrink(interior)
grown_mask = lbl_img[grow(sl,interior)]==i
mask = grown_mask[shrink_slice]
edt = dist_func(grown_mask)[shrink_slice][mask]
prob[sl][mask] = edt/(np.max(edt)+1e-10)
if constant_img:
prob = prob[(slice(1,-1),)*lbl_img.ndim].copy()
return prob
def _fill_label_holes(lbl_img, **kwargs):
lbl_img_filled = np.zeros_like(lbl_img)
for l in (set(np.unique(lbl_img)) - set([0])):
mask = lbl_img==l
mask_filled = binary_fill_holes(mask,**kwargs)
lbl_img_filled[mask_filled] = l
return lbl_img_filled
def fill_label_holes(lbl_img, **kwargs):
"""Fill small holes in label image."""
# TODO: refactor 'fill_label_holes' and 'edt_prob' to share code
def grow(sl,interior):
return tuple(slice(s.start-int(w[0]),s.stop+int(w[1])) for s,w in zip(sl,interior))
def shrink(interior):
return tuple(slice(int(w[0]),(-1 if w[1] else None)) for w in interior)
objects = find_objects(lbl_img)
lbl_img_filled = np.zeros_like(lbl_img)
for i,sl in enumerate(objects,1):
if sl is None: continue
interior = [(s.start>0,s.stop<sz) for s,sz in zip(sl,lbl_img.shape)]
shrink_slice = shrink(interior)
grown_mask = lbl_img[grow(sl,interior)]==i
mask_filled = binary_fill_holes(grown_mask,**kwargs)[shrink_slice]
lbl_img_filled[sl][mask_filled] = i
return lbl_img_filled
def sample_points(n_samples, mask, prob=None, b=2):
"""sample points to draw some of the associated polygons"""
if b is not None and b > 0:
# ignore image boundary, since predictions may not be reliable
mask_b = np.zeros_like(mask)
mask_b[b:-b,b:-b] = True
else:
mask_b = True
points = np.nonzero(mask & mask_b)
if prob is not None:
# weighted sampling via prob
w = prob[points[0],points[1]].astype(np.float64)
w /= np.sum(w)
ind = np.random.choice(len(points[0]), n_samples, replace=True, p=w)
else:
ind = np.random.choice(len(points[0]), n_samples, replace=True)
points = points[0][ind], points[1][ind]
points = np.stack(points,axis=-1)
return points
def calculate_extents(lbl, func=np.median):
""" Aggregate bounding box sizes of objects in label images. """
if isinstance(lbl,(tuple,list)) or (isinstance(lbl,np.ndarray) and lbl.ndim==4):
return func(np.stack([calculate_extents(_lbl,func) for _lbl in lbl], axis=0), axis=0)
n = lbl.ndim
n in (2,3) or _raise(ValueError("label image should be 2- or 3-dimensional (or pass a list of these)"))
regs = regionprops(lbl)
if len(regs) == 0:
return np.zeros(n)
else:
extents = np.array([np.array(r.bbox[n:])-np.array(r.bbox[:n]) for r in regs])
return func(extents, axis=0)
def polyroi_bytearray(x,y,pos=None,subpixel=True):
""" Byte array of polygon roi with provided x and y coordinates
See https://github.com/imagej/imagej1/blob/master/ij/io/RoiDecoder.java
"""
import struct
def _int16(x):
return int(x).to_bytes(2, byteorder='big', signed=True)
def _uint16(x):
return int(x).to_bytes(2, byteorder='big', signed=False)
def _int32(x):
return int(x).to_bytes(4, byteorder='big', signed=True)
def _float(x):
return struct.pack(">f", x)
subpixel = bool(subpixel)
# add offset since pixel center is at (0.5,0.5) in ImageJ
x_raw = np.asarray(x).ravel() + 0.5
y_raw = np.asarray(y).ravel() + 0.5
x = np.round(x_raw)
y = np.round(y_raw)
assert len(x) == len(y)
top, left, bottom, right = y.min(), x.min(), y.max(), x.max() # bbox
n_coords = len(x)
bytes_header = 64
bytes_total = bytes_header + n_coords*2*2 + subpixel*n_coords*2*4
B = [0] * bytes_total
B[ 0: 4] = map(ord,'Iout') # magic start
B[ 4: 6] = _int16(227) # version
B[ 6: 8] = _int16(0) # roi type (0 = polygon)
B[ 8:10] = _int16(top) # bbox top
B[10:12] = _int16(left) # bbox left
B[12:14] = _int16(bottom) # bbox bottom
B[14:16] = _int16(right) # bbox right
B[16:18] = _uint16(n_coords) # number of coordinates
if subpixel:
B[50:52] = _int16(128) # subpixel resolution (option flag)
if pos is not None:
B[56:60] = _int32(pos) # position (C, Z, or T)
for i,(_x,_y) in enumerate(zip(x,y)):
xs = bytes_header + 2*i
ys = xs + 2*n_coords
B[xs:xs+2] = _int16(_x - left)
B[ys:ys+2] = _int16(_y - top)
if subpixel:
base1 = bytes_header + n_coords*2*2
base2 = base1 + n_coords*4
for i,(_x,_y) in enumerate(zip(x_raw,y_raw)):
xs = base1 + 4*i
ys = base2 + 4*i
B[xs:xs+4] = _float(_x)
B[ys:ys+4] = _float(_y)
return bytearray(B)
def export_imagej_rois(fname, polygons, set_position=True, subpixel=True, compression=ZIP_DEFLATED):
""" polygons assumed to be a list of arrays with shape (id,2,c) """
if isinstance(polygons,np.ndarray):
polygons = (polygons,)
fname = Path(fname)
if fname.suffix == '.zip':
fname = fname.with_suffix('')
with ZipFile(str(fname)+'.zip', mode='w', compression=compression) as roizip:
for pos,polygroup in enumerate(polygons,start=1):
for i,poly in enumerate(polygroup,start=1):
roi = polyroi_bytearray(poly[1],poly[0], pos=(pos if set_position else None), subpixel=subpixel)
roizip.writestr('{pos:03d}_{i:03d}.roi'.format(pos=pos,i=i), roi)
def optimize_threshold(Y, Yhat, model, nms_thresh, measure='accuracy', iou_threshs=[0.3,0.5,0.7], bracket=None, tol=1e-2, maxiter=20, verbose=1):
""" Tune prob_thresh for provided (fixed) nms_thresh to maximize matching score (for given measure and averaged over iou_threshs). """
np.isscalar(nms_thresh) or _raise(ValueError("nms_thresh must be a scalar"))
iou_threshs = [iou_threshs] if np.isscalar(iou_threshs) else iou_threshs
values = dict()
if bracket is None:
max_prob = max([np.max(prob) for prob, dist in Yhat])
bracket = max_prob/2, max_prob
# print("bracket =", bracket)
with tqdm(total=maxiter, disable=(verbose!=1), desc="NMS threshold = %g" % nms_thresh) as progress:
def fn(thr):
prob_thresh = np.clip(thr, *bracket)
value = values.get(prob_thresh)
if value is None:
Y_instances = [model._instances_from_prediction(y.shape, *prob_dist, prob_thresh=prob_thresh, nms_thresh=nms_thresh)[0] for y,prob_dist in zip(Y,Yhat)]
stats = matching_dataset(Y, Y_instances, thresh=iou_threshs, show_progress=False, parallel=True)
values[prob_thresh] = value = np.mean([s._asdict()[measure] for s in stats])
if verbose > 1:
print("{now} thresh: {prob_thresh:f} {measure}: {value:f}".format(
now = datetime.datetime.now().strftime('%H:%M:%S'),
prob_thresh = prob_thresh,
measure = measure,
value = value,
), flush=True)
else:
progress.update()
progress.set_postfix_str("{prob_thresh:.3f} -> {value:.3f}".format(prob_thresh=prob_thresh, value=value))
progress.refresh()
return -value
opt = minimize_scalar(fn, method='golden', bracket=bracket, tol=tol, options={'maxiter': maxiter})
verbose > 1 and print('\n',opt, flush=True)
return opt.x, -opt.fun
|
py | 1a4308a554d7e430cef2c3ad215fc9dc09350dee | # Generated by Django 2.2 on 2021-08-18 12:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProfileFeedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status_text', models.CharField(max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | 1a430975b9a2e9c0ede4cce70781fe0a3b31c0cd | import pytest
from tools import utils, constants
from launchers.sandbox import Sandbox
from . import protocol
NUM_NODES = 3
@pytest.mark.multinode
@pytest.mark.incremental
class TestDoubleEndorsement:
"""Constructs a double endorsement, and build evidence."""
def test_init(self, sandbox: Sandbox):
for i in range(NUM_NODES):
sandbox.add_node(i, params=constants.NODE_PARAMS)
protocol.activate(sandbox.client(0), activate_in_the_past=True)
utils.bake(sandbox.client(0))
def test_level(self, sandbox: Sandbox):
level = 2
for client in sandbox.all_clients():
assert utils.check_level(client, level)
def test_terminate_nodes_1_and_2(self, sandbox: Sandbox):
sandbox.node(1).terminate()
sandbox.node(2).terminate()
def test_bake_node_0(self, sandbox: Sandbox):
"""Client 0 bakes block A at level 3, not communicated to 1 and 2
Inject an endorsement to ensure a different hash"""
sandbox.client(0).endorse('bootstrap1')
utils.bake(sandbox.client(0))
def test_endorse_node_0(self, sandbox: Sandbox, session: dict):
"""bootstrap1 builds an endorsement for block A"""
client = sandbox.client(0)
client.endorse('bootstrap1')
mempool = client.get_mempool()
endorsement = mempool['applied'][0]
session['endorsement1'] = endorsement
def test_terminate_node_0(self, sandbox: Sandbox):
sandbox.node(0).terminate()
def test_restart_node_2(self, sandbox: Sandbox):
sandbox.node(2).run()
assert sandbox.client(2).check_node_listening()
def test_bake_node_2(self, sandbox: Sandbox):
"""Client 2 bakes block B at level 3, not communicated to 0 and 1"""
utils.bake(sandbox.client(2))
def test_endorse_node_2(self, sandbox: Sandbox, session: dict):
"""bootstrap1 builds an endorsement for block B"""
client = sandbox.client(2)
client.endorse('bootstrap1')
mempool = client.get_mempool()
endorsement = mempool['applied'][0]
session['endorsement2'] = endorsement
sandbox.client(2).endorse('bootstrap2')
def test_restart_all(self, sandbox: Sandbox):
sandbox.node(0).run()
sandbox.node(1).run()
sandbox.client(0).check_node_listening()
sandbox.client(1).check_node_listening()
def test_check_level(self, sandbox: Sandbox):
"""All nodes are at level 3, head is either block A or B"""
level = 3
for client in sandbox.all_clients():
assert utils.check_level(client, level)
def test_forge_accusation(self, sandbox: Sandbox, session: dict):
"""Forge and inject a double endorsement evidence operation"""
client = sandbox.client(1)
head_hash = client.get_head()['hash']
def transform_endorsement(end):
return {
'branch': end['branch'],
'operations': end['contents'][0],
'signature': end['signature'],
}
endorsement1 = transform_endorsement(session['endorsement1'])
endorsement2 = transform_endorsement(session['endorsement2'])
operation = {
'branch': head_hash,
'contents': [
{
'kind': 'double_endorsement_evidence',
'op1': endorsement1,
'op2': endorsement2,
}
],
}
path_forge_operation = (
'/chains/main/blocks/head/helpers/forge/operations'
)
operation_hex_string = client.rpc(
'post', path_forge_operation, data=operation
)
assert isinstance(operation_hex_string, str)
sender_sk_long = constants.IDENTITIES['bootstrap1']['secret']
sender_sk = sender_sk_long[len('unencrypted:') :]
signed_op = utils.sign_operation(operation_hex_string, sender_sk)
op_hash = client.rpc('post', 'injection/operation', signed_op)
assert isinstance(op_hash, str)
session['operation'] = op_hash
def test_operation_applied(self, sandbox: Sandbox, session: dict):
"""Check operation is in mempool"""
client = sandbox.client(1)
assert utils.check_mempool_contains_operations(
client, [session['operation']]
)
|
py | 1a430a3aa0d9d650d90ce8dce9c7364d5502186e | eval(q:="print('eval(q:=%r)'%q)") |
py | 1a430c22c0dc329723bd6e2db60c21a0c302feee | """
pycmark.utils.compat
~~~~~~~~~~~~~~~~~~~~
Utilities for compatibility.
:copyright: Copyright 2017-2019 by Takeshi KOMIYA
:license: Apache License 2.0, see LICENSE for details.
"""
from typing import Any, Generator
from docutils.nodes import Node
if not hasattr(Node, 'findall'): # for docutils-0.17 or older
def findall(self, *args: Any, **kwargs: Any) -> Generator[Node, None, None]:
for node in self.traverse(*args, **kwargs):
yield node
Node.findall = findall # type: ignore
|
py | 1a430ce2d2aec17713b86a49ee5ed189288214cb | from typing import TYPE_CHECKING, Any, Callable, Type
from graphene.relay.node import Node, NodeField
from simple_graphql.django.config import extract_extra_meta_config
from simple_graphql.django.fields.authorize import authorize_query
if TYPE_CHECKING:
from simple_graphql.django import ModelSchemaConfig
def build_resolver(base_resolver: Callable, config: "ModelSchemaConfig") -> Callable:
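    """Wrap base_resolver so authorize_query(config, info) runs before the wrapped resolver."""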
def authorized_resolver(obj: Any, info: Any, **kwargs: Any):
authorize_query(config, info)
return base_resolver(obj, info, **kwargs)
return authorized_resolver
class DjangoAutoNodeField(NodeField):
config: "ModelSchemaConfig"
def __init__(self, node: Type, type: Type, **kwargs: Any) -> None:
self.config = extract_extra_meta_config(getattr(type, "ExtraMeta", None))
super().__init__(node, type, **kwargs)
def get_resolver(self, parent_resolver: Callable):
return build_resolver(super().get_resolver(parent_resolver), self.config)
class DjangoAutoNode(Node):
@classmethod
def Field(cls, *args: Any, **kwargs: Any) -> DjangoAutoNodeField:
return DjangoAutoNodeField(Node, *args, **kwargs)
|
py | 1a430db5c5d10436924374dd15880b522cc14664 | c = input()
vlist = sorted(map(int, raw_input().split()), reverse=True) # read and sort the degrees in descending order
def walk(c, vlist):
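    # Greedy pass over the degree sequence (sorted descending): link each vertex to the
    # next one, then consume leaf (degree-1) vertices; any leftover degree means "No".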
for i in range(0, c-1):
v = vlist[i]
if v == 0:
return
if vlist[i+1] > 0: # possibly connect to the vertex which will in turn connect to more vertices, thus, helping us walk the tree
vlist[i+1] -= 1
v -= 1
for j in range(i+2, c): # consume leaf vertices
if vlist[j] == 1:
vlist[j] = 0
v -= 1
if v == 0:
break
vlist[i] = v
walk(c, vlist)
if max(vlist) == 0: print 'Yes'
else: print 'No'
|
py | 1a430dfd69e1b19b494ca2525ecf413c09ab225e | import mitdeeplearning as mdl
songs = mdl.lab1.load_training_data()
basename = mdl.lab1.save_song_to_abc(songs[0])
ret = mdl.lab1.abc2wav(basename+'.abc')
import pdb; pdb.set_trace()
|
py | 1a430e89c5e37b3a1760f9a41860bebff0f69656 | import math
import timeit
import numpy as np
import random
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
class GA():
def __init__(self, selectionPressure, mutationProbability, chromosomeCount, selectionProbability, problem):
self.selPs = selectionPressure # selection pressure
self.mutPa = mutationProbability # Probabilty of mutation
self.chrCount = chromosomeCount # size of choromosome
self.selPa = selectionProbability # selection probabilty
self.choromosome = [] # choromosome set
self.fitness = [] # fitness set
self.population = [[],[]] # [choromosome, fitness] set
self.generation = 0 # current generation
self.problem = problem # problem route
self.dist_ar = [] # [dots_list, dots_list ]distance array
self.cities_count = 0
self.dots_list = []
self.limit_time = 0
self.start = 0
self.stop = 0
def make_distDataframe(self, problem):
reader = open(problem, mode='rt', encoding='utf-8')
self.dots_list = reader.read().split("\n") # ['x1 y1', 'x2 y2', 'x3 y3' ... 'xn yn']
self.cities_count = int(self.dots_list.pop(0))
self.limit_time = float(self.dots_list.pop())
x_list = [] # ['x1', 'x2', 'x3' ... 'xn']
y_list = [] # ['y1', 'y2', 'y3' ... 'yn']
for i in range(self.cities_count):
temp = self.dots_list[i].split(" ")
x_list.append(float(temp[0]))
y_list.append(float(temp[1]))
for n in range(self.cities_count):
temp = []
for m in range(self.cities_count):
temp.append(round((math.sqrt(((x_list[m] - x_list[n]) ** 2) + ((y_list[m] - y_list[n]) ** 2))), 2))
self.dist_ar.append(temp)
self.dist_ar = np.array(self.dist_ar)
print(self.dist_ar)
    def cal_fit(self, route) :
        # total tour length, including the edge that closes the cycle back to the start
        fit = 0
        for i in range(len(route)) :
            if i == len(route)-1 :
                fit += self.dist_ar[route[i], route[0]]
            else :
                fit += self.dist_ar[route[i], route[i+1]]
        return fit
def randomTwo(self, ranges):
randomList = []
randomList += random.sample(range(0, ranges), 2)
randomList.sort()
return randomList
def evolution(self) :
self.start = timeit.default_timer()
self.stop = timeit.default_timer()
        # init chromosomes
self.make_distDataframe(self.problem)
for i in range(self.chrCount):
self.choromosome.append(random.sample(range(0, self.cities_count), self.cities_count))
for i in range(self.chrCount):
self.fitness.append(round(self.cal_fit(self.choromosome[i]), 5))
self.population = (np.array([self.choromosome, self.fitness])).T
self.population = self.population[np.argsort(self.population[:, 1])]
        print('Initial best solution : \n', self.population[0, 0], "\n", self.population[0, 1])
while self.stop - self.start <= self.limit_time:
offsprings = []
self.generation += 1
            # selection : tournament selection
            for endSel in range(int(self.chrCount*self.selPa)):
                # draw two random chromosomes from the population and a selection random number
                # if the random number is below the selection pressure, the fitter of the two is selected; otherwise the weaker one
parents_index = [0, 0]
for i in range(len(parents_index)):
selGeneNum = self.randomTwo(self.chrCount)
match = random.random()
if match < self.selPs:
if self.population[selGeneNum[0], 1] < self.population[selGeneNum[1], 1]:
parents_index[i] = selGeneNum[0]
else:
parents_index[i] = selGeneNum[1]
else:
if self.population[selGeneNum[0], 1] < self.population[selGeneNum[1], 1]:
parents_index[i] = selGeneNum[1]
else:
parents_index[i] = selGeneNum[0]
# crossover : order-based crossover
daddy_value = self.population[parents_index[0], 0].copy()
mommy_value = self.population[parents_index[1], 0].copy()
CsGeneNum = self.randomTwo(self.cities_count)
offspring = daddy_value[CsGeneNum[0]: CsGeneNum[1]]
for i in daddy_value[CsGeneNum[0]: CsGeneNum[1]]:
mommy_value.remove(i)
for i in range(len(offspring)):
mommy_value.insert(CsGeneNum[0] + i, offspring[i])
offspring = mommy_value
offspring_fit = self.cal_fit(offspring)
# mutation : exchange mutation
mut_p = random.random()
if mut_p < self.mutPa:
MtGeneNum = self.randomTwo(self.cities_count)
mut_Temp = offspring[MtGeneNum[0]]
offspring[MtGeneNum[0]] = offspring[MtGeneNum[1]]
offspring[MtGeneNum[1]] = mut_Temp
offspring_fit = self.cal_fit(offspring)
offsprings.append(np.array([offspring, offspring_fit]))
self.population = np.vstack((self.population, offsprings))
# Replacement
self.population = self.population[np.argsort(self.population[:, 1])]
for i in range(int(self.chrCount*self.selPa)) :
self.population = np.delete(self.population, len(self.population)-1, axis=0)
if self.generation % 5000 == 0:
                print(self.generation, 'generation best solution : \n', self.population[0, 1])
print(self.population[0, 0])
self.stop = timeit.default_timer()
if __name__ == "__main__":
ga = GA(selectionPressure=0.7, mutationProbability=0.2, chromosomeCount=20, selectionProbability=0.5, problem="cycle51.in")
ga.evolution()
plotData = []
for index in ga.population[0, 0]:
plotData.append([round(float(ga.dots_list[int(index)].split(" ")[0]), 3),
round(float(ga.dots_list[int(index)].split(" ")[1]), 3)])
plotData = np.array(plotData)
plotData = plotData.T
textStr = "fitness :", ga.population[0, 1]
axs = plt.plot(plotData[0], plotData[1])
plt.text(0.05, 0.95, textStr, fontsize=20, fontweight='bold')
plt.show()
    print(ga.generation, 'generation best solution : \n', ga.population[0, 1])
print(ga.population[0, 0])
print(ga.stop - ga.start)
|
py | 1a430f2f8340afb0522a4ee7656d3c73f5943e61 | from twilio.rest import Client
import json
class MessageClient:
def __init__(self):
print('Initializing messaging client')
with open('twiliocredentials.json') as creds:
twiliocred = json.loads(creds.read())
twilio_number = int(twiliocred.get('trial_number'))
twilio_account_sid = twiliocred.get('account_sid')
twilio_auth_token = twiliocred.get('auth_token')
self.twilio_number = twilio_number
self.twilio_client = Client(twilio_account_sid, twilio_auth_token)
print('Twilio client initialized')
def send_message(self, body, to):
self.twilio_client.messages.create(
body=body,
to=to,
from_=self.twilio_number,
# media_url=['https://demo.twilio.com/owl.png']
) |
py | 1a430f8dacecb0480172929e6bd93a363daa01e6 | import os
for fname in os.listdir('.'):
spl = fname.split('.')
if len(spl) <= 1:
continue
ext = spl[-1]
if ext != 'arc':
continue
print("[Python] Fixing ARC File ", fname)
# ARCTool produces a 0x11 but a 0x07 is expected by the collection
with open(fname, 'r+b') as file:
file.seek(4)
file.write(0x07.to_bytes(1, byteorder='little')) |
py | 1a430f9a575298a12b4a2a60eea6ef64ee87238d | #!/usr/bin/python
#r'''
# >>> import hello_ext
# >>> print getting_started1.greet()
# hello, world
# >>> number = 11
# >>> print number, '*', number, '=', getting_started1.square(number)
# 11 * 11 = 121
#'''
#
#def run(args = None):
# if args is not None:
# import sys
# sys.argv = args
# import doctest, test_getting_started1
# return doctest.testmod(test_getting_started1)
#
#if __name__ == '__main__':
# import sys
# sys.exit(run()[0])
import hello_ext
with open("/home/roland/tstrunpyd.log","w") as f:
f.writelines([hello_ext.greet()])
print(hello_ext.greet())
|
py | 1a431025b7c5525173e6221ba8840ed83a78ac5c | ######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# CHECKING FLAGGING OF ALL CALIBRATORS
# use rflag mode of flagdata
logprint ("Starting EVLA_pipe_checkflag_semiFinal.py", logfileout='logs/checkflag_semiFinal.log')
time_list=runtiming('checkflag_semiFinal', 'start')
QA2_checkflag_semiFinal='Pass'
logprint ("Checking RFI flagging of all calibrators", logfileout='logs/checkflag_semiFinal.log')
default('flagdata')
vis=ms_active
mode='rflag'
field=calibrator_field_select_string
correlation='ABS_'+corrstring
scan=calibrator_scan_select_string
ntime='scan'
combinescans=False
datacolumn='corrected'
winsize=3
timedevscale=4.0
freqdevscale=4.0
extendflags=False
action='apply'
display=''
flagbackup=False
savepars=True
flagdata()
#clearstat()
# Until we know what the QA criteria are for this script, leave QA2
# set score to "Pass".
logprint ("QA2 score: "+QA2_checkflag_semiFinal, logfileout='logs/checkflag_semiFinal.log')
logprint ("Finished EVLA_pipe_checkflag_semiFinal.py", logfileout='logs/checkflag_semiFinal.log')
time_list=runtiming('checkflag_semiFinal', 'end')
pipeline_save()
######################################################################
|
py | 1a431108d8a111f56052621bbaadc250e6d0e0cc | #! python3
import SimpleITK as sitk
import numpy as np
def ApplyBiasCorrection(inputImage, shrinkFactor = (1,1,1)):
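    """Apply SimpleITK N4 bias field correction to inputImage.

    With shrinkFactor other than (1,1,1) the correction is estimated on a shrunk copy,
    the resulting bias field is resampled to full resolution and multiplied into the
    original image; otherwise the filter is run directly on the full-resolution image.
    """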
# Bias correction filter:
biasFieldCorrFilter = sitk.N4BiasFieldCorrectionImageFilter()
mask = sitk.OtsuThreshold( inputImage, 0, 1, 100)
inputImage = sitk.Cast(inputImage, sitk.sitkFloat32)
# Parameter for the bias corredtion filter:
biasFieldCorrFilter.SetSplineOrder(3)
biasFieldCorrFilter.SetConvergenceThreshold(0.0001)
biasFieldCorrFilter.SetMaximumNumberOfIterations((50, 40, 30))
if shrinkFactor != (1,1,1):
# Shrink image and mask to accelerate:
shrinkedInput = sitk.Shrink(inputImage, shrinkFactor)
mask = sitk.Shrink(mask, shrinkFactor)
#biasFieldCorrFilter.SetNumberOfThreads()
#biasFieldCorrFilter.UseMaskLabelOff() # Because I'm having problems with the mask.
# Run the filter:
output = biasFieldCorrFilter.Execute(shrinkedInput, mask)
# Get the field by dividing the output by the input:
outputArray = sitk.GetArrayFromImage(output)
shrinkedInputArray = sitk.GetArrayFromImage(shrinkedInput)
biasFieldArray = np.ones(np.shape(outputArray), 'float32')
biasFieldArray[shrinkedInputArray != 0] = outputArray[shrinkedInputArray != 0]/shrinkedInputArray[shrinkedInputArray != 0]
biasFieldArray[shrinkedInputArray == 0] = 0
# Generate bias field image:
biasField = sitk.GetImageFromArray(biasFieldArray)
biasField.SetSpacing(shrinkedInput.GetSpacing())
biasField.SetOrigin(shrinkedInput.GetOrigin())
biasField.SetDirection(shrinkedInput.GetDirection())
# Now expand
biasField = sitk.Resample(biasField, inputImage)
# Apply to the image:
output = sitk.Multiply(inputImage, biasField)
else:
#output = biasFieldCorrFilter.Execute(inputImage, mask)
output = biasFieldCorrFilter.Execute(inputImage)
# return the output:
return output
|
py | 1a43116b8b447d3c508976216d8f0fde65d398ed | #!/usr/bin/python
# looks to pmc rss to see if it is ready
# grabs the pmc URL, stores it, calls tweet.
"""
feed = feedparser.parse('http://python.mirocommunity.org/feeds/category/pyohio-2011')
>>> [ (fe['link'], [ l['href'] for l in fe['links'] if l['rel']=='via' ][0]) for fe in feed['entries'] ]
[(u'http://python.mirocommunity.org/video/4373/pyohio-2011-data-transfer-obje', u'http://blip.tv/file/5419876'), (u'http://python.mirocommunity.org/video/4372/pyohio-2011-using-fabric-from-', u'http://blip.tv/file/5419406'), (u'http://python.mirocommunity.org/video/4371/pyohio-2011-aspen-a-next-gener', u'http://blip.tv/file/5419329')]
"""
import feedparser
import blip_uploader
# from process import process
from tweet import tweet
# from main.models import Episode, Raw_File, Cut_List
class pmc_tweet(tweet):
ready_state = 5
feed = ''
def process_ep(self, ep):
if self.options.verbose: print ep.id, ep.name
show = ep.show
client = show.client
# translate blip short URL into pretty URL that pmc has
blip=blip_uploader.Blip()
xml_code = blip.Get_VideoMeta(ep.host_url)
info = blip.Parse_VideoMeta(xml_code)
host_url = info['link']
if not self.feed:
self.feed = feedparser.parse( 'http://python.mirocommunity.org/feeds/category/djangocon-2011?count=1000')
# get the URLs we care about:
# [(pmc1, [blip1]),(pmc2, [blip2])...)
# the [blipN] list should only have 1 item.
urlss = [
(fe['link'],
[ l['href'] for l in fe['links'] if l['rel']=='via' ][0])
for fe in self.feed['entries']
]
# print urlss
# given the blip url (ep.host_url)
# find the pmc url (public url)
# again, there should only be 1
public_urls = [
u[0] for u in urlss
if u[1] == host_url]
# if u[1].split('/')[-1] == ep.host_url]
print public_urls
if public_urls:
# urls[0] because there can be only 1
# '/'.join....split('/')... to drop the tail of the URL
# it is just a truncated title
# warning: MC specific.
public_url = '/'.join(public_urls[0].split('/')[:-1])
ep.public_url = public_url
ep.save()
prefix = "#%s #VIDEO" % show.client.slug
# prefix = "#DjangoCon #VIDEO"
tweet = self.mk_tweet(prefix, ep.name, ep.authors, public_url)
user = 'nextdayvideo'
ret=self.tweet_tweet(user, tweet)
else:
ret=False
return ret
if __name__ == '__main__':
p=pmc_tweet()
p.main()
|
py | 1a4311e2764c0432eef5e089455b52553c0cfbb5 | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import collections
from mixbox import fields
import stix
from stix.bindings import stix_common as stix_common_binding
class Profiles(collections.MutableSequence, stix.Entity):
_binding = stix_common_binding
_binding_class = stix_common_binding.ProfilesType
_namespace = 'http://stix.mitre.org/common-1'
# Fields
profile = fields.TypedField("Profile", multiple=True)
def __init__(self, profiles=None):
super(Profiles, self).__init__()
self.profile = profiles
def __len__(self):
return self.profile.__len__()
def __getitem__(self, item):
return self.profile.__getitem__(item)
def __setitem__(self, key, value):
self.profile.__setitem__(key, value)
def __delitem__(self, key):
self.profile.__delitem__(key)
def insert(self, index, value):
self.profile.insert(index, value)
def to_dict(self):
return [x for x in self]
@classmethod
def from_dict(cls, cls_dict=None):
if not cls_dict:
return None
obj = cls()
obj.profile = [x for x in cls_dict]
return obj
|
py | 1a431273fbe7cb39a4a4195d87dce9f9737480ae | from django.apps import AppConfig
class ProjetConfig(AppConfig):
name = 'project'
|
py | 1a431281fc90f55b111317264ae8d35c22d5a68e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Scriptim (https://github.com/Scriptim)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module runs a `abalone.game.Game`."""
from traceback import format_exc
from typing import Generator, List, Tuple, Union
from abalone.abstract_player import AbstractPlayer
from abalone.enums import Direction, Player, Space
from abalone.game import Game, IllegalMoveException
from abalone.utils import line_from_to
def _get_winner(score: Tuple[int, int]) -> Union[Player, None]:
"""Returns the winner of the game based on the current score.
Args:
score: The score tuple returned by `abalone.game.Game.get_score`
Returns:
Either the `abalone.enums.Player` who won the game or `None` if no one has won yet.
"""
if 8 in score:
return Player.WHITE if score[0] == 8 else Player.BLACK
return None
def _format_move(turn: Player, move: Tuple[Union[Space, Tuple[Space, Space]], Direction], moves: int) -> str:
"""Formats a player's move as a string with a single line.
Args:
turn: The `Player` who performs the move
move: The move as returned by `abalone.abstract_player.AbstractPlayer.turn`
moves: The number of total moves made so far (not including this move)
"""
marbles = [move[0]] if isinstance(move[0], Space) else line_from_to(*move[0])[0]
marbles = map(lambda space: space.name, marbles)
return f'{moves + 1}: {turn.name} moves {", ".join(marbles)} in direction {move[1].name}'
def run_game(black: AbstractPlayer, white: AbstractPlayer, **kwargs) \
-> Generator[Tuple[Game, List[Tuple[Union[Space, Tuple[Space, Space]], Direction]]], None, None]:
"""Runs a game instance and prints the progress / current state at every turn.
Args:
black: An `abalone.abstract_player.AbstractPlayer`
white: An `abalone.abstract_player.AbstractPlayer`
**kwargs: These arguments are passed to `abalone.game.Game.__init__`
Yields:
A tuple of the current `abalone.game.Game` instance and the move history at the start of the game and after\
every legal turn.
"""
game = Game()
moves_history = []
yield game, moves_history
while True:
score = game.get_score()
score_str = f'BLACK {score[0]} - WHITE {score[1]}'
print(score_str, game, '', sep='\n')
winner = _get_winner(score)
if winner is not None:
print(f'{winner.name} won!')
break
try:
move = black.turn(game, moves_history) if game.turn is Player.BLACK else white.turn(game, moves_history)
print(_format_move(game.turn, move, len(moves_history)), end='\n\n')
game.move(*move)
game.switch_player()
moves_history.append(move)
yield game, moves_history
except IllegalMoveException as ex:
print(f'{game.turn.name}\'s tried to perform an illegal move ({ex})\n')
break
except:
print(f'{game.turn.name}\'s move caused an exception\n')
print(format_exc())
break
if __name__ == '__main__': # pragma: no cover
# Run a game from the command line with default configuration.
import importlib
import sys
if len(sys.argv) != 3:
sys.exit(1)
black = sys.argv[1].rsplit('.', 1)
black = getattr(importlib.import_module(black[0]), black[1])
white = sys.argv[2].rsplit('.', 1)
white = getattr(importlib.import_module(white[0]), white[1])
list(run_game(black(), white()))
|
py | 1a4312d3c9829532edb3136370ec5ca2d93d2a73 | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['xr_tank'],
package_dir={'': 'src'}
)
setup(**setup_args)
|
py | 1a4313d2ced4f25f1835ff06ac28480b7a91762d | import argparse
import os.path
import re
import sys
def Convert(fh):
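    """Convert breakseq2 VCF records to SVP rows: PASS calls of size >= 100 only,
    writing DEL as DEL, DUP as INS and INV as INV."""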
for line in fh.readlines():
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT FHS-CRS-4744.chr20.recaled
chrom,pos,locus_id,ref,alt,qual,fil,info,formt,sample = line.strip().split('\t')
pos = int(pos)
# 1 46366107 . T <DEL> . PASS SVLEN=-329;SVTYPE=DEL;END=46366436 GT:ABC:PE:REFCOUNTS 0/1:0,0,6:6:17,8
svtype = alt.split(":")[0].lstrip("<").rstrip(">")
end = int(info.split("END=")[1].split(";")[0])
size = abs(int(info.split("SVLEN=")[1].split(";")[0]))
if(fil == "PASS"):
if svtype == "DEL":
if size >= 100:
fout.write("%s\t.\t%d\t.\t.\t%d\t.\tDEL\t%d\t%s\n" % (chrom, min(pos, end),max(pos, end), abs(size), info))
if svtype == "DUP":
if size >= 100:
fout.write("%s\t.\t%d\t.\t.\t%d\t.\tINS\t%d\t%s\n" % (chrom, min(pos, end),max(pos, end), abs(size), info))
if svtype == "INV":
if size >= 100:
fout.write("%s\t.\t%d\t.\t.\t%d\t.\tINV\t%d\t%s\n" % (chrom, min(pos, end),max(pos, end), abs(size), info))
def pullHeader(fh):
"""
Pulls the header from vcf file
"""
while True:
line = fh.readline()
if line.startswith("##INFO=<ID=IMPRECISE"):
continue
if line.startswith("##INFO=<ID=PRECISE"):
continue
if line.startswith("##INFO="):
#sys.stdout.write(line)
fout.write(line)
if line.startswith("##FOR"):
#sys.stdout.write(line.replace("FORMAT","INFO"))
fout.write(line.replace("FORMAT","INFO"))
if line.startswith("#CH"):
return
        if not line:  # EOF reached before the #CHROM header line
            sys.stderr.write("ERROR! No read good.\n")
            exit(10)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("version", type=str, help="Breakseq2 Version")
parser.add_argument("input", type=str, help="Breakseq2 final vcf file. Concatenate all the vcf output files into a single vcf")
parser.add_argument("source", type=str, help="unique identifier. Fullpath to bam or samplename")
args = parser.parse_args()
samplename = os.path.splitext(os.path.basename(args.source))[0]
svpfile = samplename + "_BS.svp"
with open(args.input, 'r') as fh, open(svpfile, 'w') as fout:
fout.write("##program=" + args.version+"\n")
fout.write("##abbrev=BS"+"\n")
fout.write("##source=" + args.source+"\n")
pullHeader(fh)
mergeHeader1 = "#CHROM\tOUTERSTART\tSTART\tINNERSTART\tINNEREND\tEND\tOUTEREND\tTYPE\tSIZE\tINFO"
fout.write(mergeHeader1+"\n")
Convert(fh)
|
py | 1a4313e71d4a0bad1d1aff606c090f5381b4e476 | from flask import Flask, request
from flask_restful import Resource, Api
from flask_cors import CORS
import os
import json
import pandas as pd
import datetime
import time
from filelock import Timeout, FileLock
app = Flask(__name__)
api = Api(app)
CORS(app)
with open("server_config.json") as f:
config = json.load(f)
lock_assigned_sequences = FileLock(config["assignedSequencesFile"] + ".lock")
lock_data = FileLock(config["dataFile"] + ".lock")
lock_data_sandbox = FileLock(config["dataSandboxFile"] + ".lock")
lock_when_to_stop = FileLock(config["dashboardFile"] + ".lock")
lock_submission_file = FileLock(config["submitFile"] + ".lock")
class InitializePreview(Resource):
def initialize_vars(self):
self.trial_feedback = request.args.get("trialFeedback")
self.timestamp = datetime.datetime.now()
def get_sequence_info(self, feedback):
assigned_file = config["previewSequenceFile"]
with open(assigned_file) as f:
sequence_info = json.load(f)
index_to_run = 0
run_info = {"index_to_run": index_to_run,
"sequenceFile": assigned_file,
"images": sequence_info["sequences"][index_to_run],
"blocked": int(False),
"finished": int(False),
"timestamp": self.timestamp.__str__()}
# only send correct answers along if you will be giving trial feedback (less risk of tech savvy workers using
        # it to get perfect scores)
if feedback:
run_info["conditions"] = sequence_info["types"][run_info["index_to_run"]]
return run_info
def get(self):
self.initialize_vars()
return_dict = self.get_sequence_info(self.trial_feedback)
return_dict["running"] = False
return return_dict
class InitializeRun(Resource):
def initialize_vars(self):
self.workerId = request.args.get("workerId")
self.medium = request.args.get("medium")
self.trial_feedback = request.args.get("trialFeedback")
self.assigned_sequences_df = pd.read_csv(config["assignedSequencesFile"], delimiter=",")
self.assigned_sequences_df = self.assigned_sequences_df.set_index("workerId", drop=False)
self.timestamp = datetime.datetime.now()
def available_sequences(self):
sequence_files = os.listdir(os.path.join(config["sequenceDir"]))
assigned_files = self.assigned_sequences_df["sequenceFile"].values.tolist()
assigned_files = [os.path.basename(x) for x in assigned_files]
available_files = [x for x in sequence_files if x not in assigned_files and x.endswith(".json")]
available_files = [x for x in available_files if not os.path.samefile(os.path.join(config["sequenceDir"],x), config["previewSequenceFile"])]
available_files = sorted(available_files)
return available_files
def assign_new_sequence(self, workerId):
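        """Assign the first still-unassigned sequence file to a new workerId and record it in the dataframe."""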
if workerId in self.assigned_sequences_df["workerId"].values:
raise Exception('cannot assign new sequence, workerId already has one')
else:
available_files = self.available_sequences()
assigned_file = os.path.join(config["sequenceDir"], available_files[0])
new_row = {"workerId": workerId,
"sequenceFile": assigned_file,
"indexToRun": int(0),
"blocked": False,
"finished": False,
"timestamp": self.timestamp.__str__(),
"version": config["version"]}
self.assigned_sequences_df = self.assigned_sequences_df.append(pd.DataFrame(new_row, index=[0]),
ignore_index=True)
self.assigned_sequences_df = self.assigned_sequences_df.set_index("workerId", drop=False)
def already_running(self, workerId, timestamp, new_worker):
if new_worker:
return False
else:
            # if previous initialization was less than 4 minutes ago, session is probably still active
previous_timestamp = self.assigned_sequences_df.loc[workerId, "timestamp"]
previous_timestamp = datetime.datetime.strptime(previous_timestamp.__str__(), "%Y-%m-%d %H:%M:%S.%f")
return (timestamp - previous_timestamp) < datetime.timedelta(minutes=4)
def get_sequence_info(self, workerId, feedback):
assigned_file = self.assigned_sequences_df.loc[workerId, "sequenceFile"]
with open(assigned_file) as f:
sequence_info = json.load(f)
index_to_run = int(self.assigned_sequences_df.loc[workerId, "indexToRun"])
run_info = {"index_to_run": index_to_run,
"sequenceFile": str(self.assigned_sequences_df.loc[workerId, "sequenceFile"]),
"images": sequence_info["sequences"][index_to_run],
"blocked": int(self.assigned_sequences_df.loc[workerId, "blocked"]),
"finished": int(self.assigned_sequences_df.loc[workerId, "finished"]),
"maintenance": config["maintenance"],
"timestamp": self.timestamp.__str__()}
# only send correct answers along if you will be giving trial feedback (less risk of tech savvy workers using
        # it to get perfect scores)
if feedback:
run_info["conditions"] = sequence_info["types"][run_info["index_to_run"]]
return run_info
def update_df(self, run_info):
if not (run_info["running"] or run_info["finished"] or run_info["blocked"] or run_info["maintenance"]):
if run_info["index_to_run"] + 1 >= config["maxNumRuns"]:
self.assigned_sequences_df.at[self.workerId, "finished"] = True
else:
self.assigned_sequences_df.at[self.workerId, "indexToRun"] = run_info["index_to_run"] + 1
self.assigned_sequences_df.at[self.workerId, "timestamp"] = self.timestamp.__str__()
self.assigned_sequences_df.to_csv(config["assignedSequencesFile"], index=False)
def get(self):
with lock_assigned_sequences:
self.initialize_vars()
# assign sequence file if worker is new
if self.workerId not in self.assigned_sequences_df["workerId"].values:
new_worker = True
self.assign_new_sequence(self.workerId)
else:
new_worker = False
# get assigned sequence info
return_dict = self.get_sequence_info(self.workerId, self.trial_feedback)
# check if another run might be active
return_dict["running"] = self.already_running(self.workerId, self.timestamp, new_worker)
# update the database
self.update_df(return_dict)
return return_dict
class FinalizeRun(Resource):
def initialize_vars(self):
start = time.time()
self.data_received = request.get_json()
self.medium = self.data_received["medium"]
self.sequence_info = self.get_sequence_info(self.data_received["sequenceFile"])
self.return_dict = \
{"blocked": False, # initializing, will be set to True if blocked,
"finished": self.data_received["indexToRun"] + 1 >= config["maxNumRuns"],
"maintenance": config["maintenance"]}
end = time.time()
print("initialized vars, took ", end - start, " seconds")
def get_sequence_info(self, sequence_file):
with open(sequence_file) as f:
sequence_info = json.load(f)
return sequence_info
def update_data_file(self):
start = time.time()
data_received = self.data_received
sequence_info = self.sequence_info
run_index = data_received["indexToRun"]
num_trials = data_received["numTrials"]
meta_data = {
"medium": data_received["medium"],
"sequenceFile": data_received["sequenceFile"],
"workerId": data_received["workerId"],
"assignmentId": data_received["assignmentId"],
"timestamp": data_received["timestamp"],
"runIndex": run_index,
"initTime": data_received["initTime"],
"finishTime": data_received["finishTime"]
}
# Setting data file and lock
if self.medium == "mturk_sandbox":
data_file = config["dataSandboxFile"]
lock = lock_data_sandbox
else:
data_file = config["dataFile"]
lock = lock_data
print(lock)
with lock:
data_all = pd.read_csv(data_file)
# Trial data
data = {
"response": [1 if i in data_received["responseIndices"] else 0 for i in range(num_trials)],
"trialIndex": list(range(data_received["numTrials"])),
"condition": sequence_info["types"][run_index][0:num_trials],
"image": sequence_info["sequences"][run_index][0:num_trials]
}
df = pd.DataFrame.from_dict(data, orient='index').transpose()
df = pd.concat([df, pd.DataFrame([meta_data] * num_trials)], axis=1)
data_all = data_all.append(df, ignore_index=True)
data_all.to_csv(data_file, index=False)
end = time.time()
print("updated data df, took ", end - start, " seconds")
def compute_scores(self):
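        """Return the hit rate on repeat trials and the number of false alarms on no-repeat trials."""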
start = time.time()
data_received = self.data_received
sequence_info = self.sequence_info
run_index = data_received["indexToRun"]
num_trials = data_received["numTrials"]
repeat_indices = []
for i in range(num_trials):
if sequence_info["types"][run_index][i] in config["conditionLabels"]["repeatTrials"]:
repeat_indices.append(i)
no_repeat_indices = []
for i in range(num_trials):
if sequence_info["types"][run_index][i] in config["conditionLabels"]["noRepeatTrials"]:
no_repeat_indices.append(i)
hits = set(repeat_indices) & set(data_received["responseIndices"])
false_alarms = set(no_repeat_indices) & set(data_received["responseIndices"])
end = time.time()
print("computed scores, took ", end - start, " seconds")
return {"hit_rate": float(len(hits)) / len(repeat_indices) if len(repeat_indices) > 0 else -1,
"false_alarm_num": len(false_alarms)}
def evaluate_vigilance(self, vig_hr_criterion, far_criterion):
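        """Return 'pass' or 'fail' based on the vigilance-repeat hit rate and the false-alarm rate on non-repeat trials."""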
start = time.time()
# initializing
data_received = self.data_received
sequence_info = self.sequence_info
run_index = data_received["indexToRun"]
num_trials = data_received["numTrials"]
passing_criteria = True
vig_repeat_indices = [i for i in range(num_trials) if sequence_info["types"][run_index][i] == "vig repeat"]
no_repeat_indices = [i for i in range(num_trials) if sequence_info["types"][run_index][i] in ["filler",
"target",
"vig"]]
if len(vig_repeat_indices) > 0:
vig_hits = set(vig_repeat_indices) & set(data_received["responseIndices"])
vig_hit_rate = float(len(vig_hits)) / len(vig_repeat_indices)
if vig_hit_rate < vig_hr_criterion:
passing_criteria = False
false_alarms = set(no_repeat_indices) & set(data_received["responseIndices"])
false_alarm_rate = float(len(false_alarms))/len(no_repeat_indices)
if false_alarm_rate >= far_criterion:
passing_criteria = False
end = time.time()
print("evaluated vigilance, took ", end - start, " seconds")
return "pass" if passing_criteria else "fail"
def block_worker(self, workerId):
start = time.time()
print("blocking")
self.return_dict["blocked"] = True
with lock_assigned_sequences:
assigned_sequences_df = pd.read_csv(config["assignedSequencesFile"], delimiter=",")
assigned_sequences_df = assigned_sequences_df.set_index("workerId", drop=False)
assigned_sequences_df.at[workerId, "blocked"] = True
assigned_sequences_df.to_csv(config["assignedSequencesFile"], index=False)
end = time.time()
print("blocked worker, took ", end - start, " seconds")
def update_dashboard(self, valid):
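        """Increment the total and valid block counters in the dashboard file (under its lock)."""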
start = time.time()
with lock_when_to_stop:
with open(config["dashboardFile"]) as f:
dashboard = json.load(f)
dashboard["numBlocksTotalSoFar"] += 1
dashboard["numValidBlocksSoFar"] += valid
with open(config["dashboardFile"], 'w') as fp:
json.dump(dashboard, fp)
end = time.time()
print("updated when to stop, took ", end - start, " seconds")
def post(self):
self.initialize_vars()
if not self.data_received['preview']:
self.update_data_file()
valid = 0
# Check vigilance performance and block if necessary
if self.data_received['workerId'] not in config["whitelistWorkerIds"]:
if self.evaluate_vigilance(config["blockingCriteria"]["vigHrCriterion"],
config["blockingCriteria"]["farCriterion"]) == "fail":
self.block_worker(self.data_received["workerId"])
else:
valid = 1
else:
valid = 1
self.update_dashboard(valid)
# Add scores to return_dict
self.return_dict.update(self.compute_scores())
return self.return_dict
class SubmitRuns(Resource):
def initialize_vars(self):
self.data_received = request.get_json()
def update_submissions(self):
data = self.data_received
with lock_submission_file:
submitted_runs_df = pd.read_csv(config["submitFile"])
submitted_runs_df = submitted_runs_df.append(data, ignore_index=True)
submitted_runs_df.to_csv(config["submitFile"], index=False)
def post(self):
self.initialize_vars()
self.update_submissions()
return ("submission successful")
api.add_resource(InitializePreview, '/initializepreview')
api.add_resource(InitializeRun, '/initializerun')
api.add_resource(FinalizeRun, '/finalizerun')
api.add_resource(SubmitRuns, '/submitruns')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=config["port"], debug=True)
|
py | 1a43140f84858dc692e32637f5c1cef04cd85563 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from urllib import quote
import tempfile
import shutil
from os.path import abspath, join, dirname
import os
from datetime import datetime, timedelta
import pytz
import subprocess
from json import loads
import tornado.web
from preggy import expect
from mock import Mock, patch
import unittest
from thumbor.config import Config
from thumbor.importer import Importer
from thumbor.context import Context, ServerParameters, RequestParameters
from thumbor.handlers import FetchResult, BaseHandler
from thumbor.storages.file_storage import Storage as FileStorage
from thumbor.storages.no_storage import Storage as NoStorage
from thumbor.utils import which
from tests.base import TestCase, PythonTestCase, normalize_unicode_path
from thumbor.engines.pil import Engine
from libthumbor import CryptoURL
from tests.fixtures.images import (
default_image,
alabama1,
space_image,
invalid_quantization,
animated_image,
not_so_animated_image,
)
class FetchResultTestCase(PythonTestCase):
def test_can_create_default_fetch_result(self):
result = FetchResult()
expect(result.normalized).to_be_false()
expect(result.buffer).to_be_null()
expect(result.engine).to_be_null()
expect(result.successful).to_be_false()
expect(result.loader_error).to_be_null()
def test_can_create_fetch_result(self):
buffer_mock = Mock()
engine_mock = Mock()
error_mock = Mock()
result = FetchResult(
normalized=True,
buffer=buffer_mock,
engine=engine_mock,
successful=True,
loader_error=error_mock,
)
expect(result.normalized).to_be_true()
expect(result.buffer).to_equal(buffer_mock)
expect(result.engine).to_equal(engine_mock)
expect(result.successful).to_be_true()
expect(result.loader_error).to_equal(error_mock)
class ErrorHandler(BaseHandler):
def get(self):
self._error(403)
class BaseHandlerTestApp(tornado.web.Application):
def __init__(self, context):
self.context = context
super(BaseHandlerTestApp, self).__init__([
(r'/error', ErrorHandler),
])
class BaseImagingTestCase(TestCase):
@classmethod
def setUpClass(cls, *args, **kw):
cls.root_path = tempfile.mkdtemp()
cls.loader_path = abspath(join(dirname(__file__), '../fixtures/images/'))
cls.base_uri = "/image"
@classmethod
def tearDownClass(cls, *args, **kw):
shutil.rmtree(cls.root_path)
class ImagingOperationsWithHttpLoaderTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.http_loader"
cfg.STORAGE = "thumbor.storages.file_storage"
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
def test_image_already_generated_by_thumbor(self):
with open('./tests/fixtures/images/image.jpg', 'r') as f:
self.context.modules.storage.put(
quote("http://test.com/smart/image.jpg"),
f.read()
)
crypto = CryptoURL('ACME-SEC')
image_url = self.get_url(
crypto.generate(
image_url=quote("http://test.com/smart/image.jpg")
)
)
url = crypto.generate(
image_url=quote(image_url)
)
response = self.fetch(url)
expect(response.code).to_equal(200)
def test_image_already_generated_by_thumbor_2_times(self):
with open(
normalize_unicode_path(u'./tests/fixtures/images/alabama1_ap620é.jpg'), 'r'
) as f:
self.context.modules.storage.put(
quote("http://test.com/smart/alabama1_ap620é"),
f.read()
)
crypto = CryptoURL('ACME-SEC')
image_url = self.get_url(
crypto.generate(
image_url=quote(self.get_url(
crypto.generate(
image_url=quote("http://test.com/smart/alabama1_ap620é")
)
))
)
)
url = crypto.generate(
image_url=quote(image_url)
)
response = self.fetch(url)
expect(response.code).to_equal(200)
def test_image_with_utf8_url(self):
with open('./tests/fixtures/images/maracujá.jpg', 'r') as f:
self.context.modules.storage.put(
quote(u"http://test.com/maracujá.jpg".encode('utf-8')),
f.read()
)
crypto = CryptoURL('ACME-SEC')
image_url = self.get_url(
quote(u"/unsafe/http://test.com/maracujá.jpg".encode('utf-8'))
)
url = crypto.generate(
image_url=quote(image_url)
)
response = self.fetch(url)
expect(response.code).to_equal(200)
class ImagingOperationsTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.file_storage"
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.QUALITY = 'keep'
cfg.SVG_DPI = 200
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
def test_can_get_image(self):
response = self.fetch('/unsafe/smart/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_can_get_image_without_extension(self):
response = self.fetch('/unsafe/smart/image')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_get_unknown_image_returns_not_found(self):
response = self.fetch('/unsafe/smart/imag')
expect(response.code).to_equal(404)
def test_can_get_unicode_image(self):
response = self.fetch(u'/unsafe/%s' % quote(u'15967251_212831_19242645_АгатавЗоопарке.jpg'.encode('utf-8')))
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_can_get_signed_regular_image(self):
response = self.fetch('/_wIUeSaeHw8dricKG2MGhqu5thk=/smart/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_url_without_unsafe_or_hash_fails(self):
response = self.fetch('/alabama1_ap620%C3%A9.jpg')
expect(response.code).to_equal(400)
def test_url_without_image(self):
response = self.fetch('/unsafe/')
expect(response.code).to_equal(400)
def test_utf8_encoded_image_name_with_encoded_url(self):
url = '/lc6e3kkm_2Ww7NWho8HPOe-sqLU=/smart/alabama1_ap620%C3%A9.jpg'
response = self.fetch(url)
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(alabama1())
def test_url_with_encoded_hash(self):
url = '/%D1%80=/alabama1_ap620%C3%A9.jpg'
response = self.fetch(url)
expect(response.code).to_equal(400)
def test_image_with_spaces_on_url(self):
response = self.fetch(u'/unsafe/image%20space.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(space_image())
def test_can_get_image_with_filter(self):
response = self.fetch('/5YRxzS2yxZxj9SZ50SoZ11eIdDI=/filters:fill(blue)/image.jpg')
expect(response.code).to_equal(200)
def test_can_get_image_with_invalid_quantization_table(self):
response = self.fetch('/unsafe/invalid_quantization.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(invalid_quantization())
def test_getting_invalid_image_returns_bad_request(self):
response = self.fetch('/unsafe/image_invalid.jpg')
expect(response.code).to_equal(400)
def test_can_read_monochromatic_jpeg(self):
response = self.fetch('/unsafe/grayscale.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_jpeg()
def test_can_read_image_with_small_width_and_no_height(self):
response = self.fetch('/unsafe/0x0:1681x596/1x/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_jpeg()
def test_can_read_cmyk_jpeg(self):
response = self.fetch('/unsafe/cmyk.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_jpeg()
def test_can_read_cmyk_jpeg_as_png(self):
response = self.fetch('/unsafe/filters:format(png)/cmyk.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
def test_can_read_image_svg_with_px_units_and_convert_png(self):
response = self.fetch('/unsafe/Commons-logo.svg')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
engine = Engine(self.context)
engine.load(response.body, '.png')
expect(engine.size).to_equal((1024, 1376))
def test_can_read_image_svg_with_inch_units_and_convert_png(self):
response = self.fetch('/unsafe/Commons-logo-inches.svg')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
engine = Engine(self.context)
engine.load(response.body, '.png')
expect(engine.size).to_equal((2000, 2600))
def test_can_read_8bit_tiff_as_png(self):
response = self.fetch('/unsafe/gradient_8bit.tif')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
def test_can_read_16bit_lsb_tiff_as_png(self):
response = self.fetch('/unsafe/gradient_lsb_16bperchannel.tif')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
def test_can_read_16bit_msb_tiff_as_png(self):
response = self.fetch('/unsafe/gradient_msb_16bperchannel.tif')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
class ImageOperationsWithoutUnsafeTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.ALLOW_UNSAFE_URL = False
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8890, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
def test_can_get_image_with_signed_url(self):
response = self.fetch('/_wIUeSaeHw8dricKG2MGhqu5thk=/smart/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_getting_unsafe_image_fails(self):
response = self.fetch('/unsafe/smart/image.jpg')
expect(response.code).to_equal(400)
class ImageOperationsWithStoredKeysTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='MYKEY')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.ALLOW_UNSAFE_URL = False
cfg.ALLOW_OLD_URLS = True
cfg.STORES_CRYPTO_KEY_FOR_EACH_IMAGE = True
cfg.STORAGE = 'thumbor.storages.file_storage'
cfg.STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8891, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'MYKEY'
return Context(server, cfg, importer)
def test_stored_security_key_with_regular_image(self):
storage = self.context.modules.storage
self.context.server.security_key = 'MYKEY'
storage.put_crypto('image.jpg') # Write a file on the file storage containing the security key
self.context.server.security_key = 'MYKEY2'
try:
response = self.fetch('/nty7gpBIRJ3GWtYDLLw6q1PgqTo=/smart/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
finally:
self.context.server.security_key = 'MYKEY'
def test_stored_security_key_with_regular_image_with_querystring(self):
storage = self.context.modules.storage
self.context.server.security_key = 'MYKEY'
storage.put_crypto('image.jpg%3Fts%3D1') # Write a file on the file storage containing the security key
self.context.server.security_key = 'MYKEY2'
response = self.fetch('/Iw7LZGdr-hHj2gQ4ZzksP3llQHY=/smart/image.jpg%3Fts%3D1')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_stored_security_key_with_regular_image_with_hash(self):
storage = self.context.modules.storage
self.context.server.security_key = 'MYKEY'
storage.put_crypto('image.jpg%23something') # Write a file on the file storage containing the security key
self.context.server.security_key = 'MYKEY2'
response = self.fetch('/fxOHtHcTZMyuAQ1YPKh9KWg7nO8=/smart/image.jpg%23something')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
class ImageOperationsWithAutoWebPTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
def get_as_webp(self, url):
return self.fetch(url, headers={
"Accept": 'image/webp,*/*;q=0.8'
})
def test_can_auto_convert_jpeg(self):
response = self.get_as_webp('/unsafe/image.jpg')
expect(response.code).to_equal(200)
expect(response.headers).to_include('Vary')
expect(response.headers['Vary']).to_include('Accept')
expect(response.body).to_be_webp()
def test_should_bad_request_if_bigger_than_75_megapixels(self):
response = self.get_as_webp('/unsafe/16384x16384.png')
expect(response.code).to_equal(400)
def test_should_bad_request_if_bigger_than_75_megapixels_jpeg(self):
response = self.get_as_webp('/unsafe/9643x10328.jpg')
expect(response.code).to_equal(400)
def test_should_not_convert_animated_gifs_to_webp(self):
response = self.get_as_webp('/unsafe/animated.gif')
expect(response.code).to_equal(200)
expect(response.headers).not_to_include('Vary')
expect(response.body).to_be_gif()
def test_should_convert_image_with_small_width_and_no_height(self):
response = self.get_as_webp('/unsafe/0x0:1681x596/1x/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_webp()
def test_should_convert_monochromatic_jpeg(self):
response = self.get_as_webp('/unsafe/grayscale.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_webp()
def test_should_convert_cmyk_jpeg(self):
response = self.get_as_webp('/unsafe/cmyk.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_webp()
def test_shouldnt_convert_cmyk_jpeg_if_format_specified(self):
response = self.get_as_webp('/unsafe/filters:format(png)/cmyk.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
def test_shouldnt_convert_cmyk_jpeg_if_gif(self):
response = self.get_as_webp('/unsafe/filters:format(gif)/cmyk.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_gif()
def test_shouldnt_convert_cmyk_if_format_specified(self):
response = self.get_as_webp('/unsafe/filters:format(gif)/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_gif()
def test_converting_return_etags(self):
response = self.get_as_webp('/unsafe/image.jpg')
expect(response.headers).to_include('Etag')
class ImageOperationsWithAutoWebPWithResultStorageTestCase(BaseImagingTestCase):
def get_request(self, *args, **kwargs):
return RequestParameters(*args, **kwargs)
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.request = self.get_request()
ctx.server.gifsicle_path = which('gifsicle')
return ctx
@property
def result_storage(self):
return self.context.modules.result_storage
def get_as_webp(self, url):
return self.fetch(url, headers={
"Accept": 'image/webp,*/*;q=0.8'
})
@patch('thumbor.handlers.Context')
def test_can_auto_convert_jpeg_from_result_storage(self, context_mock):
context_mock.return_value = self.context
crypto = CryptoURL('ACME-SEC')
url = crypto.generate(image_url=quote("http://test.com/smart/image.jpg"))
self.context.request = self.get_request(url=url, accepts_webp=True)
with open('./tests/fixtures/images/image.webp', 'r') as f:
self.context.modules.result_storage.put(f.read())
response = self.get_as_webp(url)
expect(response.code).to_equal(200)
expect(response.headers).to_include('Vary')
expect(response.headers['Vary']).to_include('Accept')
expect(response.body).to_be_webp()
expect(self.context.request.engine.extension).to_equal('.webp')
@patch('thumbor.handlers.Context')
def test_can_auto_convert_unsafe_jpeg_from_result_storage(self, context_mock):
context_mock.return_value = self.context
self.context.request = self.get_request(accepts_webp=True)
response = self.get_as_webp('/unsafe/image.jpg')
expect(response.code).to_equal(200)
expect(response.headers).to_include('Vary')
expect(response.headers['Vary']).to_include('Accept')
expect(response.body).to_be_webp()
expect(self.context.request.engine.extension).to_equal('.webp')
class ImageOperationsWithoutEtagsTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.ENABLE_ETAGS = False
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
def test_can_get_image_without_etags(self):
response = self.fetch('/unsafe/image.jpg', headers={
"Accept": 'image/webp,*/*;q=0.8'
})
expect(response.code).to_equal(200)
expect(response.headers).not_to_include('Etag')
class ImageOperationsWithLastModifiedTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.SEND_IF_MODIFIED_LAST_MODIFIED_HEADERS = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
@property
def result_storage(self):
return self.context.modules.result_storage
def write_image(self):
expected_path = self.result_storage.normalize_path('_wIUeSaeHw8dricKG2MGhqu5thk=/smart/image.jpg')
if not os.path.exists(dirname(expected_path)):
os.makedirs(dirname(expected_path))
if not os.path.exists(expected_path):
with open(expected_path, 'w') as img:
img.write(default_image())
def test_can_get_304_with_last_modified(self):
self.write_image()
response = self.fetch(
'/_wIUeSaeHw8dricKG2MGhqu5thk=/smart/image.jpg',
headers={
"Accept": 'image/webp,*/*;q=0.8',
"If-Modified-Since":
(datetime.utcnow() + timedelta(seconds=1))
.replace(tzinfo=pytz.utc).strftime("%a, %d %b %Y %H:%M:%S GMT"), # NOW +1 sec UTC
})
expect(response.code).to_equal(304)
def test_can_get_image_with_last_modified(self):
self.write_image()
response = self.fetch(
'/_wIUeSaeHw8dricKG2MGhqu5thk=/smart/image.jpg',
headers={
"Accept": 'image/webp,*/*;q=0.8',
"If-Modified-Since": (datetime.utcnow() - timedelta(days=365))
.replace(tzinfo=pytz.utc).strftime("%a, %d %b %Y %H:%M:%S GMT"), # Last Year
}
)
expect(response.code).to_equal(200)
expect(response.headers).to_include('Last-Modified')
class ImageOperationsWithGifVTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.FFMPEG_PATH = which('ffmpeg')
cfg.OPTIMIZERS = [
'thumbor.optimizers.gifv',
]
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
def test_should_convert_animated_gif_to_mp4_when_filter_without_params(self):
response = self.fetch('/unsafe/filters:gifv()/animated.gif')
expect(response.code).to_equal(200)
expect(response.headers['Content-Type']).to_equal('video/mp4')
def test_should_convert_animated_gif_to_webm_when_filter_with_gifv_webm_param(self):
response = self.fetch('/unsafe/filters:gifv(webm)/animated.gif')
expect(response.code).to_equal(200)
expect(response.headers['Content-Type']).to_equal('video/webm')
class ImageOperationsImageCoverTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.AUTO_WEBP = True
cfg.USE_GIFSICLE_ENGINE = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
def test_can_get_image_cover(self):
response = self.fetch('/unsafe/filters:cover()/animated.gif')
expect(response.code).to_equal(200)
expect(response.headers['Content-Type']).to_equal('image/gif')
class ImageOperationsWithResultStorageTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.USE_GIFSICLE_ENGINE = True
cfg.FFMPEG_PATH = which('ffmpeg')
cfg.AUTO_WEBP = True
cfg.OPTIMIZERS = [
'thumbor.optimizers.gifv',
]
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
@property
def result_storage(self):
return self.context.modules.result_storage
@patch('tornado.ioloop.IOLoop.instance')
def test_saves_image_to_result_storage(self, instance_mock):
instance_mock.return_value = self.io_loop
response = self.fetch('/gTr2Xr9lbzIa2CT_dL_O0GByeR0=/animated.gif')
expect(response.code).to_equal(200)
self.context.request = Mock(
accepts_webp=False,
)
expected_path = self.result_storage.normalize_path('gTr2Xr9lbzIa2CT_dL_O0GByeR0=/animated.gif')
expect(expected_path).to_exist()
expect(response.body).to_be_similar_to(animated_image())
class ImageOperationsResultStorageOnlyTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = '/tmp/path/that/does/not/exist'
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.FFMPEG_PATH = which('ffmpeg')
cfg.USE_GIFSICLE_ENGINE = True
cfg.AUTO_WEBP = True
cfg.OPTIMIZERS = [
'thumbor.optimizers.gifv',
]
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
@property
def result_storage(self):
return self.context.modules.result_storage
def test_loads_image_from_result_storage(self):
self.context.request = Mock(
accepts_webp=False,
)
expected_path = self.result_storage.normalize_path('gTr2Xr9lbzIa2CT_dL_O0GByeR0=/animated.gif')
os.makedirs(dirname(expected_path))
with open(expected_path, 'w') as img:
img.write(animated_image())
response = self.fetch('/gTr2Xr9lbzIa2CT_dL_O0GByeR0=/animated.gif')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(animated_image())
class ImageOperationsWithGifWithoutGifsicle(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
return ctx
def test_should_be_ok_with_single_frame_gif(self):
response = self.fetch('/5Xr8gyuWE7jL_VB72K0wvzTMm2U=/animated-one-frame.gif')
expect(response.code).to_equal(200)
expect(response.headers['Content-Type']).to_equal('image/gif')
expect(response.body).to_be_similar_to(not_so_animated_image())
class ImageOperationsWithGifWithoutGifsicleOnResultStorage(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = '/tmp/path/that/does/not/exist'
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
return ctx
@property
def result_storage(self):
return self.context.modules.result_storage
def test_loads_image_from_result_storage(self):
self.context.request = Mock(
accepts_webp=False,
)
expected_path = self.result_storage.normalize_path('5Xr8gyuWE7jL_VB72K0wvzTMm2U=/animated-one-frame.gif')
os.makedirs(dirname(expected_path))
with open(expected_path, 'w') as img:
img.write(not_so_animated_image())
response = self.fetch('/5Xr8gyuWE7jL_VB72K0wvzTMm2U=/animated-one-frame.gif')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(not_so_animated_image())
class ImageOperationsWithMaxWidthAndMaxHeight(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.AUTO_WEBP = True
cfg.MAX_WIDTH = 150
cfg.MAX_HEIGHT = 150
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
return ctx
def test_should_be_ok_but_150x150(self):
response = self.fetch('/unsafe/200x200/grayscale.jpg')
engine = Engine(self.context)
engine.load(response.body, '.jpg')
expect(response.code).to_equal(200)
expect(response.headers['Content-Type']).to_equal('image/jpeg')
expect(engine.size).to_equal((150, 150))
class ImageOperationsWithMaxPixels(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.MAX_PIXELS = 1000
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
def test_should_error(self):
response = self.fetch('/unsafe/200x200/grayscale.jpg')
expect(response.code).to_equal(400)
class ImageOperationsWithRespectOrientation(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.RESPECT_ORIENTATION = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
self.context = Context(server, cfg, importer)
self.context.server.gifsicle_path = which('gifsicle')
return self.context
def test_should_be_ok_when_orientation_exif(self):
response = self.fetch('/unsafe/10_years_of_Wikipedia_by_Guillaume_Paumier.jpg')
expect(response.code).to_equal(200)
engine = Engine(self.context)
engine.load(response.body, '.jpg')
expect(engine.size).to_equal((4052, 3456))
def test_should_be_ok_without_orientation_exif(self):
response = self.fetch('/unsafe/20x20.jpg')
expect(response.code).to_equal(200)
engine = Engine(self.context)
engine.load(response.body, '.jpg')
expect(engine.size).to_equal((20, 20))
class EngineLoadException(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.FILTERS = []
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
@unittest.skip("For some strange reason, this test breaks on Travis.")
def test_should_error_on_engine_load_exception(self):
with patch.object(Engine, 'load', side_effect=ValueError):
response = self.fetch('/unsafe/image.jpg')
expect(response.code).to_equal(504)
def test_should_release_ioloop_on_error_on_engine_exception(self):
response = self.fetch('/unsafe/fit-in/134x134/940x2.png')
expect(response.code).to_equal(200)
def test_should_exec_other_operations_on_error_on_engine_exception(self):
response = self.fetch('/unsafe/fit-in/134x134/filters:equalize()/940x2.png')
expect(response.code).to_equal(200)
class StorageOverride(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.file_storage"
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
def test_shouldnt_call_put_when_storage_overridden_to_nostorage(self):
old_load = Engine.load
old_put = FileStorage.put
def load_override(self, foo, bar):
self.context.modules.storage = NoStorage(None)
return old_load(self, foo, bar)
def put_override(self, path, contents):
expect.not_to_be_here()
Engine.load = load_override
FileStorage.put = put_override
response = self.fetch('/unsafe/image.jpg')
Engine.load = old_load
FileStorage.put = old_put
expect(response.code).to_equal(200)
class ImageOperationsWithJpegtranTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.JPEGTRAN_PATH = which('jpegtran')
        cfg.PROGRESSIVE_JPEG = True
        cfg.RESULT_STORAGE_STORES_UNSAFE = True
cfg.OPTIMIZERS = [
'thumbor.optimizers.jpegtran',
]
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
return ctx
def test_should_optimize_jpeg(self):
response = self.fetch('/unsafe/200x200/image.jpg')
tmp_fd, tmp_file_path = tempfile.mkstemp(suffix='.jpg')
f = os.fdopen(tmp_fd, 'w')
f.write(response.body)
f.close()
exiftool = which('exiftool')
if not exiftool:
raise AssertionError('exiftool was not found. Please install it to run thumbor\'s tests.')
command = [
exiftool,
tmp_file_path,
'-DeviceModel',
'-EncodingProcess'
]
try:
with open(os.devnull) as null:
output = subprocess.check_output(command, stdin=null)
expect(response.code).to_equal(200)
expect(output).to_equal('Encoding Process : Progressive DCT, Huffman coding\n')
finally:
os.remove(tmp_file_path)
def test_with_meta(self):
response = self.fetch('/unsafe/meta/800x400/image.jpg')
expect(response.code).to_equal(200)
def test_with_meta_cached(self):
self.fetch('/unsafe/meta/800x400/image.jpg')
response = self.fetch('/unsafe/meta/800x400/image.jpg')
expect(response.code).to_equal(200)
class ImageOperationsWithoutStorage(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.AUTO_WEBP = True
cfg.USE_GIFSICLE_ENGINE = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
def test_meta(self):
response = self.fetch('/unsafe/meta/800x400/image.jpg')
expect(response.code).to_equal(200)
def test_meta_with_unicode(self):
response = self.fetch('/unsafe/meta/200x300/alabama1_ap620%C3%A9.jpg')
expect(response.code).to_equal(200)
obj = loads(response.body)
expect(obj['thumbor']['target']['width']).to_equal(200)
expect(obj['thumbor']['target']['height']).to_equal(300)
def test_meta_frame_count(self):
response = self.fetch('/unsafe/meta/800x400/image.jpg')
expect(response.code).to_equal(200)
obj = loads(response.body)
expect(obj['thumbor']['source']['frameCount']).to_equal(1)
def test_meta_frame_count_with_gif(self):
response = self.fetch('/unsafe/meta/animated.gif')
expect(response.code).to_equal(200)
obj = loads(response.body)
expect(obj['thumbor']['source']['frameCount']).to_equal(2)
def test_max_bytes(self):
response = self.fetch('/unsafe/filters:max_bytes(35000)/Giunchedi%2C_Filippo_January_2015_01.jpg')
expect(response.code).to_equal(200)
expect(len(response.body)).to_be_lesser_or_equal_to(35000)
def test_max_bytes_impossible(self):
response = self.fetch('/unsafe/filters:max_bytes(1000)/Giunchedi%2C_Filippo_January_2015_01.jpg')
expect(response.code).to_equal(200)
expect(len(response.body)).to_be_greater_than(1000)
class TranslateCoordinatesTestCase(TestCase):
def setUp(self, *args, **kwargs):
super(TranslateCoordinatesTestCase, self).setUp(*args, **kwargs)
coords = self.get_coords()
self.translate_crop_coordinates = BaseHandler.translate_crop_coordinates(
original_width=coords['original_width'],
original_height=coords['original_height'],
width=coords['width'],
height=coords['height'],
crop_left=coords['crop_left'],
crop_top=coords['crop_top'],
crop_right=coords['crop_right'],
crop_bottom=coords['crop_bottom']
)
def get_coords(self):
return {
'original_width': 3000,
'original_height': 2000,
'width': 1200,
'height': 800,
'crop_left': 100,
'crop_top': 100,
'crop_right': 200,
'crop_bottom': 200,
'expected_crop': (40, 40, 80, 80)
}
def test_should_be_a_list_of_coords(self):
expect(self.translate_crop_coordinates).to_be_instance_of(tuple)
def test_should_translate_from_original_to_resized(self):
expect(self.translate_crop_coordinates).to_equal(self.get_coords()['expected_crop'])
|
py | 1a431498b5b0314abea7eb511e5c1c2514999fa4 | from distutils.version import LooseVersion
import pytest
import torch
from mmcls.models.utils import channel_shuffle, is_tracing, make_divisible
def test_make_divisible():
# test min_value is None
result = make_divisible(34, 8, None)
assert result == 32
    # test when the rounded value falls below min_ratio * value (bumped up by one divisor)
result = make_divisible(10, 8, min_ratio=0.9)
assert result == 16
    # test min_ratio = 0.8
result = make_divisible(33, 8, min_ratio=0.8)
assert result == 32
def test_channel_shuffle():
x = torch.randn(1, 24, 56, 56)
with pytest.raises(AssertionError):
# num_channels should be divisible by groups
channel_shuffle(x, 7)
groups = 3
batch_size, num_channels, height, width = x.size()
channels_per_group = num_channels // groups
out = channel_shuffle(x, groups)
# test the output value when groups = 3
for b in range(batch_size):
for c in range(num_channels):
c_out = c % channels_per_group * groups + c // channels_per_group
for i in range(height):
for j in range(width):
assert x[b, c, i, j] == out[b, c_out, i, j]
@pytest.mark.skipif(
LooseVersion(torch.__version__) < LooseVersion('1.6.0'),
reason='torch.jit.is_tracing is not available before 1.6.0')
def test_is_tracing():
def foo(x):
if is_tracing():
return x
else:
return x.tolist()
x = torch.rand(3)
# test without trace
assert isinstance(foo(x), list)
# test with trace
traced_foo = torch.jit.trace(foo, (torch.rand(1), ))
assert isinstance(traced_foo(x), torch.Tensor)
|
py | 1a43153089ed01da9ef62098c3b6e9e5ff2e26a7 | # Copyright 2017 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import test_looper.core.algebraic as Algebraic
from test_looper.core.hash import sha_hash
import unittest
expr = Algebraic.Alternative("Expr")
expr.Constant = {'value': int}
expr.Add = {'l': expr, 'r': expr}
expr.Sub = {'l': expr, 'r': expr}
expr.Mul = {'l': expr, 'r': expr}
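# `expr` is a recursive algebraic data type: either a Constant leaf or a
# binary Add/Sub/Mul node whose children are themselves Expr values.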
class AlgebraicTests(unittest.TestCase):
def test_basic(self):
X = Algebraic.Alternative('X', A = {}, B = {})
xa = X.A()
xb = X.B()
self.assertTrue(xa.matches.A)
self.assertFalse(xa.matches.B)
self.assertTrue(xb.matches.B)
self.assertFalse(xb.matches.A)
def test_stable_sha_hashing(self):
#adding default values to a type shouldn't disrupt its hashes
leaf = Algebraic.Alternative("Leaf")
leaf.A = {'a': int}
leaf.B = {'b': int}
leaf.setCreateDefault(lambda: leaf.A(0))
not_leaf = Algebraic.Alternative("NotLeaf")
not_leaf.A = {'z': float, 'leaf': leaf}
not_leaf2 = Algebraic.Alternative("NotLeaf")
not_leaf2.A = {'z': float, 'leaf': leaf, 'int': int}
a_simple_notleaf = not_leaf.A(z=10.0,_fill_in_missing=True)
a_simple_notleaf2 = not_leaf2.A(z=10.0,_fill_in_missing=True)
a_different_notleaf = not_leaf.A(z=10.0, leaf=leaf.B(b=10),_fill_in_missing=True)
a_different_notleaf2 = not_leaf2.A(z=10.0, leaf=leaf.B(b=10),_fill_in_missing=True)
a_final_different_notleaf = not_leaf2.A(z=10.0, leaf=leaf.B(b=10),int=123,_fill_in_missing=True)
self.assertEqual(sha_hash(a_simple_notleaf), sha_hash(a_simple_notleaf2))
self.assertNotEqual(sha_hash(a_simple_notleaf), sha_hash(a_different_notleaf))
self.assertEqual(sha_hash(a_different_notleaf), sha_hash(a_different_notleaf2))
self.assertNotEqual(sha_hash(a_simple_notleaf), sha_hash(a_final_different_notleaf))
self.assertNotEqual(sha_hash(a_different_notleaf), sha_hash(a_final_different_notleaf))
def test_field_lookup(self):
X = Algebraic.Alternative('X', A = {'a': int}, B = {'b': float})
self.assertEqual(X.A(10).a, 10)
with self.assertRaises(AttributeError):
X.A(10).b
self.assertEqual(X.B(11.0).b, 11.0)
with self.assertRaises(AttributeError):
X.B(11.0).a
def test_lists(self):
X = Algebraic.Alternative('X')
X.A = {'val': int}
X.B = {'val': Algebraic.List(X)}
xa = X.A(10)
xb = X.B([xa, X.A(11)])
self.assertTrue(xa.matches.A)
self.assertTrue(xb.matches.B)
self.assertTrue(isinstance(xb.val, tuple))
self.assertTrue(len(xb.val) == 2)
def test_stringification(self):
self.assertEqual(
repr(expr.Add(l = expr(10), r = expr(20))),
"Expr.Add(l=Expr.Constant(value=10),r=Expr.Constant(value=20))"
)
def test_isinstance(self):
self.assertTrue(isinstance(expr(10), Algebraic.AlternativeInstance))
self.assertTrue(isinstance(expr(10), expr.Constant))
def test_coercion(self):
Sub = Algebraic.Alternative('Sub', I={}, S={})
with self.assertRaises(Exception):
Sub.I(Sub.S)
X = Algebraic.Alternative('X', A={'val': Sub})
X.A(val=Sub.S())
with self.assertRaises(Exception):
X.A(val=Sub.S)
def test_coercion_null(self):
Sub = Algebraic.Alternative('Sub', I={}, S={})
X = Algebraic.Alternative('X', I={'val': Algebraic.Nullable(Sub)})
self.assertTrue(X(Sub.I()).val.matches.Value)
def test_equality(self):
for i in range(10):
self.assertEqual(expr.Constant(i).__sha_hash__(), expr.Constant(i).__sha_hash__())
self.assertEqual(hash(expr.Constant(i)), hash(expr.Constant(i)))
self.assertEqual(expr.Constant(i), expr.Constant(i))
self.assertEqual(
expr.Add(l=expr.Constant(i),r=expr.Constant(i+1)),
expr.Add(l=expr.Constant(i),r=expr.Constant(i+1))
)
self.assertNotEqual(
expr.Add(l=expr.Constant(i),r=expr.Constant(i+1)),
expr.Add(l=expr.Constant(i),r=expr.Constant(i+2))
)
self.assertNotEqual(expr.Constant(i), expr.Constant(i+1))
def test_algebraics_in_dicts(self):
d = {}
for i in range(10):
d[expr.Constant(i)] = i
d[expr.Add(l=expr.Constant(i),r=expr.Constant(i+1))] = 2*i+1
for i in range(10):
self.assertEqual(d[expr.Constant(i)], i)
self.assertEqual(d[expr.Add(l=expr.Constant(i),r=expr.Constant(i+1))], 2*i+1)
|
py | 1a4315521e410535022480f1787d8082ba26bce9 | from . import preprocess |
py | 1a4315fc54e284bc8aff3b8ae0e6f942a3cfc74d | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: resource_requirements.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from topology_sdk.model.container import resource_list_pb2 as topology__sdk_dot_model_dot_container_dot_resource__list__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='resource_requirements.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x1bresource_requirements.proto\x12\tcontainer\x1a\x30topology_sdk/model/container/resource_list.proto\"j\n\x14ResourceRequirements\x12\'\n\x06limits\x18\x01 \x01(\x0b\x32\x17.container.ResourceList\x12)\n\x08requests\x18\x02 \x01(\x0b\x32\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[topology__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,])
_RESOURCEREQUIREMENTS = _descriptor.Descriptor(
name='ResourceRequirements',
full_name='container.ResourceRequirements',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='limits', full_name='container.ResourceRequirements.limits', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='requests', full_name='container.ResourceRequirements.requests', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=198,
)
_RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = topology__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
_RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = topology__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEREQUIREMENTS,
'__module__' : 'resource_requirements_pb2'
# @@protoc_insertion_point(class_scope:container.ResourceRequirements)
})
_sym_db.RegisterMessage(ResourceRequirements)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
py | 1a43166670a4993a29869806db950ee63e3287ac | from django.contrib import admin
from profiles.models import Profile, Favorite
admin.site.register(Profile)
admin.site.register(Favorite)
|
py | 1a43168ff3bcb8c6ce0c70fa3ffa26bab18f07e0 | from discord.ext import commands
from os import getenv
from src.internal.bot import Bot
from src.internal.context import Context
from src.internal.checks import in_channel
class Trivia(commands.Cog):
"""Trivia questions."""
def __init__(self, bot: Bot):
self.bot = bot
@commands.group(name="trivia", invoke_without_command=True)
@commands.cooldown(rate=1, per=60, type=commands.BucketType.member)
@in_channel(int(getenv("CHANNEL")))
async def trivia(self, ctx: Context, unique: bool = False):
"""Get a trivia question."""
pass
@trivia.command(name="add")
@commands.check_any(commands.is_owner(), commands.has_role(337442104026595329))
async def trivia_add(self, ctx: Context, *, question: str):
"""Add a new trivia question."""
await ctx.reply("Enter valid answers, and .stop to finish.")
answers = []
while True:
try:
msg = await self.bot.wait_for("message", check=lambda m: m.channel == ctx.channel and m.author == ctx.author, timeout=15)
            except asyncio.TimeoutError:
return await ctx.reply("Selection timed out, exiting.")
answer = msg.content
if answer == ".stop":
break
else:
answers.append(answer)
answers = "`".join(answers)
await self.bot.db.create_trivia_question(ctx.author.id, question, answers)
def setup(bot: Bot):
bot.add_cog(Trivia(bot))
|
py | 1a4316abe3fe5ebdb0a9c3c6a7918a65a8b5e454 | from auth import RSA_SHA256Auth
import requests
import json
import random
import sys
import httplib
from optparse import OptionParser
def rsa_auth_merchant_simple_test(
merchant_id, merchant_user, pemfilename, testbed_token):
print "setting up a RSA auth session with merchant_user private RSA key"
s = requests.Session()
s.auth = RSA_SHA256Auth(pemfilename)
# from this point all requests through s use rsa auth, eg.:
url_base = 'https://mcashtestbed.appspot.com'
headers = {
'Accept': 'application/vnd.mcash.api.merchant.v1+json',
'Content-Type': 'application/json',
'X-Mcash-Merchant': merchant_id,
'X-Mcash-User': merchant_user,
'X-Testbed-Token': testbed_token
}
print "checking if we have a point of sale"
req = requests.Request("GET",
url_base + '/merchant/v1/pos/',
headers=headers
)
r = s.send(s.prepare_request(req))
print "r.status_code =", r.status_code, " ", httplib.responses[r.status_code]
if len(r.json()[u'uris']) == 0:
print "creating a POS (point of sale) with pos_id '1'..."
payload = {
"id": "1",
"name": "Kasse 1",
"type": "store"
}
req = requests.Request("POST",
url_base + '/merchant/v1/pos/',
data=json.dumps(payload),
headers=headers
)
r = s.send(s.prepare_request(req))
print "r.status_code =", r.status_code, " ", httplib.responses[r.status_code]
print "requesting auth for a payment..."
pos_tid = random.randint(0, sys.maxsize)
payload = {
"customer": merchant_id + "-alice",
"pos_id": "1",
"pos_tid": str(pos_tid),
"action": "auth",
"currency": "NOK",
"amount": "100.00",
"additional_amount": "0",
"additional_edit": False,
"allow_credit": False,
"expires_in": 21600,
"text":
"Thanks for your business here at Acme Inc! \nYour payment is being processed.",
"display_message_uri": "https://www.acmeinc.com/pos/3/display/",
"callback_uri": "https://www.acmeinc.com/pos/3/payment/h93d458qo4685/"
}
req = requests.Request("POST",
url_base + '/merchant/v1/payment_request/',
data=json.dumps(payload),
headers=headers
)
r = s.send(s.prepare_request(req))
print "r.status_code =", r.status_code, " ", httplib.responses[r.status_code]
tid = r.json()['id']
print "mocking costumer clicking pay in app"
testbedheaders = {
'X-Testbed-Token': testbed_token
}
url = url_base + \
'/testbed/merchant/{merchant_id}/txn/{tid}/pay/'.format(merchant_id=merchant_id,
tid=tid)
r = requests.post(url, data=json.dumps(payload), headers=testbedheaders)
print "r.status_code =", r.status_code, " ", httplib.responses[r.status_code]
assert r.status_code == 200, "Expected r.status_code to be 200, actually is %i = %s" % (
r.status_code, httplib.responses[r.status_code])
    # or set up an event listener
    print "Polling /outcome/ until status == 'auth'"
status = None
while status != 'auth':
print " getting /outcome/ "
req = requests.Request("GET",
url_base +
'/merchant/v1/payment_request/{tid}/outcome/'.format(tid=tid),
headers=headers)
r = s.send(s.prepare_request(req))
print "r.status_code =", r.status_code, " ", httplib.responses[r.status_code]
assert r.status_code == 200, "Expected r.status_code to be 200, actually is %i = %s" % (
r.status_code, httplib.responses[r.status_code])
d = json.loads(r.text)
status = d['status']
print "status =", status
print "Merchant capturing payment..."
payload = {
"action": "capture",
"display_message_uri": "https://www.acmeinc.com/pos/3/display/",
"callback_uri": "https://www.acmeinc.com/pos/3/payment/h93d458qo4685/"
}
req = requests.Request("PUT",
url_base +
'/merchant/v1/payment_request/{tid}/'.format(tid=tid),
data=json.dumps(payload),
headers=headers
)
r = s.send(s.prepare_request(req))
print "r.status_code =", r.status_code, " ", httplib.responses[r.status_code]
assert r.status_code == 204, "Expected r.status_code to be 204"
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-u", "--user", dest="merchant_user",
help="set the user name")
parser.add_option("-m", "--merchant", dest="merchant_id",
help="set the merchant id")
parser.add_option("-f", "--filename", dest="pemfilename",
help="set RSA private key file. Should be a .pem file created when the user is created")
parser.add_option("-t", "--testbed_token", dest="testbed_token",
help="set the testbed_token. Should be recieved in an email")
(options, args) = parser.parse_args()
if options.merchant_id and options.merchant_user and options.pemfilename and options.testbed_token:
rsa_auth_merchant_simple_test(options.merchant_id,
options.merchant_user,
options.pemfilename,
options.testbed_token)
else:
print "All command line options not given. Exiting.."
sys.exit(1)
|
py | 1a4316c465de0eab03311b840a28ab1c9b23e536 | import cv2
class rovio:
chase = None
detector = None
    rovioControl = None
def __init__(self, chase, detector, rovioControl):
self.chase = chase
self.detector = detector
        self.rovioControl = rovioControl
def action(self):
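        # Rough flow: detect the target Rovio, steer towards the largest
        # detection, then fall back to floor finding and manual key controls.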
        # Rovio detection starts here
        # keep rotating right to search for the Rovio
        boxes = self.detector.predict(ori_frame)
if len(boxes) < 1:
self.rovio.rotate_right(angle=15, speed=1)
else:
# Get the nearest one to move to (Biggest Area)
x, y, w, h = 0, 0, 0, 0
max_box_i = 0
max_area = 0
for index, box in enumerate(boxes):
width = box.w + box.x
height = box.h + box.y
area = (box.w + box.x) * (box.h + box.y)
print(width / height)
if max_area < area and (width / height > 1.1 and width / height < 1.2):
max_area = area
max_box_i = index
x, y, w, h = boxes[max_box_i].get_position()
# get center point of the box
xmin = int((box.x - box.w / 2) * frame.shape[1])
xmax = int((box.x + box.w / 2) * frame.shape[1])
ymin = int((box.y - box.h / 2) * frame.shape[0])
ymax = int((box.y + box.h / 2) * frame.shape[0])
cv2.rectangle(ori_frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)
cv2.putText(ori_frame,
labels[box.get_label()] + ' ' + str(box.get_score()),
(xmin, ymin - 13),
cv2.FONT_HERSHEY_SIMPLEX,
1e-3 * frame.shape[0],
(0, 255, 0), 2)
cv2.imshow('detector', ori_frame)
            # Assume x and y are the center point of the box
if (x * frame.shape[0] >= 213 and x * frame.shape[0] <= 426):
self.rovio.forward()
elif x * frame.shape[1] > frame.shape[1] / 2:
self.rovio.rotate_right(angle=15, speed=1)
else:
self.rovio.rotate_left(angle=15, speed=1)
#####################################################
        # Perform floor_finder #
#####################################################
# If safe zone is more than 80 then check for infrared detection
if self.floor_finder() > 80:
pass
# if(not self.rovio.ir()):
# self.rovio.api.set_ir(1)
# if (not self.rovio.obstacle()):
# self.rovio.forward()
# self.rovio.forward()
# self.rovio.forward()
# self.rovio.forward()
# self.rovio.forward()
# self.rovio.forward()
# else:
# self.rovio.rotate_right(angle=20, speed=2)
        # Rotate right if safe zone is smaller than 80 pixels
else:
pass
# self.rovio.rotate_right(angle=20, speed=2)
# If Button Pressed, onAction
# Use ASCII for decode
self.key = cv2.waitKey(20)
if self.key > 0:
# print self.key
pass
if self.key == 114: # r
self.rovio.turn_around()
elif self.key == 63233 or self.key == 115: # down or s
self.rovio.backward(speed=7)
elif self.key == 63232 or self.key == 119: # up or w
self.rovio.forward(speed=1)
elif self.key == 63234 or self.key == 113: # left or a
self.rovio.rotate_left(angle=12, speed=5)
elif self.key == 63235 or self.key == 101: # right or d
self.rovio.rotate_right(angle=12, speed=5)
elif self.key == 97: # left or a
self.rovio.left(speed=1)
elif self.key == 100: # right or d
self.rovio.right(speed=1)
elif self.key == 44: # comma
self.rovio.head_down()
elif self.key == 46: # period
self.rovio.head_middle()
elif self.key == 47: # slash
self.rovio.head_up()
elif self.key == 32: # Space Bar, pressed then perform face detection
flag = False
# self.rovio.stop()
# self.face_detection()
|
py | 1a4317f07cf2606efe6766a0b22615cf2e666411 | import sys
sys.stdin = open('input.txt')
while True:
a, b, c, d = map(int, raw_input().split())
if sum([a, b, c, d]) == 0:
break
print 360 * 2 + (a - b + 40) % 40 * 9 + 360 + \
(c - b + 40) % 40 * 9 + (c - d + 40) % 40 * 9
|
py | 1a43183faab3c2b7ad851a24f26ad983235e6b29 | """Various sources for providing generalized Beaver triples for the Pond
protocol."""
import abc
import logging
import random
import tensorflow as tf
from ...config import get_config
from ...utils import wrap_in_variables, reachable_nodes, unwrap_fetches
logger = logging.getLogger('tf_encrypted')
class TripleSource(abc.ABC):
"""Base class for triples sources."""
@abc.abstractmethod
def cache(self, a, cache_updater):
pass
@abc.abstractmethod
def initializer(self):
pass
@abc.abstractmethod
def generate_triples(self, fetches):
pass
class BaseTripleSource(TripleSource):
"""
Partial triple source adding graph nodes for constructing and keeping track
of triples and their use. Subclasses must implement `_build_queues`.
"""
def __init__(self, player0, player1, producer):
config = get_config()
self.player0 = config.get_player(player0) if player0 else None
self.player1 = config.get_player(player1) if player1 else None
self.producer = config.get_player(producer) if producer else None
def mask(self, backing_dtype, shape):
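        # The producer samples two uniform shares a0, a1 whose sum is the mask
        # a; `_build_queues` hands one share to each compute player.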
with tf.name_scope("triple-generation"):
with tf.device(self.producer.device_name):
a0 = backing_dtype.sample_uniform(shape)
a1 = backing_dtype.sample_uniform(shape)
a = a0 + a1
d0, d1 = self._build_queues(a0, a1)
return a, d0, d1
def mul_triple(self, a, b):
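        # Beaver-style product triple: the producer multiplies the two masks
        # and secret-shares the product ab between the compute players.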
with tf.name_scope("triple-generation"):
with tf.device(self.producer.device_name):
ab = a * b
ab0, ab1 = self._share(ab)
return self._build_queues(ab0, ab1)
def square_triple(self, a):
with tf.name_scope("triple-generation"):
with tf.device(self.producer.device_name):
aa = a * a
aa0, aa1 = self._share(aa)
return self._build_queues(aa0, aa1)
def matmul_triple(self, a, b):
with tf.name_scope("triple-generation"):
with tf.device(self.producer.device_name):
ab = a.matmul(b)
ab0, ab1 = self._share(ab)
return self._build_queues(ab0, ab1)
def conv2d_triple(self, a, b, strides, padding):
with tf.device(self.producer.device_name):
with tf.name_scope("triple"):
ab = a.conv2d(b, strides, padding)
ab0, ab1 = self._share(ab)
return self._build_queues(ab0, ab1)
def indexer_mask(self, a, slc):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_sliced = a[slc]
return a_sliced
def transpose_mask(self, a, perm):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_t = a.transpose(perm=perm)
return a_t
def strided_slice_mask(self, a, args, kwargs):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_slice = a.strided_slice(args, kwargs)
return a_slice
def split_mask(self, a, num_split, axis):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
bs = a.split(num_split=num_split, axis=axis)
return bs
def stack_mask(self, bs, axis):
factory = bs[0].factory
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
b_stacked = factory.stack(bs, axis=axis)
return b_stacked
def concat_mask(self, bs, axis):
factory = bs[0].factory
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
b_stacked = factory.concat(bs, axis=axis)
return b_stacked
def reshape_mask(self, a, shape):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_reshaped = a.reshape(shape)
return a_reshaped
def expand_dims_mask(self, a, axis):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_e = a.expand_dims(axis=axis)
return a_e
def squeeze_mask(self, a, axis):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_squeezed = a.squeeze(axis=axis)
return a_squeezed
def _share(self, secret):
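        # Additive secret sharing: share0 is sampled uniformly from the backing
        # ring and share1 = secret - share0, so the two shares sum to the secret.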
with tf.name_scope("share"):
share0 = secret.factory.sample_uniform(secret.shape)
share1 = secret - share0
# randomized swap to distribute who gets the seed
if random.random() < 0.5:
share0, share1 = share1, share0
return share0, share1
@abc.abstractmethod
def _build_queues(self, c0, c1):
"""
        Method used to inject buffers between mask generation and use
        (i.e. offline vs online). `c0` and `c1` represent the generated
        masks and the method is expected to return a similar pair
        of tensors.
"""
class OnlineTripleSource(BaseTripleSource):
"""
This triple source will generate triples as part of the online phase
using a dedicated third-party `producer`.
There is no need to call `generate_triples` nor `initialize`.
"""
def __init__(self, producer):
super().__init__(None, None, producer)
def cache(self, a, cache_updater):
with tf.device(self.producer.device_name):
updater, [a_cached] = wrap_in_variables(a)
return updater, a_cached
def initializer(self):
return tf.no_op()
def generate_triples(self, fetches):
return []
def _build_queues(self, c0, c1):
return c0, c1
class QueuedOnlineTripleSource(BaseTripleSource):
"""
Similar to `OnlineTripleSource` but with in-memory buffering backed by
`tf.FIFOQueue`s.
"""
def __init__(self, player0, player1, producer, capacity=10):
super().__init__(player0, player1, producer)
self.capacity = capacity
self.queues = list()
self.triggers = dict()
def cache(self, a, cache_updater):
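        # Register the offline variable update under the online `cache_updater`
        # op so `generate_triples` can trigger it whenever that op is reachable.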
with tf.device(self.producer.device_name):
offline_updater, [a_cached] = wrap_in_variables(a)
self.triggers[cache_updater] = offline_updater
return tf.no_op(), a_cached
def initializer(self):
return tf.no_op()
def generate_triples(self, fetches):
if isinstance(fetches, (list, tuple)) and len(fetches) > 1:
logger.warning("Generating triples for a run involving more than "
"one fetch may introduce non-determinism that can "
"break the correspondence between the two phases "
"of the computation.")
unwrapped_fetches = unwrap_fetches(fetches)
reachable_operations = [node
for node in reachable_nodes(unwrapped_fetches)
if isinstance(node, tf.Operation)]
reachable_triggers = [self.triggers[op]
for op in reachable_operations
if op in self.triggers]
return reachable_triggers
def _build_triple_store(self, mask, player_id):
"""
Adds a tf.FIFOQueue to store mask locally on player.
"""
# TODO(Morten) taking `value` doesn't work for int100
raw_mask = mask.value
factory = mask.factory
dtype = mask.factory.native_type
shape = mask.shape
with tf.name_scope("triple-store-{}".format(player_id)):
q = tf.queue.FIFOQueue(
capacity=self.capacity,
dtypes=[dtype],
shapes=[shape],
)
e = q.enqueue(raw_mask)
d = q.dequeue()
d_wrapped = factory.tensor(d)
self.queues += [q]
self.triggers[d.op] = e
return d_wrapped
def _build_queues(self, c0, c1):
with tf.device(self.player0.device_name):
d0 = self._build_triple_store(c0, "0")
with tf.device(self.player1.device_name):
d1 = self._build_triple_store(c1, "1")
return d0, d1
"""
class PlaceholderTripleSource(BaseTripleSource):
# TODO(Morten) manually unwrap and re-wrap of values, should be hidden away
def __init__(self, player0, player1, producer):
super().__init__(player0, player1, producer)
self.placeholders = list()
def _build_queues(self, c0, c1):
with tf.device(self.player0.device_name):
r0 = tf.placeholder(
dtype=c0.factory.native_type,
shape=c0.shape,
)
d0 = c0.factory.tensor(r0)
with tf.device(self.player1.device_name):
r1 = tf.placeholder(
dtype=c1.factory.native_type,
shape=c1.shape,
)
d1 = c1.factory.tensor(r1)
self.placeholders += [r0, r1]
return d0, d1
""" #pylint: disable=pointless-string-statement
"""
class DatasetTripleSource(BaseTripleSource):
# TODO(Morten) manually unwrap and re-wrap of values, should be hidden away
def __init__(
self,
player0,
player1,
producer,
capacity=10,
directory="/tmp/triples/",
support_online_running=False,
):
super().__init__(player0, player1, producer)
self.capacity = capacity
self.dequeuers = list()
self.enqueuers = list()
self.initializers = list()
self.directory = directory
self.support_online_running = support_online_running
if support_online_running:
self.dequeue_from_file = tf.placeholder_with_default(True,
shape=[])
def _build_queues(self, c0, c1):
def dataset_from_file(filename, dtype, shape):
def parse(x):
res = tf.parse_tensor(x, out_type=dtype)
res = tf.reshape(res, shape)
return res
iterator = tf.data.TFRecordDataset(filename) \
.map(parse) \
.make_initializable_iterator()
return iterator.get_next(), iterator.initializer
def dataset_from_queue(queue, dtype, shape):
dummy = tf.data.Dataset.from_tensors(0).repeat(None)
iterator = (dummy.map(lambda _: queue.dequeue())
.make_initializable_iterator())
return iterator.get_next(), iterator.initializer
# gen = lambda: queue.dequeue()
# dataset = tf.data.Dataset.from_generator(gen, [dtype], [shape])
# iterator = dataset.make_one_shot_iterator()
# return iterator.get_next(), iterator.initializer
def sanitize_filename(filename):
return filename.replace('/', '__')
def build_triple_store(mask):
raw_mask = mask.value
factory = mask.factory
dtype = mask.factory.native_type
shape = mask.shape
with tf.name_scope("triple-store"):
q = tf.queue.FIFOQueue(
capacity=self.capacity,
dtypes=[dtype],
shapes=[shape],
)
e = q.enqueue(raw_mask)
f = os.path.join(self.directory, sanitize_filename(q.name))
if self.support_online_running:
r, i = tf.cond(
self.dequeue_from_file,
true_fn=lambda: dataset_from_file(f, dtype, shape),
false_fn=lambda: dataset_from_queue(q, dtype, shape),
)
else:
r, i = dataset_from_file(f, dtype, shape)
d = factory.tensor(r)
return f, q, e, d, i
with tf.device(self.player0.device_name):
f0, q0, e0, d0, i0 = build_triple_store(c0)
with tf.device(self.player1.device_name):
f1, q1, e1, d1, i1 = build_triple_store(c1)
self.dequeuers += [(f0, q0.dequeue()), (f1, q1.dequeue())]
self.enqueuers += [(e0, e1)]
self.initializers += [(i0, i1)]
return d0, d1
def initialize(self, sess, tag=None):
sess.run(self.initializers, tag=tag)
def generate_triples(self, sess, num=1, tag=None, save_to_file=True):
for _ in range(num):
sess.run(self.enqueuers, tag=tag)
if save_to_file:
self.save_triples_to_file(sess, num=num, tag=tag)
def save_triples_to_file(self, sess, num, tag=None):
if not os.path.exists(self.directory):
os.makedirs(self.directory)
for filename, dequeue in self.dequeuers:
with tf.io.TFRecordWriter(filename) as writer:
# size = sess.run(queue.size(), tag=tag)
for _ in range(num):
serialized = tf.io.serialize_tensor(dequeue)
triple = sess.run(serialized, tag=tag)
writer.write(triple)
""" #pylint: disable=pointless-string-statement
|
py | 1a4318a78671cf964a21014c6528ba9a7edfb38e | from app import app
import os
if __name__ == '__main__':
env_name = os.getenv('FLASK_ENV')
app = app(env_name)
app.run()
|
py | 1a43191060d57fff6a371d84dc066611fae4342e | """pystore URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import debug_toolbar
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("admin/", admin.site.urls),
path("products/", include("products.urls")),
path("cart/", include("cart.urls")),
path("orders/", include("orders.urls")),
path("payments/", include("payments.urls")),
path("", include("pages.urls")),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [path("__debug__/", include(debug_toolbar.urls))]
|
py | 1a431a9387a58fb35ff0052474ad33f7398ca6da | __AUTHOR__ = "hugsy"
__VERSION__ = 0.1
import os
import gdb
def fastbin_index(sz):
return (sz >> 4) - 2 if gef.arch.ptrsize == 8 else (sz >> 3) - 2
def nfastbins():
return fastbin_index( (80 * gef.arch.ptrsize // 4)) - 1
def get_tcache_count():
if get_libc_version() < (2, 27):
return 0
count_addr = gef.heap.base_address + 2*gef.arch.ptrsize
count = p8(count_addr) if get_libc_version() < (2, 30) else p16(count_addr)
return count
@lru_cache(128)
def collect_known_values() -> dict:
arena = get_glibc_arena()
result = {} # format is { 0xaddress : "name" ,}
# tcache
if get_libc_version() >= (2, 27):
tcache_addr = GlibcHeapTcachebinsCommand.find_tcache()
for i in range(GlibcHeapTcachebinsCommand.TCACHE_MAX_BINS):
chunk, _ = GlibcHeapTcachebinsCommand.tcachebin(tcache_addr, i)
j = 0
while True:
if chunk is None:
break
result[chunk.data_address] = "tcachebins[{}/{}] (size={:#x})".format(i, j, (i+1)*0x10+0x10)
next_chunk_address = chunk.get_fwd_ptr(True)
if not next_chunk_address: break
next_chunk = GlibcChunk(next_chunk_address)
j += 1
chunk = next_chunk
# fastbins
for i in range(nfastbins()):
chunk = arena.fastbin(i)
j = 0
while True:
if chunk is None:
break
result[chunk.data_address] = "fastbins[{}/{}]".format(i, j)
next_chunk_address = chunk.get_fwd_ptr(True)
if not next_chunk_address: break
next_chunk = GlibcChunk(next_chunk_address)
j += 1
chunk = next_chunk
# other bins
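# NOTE: the loop below reuses `i` left over from the fastbins loop above, so a
# single bin index is inspected per name rather than the full
# unsorted/small/large bin ranges.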
for name in ["unorderedbins", "smallbins", "largebins"]:
fw, bk = arena.bin(i)
if bk==0x00 and fw==0x00: continue
head = GlibcChunk(bk, from_base=True).fwd
if head == fw: continue
chunk = GlibcChunk(head, from_base=True)
j = 0
while True:
if chunk is None: break
result[chunk.data_address] = "{}[{}/{}]".format(name, i, j)
next_chunk_address = chunk.get_fwd_ptr(True)
if not next_chunk_address: break
next_chunk = GlibcChunk(next_chunk_address, from_base=True)
j += 1
chunk = next_chunk
return result
@lru_cache(128)
def collect_known_ranges()->list:
result = []
for entry in get_process_maps():
if not entry.path:
continue
path = os.path.basename(entry.path)
result.append( (range(entry.page_start, entry.page_end), path) )
return result
@register_external_command
class VisualizeHeapChunksCommand(GenericCommand):
"""Visual helper for glibc heap chunks"""
_cmdline_ = "visualize-libc-heap-chunks"
_syntax_ = "{:s}".format(_cmdline_)
_aliases_ = ["heap-view",]
_example_ = "{:s}".format(_cmdline_)
def __init__(self):
super(VisualizeHeapChunksCommand, self).__init__(complete=gdb.COMPLETE_SYMBOL)
return
@only_if_gdb_running
def do_invoke(self, argv):
ptrsize = gef.arch.ptrsize
heap_base_address = gef.heap.base_address
arena = get_glibc_arena()
if not arena.top:
err("The heap has not been initialized")
return
top = align_address(int(arena.top))
base = align_address(heap_base_address)
colors = [ "cyan", "red", "yellow", "blue", "green" ]
cur = GlibcChunk(base, from_base=True)
idx = 0
known_ranges = collect_known_ranges()
known_values = collect_known_values()
while True:
base = cur.base_address
aggregate_nuls = 0
if base == top:
gef_print("{} {} {}".format(format_address(addr), format_address(gef.memory.read_integer(addr)) , Color.colorify(LEFT_ARROW + "Top Chunk", "red bold")))
gef_print("{} {} {}".format(format_address(addr+ptrsize), format_address(gef.memory.read_integer(addr+ptrsize)) , Color.colorify(LEFT_ARROW + "Top Chunk Size", "red bold")))
break
if cur.size == 0:
warn("incorrect size, heap is corrupted")
break
for off in range(0, cur.size, cur.ptrsize):
addr = base + off
value = gef.memory.read_integer(addr)
if value == 0:
if off != 0 and off != cur.size - cur.ptrsize:
aggregate_nuls += 1
if aggregate_nuls > 1:
continue
if aggregate_nuls > 2:
gef_print(" ↓")
gef_print(" [...]")
gef_print(" ↓")
aggregate_nuls = 0
text = "".join([chr(b) if 0x20 <= b < 0x7F else "." for b in gef.memory.read(addr, cur.ptrsize)])
line = "{} {}".format(format_address(addr), Color.colorify(format_address(value), colors[idx % len(colors)]))
line+= " {}".format(text)
derefs = dereference_from(addr)
if len(derefs) > 2:
line+= " [{}{}]".format(LEFT_ARROW, derefs[-1])
if off == 0:
line+= " Chunk[{}]".format(idx)
if off == cur.ptrsize:
line+= " {}{}{}{}".format(value&~7, "|NON_MAIN_ARENA" if value&4 else "", "|IS_MMAPED" if value&2 else "", "|PREV_INUSE" if value&1 else "")
# look in mapping
for x in known_ranges:
if value in x[0]:
line+= " (in {})".format(Color.redify(x[1]))
# look in known values
if value in known_values:
line += "{}{}".format(RIGHT_ARROW, Color.cyanify(known_values[value]))
gef_print(line)
next_chunk = cur.get_next_chunk()
if next_chunk is None:
break
next_chunk_addr = Address(value=next_chunk.data_address)
if not next_chunk_addr.valid:
warn("next chunk probably corrupted")
break
cur = next_chunk
idx += 1
return
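# Rough usage sketch (assumes gdb with GEF already loaded; the command name and
# alias come from the class attributes above):
#   gef➤  source /path/to/this_script.py
#   gef➤  visualize-libc-heap-chunks    # or: heap-view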
|
py | 1a431b7263f546ec2b96058542b9f674b440256c | from http import HTTPStatus
import pytest
from store.utils.testing.orders_testing import (
generate_order, get_order,
import_orders, compare_orders,
)
from store.utils.testing.couriers_testing import (
patch_courier, generate_courier, import_couriers, compare_couriers
)
CASES = (
(
{'courier_id': 1, 'courier_type': 'foot', 'regions': [], 'working_hours': []},
list([]),
dict({'regions': [12, 24, 25]}),
HTTPStatus.OK,
dict({'courier_id': 1, 'courier_type': 'foot', 'regions': [12, 24, 25], 'working_hours': []}),
list([])
),
(
{'courier_id': 1, 'courier_type': 'foot', 'regions': [], 'working_hours': []},
list([]),
dict({}),
HTTPStatus.OK,
dict({'courier_id': 1, 'courier_type': 'foot', 'regions': [], 'working_hours': []}),
list([])
),
)
@pytest.mark.parametrize('courier, orders, patch, expected_status, expected_courier, orders_left', CASES)
async def test_couriers_patch(api_client, courier, orders, patch, expected_status, expected_courier, orders_left):
await import_couriers(api_client, [courier], HTTPStatus.CREATED)
await import_orders(api_client, orders, HTTPStatus.CREATED)
actual_courier = await patch_courier(api_client, courier['courier_id'], patch, expected_status)
if expected_status == HTTPStatus.OK:
assert compare_couriers(actual_courier, expected_courier)
|
py | 1a431c2f5d420ffff18395e1edc72769ed329148 | from collections import namedtuple
monochrome_bounds_saturation = (0, 15)
white_bounds_value = (95, 100)
black_bounds_value = (0, 25)
colored_bounds_saturation = (monochrome_bounds_saturation[1], 100)
colored_bounds_value = (black_bounds_value[1], white_bounds_value[0])
ColorRGB = namedtuple("ColorRGB", ["R", "G", "B"])
ColorHSV = namedtuple("ColorHSV", ["H", "S", "V"])
|
py | 1a431d47a31748ecf5655dbb486e86c99cb42793 | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: lineout.py #
# Tests: plots - Curve
# operators - Lineout
#
# Defect ID: none
#
# Programmer: Brad Whitlock
# Date: Fri Jan 3 14:22:41 PST 2003
#
# Modifications:
# Kathleen Bonnell, Mon Mar 17 09:54:14 PST 2003
# Added TestMultiVarLineout2D.
#
# Kathleen Bonnell, Tue Dec 23 09:29:29 PST 2003
# Added TestSAMRAI.
#
# Kathleen Bonnell, Thu Jul 29 11:59:35 PDT 2004
# Added tests for no-sampling version, renamed old Curve* tests to
# indicate they were generated with-sampling.
#
# Kathleen Bonnell, Thu Aug 5 10:44:22 PDT 2004
# Added calls to ResetPickLetter() and ResetLineoutColor() at the end of
# each test, so that failure on any one test won't necessarily affect the
# tests that follow.
#
# Kathleen Bonnell, Wed Nov 24 11:38:55 PST 2004
# Modified the way that sampling gets turned on due to changes in Lineout
# Attributes and GlobalLineoutAttributes. Use global version to turn
# sampling on and off.
#
# Kathleen Bonnell, Fri Feb 4 11:17:56 PST 2005
# Added TestDynamic, to test new global atts: curveOption and colorOption.
#
# Hank Childs, Wed Feb 16 07:34:07 PST 2005
# Rename variables that have unsupported characters.
#
# Kathleen Bonnell, Wed Mar 23 17:58:20 PST 2005
# Added TestDynamic2.
#
# Kathleen Bonnell, Thu May 19 11:26:39 PDT 2005
# Added TestTecPlot.
#
# Jeremy Meredith, Wed Sep 7 12:06:04 PDT 2005
# Allowed spaces in variable names.
#
# Kathleen Bonnell, Tue Jun 20 16:02:38 PDT 2006
# Added tests for GetOutputArray to Lineout2D.
#
# Kathleen Bonnell, Wed Jun 28 15:57:58 PDT 2006
# Added tests to TestDynamicLineout, testing having curves from different
# time-varying databases (same originating window and different originating
# window) in same curve window, and update the curves via the originating
# plots time-slider (bug '7002).
#
# Brad Whitlock, Wed Jan 14 16:12:10 PST 2009
# I changed the call to GetOutputArray. It's no longer a built-in function
# in the CLI.
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
# Added ability to swtich between Silo's HDF5 and PDB data.
#
# Cyrus Harrison, Thu Mar 25 09:57:34 PDT 2010
# Added call(s) to DrawPlots() b/c of changes to the default plot state
# behavior when an operator is added.
#
# Brad Whitlock, Tue Mar 26 12:06:51 PDT 2013
# I added TestOperatorCreatedVariables.
#
# ----------------------------------------------------------------------------
def GetOutputArray(plotID = -1, winID = -1):
gInfo = GetGlobalAttributes()
oldWin = gInfo.windows[gInfo.activeWindow]
# Set the active window
if winID != -1:
SetActiveWindow(winID)
# Get the active plots
active = []
if plotID != -1:
pL = GetPlotList()
for i in range(pL.GetNumPlots()):
if pL.GetPlots(i).activeFlag:
active = active + [i]
SetActivePlots(plotID)
pInfo = GetPlotInformation()
# Restore the old active plots
if len(active) > 0:
SetActivePlots(tuple(active))
# Restore the old active window
if winID != -1:
SetActiveWindow(oldWin)
return pInfo["Curve"]
def InitAnnotation():
a = AnnotationAttributes()
TurnOffAllAnnotations(a)
a.axes2D.visible = 1
a.axes2D.xAxis.label.visible = 0
a.axes2D.yAxis.label.visible = 0
a.axes2D.xAxis.title.visible = 0
a.axes2D.yAxis.title.visible = 0
SetAnnotationAttributes(a)
def TestLineout2D(time, suffix):
OpenDatabase(silo_data_path("curv2d.silo"))
AddPlot("Pseudocolor", "d")
DrawPlots()
# Set the colortable to one that has white at the bottom values.
SetActiveContinuousColorTable("calewhite")
pc = PseudocolorAttributes()
pc.colorTableName = "Default"
SetPlotOptions(pc)
# Create the variable list.
vars = ("default")
# Do some lineouts.
p0 = (-4.01261, 1.91818)
p1 = (-0.693968, 4.448759)
p2 = (4.144392, 1.713066)
nsteps = 15
for i in range(nsteps):
t = float(i) / float(nsteps - 1)
p3x = t * p2[0] + (1. - t) * p1[0]
p3y = t * p2[1] + (1. - t) * p1[1]
SetActiveWindow(1)
Lineout(p0, (p3x, p3y), vars)
if (time == 1):
SetActiveWindow(1)
Test("Lineout2d")
if (time == 2):
SetActiveWindow(1)
oa = GetOutputArray(4, 2)
s = ''.join(['%f, '% x for x in oa])
s = '(' + s + ')'
TestText("Lineout2d_output_04", s)
oa = GetOutputArray(8, 2)
s = ''.join(['%f, '% x for x in oa])
s = '(' + s + ')'
TestText("Lineout2d_output_08", s)
SetActiveWindow(2)
InitAnnotation()
Test("CurvesFrom2d" + suffix)
if (time == 2):
oa = GetOutputArray(2)
s = ''.join(['%f, '% x for x in oa])
s = '(' + s + ')'
TestText("Lineout2d_output_02", s)
oa = GetOutputArray()
s = ''.join(['%f, '% x for x in oa])
s = '(' + s + ')'
TestText("Lineout2d_output_15", s)
# Delete the second window.
DeleteWindow()
DeleteAllPlots()
ResetPickLetter()
ResetLineoutColor()
def TestLineout3D(time, suffix):
OpenDatabase(silo_data_path("noise.silo"))
AddPlot("Pseudocolor", "hardyglobal")
DrawPlots()
# Set the view
v = View3DAttributes()
v.viewNormal = (-0.65577, 0.350079, 0.668888)
v.focus = (0, 0, 0)
v.viewUp = (0.218553, 0.936082, -0.275655)
v.viewAngle = 30
v.parallelScale = 17.3205
v.nearPlane = -34.641
v.farPlane = 34.641
v.perspective = 1
SetView3D(v)
# Do some lineouts
vars = ("default")
p0 = (-10., -10., -10.)
P = ((-10., -10., 10.), (-10., 10., -10.), (-10., 10., 10.),\
(10., -10., -10.), (10., -10., 10.), (10., 10., -10.), (10., 10., 10.))
for p in P:
SetActiveWindow(1)
Lineout(p0, p, vars)
if (time == 1):
SetActiveWindow(1)
pc = PseudocolorAttributes()
pc.colorTableName = "xray"
pc.SetOpacityType(pc.Constant)
pc.opacity = 0.5
SetPlotOptions(pc)
Test("Lineout3d")
SetActiveWindow(2)
InitAnnotation()
Test("CurvesFrom3d" + suffix)
# Delete the second window.
DeleteWindow()
DeleteAllPlots()
ResetPickLetter()
ResetLineoutColor()
def TestMultiVarLineout2D(time, suffix):
OpenDatabase(silo_data_path("curv2d.silo"))
AddPlot("Pseudocolor", "d")
DrawPlots()
# Do some lineouts
vars = ("p", "u", "v")
Y = (2, 3, 4)
x1 = -4.5
x2 = 4.5
for y in Y:
SetActiveWindow(1)
Lineout((x1, y), (x2, y), vars)
if (time == 1):
SetActiveWindow(1)
Test("MultiVarLineout2d")
SetActiveWindow(2)
InitAnnotation()
Test("MultiVarCurvesFrom2d" + suffix)
DeleteWindow()
DeleteAllPlots()
ResetPickLetter()
ResetLineoutColor()
def TestSAMRAI(time, suffix):
OpenDatabase(data_path("samrai_test_data/sil_changes/dumps.visit"))
AddPlot("Pseudocolor", "Primitive Var _number_0")
DrawPlots()
# Set the colortable to one that has white at the bottom values.
SetActiveContinuousColorTable("rainbow")
pc = PseudocolorAttributes()
pc.colorTableName = "Default"
SetPlotOptions(pc)
AddOperator("Slice", 1)
slice = SliceAttributes()
slice.originType = slice.Percent
slice.originPercent = 18
slice.axisType = slice.ZAxis
slice.project2d = 1
SetOperatorOptions(slice, 0, 1)
DrawPlots()
ResetView()
SetTimeSliderState(1)
#Do some lineouts
p0 = (3, 3)
p1 = (0, 20)
p2 = (30, 0)
nsteps = 15
for i in range(nsteps):
t = float(i) / float(nsteps - 1)
p3x = t * p2[0] + (1. - t) * p1[0]
p3y = t * p2[1] + (1. - t) * p1[1]
SetActiveWindow(1)
Lineout(p0, (p3x, p3y))
if (time == 1):
SetActiveWindow(1)
Test("LineoutSAMRAI")
SetActiveWindow(2)
InitAnnotation()
Test("CurvesFromSAMRAI" + suffix)
DeleteWindow()
DeleteAllPlots()
ResetPickLetter()
ResetLineoutColor()
def TestSpecifyLineoutWindow(time, suffix):
#window 1
OpenDatabase(data_path("pdb_test_data/dbA00.pdb"))
AddPlot("Pseudocolor", "mesh/ireg")
DrawPlots()
ResetView()
Lineout((0, 2.5), (5, 2.5))
if (time == 1):
SetActiveWindow(1)
InitAnnotation()
Test("LineoutSpecifyWindow_01")
SetActiveWindow(2)
InitAnnotation()
Test("CurvesFromSpecifyWindow_01" + suffix)
SetActiveWindow(1)
CloneWindow()
#window 3
SetTimeSliderState(4)
DrawPlots()
gla = GetGlobalLineoutAttributes()
gla.createWindow = 0
gla.windowId = 4
SetGlobalLineoutAttributes(gla)
Lineout((0, 2.5), (5, 2.5))
if (time == 1):
SetActiveWindow(3)
InitAnnotation()
Test("LineoutSpecifyWindow_02")
SetActiveWindow(4)
InitAnnotation()
Test("CurvesFromSpecifyWindow_02" + suffix)
DeleteWindow()
SetActiveWindow(3)
DeleteWindow()
SetActiveWindow(2)
DeleteWindow()
DeleteAllPlots()
gla.createWindow = 1
gla.windowId = 2
SetGlobalLineoutAttributes(gla)
ResetPickLetter()
ResetLineoutColor()
def TestDynamicLineout(time, suffix):
if (time == 1):
return
#window 1
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
ResetView()
Lineout((0, 0.5, 2.5), (10, 0.5, 2.5))
gla = GetGlobalLineoutAttributes()
gla.Dynamic = 1
gla.curveOption = gla.UpdateCurve
SetGlobalLineoutAttributes(gla)
SetActiveWindow(1)
t = 0
for i in range (10):
t += 5
SetTimeSliderState(t)
SetActiveWindow(2)
InitAnnotation()
ResetView()
Test("CurvesFromDynamic_01")
# go back to the beginning time state
# and have new curves created for each new time
SetActiveWindow(1)
t = 0
SetTimeSliderState(t)
gla.curveOption = gla.CreateCurve
SetGlobalLineoutAttributes(gla)
for i in range (7):
t += 5
SetTimeSliderState(t)
# now have each new curve have its own color.
gla.colorOption = gla.CreateColor
SetGlobalLineoutAttributes(gla)
for i in range (7):
t += 5
SetTimeSliderState(t)
SetActiveWindow(2)
InitAnnotation()
ResetView()
Test("CurvesFromDynamic_02")
ResetPickLetter()
ResetLineoutColor()
# delete window 2
DeleteWindow()
# clear all plots from window 1
DeleteAllPlots()
dbs = (data_path("pdb_test_data/dbA00.pdb"),
data_path("pdb_test_data/dbB00.pdb"),
data_path("pdb_test_data/dbC00.pdb"))
OpenDatabase(dbs[0])
AddPlot("Pseudocolor", "mesh/ireg")
OpenDatabase(dbs[1])
AddPlot("Pseudocolor", "mesh/ireg")
DrawPlots()
AddWindow()
SetActiveWindow(2)
DeleteAllPlots()
OpenDatabase(dbs[2])
AddPlot("Pseudocolor", "mesh/ireg")
DrawPlots()
gla.Dynamic = 1
gla.curveOption = gla.UpdateCurve
SetGlobalLineoutAttributes(gla)
#Lineout for dbC00.pdb in window 2
Lineout((5.0, 7.5, 0.), (10, 7.5, 0.))
SetActiveWindow(1)
SetActivePlots(1)
#Lineout for dbB00.pdb in window 1
Lineout((0, 8, 0), (5, 8, 0))
SetActivePlots(0)
#Lineout for dbA00.pdb in window 1
Lineout((0, 3, 0), (5, 3, 0))
SetActiveWindow(3)
InitAnnotation()
Test("CurvesFromDynamic_03")
SetActiveWindow(1)
SetActiveTimeSlider(dbs[1])
SetTimeSliderState(15)
SetActiveWindow(3)
Test("CurvesFromDynamic_04")
SetActiveWindow(1)
SetActiveTimeSlider(dbs[0])
SetTimeSliderState(3)
SetActiveWindow(3)
Test("CurvesFromDynamic_05")
SetActiveWindow(2)
SetTimeSliderState(29)
SetActiveWindow(3)
Test("CurvesFromDynamic_06")
ResetLineoutColor()
ResetPickLetter()
# delete window 3
DeleteWindow()
# delete window 2
SetActiveWindow(2)
DeleteWindow()
# clear all plots from window 1
DeleteAllPlots()
def TestDynamic2():
# VisIt00006006 -- ensure that 'ClearRefLines' will 'disconnect' the lineout
# from its originating plot, and won't update when orig plot changes time.
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
ResetView()
SetTimeSliderState(0)
Lineout((0, 0.5, 2.5), (10, 0.5, 2.5))
SetActiveWindow(2)
InitAnnotation()
Test("Dynamic2_01")
SetActiveWindow(1)
gla = GetGlobalLineoutAttributes()
gla.Dynamic = 1
gla.curveOption = gla.UpdateCurve
SetGlobalLineoutAttributes(gla)
SetTimeSliderState(27)
SetActiveWindow(2)
Test("Dynamic2_02")
SetActiveWindow(1)
gla.Dynamic = 0
SetGlobalLineoutAttributes(gla)
SetTimeSliderState(52)
SetActiveWindow(2)
Test("Dynamic2_03")
ResetPickLetter()
ResetLineoutColor()
DeleteWindow()
DeleteAllPlots()
def TestTecPlot():
# VisIt00006243 -- curve generated from Lineout looks reversed in X
OpenDatabase(data_path("tecplot_test_data/T3L3CLS17u.plt"))
AddPlot("Mesh", "mesh")
AddPlot("Pseudocolor", "k")
DrawPlots()
ResetView()
v = GetView2D()
v.windowCoords = (0.340063, 0.340868, 0.00512584, 0.00572613 )
SetView2D(v)
Lineout((0.340505, 0.00565604, 0), (0.340291, 0.00514717, 0))
InitAnnotation()
Test("LineoutTecPlot_01")
SetActiveWindow(2)
InitAnnotation()
Test("CurvesFromTecPlot_01")
ResetPickLetter()
ResetLineoutColor()
# delete window 2
DeleteWindow()
# remove plots from window 1
DeleteAllPlots()
def TestOperatorCreatedVariables():
def SetCurveAtts():
c = CurveAttributes(1)
c.lineWidth = 2
c.curveColor = (255,0,0,255)
c.curveColorSource = c.Custom
c.showLabels = 0
SetPlotOptions(c)
TestSection("Operator-Created Variables")
OpenDatabase(silo_data_path("noise.silo"))
# Do lineout on a data binning variable.
AddPlot("Pseudocolor", "operators/DataBinning/2D/Mesh", 1, 1)
DataBinningAtts = DataBinningAttributes()
DataBinningAtts.numDimensions = DataBinningAtts.Two # One, Two, Three
DataBinningAtts.dim1BinBasedOn = DataBinningAtts.X # X, Y, Z, Variable
DataBinningAtts.dim1Var = "default"
DataBinningAtts.dim1SpecifyRange = 0
DataBinningAtts.dim1MinRange = 0
DataBinningAtts.dim1MaxRange = 1
DataBinningAtts.dim1NumBins = 50
DataBinningAtts.dim2BinBasedOn = DataBinningAtts.Y # X, Y, Z, Variable
DataBinningAtts.dim2Var = "default"
DataBinningAtts.dim2SpecifyRange = 0
DataBinningAtts.dim2MinRange = 0
DataBinningAtts.dim2MaxRange = 1
DataBinningAtts.dim2NumBins = 50
DataBinningAtts.dim3BinBasedOn = DataBinningAtts.Variable # X, Y, Z, Variable
DataBinningAtts.dim3Var = "default"
DataBinningAtts.dim3SpecifyRange = 0
DataBinningAtts.dim3MinRange = 0
DataBinningAtts.dim3MaxRange = 1
DataBinningAtts.dim3NumBins = 50
DataBinningAtts.outOfBoundsBehavior = DataBinningAtts.Clamp # Clamp, Discard
DataBinningAtts.reductionOperator = DataBinningAtts.Maximum # Average, Minimum, Maximum, StandardDeviation, Variance, Sum, Count, RMS, PDF
DataBinningAtts.varForReduction = "hardyglobal"
DataBinningAtts.emptyVal = 0
DataBinningAtts.outputType = DataBinningAtts.OutputOnBins # OutputOnBins, OutputOnInputMesh
DataBinningAtts.removeEmptyValFromCurve = 1
SetOperatorOptions(DataBinningAtts, 1)
DrawPlots()
Lineout((9, 9), (4.5, -9))
SetActiveWindow(1)
ResetView()
Test("lineout_op_vars_00")
SetActiveWindow(2)
InitAnnotation()
ResetView()
SetCurveAtts()
Test("lineout_op_vars_01")
# delete window 2
DeleteWindow()
# remove plots from window 1
DeleteAllPlots()
# Do lineout on a data binning variable that had other operators
OpenDatabase(silo_data_path("noise.silo"))
AddPlot("Pseudocolor", "operators/DataBinning/2D/Mesh", 1, 1)
SetOperatorOptions(DataBinningAtts, 1)
AddOperator("Transform")
AddOperator("Project")
DrawPlots()
Lineout((9, 9), (4.5, -9))
SetActiveWindow(2)
InitAnnotation()
SetCurveAtts()
Test("lineout_op_vars_02")
# delete window 2
DeleteWindow()
# remove plots from window 1
DeleteAllPlots()
def DoTests(t,s):
TestLineout2D(t,s)
TestLineout3D(t,s)
TestMultiVarLineout2D(t,s)
TestSAMRAI(t,s)
TestSpecifyLineoutWindow(t,s)
TestDynamicLineout(t,s)
def LineoutMain():
InitAnnotation()
la = GetGlobalLineoutAttributes()
la.samplingOn = 1
SetGlobalLineoutAttributes(la)
DoTests(1, "_withSampling")
la.samplingOn = 0
SetGlobalLineoutAttributes(la)
DoTests(2, "_noSampling")
TestDynamic2()
TestTecPlot()
TestOperatorCreatedVariables()
# Call the main function
LineoutMain()
Exit()
|
py | 1a431e2dcd46c750cc401843367692c00ce845ab | import unittest
from easypysa.easypysa import EasyPysa
class UnitTests(unittest.TestCase):
def test_can_load_executable(self):
easy = EasyPysa()
self.assertTrue(easy._check_executable() == "OK")
if __name__ == "__main__":
unittest.main()
|
py | 1a431f7cccc5a941fc631da67bede191140dc61b | # Copyright (c) OpenMMLab. All rights reserved.
import os
import warnings
from collections import OrderedDict
import json_tricks as json
import numpy as np
from mmcv import Config
from mmpose.core.evaluation.top_down_eval import keypoint_epe
from mmpose.datasets.builder import DATASETS
from ..base import Kpt3dSviewRgbImgTopDownDataset
@DATASETS.register_module()
class InterHand3DDataset(Kpt3dSviewRgbImgTopDownDataset):
"""InterHand2.6M 3D dataset for top-down hand pose estimation.
`InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose
Estimation from a Single RGB Image' Moon, Gyeongsik et al. ECCV'2020
More details can be found in the `paper
<https://arxiv.org/pdf/2008.09309.pdf>`__ .
The dataset loads raw features and applies specified transforms
to return a dict containing the image tensors and other information.
InterHand2.6M keypoint indexes::
0: 'r_thumb4',
1: 'r_thumb3',
2: 'r_thumb2',
3: 'r_thumb1',
4: 'r_index4',
5: 'r_index3',
6: 'r_index2',
7: 'r_index1',
8: 'r_middle4',
9: 'r_middle3',
10: 'r_middle2',
11: 'r_middle1',
12: 'r_ring4',
13: 'r_ring3',
14: 'r_ring2',
15: 'r_ring1',
16: 'r_pinky4',
17: 'r_pinky3',
18: 'r_pinky2',
19: 'r_pinky1',
20: 'r_wrist',
21: 'l_thumb4',
22: 'l_thumb3',
23: 'l_thumb2',
24: 'l_thumb1',
25: 'l_index4',
26: 'l_index3',
27: 'l_index2',
28: 'l_index1',
29: 'l_middle4',
30: 'l_middle3',
31: 'l_middle2',
32: 'l_middle1',
33: 'l_ring4',
34: 'l_ring3',
35: 'l_ring2',
36: 'l_ring1',
37: 'l_pinky4',
38: 'l_pinky3',
39: 'l_pinky2',
40: 'l_pinky1',
41: 'l_wrist'
Args:
ann_file (str): Path to the annotation file.
camera_file (str): Path to the camera file.
joint_file (str): Path to the joint file.
img_prefix (str): Path to a directory where images are held.
Default: None.
data_cfg (dict): config
pipeline (list[dict | callable]): A sequence of data transforms.
use_gt_root_depth (bool): Using the ground truth depth of the wrist
or given depth from rootnet_result_file.
rootnet_result_file (str): Path to the wrist depth file.
dataset_info (DatasetInfo): A class containing all dataset info.
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self,
ann_file,
camera_file,
joint_file,
img_prefix,
data_cfg,
pipeline,
use_gt_root_depth=True,
rootnet_result_file=None,
dataset_info=None,
test_mode=False):
if dataset_info is None:
warnings.warn(
'dataset_info is missing. '
'Check https://github.com/open-mmlab/mmpose/pull/663 '
'for details.', DeprecationWarning)
cfg = Config.fromfile('configs/_base_/datasets/interhand3d.py')
dataset_info = cfg._cfg_dict['dataset_info']
super().__init__(
ann_file,
img_prefix,
data_cfg,
pipeline,
dataset_info=dataset_info,
test_mode=test_mode)
self.ann_info['heatmap3d_depth_bound'] = data_cfg[
'heatmap3d_depth_bound']
self.ann_info['heatmap_size_root'] = data_cfg['heatmap_size_root']
self.ann_info['root_depth_bound'] = data_cfg['root_depth_bound']
self.ann_info['use_different_joint_weights'] = False
self.camera_file = camera_file
self.joint_file = joint_file
self.use_gt_root_depth = use_gt_root_depth
if not self.use_gt_root_depth:
assert rootnet_result_file is not None
self.rootnet_result_file = rootnet_result_file
self.db = self._get_db()
print(f'=> num_images: {self.num_images}')
print(f'=> load {len(self.db)} samples')
@staticmethod
def _encode_handtype(hand_type):
if hand_type == 'right':
return np.array([1, 0], dtype=np.float32)
elif hand_type == 'left':
return np.array([0, 1], dtype=np.float32)
elif hand_type == 'interacting':
return np.array([1, 1], dtype=np.float32)
else:
assert 0, f'Unsupported hand type: {hand_type}'
def _get_db(self):
"""Load dataset.
Adapted from 'https://github.com/facebookresearch/InterHand2.6M/'
'blob/master/data/InterHand2.6M/dataset.py'
Copyright (c) FaceBook Research, under CC-BY-NC 4.0 license.
"""
with open(self.camera_file, 'r') as f:
cameras = json.load(f)
with open(self.joint_file, 'r') as f:
joints = json.load(f)
if not self.use_gt_root_depth:
rootnet_result = {}
with open(self.rootnet_result_file, 'r') as f:
rootnet_annot = json.load(f)
for i in range(len(rootnet_annot)):
rootnet_result[str(
rootnet_annot[i]['annot_id'])] = rootnet_annot[i]
gt_db = []
bbox_id = 0
for img_id in self.img_ids:
num_joints = self.ann_info['num_joints']
ann_id = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)
ann = self.coco.loadAnns(ann_id)[0]
img = self.coco.loadImgs(img_id)[0]
capture_id = str(img['capture'])
camera_name = img['camera']
frame_idx = str(img['frame_idx'])
image_file = os.path.join(self.img_prefix, self.id2name[img_id])
camera_pos = np.array(
cameras[capture_id]['campos'][camera_name], dtype=np.float32)
camera_rot = np.array(
cameras[capture_id]['camrot'][camera_name], dtype=np.float32)
focal = np.array(
cameras[capture_id]['focal'][camera_name], dtype=np.float32)
principal_pt = np.array(
cameras[capture_id]['princpt'][camera_name], dtype=np.float32)
joint_world = np.array(
joints[capture_id][frame_idx]['world_coord'], dtype=np.float32)
joint_cam = self._world2cam(
joint_world.transpose(1, 0), camera_rot,
camera_pos.reshape(3, 1)).transpose(1, 0)
joint_img = self._cam2pixel(joint_cam, focal, principal_pt)[:, :2]
joint_valid = np.array(
ann['joint_valid'], dtype=np.float32).flatten()
hand_type = self._encode_handtype(ann['hand_type'])
hand_type_valid = ann['hand_type_valid']
if self.use_gt_root_depth:
bbox = np.array(ann['bbox'], dtype=np.float32)
# extend the bbox to include some context
center, scale = self._xywh2cs(*bbox, 1.25)
abs_depth = [joint_cam[20, 2], joint_cam[41, 2]]
else:
rootnet_ann_data = rootnet_result[str(ann_id[0])]
bbox = np.array(rootnet_ann_data['bbox'], dtype=np.float32)
# the bboxes have been extended
center, scale = self._xywh2cs(*bbox, 1.0)
abs_depth = rootnet_ann_data['abs_depth']
# 41: 'l_wrist', left hand root
# 20: 'r_wrist', right hand root
rel_root_depth = joint_cam[41, 2] - joint_cam[20, 2]
# if root is not valid, root-relative 3D depth is also invalid.
rel_root_valid = joint_valid[20] * joint_valid[41]
# if root is not valid -> root-relative 3D pose is also not valid.
# Therefore, mark all joints as invalid
joint_valid[:20] *= joint_valid[20]
joint_valid[21:] *= joint_valid[41]
joints_3d = np.zeros((num_joints, 3), dtype=np.float32)
joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)
joints_3d[:, :2] = joint_img
joints_3d[:21, 2] = joint_cam[:21, 2] - joint_cam[20, 2]
joints_3d[21:, 2] = joint_cam[21:, 2] - joint_cam[41, 2]
joints_3d_visible[...] = np.minimum(1, joint_valid.reshape(-1, 1))
gt_db.append({
'image_file': image_file,
'center': center,
'scale': scale,
'rotation': 0,
'joints_3d': joints_3d,
'joints_3d_visible': joints_3d_visible,
'hand_type': hand_type,
'hand_type_valid': hand_type_valid,
'rel_root_depth': rel_root_depth,
'rel_root_valid': rel_root_valid,
'abs_depth': abs_depth,
'joints_cam': joint_cam,
'focal': focal,
'princpt': principal_pt,
'dataset': self.dataset_name,
'bbox': bbox,
'bbox_score': 1,
'bbox_id': bbox_id
})
bbox_id = bbox_id + 1
gt_db = sorted(gt_db, key=lambda x: x['bbox_id'])
return gt_db
def evaluate(self, outputs, res_folder, metric='MPJPE', **kwargs):
"""Evaluate interhand2d keypoint results. The pose prediction results
will be saved in `${res_folder}/result_keypoints.json`.
Note:
batch_size: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
outputs (list(dict))
:preds (np.ndarray[N,K,3]): The first two dimensions are
coordinates, score is the third dimension of the array.
:hand_type (np.ndarray[N, 4]): The first two dimensions are
hand type, scores is the last two dimensions.
:rel_root_depth (np.ndarray[N]): The relative depth of left
wrist and right wrist.
:boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]
, scale[1],area, score]
:image_paths (list[str]): For example, ['Capture6/
0012_aokay_upright/cam410061/image4996.jpg']
:output_heatmap (np.ndarray[N, K, H, W]): model outputs.
res_folder (str): Path of directory to save the results.
metric (str | list[str]): Metric to be performed.
Options: 'MRRPE', 'MPJPE', 'Handedness_acc'.
Returns:
dict: Evaluation results for evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['MRRPE', 'MPJPE', 'Handedness_acc']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
res_file = os.path.join(res_folder, 'result_keypoints.json')
kpts = []
for output in outputs:
preds = output.get('preds')
if preds is None and 'MPJPE' in metrics:
raise KeyError('metric MPJPE is not supported')
hand_type = output.get('hand_type')
if hand_type is None and 'Handedness_acc' in metrics:
raise KeyError('metric Handedness_acc is not supported')
rel_root_depth = output.get('rel_root_depth')
if rel_root_depth is None and 'MRRPE' in metrics:
raise KeyError('metric MRRPE is not supported')
boxes = output['boxes']
image_paths = output['image_paths']
bbox_ids = output['bbox_ids']
batch_size = len(image_paths)
for i in range(batch_size):
image_id = self.name2id[image_paths[i][len(self.img_prefix):]]
kpt = {
'center': boxes[i][0:2].tolist(),
'scale': boxes[i][2:4].tolist(),
'area': float(boxes[i][4]),
'score': float(boxes[i][5]),
'image_id': image_id,
'bbox_id': bbox_ids[i]
}
if preds is not None:
kpt['keypoints'] = preds[i, :, :3].tolist()
if hand_type is not None:
kpt['hand_type'] = hand_type[i][0:2].tolist()
kpt['hand_type_score'] = hand_type[i][2:4].tolist()
if rel_root_depth is not None:
kpt['rel_root_depth'] = float(rel_root_depth[i])
kpts.append(kpt)
kpts = self._sort_and_unique_bboxes(kpts)
self._write_keypoint_results(kpts, res_file)
info_str = self._report_metric(res_file, metrics)
name_value = OrderedDict(info_str)
return name_value
@staticmethod
def _get_accuracy(outputs, gts, masks):
"""Get accuracy of multi-label classification.
Note:
batch_size: N
label_num: C
Args:
outputs (np.array[N, C]): predicted multi-label.
gts (np.array[N, C]): Groundtruth multi-label.
masks (np.array[N, ]): masked outputs will be ignored for
accuracy calculation.
Returns:
accuracy (float)
"""
acc = (outputs == gts).all(axis=1)
return np.mean(acc[masks])
def _report_metric(self, res_file, metrics):
"""Keypoint evaluation.
Args:
res_file (str): Json file stored prediction results.
metrics (str | list[str]): Metric to be performed.
Options: 'MRRPE', 'MPJPE', 'Handedness_acc'.
Returns:
List: Evaluation results for evaluation metric.
"""
info_str = []
with open(res_file, 'r') as fin:
preds = json.load(fin)
assert len(preds) == len(self.db)
gts_rel_root = []
preds_rel_root = []
rel_root_masks = []
gts_joint_coord_cam = []
preds_joint_coord_cam = []
single_masks = []
interacting_masks = []
all_masks = []
gts_hand_type = []
preds_hand_type = []
hand_type_masks = []
for pred, item in zip(preds, self.db):
# mrrpe
if 'MRRPE' in metrics:
if item['hand_type'].all() and item['joints_3d_visible'][
20, 0] and item['joints_3d_visible'][41, 0]:
rel_root_masks.append(True)
pred_left_root_img = np.array(
pred['keypoints'][41], dtype=np.float32)[None, :]
pred_left_root_img[:, 2] += item['abs_depth'][0] + pred[
'rel_root_depth']
pred_left_root_cam = self._pixel2cam(
pred_left_root_img, item['focal'], item['princpt'])
pred_right_root_img = np.array(
pred['keypoints'][20], dtype=np.float32)[None, :]
pred_right_root_img[:, 2] += item['abs_depth'][0]
pred_right_root_cam = self._pixel2cam(
pred_right_root_img, item['focal'], item['princpt'])
preds_rel_root.append(pred_left_root_cam -
pred_right_root_cam)
gts_rel_root.append(
[item['joints_cam'][41] - item['joints_cam'][20]])
else:
rel_root_masks.append(False)
preds_rel_root.append([[0., 0., 0.]])
gts_rel_root.append([[0., 0., 0.]])
if 'MPJPE' in metrics:
pred_joint_coord_img = np.array(
pred['keypoints'], dtype=np.float32)
gt_joint_coord_cam = item['joints_cam'].copy()
pred_joint_coord_img[:21, 2] += item['abs_depth'][0]
pred_joint_coord_img[21:, 2] += item['abs_depth'][1]
pred_joint_coord_cam = self._pixel2cam(pred_joint_coord_img,
item['focal'],
item['princpt'])
pred_joint_coord_cam[:21] -= pred_joint_coord_cam[20]
pred_joint_coord_cam[21:] -= pred_joint_coord_cam[41]
gt_joint_coord_cam[:21] -= gt_joint_coord_cam[20]
gt_joint_coord_cam[21:] -= gt_joint_coord_cam[41]
preds_joint_coord_cam.append(pred_joint_coord_cam)
gts_joint_coord_cam.append(gt_joint_coord_cam)
mask = (np.array(item['joints_3d_visible'])[:, 0]) > 0
if item['hand_type'].all():
single_masks.append(
np.zeros(self.ann_info['num_joints'], dtype=bool))
interacting_masks.append(mask)
all_masks.append(mask)
else:
single_masks.append(mask)
interacting_masks.append(
np.zeros(self.ann_info['num_joints'], dtype=bool))
all_masks.append(mask)
if 'Handedness_acc' in metrics:
pred_hand_type = np.array(pred['hand_type'], dtype=int)
preds_hand_type.append(pred_hand_type)
gts_hand_type.append(item['hand_type'])
hand_type_masks.append(item['hand_type_valid'] > 0)
gts_rel_root = np.array(gts_rel_root, dtype=np.float32)
preds_rel_root = np.array(preds_rel_root, dtype=np.float32)
rel_root_masks = np.array(rel_root_masks, dtype=bool)[:, None]
gts_joint_coord_cam = np.array(gts_joint_coord_cam, dtype=np.float32)
preds_joint_coord_cam = np.array(
preds_joint_coord_cam, dtype=np.float32)
single_masks = np.array(single_masks, dtype=bool)
interacting_masks = np.array(interacting_masks, dtype=bool)
all_masks = np.array(all_masks, dtype=bool)
gts_hand_type = np.array(gts_hand_type, dtype=int)
preds_hand_type = np.array(preds_hand_type, dtype=int)
hand_type_masks = np.array(hand_type_masks, dtype=bool)
if 'MRRPE' in metrics:
info_str.append(('MRRPE',
keypoint_epe(preds_rel_root, gts_rel_root,
rel_root_masks)))
if 'MPJPE' in metrics:
info_str.append(('MPJPE_all',
keypoint_epe(preds_joint_coord_cam,
gts_joint_coord_cam, all_masks)))
info_str.append(('MPJPE_single',
keypoint_epe(preds_joint_coord_cam,
gts_joint_coord_cam, single_masks)))
info_str.append(
('MPJPE_interacting',
keypoint_epe(preds_joint_coord_cam, gts_joint_coord_cam,
interacting_masks)))
if 'Handedness_acc' in metrics:
info_str.append(('Handedness_acc',
self._get_accuracy(preds_hand_type, gts_hand_type,
hand_type_masks)))
return info_str
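# Construction sketch (file names and data_cfg values below are placeholders,
# not shipped files; only the data_cfg keys read in __init__ above are shown —
# the base class will require the usual top-down keys as well):
#   data_cfg = dict(heatmap3d_depth_bound=400.0, heatmap_size_root=64,
#                   root_depth_bound=400.0, ...)
#   dataset = InterHand3DDataset(
#       ann_file='annotations/train_data.json',
#       camera_file='annotations/train_camera.json',
#       joint_file='annotations/train_joint_3d.json',
#       img_prefix='images/train/',
#       data_cfg=data_cfg, pipeline=[], use_gt_root_depth=True)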
|
py | 1a4320f1e1738d9ccf4441e76941de8cd3773288 | # Author: Niels Nuyttens <[email protected]>
#
# License: Apache Software License 2.0
"""Statistical drift calculation using `Kolmogorov-Smirnov` and `chi2-contingency` tests."""
from typing import Any, Dict, List, cast
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency, ks_2samp
from nannyml.chunk import Chunker
from nannyml.drift.base import DriftCalculator
from nannyml.drift.model_inputs.univariate.statistical.results import UnivariateDriftResult
from nannyml.exceptions import CalculatorNotFittedException, MissingMetadataException
from nannyml.metadata import BinaryClassificationMetadata, MulticlassClassificationMetadata, RegressionMetadata
from nannyml.metadata.base import NML_METADATA_COLUMNS, NML_METADATA_PARTITION_COLUMN_NAME, ModelMetadata
from nannyml.preprocessing import preprocess
ALERT_THRESHOLD_P_VALUE = 0.05
class UnivariateStatisticalDriftCalculator(DriftCalculator):
"""A drift calculator that relies on statistics to detect drift."""
def __init__(
self,
model_metadata: ModelMetadata,
features: List[str] = None,
chunk_size: int = None,
chunk_number: int = None,
chunk_period: str = None,
chunker: Chunker = None,
):
"""Constructs a new UnivariateStatisticalDriftCalculator.
Parameters
----------
model_metadata: ModelMetadata
Metadata for the model whose data is to be processed.
features: List[str], default=None
An optional list of feature names to use during drift calculation. None by default, in this case
all features are used during calculation.
chunk_size: int
Splits the data into chunks containing `chunks_size` observations.
Only one of `chunk_size`, `chunk_number` or `chunk_period` should be given.
chunk_number: int
Splits the data into `chunk_number` pieces.
Only one of `chunk_size`, `chunk_number` or `chunk_period` should be given.
chunk_period: str
Splits the data according to the given period.
Only one of `chunk_size`, `chunk_number` or `chunk_period` should be given.
chunker : Chunker
The `Chunker` used to split the data sets into a lists of chunks.
Examples
--------
>>> import nannyml as nml
>>> ref_df, ana_df, _ = nml.load_synthetic_binary_classification_dataset()
>>> metadata = nml.extract_metadata(ref_df)
>>> # Create a calculator that will chunk by week
>>> drift_calc = nml.UnivariateStatisticalDriftCalculator(model_metadata=metadata, chunk_period='W')
"""
super(UnivariateStatisticalDriftCalculator, self).__init__(
model_metadata, features, chunk_size, chunk_number, chunk_period, chunker
)
self.__prediction_column_names: List[str] = []
self.__predicted_probability_column_names: List[str] = []
# add continuous predictions or predicted probabilities from metadata to the selected features
if isinstance(model_metadata, BinaryClassificationMetadata):
if model_metadata.predicted_probability_column_name is None:
raise MissingMetadataException(
"missing value for 'predicted_probability_column_name'. "
"Please update your model metadata accordingly."
)
self.__prediction_column_names = []
self.__predicted_probabilities_column_names = [
cast(BinaryClassificationMetadata, self.model_metadata).predicted_probability_column_name
]
elif isinstance(model_metadata, MulticlassClassificationMetadata):
if model_metadata.predicted_probabilities_column_names is None:
raise MissingMetadataException(
"missing value for 'predicted_probability_column_name'. "
"Please update your model metadata accordingly."
)
md = cast(MulticlassClassificationMetadata, self.model_metadata)
self.__prediction_column_names = []
self.__predicted_probabilities_column_names = list(md.predicted_probabilities_column_names.values())
elif isinstance(model_metadata, RegressionMetadata):
if model_metadata.prediction_column_name is None:
raise MissingMetadataException(
"missing value for 'prediction_column_name'. " "Please update your model metadata accordingly."
)
self.__prediction_column_names = [model_metadata.prediction_column_name]
self.__predicted_probabilities_column_names = []
self.selected_features += self.__predicted_probabilities_column_names + self.__prediction_column_names
self._reference_data = None
def fit(self, reference_data: pd.DataFrame):
"""Fits the drift calculator using a set of reference data.
Parameters
----------
reference_data : pd.DataFrame
A reference data set containing predictions (labels and/or probabilities) and target values.
Returns
-------
calculator: DriftCalculator
The fitted calculator.
Examples
--------
>>> import nannyml as nml
>>> ref_df, ana_df, _ = nml.load_synthetic_binary_classification_dataset()
>>> metadata = nml.extract_metadata(ref_df, model_type=nml.ModelType.CLASSIFICATION_BINARY)
>>> # Create a calculator and fit it
>>> drift_calc = nml.UnivariateStatisticalDriftCalculator(model_metadata=metadata, chunk_period='W').fit(ref_df)
"""
reference_data = preprocess(data=reference_data, metadata=self.model_metadata, reference=True)
self._reference_data = reference_data.copy(deep=True)
return self
def calculate(
self,
data: pd.DataFrame,
) -> UnivariateDriftResult:
"""Calculates the data reconstruction drift for a given data set.
Parameters
----------
data : pd.DataFrame
The dataset to calculate the univariate statistical drift for.
Returns
-------
univariate_drift: UnivariateDriftResult
A :class:`result<nannyml.drift.model_inputs.univariate.statistical.results.UnivariateDriftResult>`
object where each row represents a :class:`~nannyml.chunk.Chunk`,
containing :class:`~nannyml.chunk.Chunk` properties and the univariate statistical drift calculated
for that :class:`~nannyml.chunk.Chunk`.
Examples
--------
>>> import nannyml as nml
>>> ref_df, ana_df, _ = nml.load_synthetic_binary_classification_dataset()
>>> metadata = nml.extract_metadata(ref_df, model_type=nml.ModelType.CLASSIFICATION_BINARY)
>>> # Create a calculator and fit it
>>> drift_calc = nml.UnivariateStatisticalDriftCalculator(model_metadata=metadata, chunk_period='W').fit(ref_df)
>>> drift = drift_calc.calculate(data)
"""
data = preprocess(data=data, metadata=self.model_metadata)
# Get lists of categorical <-> categorical features
categorical_column_names = [f.column_name for f in self.model_metadata.categorical_features]
continuous_column_names = (
[f.column_name for f in self.model_metadata.continuous_features]
+ self.__predicted_probabilities_column_names
+ self.__prediction_column_names
)
features_and_metadata = NML_METADATA_COLUMNS + self.selected_features
chunks = self.chunker.split(data, columns=features_and_metadata, minimum_chunk_size=500)
chunk_drifts = []
# Calculate chunk-wise drift statistics.
# Append all into resulting DataFrame indexed by chunk key.
for chunk in chunks:
chunk_drift: Dict[str, Any] = {
'key': chunk.key,
'start_index': chunk.start_index,
'end_index': chunk.end_index,
'start_date': chunk.start_datetime,
'end_date': chunk.end_datetime,
'partition': 'analysis' if chunk.is_transition else chunk.partition,
}
present_categorical_column_names = list(set(chunk.data.columns) & set(categorical_column_names))
for column in present_categorical_column_names:
statistic, p_value, _, _ = chi2_contingency(
pd.concat(
[
self._reference_data[column].value_counts(), # type: ignore
chunk.data[column].value_counts(),
],
axis=1,
).fillna(0)
)
chunk_drift[f'{column}_chi2'] = statistic
chunk_drift[f'{column}_p_value'] = np.round(p_value, decimals=3)
chunk_drift[f'{column}_alert'] = (p_value < ALERT_THRESHOLD_P_VALUE) and (
chunk.data[NML_METADATA_PARTITION_COLUMN_NAME] == 'analysis'
).all()
chunk_drift[f'{column}_threshold'] = ALERT_THRESHOLD_P_VALUE
present_continuous_column_names = list(set(chunk.data.columns) & set(continuous_column_names))
for column in present_continuous_column_names:
statistic, p_value = ks_2samp(self._reference_data[column], chunk.data[column]) # type: ignore
chunk_drift[f'{column}_dstat'] = statistic
chunk_drift[f'{column}_p_value'] = np.round(p_value, decimals=3)
chunk_drift[f'{column}_alert'] = (p_value < ALERT_THRESHOLD_P_VALUE) and (
chunk.data[NML_METADATA_PARTITION_COLUMN_NAME] == 'analysis'
).all()
chunk_drift[f'{column}_threshold'] = ALERT_THRESHOLD_P_VALUE
chunk_drifts.append(chunk_drift)
res = pd.DataFrame.from_records(chunk_drifts)
res = res.reset_index(drop=True)
res.attrs['nml_drift_calculator'] = __name__
if self.chunker is None:
raise CalculatorNotFittedException(
'chunker has not been set. '
'Please ensure you run ``calculator.fit()`` '
'before running ``calculator.calculate()``'
)
return UnivariateDriftResult(analysis_data=chunks, drift_data=res, model_metadata=self.model_metadata)
|
py | 1a4321dab2b77828223d449f8dfdc609d50021a7 | import discord
from discord.ext import commands
class ServerUtils(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def message_from_link(self, link):
"""Returns a Discord message given a link to the message."""
split_link = link.split("/")
channel = self.bot.get_channel(int(split_link[-2]))
message = await channel.fetch_message(int(split_link[-1]))
return message
async def add_star(self, message):
"""Adds an image to #night-sky for posterity."""
author = message.author
channel = self.bot.get_channel(483357571756064782)
description = message.clean_content
if len(message.clean_content) == 0 and len(message.embeds) > 0 and "description" in message.embeds[0]:
description += message.embeds[0]["description"]
embed = discord.Embed(
description = description,
timestamp = message.created_at,
colour = author.colour
)
if len(message.embeds) > 0 and message.embeds[0].type == "image":
print(message.embeds[0].url)
embed.set_image(url=message.embeds[0].url)
elif len(message.attachments) > 0:
embed.set_image(url=message.attachments[0].url)
url = "https://discordapp.com/channels/{0}/{1}/{2}".format(
str(message.guild.id),
str(message.channel.id),
str(message.id)
)
embed.set_author(name=author.name + " in #" + message.channel.name, url=url, icon_url=author.avatar_url)
await channel.send(embed = embed)
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
"""Detects #night-sky add requests."""
if payload.emoji.name == "⭐":
message = await self.bot.get_channel(payload.channel_id).fetch_message(payload.message_id)
await self.add_star(message)
@commands.command()
async def star(self, message : message_from_link):
"""Manually adds to #night-sky given link."""
await self.add_star(message)
def setup(bot):
bot.add_cog(ServerUtils(bot))
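# Loading sketch (assumes this file is saved as an extension module, e.g.
# cogs/server_utils.py): discord.py will import it and call setup() above.
#   bot.load_extension("cogs.server_utils")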
|
py | 1a4322dea32d7d5a5ad40b32adc79d42d07d87a3 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import warnings
from abc import abstractmethod
from .base import maybe_requirement
from .compatibility import PY3, AbstractClass
if PY3:
import urllib.parse as urlparse
from urllib.parse import urljoin
else:
import urlparse
from urlparse import urljoin
class FetcherBase(AbstractClass):
"""
A fetcher takes a Requirement and tells us where to crawl to find it.
"""
@abstractmethod
def urls(self, req):
raise NotImplementedError
class Fetcher(FetcherBase):
def __init__(self, urls):
self._urls = urls
def urls(self, _):
return self._urls
def __eq__(self, other):
if not isinstance(other, Fetcher):
return False
return self._urls == other._urls
class PyPIFetcher(FetcherBase):
PYPI_BASE = 'https://pypi.python.org/simple/'
def __init__(self, pypi_base=PYPI_BASE, use_mirrors=False):
if use_mirrors:
warnings.warn('use_mirrors is now deprecated.')
if not pypi_base.endswith('/'):
pypi_base += '/'
pypi_url = urlparse.urlparse(pypi_base)
if not pypi_url.scheme:
self._pypi_base = 'http://' + pypi_base
else:
self._pypi_base = pypi_base
def urls(self, req):
req = maybe_requirement(req)
return [urljoin(self._pypi_base, '%s/' % req.project_name)]
def __eq__(self, other):
if not isinstance(other, PyPIFetcher):
return False
return self._pypi_base == other._pypi_base
def __repr__(self):
return 'PyPIFetcher(%r)' % self._pypi_base
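# Minimal usage sketch (requirement name is hypothetical): `urls` simply joins
# the configured simple-index base with the requirement's project name, so a
# mirror can be used by passing `pypi_base` to the constructor.
#   from pkg_resources import Requirement
#   PyPIFetcher().urls(Requirement.parse('requests'))
#   # -> ['https://pypi.python.org/simple/requests/']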
|
py | 1a43238e3b837361ee5e0ef05f662e5fe68b101a | from qcor import qjit, qalloc
import qiskit
# Generate 3-qubit GHZ state with Qiskit
circ = qiskit.QuantumCircuit(3)
circ.h(0)
circ.cx(0, 1)
circ.cx(1, 2)
circ.measure_all()
# Creates a kernel parameterized on a qreg
qcor_kernel = qjit(circ)
# Allocate the qreg
q = qalloc(3)
# Convert to MLIR and print
mlir = qcor_kernel.mlir(q)
print(mlir)
# Convert to QIR and print
qir = qcor_kernel.qir(q)
print(qir)
from pyquil import Program
from pyquil.gates import CNOT, H, MEASURE
p = Program()
p += H(0)
p += CNOT(0, 1)
ro = p.declare('ro', 'BIT', 2)
p += MEASURE(0, ro[0])
p += MEASURE(1, ro[1])
# This requires rigetti/quilc docker image
qcor_kernel_pyquil = qjit(p)
r = qalloc(2)
# Convert to MLIR and print
mlir = qcor_kernel_pyquil.mlir(r)
print(mlir)
# Convert to QIR and print
qir = qcor_kernel_pyquil.qir(r)
print(qir) |
py | 1a43244ec371981c7cda4e47bfa173511bd44436 | import numpy as np
from .Composition import Composition
from morpheus.utils import debug_print
VERBOSITY = 1
class SequentialComposition(Composition):
def __init__(self):
super().__init__()
self.all_desc_ids = np.array([])
return
def predict(self, X, **kwargs):
n_rows, n_atts = X.shape
s_pred = np.zeros((n_rows, self.n_outputs_))
D = np.empty((n_rows, len(self.all_desc_ids))) # D is extended input matrix
idx_map = self._map_elements_idx(
self.desc_ids, self.all_desc_ids, return_array=True
)
X_idx, D_idx = idx_map[:, 0], idx_map[:, 1]
D[:, D_idx] = X[:, X_idx] # We fill up some entries of the D-matrix.
for e in self.estimators_:
idx_map = self._map_elements_idx(
e.desc_ids, self.all_desc_ids, return_array=True
)
d_idx = idx_map[:, 1]
e_outcome = self._predict_estimator_tidy(e, D[:, d_idx], **kwargs)
msg = """
e_outcome.shape: {}
""".format(
e_outcome.shape
)
debug_print(msg, V=VERBOSITY)
c_idx_map = self._map_elements_idx(
e.targ_ids, self.all_desc_ids, return_array=True
) # Map of connections
# If I predict one of the connections
if c_idx_map.size > 0:
c_idx_e, c_idx_s = c_idx_map[:, 0], c_idx_map[:, 1]
D[:, c_idx_s] = e_outcome[:, c_idx_e]
t_idx_map = self._map_elements_idx(
e.targ_ids, self.targ_ids, return_array=True
) # Map of targets
# If I predict one of the targets
if t_idx_map.size > 0:
msg = """
t_idx_map: {}
""".format(
t_idx_map
)
debug_print(msg, V=VERBOSITY)
t_idx_e, t_idx_s = t_idx_map[:, 0], t_idx_map[:, 1]
s_pred[:, t_idx_s] = e_outcome[:, t_idx_e]
if s_pred.shape[1] == 1:
return s_pred.ravel()
else:
return s_pred
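# Data-flow note for the method above: X fills the columns of the extended
# matrix D that correspond to self.desc_ids; each estimator then reads its own
# desc_ids columns from D, writes any outputs that are also in all_desc_ids
# back into D (the chain "connections"), and writes outputs that belong to
# self.targ_ids into s_pred.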
def predict_numeric(self, X, **kwargs):
n_rows, n_atts = X.shape
s_numeric = np.zeros((n_rows, len(self.numeric_targ_ids)))
s_weights = [
t_weight
for t_idx, t_weight in enumerate(self.targ_weights)
if self.targ_types[t_idx] == "numeric"
]
D = np.empty((n_rows, len(self.all_desc_ids))) # D is extended input matrix
idx_map = self._map_elements_idx(
self.desc_ids, self.all_desc_ids, return_array=True
)
X_idx, D_idx = idx_map[:, 0], idx_map[:, 1]
D[:, D_idx] = X[:, X_idx] # We fill up some entries of the D-matrix.
for e in self.estimators_:
idx_map = self._map_elements_idx(
e.desc_ids, self.all_desc_ids, return_array=True
)
d_idx = idx_map[:, 1]
c_idx_map = self._map_elements_idx(
e.targ_ids, self.all_desc_ids, return_array=True
) # Map of connections
# If I predict one of the connections
if c_idx_map.size > 0:
e_outcome = self._predict_estimator_tidy(e, D[:, d_idx], **kwargs)
c_idx_e, c_idx_s = c_idx_map[:, 0], c_idx_map[:, 1]
D[:, c_idx_s] = e_outcome[:, c_idx_e]
t_idx_map = self._map_elements_idx(
e.targ_ids, self.numeric_targ_ids, return_array=True
) # Map of targets
# If I predict one of the targets
if t_idx_map.size > 0:
e_numeric = self._predict_numeric_estimator_tidy(
e, D[:, d_idx], **kwargs
)
s_numeric = self._add_numeric_estimator_outcomes(
e, e_numeric, s_numeric
)
# Normalize
s_numeric /= s_weights
if s_numeric.shape[1] == 1:
return s_numeric.ravel()
else:
return s_numeric
def predict_nominal(self, X, **kwargs):
n_rows, n_atts = X.shape
s_nominal = [np.zeros((n_rows, n_clas)) for n_clas in self.n_classes_]
s_weights = [
t_weight
for t_idx, t_weight in enumerate(self.targ_weights)
if self.targ_types[t_idx] == "nominal"
]
D = np.empty((n_rows, len(self.all_desc_ids))) # D is extended input matrix
idx_map = self._map_elements_idx(
self.desc_ids, self.all_desc_ids, return_array=True
)
X_idx, D_idx = idx_map[:, 0], idx_map[:, 1]
D[:, D_idx] = X[:, X_idx] # We fill up some entries of the D-matrix.
for e in self.estimators_:
idx_map = self._map_elements_idx(
e.desc_ids, self.all_desc_ids, return_array=True
)
d_idx = idx_map[:, 1]
c_idx_map = self._map_elements_idx(
e.targ_ids, self.all_desc_ids, return_array=True
) # Map of connections
# If I predict one of the connections
if c_idx_map.size > 0:
e_outcome = self._predict_estimator_tidy(e, D[:, d_idx], **kwargs)
c_idx_e, c_idx_s = c_idx_map[:, 0], c_idx_map[:, 1]
D[:, c_idx_s] = e_outcome[:, c_idx_e]
t_idx_map = self._map_elements_idx(
e.targ_ids, self.nominal_targ_ids, return_array=True
) # Map of targets
# If I predict one of the targets
if t_idx_map.size > 0:
e_nominal = self._predict_nominal_estimator_tidy(
e, D[:, d_idx], **kwargs
)
s_nominal = self._add_nominal_estimator_outcomes(
e, e_nominal, s_nominal
)
# Normalize
s_nominal = [
s_nominal[t_idx] / s_weights[t_idx]
for t_idx in range(len(self.nominal_targ_ids))
]
# redo sklearn convention from hell
if len(s_nominal) == 1:
return s_nominal[0]
else:
return s_nominal
# Add (i.e., incremental update)
def _add_estimator(self, e, location="out"):
def check_connection(model_a, model_b):
connecting_attributes = np.intersect1d(model_a.targ_ids, model_b.desc_ids)
msg = """
Connecting attributes: {}
""".format(
connecting_attributes
)
debug_print(msg, V=VERBOSITY)
return connecting_attributes.size > 0
if len(self.estimators_) == 0:
# No estimator yet, everything is OK.
self.estimators_.insert(0, e)
elif location in {"out", "output", "append", "back", "end"}:
msg = """
Trying to add a model to end of the chain.
Current chain targ_ids: {}
New estimator desc_ids: {}
""".format(
self.targ_ids, e.desc_ids
)
debug_print(msg, V=VERBOSITY)
if check_connection(self, e):
self.estimators_.append(e)
else:
msg = """
Failed to connect the new estimator to the existing chain.
Current chain has target attributes: {}
New estimator has descriptive attributes: {}
Since you decided to add this estimator to the end of the
current chain, there should be an overlap between the two
in order to connect them. This is not the case.
""".format(
self.targ_ids, e.desc_ids
)
raise ValueError(msg)
elif location in {"in", "input", "prepend", "front", "begin"}:
if check_connection(e, self):
self.estimators_.insert(0, e)
else:
msg = """
Failed to connect the new estimator to the existing chain.
New estimator has target attributes: {}
Current chain has descriptive attributes: {}
Since you decided to add this estimator to the beginning of the
current chain, there should be an overlap between the two
in order to connect them. This is not the case.
""".format(
e.desc_ids, self.targ_ids
)
raise ValueError(msg)
else:
msg = """
An estimator can only be added to a sequential composition if at
least one of its input attributes is an output attribute of the
current sequential composition so far.
Input attributes new estimator: {}
Output attributes current sequential composition: {}
""".format(
e.desc_ids, self.targ_ids
)
raise ValueError(msg)
return
def _add_ids_estimator(self, e):
conn_ids = np.intersect1d(self.targ_ids, e.desc_ids)
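        # Attributes that the current chain predicts and the new estimator consumes become
        # internal connections; they are removed from the composition's desc/targ ids below.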
self.all_desc_ids = np.unique(np.concatenate((self.all_desc_ids, e.desc_ids)))
self.desc_ids = np.unique(np.concatenate((self.desc_ids, e.desc_ids)))
self.targ_ids = np.unique(np.concatenate((self.targ_ids, e.targ_ids)))
# Remove the connection ids
self.desc_ids = self.desc_ids[~np.in1d(self.desc_ids, conn_ids)]
self.targ_ids = self.targ_ids[~np.in1d(self.targ_ids, conn_ids)]
return
|
py | 1a4324d2dde06579fc2fd6e992a7c00eb9371eff | #!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import uuid
from dlab.meta_lib import *
from dlab.actions_lib import *
import boto3
import argparse
import sys
def stop_notebook(nb_tag_value, bucket_name, tag_name, ssh_user, key_path):
print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
try:
clusters_list = get_emr_list(nb_tag_value, 'Value')
if clusters_list:
for cluster_id in clusters_list:
client = boto3.client('emr')
cluster = client.describe_cluster(ClusterId=cluster_id)
cluster = cluster.get("Cluster")
emr_name = cluster.get('Name')
emr_version = cluster.get('ReleaseLabel')
s3_cleanup(bucket_name, emr_name, os.environ['edge_user_name'])
print("The bucket {} has been cleaned successfully".format(bucket_name))
terminate_emr(cluster_id)
print("The EMR cluster {} has been terminated successfully".format(emr_name))
remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_version)
print("{} kernels have been removed from notebook successfully".format(emr_name))
else:
print("There are no EMR clusters to terminate.")
except:
sys.exit(1)
print("Stopping data engine cluster")
try:
cluster_list = []
master_ids = []
cluster_instances_list = get_ec2_list('dataengine_notebook_name', nb_tag_value)
for instance in cluster_instances_list:
for tag in instance.tags:
if tag['Key'] == 'Type' and tag['Value'] == 'master':
master_ids.append(instance.id)
for id in master_ids:
for tag in get_instance_attr(id, 'tags'):
if tag['Key'] == 'Name':
cluster_list.append(tag['Value'].replace(' ', '')[:-2])
stop_ec2('dataengine_notebook_name', nb_tag_value)
except:
sys.exit(1)
print("Stopping notebook")
try:
stop_ec2(tag_name, nb_tag_value)
except:
sys.exit(1)
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
create_aws_config_files()
print('Generating infrastructure names and tags')
notebook_config = dict()
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['bucket_name'] = (notebook_config['service_base_name'] + '-ssn-bucket').lower().replace('_', '-')
notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
logging.info('[STOP NOTEBOOK]')
print('[STOP NOTEBOOK]')
try:
stop_notebook(notebook_config['notebook_name'], notebook_config['bucket_name'], notebook_config['tag_name'],
os.environ['conf_os_user'], notebook_config['key_path'])
except Exception as err:
append_result("Failed to stop notebook.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"notebook_name": notebook_config['notebook_name'],
"Tag_name": notebook_config['tag_name'],
"user_own_bucket_name": notebook_config['bucket_name'],
"Action": "Stop notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
|
py | 1a4325855bfc4a00ca6d37a2b893788c3130bb03 | # coding=utf-8
# !/usr/bin/env python
"""
:mod:"IKA_RET_Control_Visc" -- API for IKA RET Control Visc remote controllable hotplate stirrer
===================================
.. module:: IKA_RET_Control_Visc
:platform: Windows
:synopsis: Control IKA RET Control Visc hotplate stirrer.
.. moduleauthor:: Sebastian Steiner <[email protected]>
.. moduleauthor:: Stefan Glatzel <[email protected]>
(c) 2017 The Cronin Group, University of Glasgow
This provides a python class for the IKA RET Control Visc Hotplates
based on software developed by Stefan Glatzel.
The command implementation is based on the english manual:
English manual version: 20000004159, RET control-visc_112015
Pages 31 - 34, the german version, same file pages 15 - 18 appears
to contain more and better information.
For style guide used see http://xkcd.com/1513/
"""
# system imports
import re
import serial
import os
import inspect
import sys
HERE = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(os.path.join(HERE, '..'))
# additional module imports
from SerialDevice.serial_labware import SerialDevice, command
class IKARETControlVisc(SerialDevice):
"""
This provides a python class for the IKA RET Control Visc Hotplates
The command implementation is based on the english manual:
English manual version: 20000004159, RET control-visc_112015, Pages 31 - 34,
the german version, same file pages 15 - 18 appears to contain more and better information.
"""
def __init__(self, port=None, device_name=None, connect_on_instantiation=False, soft_fail_for_testing=False):
"""
Initializer of the IKARETControlVisc class.
Args:
port (str): The port name/number of the hotplate
device_name (str): A descriptive name for the device, used mainly in debug prints.
connect_on_instantiation (bool): (optional) determines if the connection is established on instantiation of
the class. Default: Off
soft_fail_for_testing (bool): (optional) determines if an invalid serial port raises an error or merely logs
a message. Default: Off
"""
super().__init__(port, device_name, soft_fail_for_testing)
# serial settings
self.baudrate = 9600
self.bytesize = serial.SEVENBITS
self.parity = serial.PARITY_EVEN
self.rtscts = True
self.write_delay = 0.1
self.read_delay = 0.1
# answer patterns
        self.stranswer = re.compile(r"([0-9A-Z_]+)\r\n")
        self.valueanswer = re.compile(r"(\d+\.\d+) (\d)\r\n")
        self.wdanswer = re.compile(r"(\d+\.\d+)\r\n")
# other settings
self.IKA_default_name = "IKARET"
# DOCUMENTED COMMANDS for easier maintenance
self.GET_STIR_RATE_PV = "IN_PV_4"
self.GET_STIR_RATE_SP = "IN_SP_4"
self.SET_STIR_RATE_SP = "OUT_SP_4"
self.GET_TEMP_PV = "IN_PV_1"
self.GET_TEMP_SP = "IN_SP_1"
self.SET_TEMP_SP = "OUT_SP_1"
self.START_TEMP = "START_1"
self.STOP_TEMP = "STOP_1"
self.START_STIR = "START_4"
self.STOP_STIR = "STOP_4"
self.START_PH = "START_80"
self.STOP_PH = "STOP_80"
self.START_WEIGHING = "START_90"
self.STOP_WEIGHING = "STOP_90"
self.RESET = "RESET"
self.GET_NAME = "IN_NAME"
self.SET_NAME = "OUT_NAME"
self.GET_SOFTWARE_VERSION = "IN_SOFTWARE"
self.GET_MEDIUM_TEMPERATURE_SP = "IN_SP_7"
self.GET_HOT_PLATE_TEMPERATURE_PV = "IN_PV_2"
self.GET_HOT_PLATE_TEMPERATURE_SP = "IN_SP_2"
self.SET_HOT_PLATE_TEMPERATURE_SP = "OUT_SP_2"
self.GET_HOT_PLATE_SAFETY_TEMPERATURE_PV = "IN_PV_3"
self.GET_HOT_PLATE_SAFETY_TEMPERATURE_SP = "IN_SP_3"
self.GET_PH_PV = "IN_PV_80"
self.GET_WEIGHT_PV = "IN_PV_90"
self.launch_command_handler()
if connect_on_instantiation:
self.open_connection()
@property
@command
def stir_rate_pv(self):
"""
Reads the process variable (i.e. the current) stir rate
:return: call back to send_message with a request to return and check a value
"""
return self.send_message(self.GET_STIR_RATE_PV, True, self.valueanswer)
@property
@command
def stir_rate_sp(self):
"""
Reads the set point (target) for the stir rate
:return: call back to send_message with a request to return and check a value
"""
return self.send_message(self.GET_STIR_RATE_SP, True, self.valueanswer)
@stir_rate_sp.setter
@command
def stir_rate_sp(self, stir_rate=None):
"""
        Sets the stirrer rate and returns the set point from the hot plate so the user can verify that it was successful.
Args:
stir_rate (int): the target stir rate of the hot plate
Returns:
call back to get_stirrer_rate_set_point()
"""
try:
# type checking of the stir rate that the user provided
stir_rate = int(stir_rate)
except ValueError:
raise(ValueError("Error setting stir rate. Rate was not a valid integer \"{0}\"".format(stir_rate)))
self.logger.debug("Setting stir rate to {0} RPM...".format(stir_rate))
# actually sending the command
self.send_message("{0} {1}".format(self.SET_STIR_RATE_SP, stir_rate))
@property
@command
def temperature_pv(self):
# reading the process variable
return self.send_message(self.GET_TEMP_PV, True, self.valueanswer)
@property
@command
def temperature_sp(self):
return self.send_message(self.GET_TEMP_SP, True, self.valueanswer)
@temperature_sp.setter
@command
def temperature_sp(self, temperature=None):
"""
        Sets the target temperature for sensor 1 (i.e. "medium temperature (external temperature sensor)")
Args:
temperature (float): the target temperature
"""
try:
temperature = float(temperature)
except ValueError:
raise(ValueError("Error setting temperature. Value was not a valid float \"{0}\"".format(temperature)))
self.logger.debug("Setting temperature setpoint to {0}°C...".format(temperature))
# actually sending the command
self.send_message("{0} {1}".format(self.SET_TEMP_SP, temperature))
@command
def start_heater(self):
self.logger.debug("Starting heater...")
return self.send_message(self.START_TEMP)
@command
def stop_heater(self):
self.logger.debug("Stopping heater...")
return self.send_message(self.STOP_TEMP)
@command
def start_stirrer(self):
self.logger.debug("Starting stirrer...")
return self.send_message(self.START_STIR)
@command
def stop_stirrer(self):
self.logger.debug("Stopping heater...")
return self.send_message(self.STOP_STIR)
@command
def start_ph_meter(self):
return self.send_message(self.START_PH)
@command
def stop_ph_meter(self):
return self.send_message(self.STOP_PH)
@command
def start_weighing(self):
return self.send_message(self.START_WEIGHING)
@command
def stop_weighing(self):
return self.send_message(self.STOP_WEIGHING)
@command
def reset_hot_plate(self):
return self.send_message(self.RESET)
@property
@command
def name(self):
"""
Returns the name of the hot plate
:return: call back to send_message with a request to return the name
"""
return self.send_message(self.GET_NAME, True)
@name.setter
@command
def name(self, name=None):
"""
Sets the name of the hotplate to "name". Resets to default (self.IKA_default_name) if no name is passed.
Warns that names longer than 6 characters get truncated upon restart of the hotplate.
Args:
name (str): the new name
"""
if name is None:
name = self.IKA_default_name
if len(name) > 6:
self.logger.debug("Warning name will be shortened to \"{}\" by the hot plate, after restart.".format(name[0:6]))
self.send_message("{0} {1}".format(self.SET_NAME, name))
@property
@command
def software_version(self):
"""
Returns the software version of the firmware
!!!WARNING!!! Despite being documented this does not seem to work as intended, it just returns an empty string
:return: (supposed to...) software version of the firmware
"""
return self.send_message(self.GET_SOFTWARE_VERSION, True)
@command
def set_watch_dog_temp(self):
# TODO handle echo!
pass
@command
def set_watch_dog_stir_rate(self):
# TODO handle echo!
pass
@command
def get_hot_plate_temp_current(self):
pass
@property
@command
def temperature_heat_transfer_medium_sp(self):
return self.send_message(self.GET_MEDIUM_TEMPERATURE_SP, True, self.valueanswer)
@property
@command
def temperature_hot_plate_pv(self):
return self.send_message(self.GET_HOT_PLATE_TEMPERATURE_PV, True, self.valueanswer)
@property
@command
def temperature_hot_plate_sp(self):
return self.send_message(self.GET_HOT_PLATE_TEMPERATURE_SP, True, self.valueanswer)
@temperature_hot_plate_sp.setter
@command
def temperature_hot_plate_sp(self, temperature):
"""
        Sets the target temperature for sensor 2 (i.e. "hot plate temperature")
Args:
temperature (float): the target temperature
"""
try:
temperature = float(temperature)
except ValueError:
raise(ValueError("Error setting hot plate temperature. "
"Value was not a valid float \"{0}\"".format(temperature)
))
self.send_message("{0} {1}".format(self.SET_HOT_PLATE_TEMPERATURE_SP, temperature))
@property
@command
def temperature_hot_plate_safety_pv(self):
"""
This is a documented function and does return values, but I cannot figure out what it's supposed to be...
:return: excellent question...
"""
self.logger.debug("WARNING! Don't use temperature_hot_plate_safety_pv! (see docstring)")
return self.send_message(self.GET_HOT_PLATE_SAFETY_TEMPERATURE_PV, True, self.valueanswer)
@property
@command
def temperature_hot_plate_safety_sp(self):
"""
This returns the current safety temperature set point. There is no equivalent setter function (for obvious
safety reasons, it actually does not exist in the firmware)
:return: The current setting of the hot plate safety temperature
"""
return self.send_message(self.GET_HOT_PLATE_SAFETY_TEMPERATURE_SP, True, self.valueanswer)
@command
def get_viscosity_trend(self):
pass
@command
def get_ph(self):
return self.send_message(self.GET_PH_PV, True, self.valueanswer)
@command
def get_weight(self):
# only works with start weight, takes about 4 sec to calibrate
return self.send_message(self.GET_WEIGHT_PV, True, self.valueanswer)
if __name__ == '__main__':
hp = IKARETControlVisc(port="COM5", connect_on_instantiation=True)
    hp.temperature_sp = 40 # setting temperature to 40 °C
print("temperature_pv {}".format(hp.temperature_pv))
hp.start_heater() # starting the heater
hp.stop_heater() # stopping heater
print("temperature_hot_plate_safety_pv {}".format(hp.temperature_hot_plate_pv))
print("temperature_hot_plate_safety_sp {}".format(hp.temperature_hot_plate_sp))
print("temperature_hot_plate_safety_pv {}".format(hp.temperature_hot_plate_safety_pv))
print("temperature_hot_plate_safety_sp {}".format(hp.temperature_hot_plate_safety_sp))
print("software_version {}".format(hp.software_version))
while True:
pass
|
py | 1a4326950583dbab76404a4988d087d98cf66be9 | # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
def GetChromiumSrcDir():
return os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))
def GetGpuTestDir():
return os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def AddDirToPathIfNeeded(*path_parts):
path = os.path.abspath(os.path.join(*path_parts))
if os.path.isdir(path) and path not in sys.path:
sys.path.append(path)
|
py | 1a4326e720534f428a6a16c05a7255476f1ff652 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for google_research.google_research.cold_posterior_bnn.core.statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from cold_posterior_bnn.core import statistics as stats
tfd = tfp.distributions
TOL = 1e-7
class StatisticsTest(parameterized.TestCase, tf.test.TestCase):
def test_classification_prob(self):
cprob = stats.ClassificationLogProb()
logits1 = tf.math.log([[0.3, 0.7], [0.6, 0.4]])
logits2 = tf.math.log([[0.2, 0.8], [0.5, 0.5]])
logits3 = tf.math.log([[0.4, 0.6], [0.4, 0.6]])
cprob.reset()
cprob.update(logits1)
cprob.update(logits2)
cprob.update(logits3)
log_prob = cprob.result()
self.assertAlmostEqual(math.log(0.3), float(log_prob[0, 0]), delta=TOL)
self.assertAlmostEqual(math.log(0.7), float(log_prob[0, 1]), delta=TOL)
self.assertAlmostEqual(math.log(0.5), float(log_prob[1, 0]), delta=TOL)
self.assertAlmostEqual(math.log(0.5), float(log_prob[1, 1]), delta=TOL)
def test_brier_score(self):
logits1 = tf.math.log([[0.3, 0.7], [0.3, 0.7]])
logits2 = tf.math.log([[0.2, 0.8], [0.6, 0.4]])
logits3 = tf.math.log([[0.4, 0.6], [0.4, 0.6]])
labels = tf.convert_to_tensor([0, 1], dtype=tf.int32)
brier = stats.BrierScore()
brier.reset()
brier.update(logits1, labels)
brier.update(logits2, labels)
brier.update(logits3, labels)
brier_score = brier.result()
brier_score_true_0 = 0.3*0.3 + 0.7*0.7 - 2.0*0.3
brier_score_true_1 = (1.3/3.0)**2.0 + (1.7/3.0)**2.0 - 2.0*(1.7/3.0)
self.assertAlmostEqual(float(brier_score[0]), brier_score_true_0, delta=TOL)
self.assertAlmostEqual(float(brier_score[1]), brier_score_true_1, delta=TOL)
def _generate_perfect_calibration_logits(self, nsamples, nclasses,
inv_temp=2.0):
"""Generate well distributed and well calibrated probabilities.
Args:
nsamples: int, >= 1, number of samples to generate.
nclasses: int, >= 2, number of classes.
inv_temp: float, >= 0.0, inverse temperature parameter.
Returns:
logits: Tensor, shape (nsamples, nclasses), tf.float32, unnormalized log
probabilities (logits) of the probabilistic predictions.
labels: Tensor, shape (nsamples,), tf.int32, the true class labels. Each
element is in the range 0,..,nclasses-1.
"""
logits = inv_temp*tf.random.normal((nsamples, nclasses))
logits = tf.math.log_softmax(logits)
py = tfp.distributions.Categorical(logits=logits)
labels = py.sample()
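    # Sampling the labels from the model's own predictive distribution makes the
    # (logits, labels) pairs perfectly calibrated by construction.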
return logits, labels
def _generate_random_calibration_logits(self, nsamples, nclasses):
"""Generate well distributed and poorly calibrated probabilities.
Args:
nsamples: int, >= 1, number of samples to generate.
nclasses: int, >= 2, number of classes.
Returns:
logits: Tensor, shape (nsamples, nclasses), tf.float32, unnormalized log
probabilities (logits) of the probabilistic predictions.
labels: Tensor, shape (nsamples,), tf.int32, the true class labels. Each
element is in the range 0,..,nclasses-1.
"""
logits = 2.0*tf.random.normal((nsamples, nclasses))
logits = tf.math.log_softmax(logits)
py = tfp.distributions.Categorical(logits=logits)
labels = py.sample()
logits_other = 2.0*tf.random.normal((nsamples, nclasses))
logits_other = tf.math.log_softmax(logits_other)
return logits_other, labels
@parameterized.parameters(
(5, 3, 50000), (10, 5, 50000)
)
def test_ece_calibrated(self, num_bins, nclasses, nsamples):
logits, labels = self._generate_perfect_calibration_logits(
nsamples, nclasses)
ece_stat = stats.ECE(num_bins)
ece_stat.reset()
ece_stat.update(logits, labels)
ece = float(ece_stat.result())
ece_tolerance = 0.01
self.assertLess(ece, ece_tolerance, msg="ECE %.5f > %.2f for perfectly "
"calibrated logits" % (ece, ece_tolerance))
@parameterized.parameters(
(True, 3, 50000), (True, 5, 50000), (True, 10, 50000),
(False, 3, 50000), (False, 5, 50000), (False, 10, 50000),
)
def test_brier_decomposition(self, well_calib, nclasses, nsamples):
"""Recompose the Brier decomposition and compare it to the Brier score."""
if well_calib:
logits, labels = self._generate_perfect_calibration_logits(
nsamples, nclasses, inv_temp=0.25)
else:
logits, labels = self._generate_random_calibration_logits(
nsamples, nclasses)
score = stats.BrierScore()
uncert = stats.BrierUncertainty()
resol = stats.BrierResolution()
reliab = stats.BrierReliability()
for stat in [score, uncert, resol, reliab]:
stat.reset()
stat.update(logits, labels)
score = float(tf.reduce_mean(score.result()))
uncert = float(uncert.result())
resol = float(resol.result())
reliab = float(reliab.result())
self.assertGreaterEqual(resol, 0.0, "Brier resolution is negative, this "
"should not happen.")
self.assertGreaterEqual(reliab, 0.0, "Brier reliability is negative, this "
"should not happen.")
score_from_decomposition = uncert - resol + reliab
if well_calib:
calib_str = "calibrated"
else:
calib_str = "uncalibrated"
logging.info("Brier decomposition (%s) (n=%d, K=%d), "
"%.5f = %.5f - %.5f + %.5f (%.5f, diff %.5f)",
calib_str, nsamples, nclasses, score, uncert, resol, reliab,
score_from_decomposition, score - score_from_decomposition)
self.assertAlmostEqual(score, score_from_decomposition, delta=0.025,
msg="Brier decomposition sums to %.5f which "
"deviates from Brier score %.5f" % (
score_from_decomposition, score))
@parameterized.parameters(
(3, 50000), (5, 50000)
)
def test_brierreliab_poorly_calibrated(self, nclasses, nsamples):
logits, labels = self._generate_random_calibration_logits(
nsamples, nclasses)
brierreliab_stat = stats.BrierReliability()
brierreliab_stat.reset()
brierreliab_stat.update(logits, labels)
reliab = float(brierreliab_stat.result())
reliab_lower = 0.2
self.assertGreater(reliab, reliab_lower,
msg="Brier reliability %.5f < %.2f for random "
"logits" % (reliab, reliab_lower))
@parameterized.parameters(
(3, 50000), (5, 50000)
)
def test_brierreliab_calibrated(self, nclasses, nsamples):
logits, labels = self._generate_perfect_calibration_logits(
nsamples, nclasses)
brierreliab_stat = stats.BrierReliability()
brierreliab_stat.reset()
brierreliab_stat.update(logits, labels)
reliab = float(brierreliab_stat.result())
reliab_tolerance = 0.1
self.assertLess(reliab, reliab_tolerance,
msg="Brier reliability %.5f > %.2f for perfectly "
"calibrated logits" % (reliab, reliab_tolerance))
@parameterized.parameters(
(5, 3, 50000), (10, 5, 50000)
)
def test_ece_poorly_calibrated(self, num_bins, nclasses, nsamples):
logits, labels = self._generate_random_calibration_logits(
nsamples, nclasses)
ece_stat = stats.ECE(num_bins)
ece_stat.reset()
ece_stat.update(logits, labels)
ece = float(ece_stat.result())
ece_lower = 0.2
self.assertGreater(ece, ece_lower, msg="ECE %.5f < %.2f for random "
"logits" % (ece, ece_lower))
def test_standarddeviation(self):
logits = tf.math.log([[0.3, 0.7], [0.3, 0.7]])
labels = tf.convert_to_tensor([0, 1], dtype=tf.int32)
caccuracy = stats.Accuracy()
caccuracy.reset()
caccuracy.update(logits, labels)
accuracy = caccuracy.result()
self.assertEqual(0.0, float(accuracy[0]))
self.assertEqual(1.0, float(accuracy[1]))
accstddev = stats.StandardDeviation(stats.Accuracy())
accstddev.reset()
accstddev.update(logits, labels)
stddev = accstddev.result()
self.assertAlmostEqual(0.5*math.sqrt(2.0), float(stddev), delta=TOL)
def test_standarderror(self):
logits = tf.math.log([[0.3, 0.7], [0.3, 0.7]])
labels = tf.convert_to_tensor([0, 1], dtype=tf.int32)
accsem = stats.StandardError(stats.Accuracy())
accsem.reset()
accsem.update(logits, labels)
sem = accsem.result()
self.assertAlmostEqual(0.5, float(sem), delta=TOL)
def test_classification_accuracy(self):
logits1 = tf.math.log([[0.3, 0.7], [0.3, 0.7]])
logits2 = tf.math.log([[0.2, 0.8], [0.6, 0.4]])
logits3 = tf.math.log([[0.4, 0.6], [0.4, 0.6]])
labels = tf.convert_to_tensor([0, 1], dtype=tf.int32)
caccuracy = stats.Accuracy()
caccuracy.reset()
caccuracy.update(logits1, labels)
caccuracy.update(logits2, labels)
caccuracy.update(logits3, labels)
accuracy = caccuracy.result()
self.assertEqual(0.0, float(accuracy[0]))
self.assertEqual(1.0, float(accuracy[1]))
gaccuracy = stats.GibbsAccuracy()
gaccuracy.reset()
gaccuracy.update(logits1, labels)
gaccuracy.update(logits2, labels)
gaccuracy.update(logits3, labels)
accuracy = gaccuracy.result()
self.assertEqual(0.0, float(accuracy[0]))
self.assertAlmostEqual(0.666666667, float(accuracy[1]), delta=TOL)
def test_classification_ce(self):
cce = stats.ClassificationCrossEntropy()
logits1 = tf.math.log([[0.3, 0.7], [0.6, 0.4]])
logits2 = tf.math.log([[0.2, 0.8], [0.5, 0.5]])
logits3 = tf.math.log([[0.4, 0.6], [0.4, 0.6]])
labels = tf.convert_to_tensor([1, 0], dtype=tf.int32)
cce.reset()
cce.update(logits1, labels)
cce.update(logits2, labels)
cce.update(logits3, labels)
ce = cce.result()
self.assertAlmostEqual(-math.log(0.7), float(ce[0]), delta=TOL)
self.assertAlmostEqual(-math.log(0.5), float(ce[1]), delta=TOL)
ces = []
gce = stats.ClassificationGibbsCrossEntropy()
gce.reset()
for logits in [logits1, logits2, logits3]:
cce.reset()
cce.update(logits, labels)
ces.append(cce.result())
gce.update(logits, labels)
self.assertAllClose(
tf.reduce_mean(tf.stack(ces, axis=0), axis=0),
gce.result(),
atol=TOL,
msg="Gibbs cross entropy does not match mean CE.")
REGRESSION_MODEL_OUTPUT_TYPES = ["tensors", "dists"]
def NewRegressionModelOutputs(tensor_model_outputs, model_output_type="tensors",
outputs_with_log_stddevs=False, stddev=1.0):
model_outputs = None
if model_output_type == "tensors":
model_outputs = tensor_model_outputs
elif model_output_type == "dists":
if outputs_with_log_stddevs:
n_targets = tensor_model_outputs.shape[-1] // 2
model_outputs = tfd.Normal(tensor_model_outputs[:, :, :n_targets],
tf.exp(tensor_model_outputs[:, :, n_targets:]))
else:
model_outputs = tfd.Normal(tensor_model_outputs, stddev)
else:
raise Exception("Unknown model_output_type: {}".format(model_output_type))
return model_outputs
class RegressionOutputsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(itertools.product(REGRESSION_MODEL_OUTPUT_TYPES))
def test_regression_outputs_only_means_1d(self, model_output_type):
tensor_model_outputs = tf.constant([
[[0.3], [0.6]], # Member 0, Example 0 and 1
[[0.2], [0.5]], # Member 1, Example 0 and 1
[[0.4], [0.4]], # Member 2, Example 0 and 1
])
model_outputs = NewRegressionModelOutputs(tensor_model_outputs,
model_output_type)
ens_reg_outputs = stats.RegressionOutputs()
ens_reg_outputs.update(model_outputs[0])
ens_reg_outputs.update(model_outputs[1])
ens_reg_outputs.update(model_outputs[2])
means, variances = ens_reg_outputs.result()
self.assertAlmostEqual(0.3, float(means[0][0]), delta=TOL)
self.assertAlmostEqual(0.5, float(means[1][0]), delta=TOL)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.3, 0.2, 0.4],
stddevs=[1.0, 1.0, 1.0])
self.assertAlmostEqual(
float(expected_variance), float(variances[0][0]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.6, 0.5, 0.4],
stddevs=[1.0, 1.0, 1.0])
self.assertAlmostEqual(
float(expected_variance), float(variances[1][0]), delta=1e-5)
@parameterized.parameters(itertools.product(REGRESSION_MODEL_OUTPUT_TYPES))
def test_regression_outputs_only_means_2d_diff_stddev(self,
model_output_type):
tensor_model_outputs = tf.constant([
[[0.3, 0.4], [1.6, 0.6]], # Member 0, Example 0 and 1
[[0.2, 0.2], [0.8, 0.5]], # Member 1, Example 0 and 1
[[0.4, 0.6], [2.4, 0.4]], # Member 2, Example 0 and 1
])
model_outputs = NewRegressionModelOutputs(tensor_model_outputs,
model_output_type,
stddev=0.1)
ens_reg_outputs = stats.RegressionOutputs(stddev=0.1)
ens_reg_outputs.update(model_outputs[0])
ens_reg_outputs.update(model_outputs[1])
ens_reg_outputs.update(model_outputs[2])
means, variances = ens_reg_outputs.result()
self.assertAlmostEqual(0.3, float(means[0][0]), delta=TOL)
self.assertAlmostEqual(0.4, float(means[0][1]), delta=TOL)
self.assertAlmostEqual(1.6, float(means[1][0]), delta=TOL)
self.assertAlmostEqual(0.5, float(means[1][1]), delta=TOL)
# Expected mixture, does not have to use normal distributions
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.3, 0.2, 0.4],
stddevs=[0.1, 0.1, 0.1])
self.assertAlmostEqual(
float(expected_variance), float(variances[0][0]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.4, 0.2, 0.6],
stddevs=[0.1, 0.1, 0.1])
self.assertAlmostEqual(
float(expected_variance), float(variances[0][1]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[1.6, 0.8, 2.4],
stddevs=[0.1, 0.1, 0.1])
self.assertAlmostEqual(
float(expected_variance), float(variances[1][0]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.6, 0.5, 0.4],
stddevs=[0.1, 0.1, 0.1])
self.assertAlmostEqual(
float(expected_variance), float(variances[1][1]), delta=1e-5)
@parameterized.parameters(itertools.product(REGRESSION_MODEL_OUTPUT_TYPES))
def test_regression_outputs_means_and_variances_2d(self, model_output_type):
tensor_model_outputs = tf.constant([
[ # member 0 tensor_model_outputs
[0.3, 0.4, np.log(0.01), np.log(0.02)], # Example 0
[1.6, 0.6, np.log(2.0), np.log(0.01)], # Example 1
],
[ # member 1 tensor_model_outputs
[0.2, 0.2, np.log(0.1), np.log(0.2)], # Example 0
[0.8, 0.5, np.log(0.5), np.log(0.2)], # Example 1
],
[ # member 2 tensor_model_outputs
[0.4, 0.6, np.log(1.0), np.log(1.5)], # Example 0
[2.4, 0.4, np.log(0.05), np.log(0.1)], # Example 1
]
])
model_outputs = NewRegressionModelOutputs(tensor_model_outputs,
model_output_type,
outputs_with_log_stddevs=True)
ens_reg_outputs = stats.RegressionOutputs(outputs_with_log_stddevs=True)
ens_reg_outputs.update(model_outputs[0]) # Member 0 outputs
ens_reg_outputs.update(model_outputs[1]) # Member 1 outputs
ens_reg_outputs.update(model_outputs[2]) # Member 2 outputs
means, variances = ens_reg_outputs.result()
self.assertAlmostEqual(0.3, float(means[0][0]), delta=TOL)
self.assertAlmostEqual(0.4, float(means[0][1]), delta=TOL)
self.assertAlmostEqual(1.6, float(means[1][0]), delta=TOL)
self.assertAlmostEqual(0.5, float(means[1][1]), delta=TOL)
# Expected mixture, does not have to use normal distributions
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.3, 0.2, 0.4],
stddevs=[0.01, 0.1, 1.0])
self.assertAlmostEqual(
float(expected_variance), float(variances[0][0]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.4, 0.2, 0.6],
stddevs=[0.02, 0.2, 1.5])
self.assertAlmostEqual(
float(expected_variance), float(variances[0][1]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[1.6, 0.8, 2.4],
stddevs=[2.0, 0.5, 0.05])
self.assertAlmostEqual(
float(expected_variance), float(variances[1][0]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.6, 0.5, 0.4],
stddevs=[0.01, 0.2, 0.1])
self.assertAlmostEqual(
float(expected_variance), float(variances[1][1]), delta=1e-5)
@staticmethod
def _get_mixture_variance(probs, means, stddevs):
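    # Reference mixture variance computed with tfp; by the law of total variance this equals
    # sum_i p_i*(stddev_i**2 + mean_i**2) - (sum_i p_i*mean_i)**2.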
assert len(probs) == len(means) == len(stddevs)
n = len(probs)
components = []
for i in range(n):
components.append(tfd.Normal(loc=means[i], scale=stddevs[i]))
mixture = tfd.Mixture(
cat=tfd.Categorical(probs=probs), components=components)
variance = mixture.variance()
return variance
class RegressionNormalLogProbTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(itertools.product(REGRESSION_MODEL_OUTPUT_TYPES))
def test_regression_normal_log_prob_means_and_stddevs_2d(self,
model_output_type):
tensor_model_outputs = tf.constant([
[[0.3, 0.4, np.log(0.01), np.log(0.02)],
[1.6, 0.6, np.log(2.0), np.log(0.01)]],
[[0.2, 0.2, np.log(0.1), np.log(0.2)],
[0.8, 0.5, np.log(0.5), np.log(0.2)]],
[[0.4, 0.6, np.log(1.0), np.log(1.5)],
[2.4, 0.4, np.log(0.05), np.log(0.1)]],
])
labels = tf.constant([[0.2, 0.4], [1.4, 1.0]])
model_outputs = NewRegressionModelOutputs(tensor_model_outputs,
model_output_type,
outputs_with_log_stddevs=True)
ens_reg_outputs = stats.RegressionOutputs(outputs_with_log_stddevs=True)
ens_reg_outputs.update(model_outputs[0])
ens_reg_outputs.update(model_outputs[1])
ens_reg_outputs.update(model_outputs[2])
means, variances = ens_reg_outputs.result()
expected_nll = -tfd.Normal(means, variances**0.5).log_prob(labels)
rnlls = stats.RegressionNormalLogProb(outputs_with_log_stddevs=True)
rnlls.update(model_outputs[0], labels)
rnlls.update(model_outputs[1], labels)
rnlls.update(model_outputs[2], labels)
nlls = rnlls.result()
self.assertAllClose(expected_nll, nlls, atol=TOL)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
|
py | 1a43281e9fdca7af7efb2a4e74e82b1513bbd962 | from diffusion.model import energy
print(energy([5, 6, 7, 8, 0, 1]))
|
py | 1a4328ac9c9bf973b68eff9cfc06247a54f76afd | import biom
import pandas as pd
import numpy as np
import tensorflow as tf
from skbio import OrdinationResults
from qiime2.plugin import Metadata
from mmvec.multimodal import MMvec
from mmvec.util import split_tables
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import svds
def paired_omics(microbes: biom.Table,
metabolites: biom.Table,
metadata: Metadata = None,
training_column: str = None,
num_testing_examples: int = 5,
min_feature_count: int = 10,
epochs: int = 100,
batch_size: int = 50,
latent_dim: int = 3,
input_prior: float = 1,
output_prior: float = 1,
learning_rate: float = 1e-5,
summary_interval: int = 60) -> (
pd.DataFrame, OrdinationResults
):
if metadata is not None:
metadata = metadata.to_dataframe()
# Note: there are a couple of biom -> pandas conversions taking
# place here. This is currently done on purpose, since we
# haven't figured out how to handle sparse matrix multiplication
# in the context of this algorithm. That is a future consideration.
res = split_tables(
microbes, metabolites,
metadata=metadata, training_column=training_column,
num_test=num_testing_examples,
min_samples=min_feature_count)
(train_microbes_df, test_microbes_df,
train_metabolites_df, test_metabolites_df) = res
train_microbes_coo = coo_matrix(train_microbes_df.values)
test_microbes_coo = coo_matrix(test_microbes_df.values)
with tf.Graph().as_default(), tf.Session() as session:
model = MMvec(
latent_dim=latent_dim,
u_scale=input_prior, v_scale=output_prior,
learning_rate=learning_rate)
model(session,
train_microbes_coo, train_metabolites_df.values,
test_microbes_coo, test_metabolites_df.values)
loss, cv = model.fit(epoch=epochs, summary_interval=summary_interval)
ranks = pd.DataFrame(model.ranks(), index=train_microbes_df.columns,
columns=train_metabolites_df.columns)
u, s, v = svds(ranks - ranks.mean(axis=0), k=latent_dim)
ranks = ranks.T
ranks.index.name = 'featureid'
s = s[::-1]
u = u[:, ::-1]
v = v[::-1, :]
microbe_embed = u @ np.diag(s)
metabolite_embed = v.T
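    # The centered rank matrix is factorised with a truncated SVD; microbes are embedded at
    # U @ diag(s) and metabolites at V, and these become the biplot coordinates below.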
pc_ids = ['PC%d' % i for i in range(microbe_embed.shape[1])]
features = pd.DataFrame(
microbe_embed, columns=pc_ids,
index=train_microbes_df.columns)
samples = pd.DataFrame(
metabolite_embed, columns=pc_ids,
index=train_metabolites_df.columns)
short_method_name = 'mmvec biplot'
long_method_name = 'Multiomics mmvec biplot'
eigvals = pd.Series(s, index=pc_ids)
proportion_explained = pd.Series(s**2 / np.sum(s**2), index=pc_ids)
biplot = OrdinationResults(
short_method_name, long_method_name, eigvals,
samples=samples, features=features,
proportion_explained=proportion_explained)
return ranks, biplot
|
py | 1a432d2a501f49acdd528577aa67bc4a0ec44f76 | # coding=utf-8
# Copyright (c) DIRECT Contributors
"""Tests for the direct.common.subsample module"""
# Code and comments can be shared with code of FastMRI under the same MIT license:
# https://github.com/facebookresearch/fastMRI/
# The code has been adjusted to our needs.
import numpy as np
import pytest
import torch
from direct.common.subsample import FastMRIRandomMaskFunc, RadialMaskFunc, SpiralMaskFunc
@pytest.mark.parametrize(
"center_fracs, accelerations, batch_size, dim",
[
([0.2], [4], 4, 320),
([0.2, 0.4], [4, 8], 2, 368),
],
)
def test_fastmri_random_mask_reuse(center_fracs, accelerations, batch_size, dim):
mask_func = FastMRIRandomMaskFunc(center_fracs, accelerations)
shape = (batch_size, dim, dim, 2)
mask1 = mask_func(shape, seed=123)
mask2 = mask_func(shape, seed=123)
mask3 = mask_func(shape, seed=123)
assert torch.all(mask1 == mask2)
assert torch.all(mask2 == mask3)
@pytest.mark.parametrize(
"center_fracs, accelerations, batch_size, dim",
[
([0.2], [4], 4, 320),
([0.2, 0.4], [4, 8], 2, 368),
],
)
def test_fastmri_random_mask_low_freqs(center_fracs, accelerations, batch_size, dim):
mask_func = FastMRIRandomMaskFunc(center_fracs, accelerations)
shape = (batch_size, dim, dim, 2)
mask = mask_func(shape, seed=123)
mask_shape = [1] * (len(shape) + 1)
mask_shape[-2] = dim
mask_shape[-3] = dim
assert list(mask.shape) == mask_shape
num_low_freqs_matched = False
for center_frac in center_fracs:
num_low_freqs = int(round(dim * center_frac))
pad = (dim - num_low_freqs + 1) // 2
if np.all(mask[pad : pad + num_low_freqs].numpy() == 1):
num_low_freqs_matched = True
assert num_low_freqs_matched
@pytest.mark.parametrize(
"shape, center_fractions, accelerations",
[
([4, 32, 32, 2], [0.08], [4]),
([2, 64, 64, 2], [0.04, 0.08], [8, 4]),
],
)
def test_apply_mask_fastmri(shape, center_fractions, accelerations):
mask_func = FastMRIRandomMaskFunc(
center_fractions=center_fractions,
accelerations=accelerations,
uniform_range=False,
)
mask = mask_func(shape[1:], seed=123)
acs_mask = mask_func(shape[1:], seed=123, return_acs=True)
expected_mask_shape = (1, shape[1], shape[2], 1)
assert mask.max() == 1
assert mask.min() == 0
assert mask.shape == expected_mask_shape
assert np.allclose(mask & acs_mask, acs_mask)
@pytest.mark.parametrize(
"shape, center_fractions, accelerations",
[
([4, 32, 32, 2], [0.08], [4]),
([2, 64, 64, 2], [0.04, 0.08], [8, 4]),
],
)
def test_same_across_volumes_mask_fastmri(shape, center_fractions, accelerations):
mask_func = FastMRIRandomMaskFunc(
center_fractions=center_fractions,
accelerations=accelerations,
uniform_range=False,
)
num_slices = shape[0]
masks = [mask_func(shape[1:], seed=123) for _ in range(num_slices)]
assert all(np.allclose(masks[_], masks[_ + 1]) for _ in range(num_slices - 1))
@pytest.mark.parametrize(
"shape, accelerations",
[
([4, 32, 32, 2], [4]),
([2, 64, 64, 2], [8, 4]),
],
)
def test_apply_mask_radial(shape, accelerations):
mask_func = RadialMaskFunc(
accelerations=accelerations,
)
mask = mask_func(shape[1:], seed=123)
acs_mask = mask_func(shape[1:], seed=123, return_acs=True)
expected_mask_shape = (1, shape[1], shape[2], 1)
assert mask.max() == 1
assert mask.min() == 0
assert mask.shape == expected_mask_shape
assert np.allclose(mask & acs_mask, acs_mask)
@pytest.mark.parametrize(
"shape, accelerations",
[
([4, 32, 32, 2], [4]),
([2, 64, 64, 2], [8, 4]),
],
)
def test_same_across_volumes_mask_radial(shape, accelerations):
mask_func = RadialMaskFunc(
accelerations=accelerations,
)
num_slices = shape[0]
masks = [mask_func(shape[1:], seed=123) for _ in range(num_slices)]
assert all(np.allclose(masks[_], masks[_ + 1]) for _ in range(num_slices - 1))
@pytest.mark.parametrize(
"shape, accelerations",
[
([4, 32, 32, 2], [4]),
([2, 64, 64, 2], [8, 4]),
],
)
def test_apply_mask_spiral(shape, accelerations):
mask_func = SpiralMaskFunc(
accelerations=accelerations,
)
mask = mask_func(shape[1:], seed=123)
acs_mask = mask_func(shape[1:], seed=123, return_acs=True)
expected_mask_shape = (1, shape[1], shape[2], 1)
assert mask.max() == 1
assert mask.min() == 0
assert mask.shape == expected_mask_shape
assert np.allclose(mask & acs_mask, acs_mask)
@pytest.mark.parametrize(
"shape, accelerations",
[
([4, 32, 32, 2], [4]),
([2, 64, 64, 2], [8, 4]),
],
)
def test_same_across_volumes_mask_spiral(shape, accelerations):
mask_func = SpiralMaskFunc(
accelerations=accelerations,
)
num_slices = shape[0]
masks = [mask_func(shape[1:], seed=123) for _ in range(num_slices)]
assert all(np.allclose(masks[_], masks[_ + 1]) for _ in range(num_slices - 1))
|
py | 1a432d87fe77eb1044ad5655e5199368d7b35208 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020-2022 Francesco Di Lauro. All Rights Reserved.
See Licence file for details.
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import sys
sys.path.append('../')
from Likelihood import log_likelihood_models
from PDE_solver import SIR_PDEroutine
from Selkealgo import Sellke_algo
#This is a fully workout example that uses all the methods of this repo
#It is divided into three parts:
# 1) Generate some Data according to some parameters
# 2) Solve the PDE with the same parameters to compare it to data
# 3) Use the likelihood to infer the parameters from the data and plot them
#These three bits are independent, so if one is interested only in solving the
#PDE, they can copy and adapt the relative bit on a new code
if __name__ == "__main__":
np.random.seed(3) #Initialize random seed for reproducibility
#1) Generate some data. In this example, we choose gamma distribution for
#infectious period and exponential distribution for contact intervals
N=10000
T_f = 150
beta = 0.2 #infectiousness
mean = 9 #mean infectious period
variance =6 #variance of infectious period distribution
    scale = variance/mean # inverse rate
a = mean**2/variance #shape
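    # (a gamma distribution with shape a and scale has mean = a*scale and
    #  variance = a*scale**2, which inverts to the two expressions above)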
I_0 = 20 #Initial number of infected people
tau = beta/(N-1) #This is because there is a factor N in the Sellke construction
check_data=False
#First thing one needs is to generate the infectious periods for each node
#Not all the nodes will use that, as not everyone will get infected.
T_recover = stats.gamma.rvs(a=a,scale=scale, size=N)
while check_data==False:
time,I,S,R=Sellke_algo(tau,I_0,N,T_f,T_recover,showplot=False,return_full=False)
if len(time)>200: #Make sure this epidemic is not dying out
check_data=True
plt.figure()
plt.plot(time,I/N, color='black', label='data') #Plot Data
plt.xlim(0,150)
plt.ylim(0)
#If you want to save the data in a Dataframe format
#time, I, S
#data= np.c_[np.array(time),np.array(I),np.array(S)]
#name = "Example data,csv"
#np.savetxt(name,zipped, header='time,I,S')
#======================================================================
#2) solution of the PDE with the true parameters
#We need two quantities:
#1) infectious period/recovery time hazard function
#2) contact interval hazard functions
#Note, in general one can use the fact that the hazard function is
# pdf/survival function
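    #i.e. hazard(u) = pdf(u) / (1 - cdf(u)); below this gives the constant rate beta for the
    #exponential contact intervals and the regularised gamma ratio for the recovery times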
def rec_haz(u, *recovDistParams):
a = float(recovDistParams[0])
scale = float(recovDistParams[1])
tol = 1e-10
#Basically: use de l'hopital when the ratio becomes 0/0
#Otherwise go with definition. This regularises a lot the numerics
x = np.where(stats.gamma.cdf(u,a=a,scale=scale)>1-tol,
1/scale - (a-1)/u,
stats.gamma.pdf(u,a=a,scale=scale)/(1- stats.gamma.cdf(u,a=a,scale=scale)))
return x
def inf_haz(u,*CIdistParms):
beta = float(CIdistParms[0])
return beta*np.ones_like(u)
    grids=T_f*20 #How fine the time solver grid (finer -> more precise but more time)
rho = I_0/(N-I_0)
pde = SIR_PDEroutine(rho, CIdist=inf_haz, CIdistParms=[beta],\
recovDist=rec_haz, recovDistParms=[a, scale],\
nTgrid=grids, nUgrid=grids, T=T_f)
#Initial condition is a vector as long as the grid that contains the distribution
#of recovery times of initially infected individuals
    #In this case it should be a delta at 0.
initial_condition=np.zeros_like(pde.tgrids)
initial_condition[0]=1
#Solve the PDE
S_pde,I_pde=pde.finDiffUpdate(intiial_condition=initial_condition)
plt.plot(pde.tgrids,I_pde, color='b', label= 'PDE')
#======================================================================
#3) Maximise the likelihood with infection and recovery times
#We use infection and recovery times from the data generated
infection_times=time[np.where(np.diff(I)>0)]
recovery_times=time[np.where(np.diff(I)<0)]
#We need also the recovery distribution to run the likelihood
def rec_distr(u, *recovDistParams):
a = float(recovDistParams[0])
scale = float(recovDistParams[1])
return stats.gamma.pdf(u,a=a,scale=scale)
ll=log_likelihood_models(grids,hazard_inf=inf_haz,hazard_rec=rec_haz,
rec_distr = rec_distr,
T=T_f, infect_times=infection_times,recov_times=recovery_times,hazard_inf_par=1,rec_parms=2)
result = ll.minimize_likelihood(np.array([5e-4,0.01,1,0.1]), np.array([1e-2,2,20,1]))
parameters=result.x
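    # The fitted vector is used below in the order [initial rho, beta, gamma shape a, gamma scale]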
#Plot the MLE
pde = SIR_PDEroutine(parameters[0], CIdist=inf_haz, CIdistParms=[parameters[1]],\
recovDist=rec_haz, recovDistParms=[parameters[2], parameters[3]],\
nTgrid=grids, nUgrid=grids, T=T_f)
    #Initial condition in this case should be a delta at 0.
initial_condition=np.zeros_like(pde.tgrids)
initial_condition[0]=1
#Solve the PDE
S_mle,I_mle=pde.finDiffUpdate(intiial_condition=initial_condition)
plt.plot(pde.tgrids,I_mle, color='r', label= 'MLE')
plt.legend()
|
py | 1a432deba35933a6c47c02da8075c8ee82b246a5 | import pickle
from datetime import date
from pytest import raises, fixture
from elasticsearch_dsl import response, Search, Document, Date, Object
from elasticsearch_dsl.aggs import Terms
from elasticsearch_dsl.response.aggs import AggResponse, BucketData, Bucket
@fixture
def agg_response(aggs_search, aggs_data):
return response.Response(aggs_search, aggs_data)
def test_agg_response_is_pickleable(agg_response):
agg_response.hits
r = pickle.loads(pickle.dumps(agg_response))
assert r == agg_response
assert r._search == agg_response._search
assert r.hits == agg_response.hits
def test_response_is_pickleable(dummy_response):
res = response.Response(Search(), dummy_response)
res.hits
r = pickle.loads(pickle.dumps(res))
assert r == res
assert r._search == res._search
assert r.hits == res.hits
def test_hit_is_pickleable(dummy_response):
res = response.Response(Search(), dummy_response)
hits = pickle.loads(pickle.dumps(res.hits))
assert hits == res.hits
assert hits[0].meta == res.hits[0].meta
def test_response_stores_search(dummy_response):
s = Search()
r = response.Response(s, dummy_response)
assert r._search is s
def test_attribute_error_in_hits_is_not_hidden(dummy_response):
def f(hit):
raise AttributeError()
s = Search().doc_type(employee=f)
r = response.Response(s, dummy_response)
with raises(TypeError):
r.hits
def test_interactive_helpers(dummy_response):
res = response.Response(Search(), dummy_response)
hits = res.hits
h = hits[0]
rhits = "[<Hit(test-index/company/elasticsearch): {}>, <Hit(test-index/employee/42): {}...}}>, <Hit(test-index/employee/47): {}...}}>, <Hit(test-index/employee/53): {{}}>]".format(
repr(dummy_response['hits']['hits'][0]['_source']),
repr(dummy_response['hits']['hits'][1]['_source'])[:60],
repr(dummy_response['hits']['hits'][2]['_source'])[:60],
)
assert res
assert '<Response: %s>' % rhits == repr(res)
assert rhits == repr(hits)
assert {'meta', 'city', 'name'} == set(dir(h))
assert "<Hit(test-index/company/elasticsearch): %r>" % dummy_response['hits']['hits'][0]['_source'] == repr(h)
def test_empty_response_is_false(dummy_response):
dummy_response['hits']['hits'] = []
res = response.Response(Search(), dummy_response)
assert not res
def test_len_response(dummy_response):
res = response.Response(Search(), dummy_response)
assert len(res) == 4
def test_iterating_over_response_gives_you_hits(dummy_response):
res = response.Response(Search(), dummy_response)
hits = list(h for h in res)
assert res.success()
assert 123 == res.took
assert 4 == len(hits)
assert all(isinstance(h, response.Hit) for h in hits)
h = hits[0]
assert 'test-index' == h.meta.index
assert 'company' == h.meta.doc_type
assert 'elasticsearch' == h.meta.id
assert 12 == h.meta.score
assert hits[1].meta.routing == 'elasticsearch'
def test_hits_get_wrapped_to_contain_additional_attrs(dummy_response):
res = response.Response(Search(), dummy_response)
hits = res.hits
assert 123 == hits.total
assert 12.0 == hits.max_score
def test_hits_provide_dot_and_bracket_access_to_attrs(dummy_response):
res = response.Response(Search(), dummy_response)
h = res.hits[0]
assert 'Elasticsearch' == h.name
assert 'Elasticsearch' == h['name']
assert 'Honza' == res.hits[2].name.first
with raises(KeyError):
h['not_there']
with raises(AttributeError):
h.not_there
def test_slicing_on_response_slices_on_hits(dummy_response):
res = response.Response(Search(), dummy_response)
assert res[0] is res.hits[0]
assert res[::-1] == res.hits[::-1]
def test_aggregation_base(agg_response):
assert agg_response.aggs is agg_response.aggregations
assert isinstance(agg_response.aggs, response.AggResponse)
def test_metric_agg_works(agg_response):
assert 25052.0 == agg_response.aggs.sum_lines.value
def test_aggregations_can_be_iterated_over(agg_response):
aggs = [a for a in agg_response.aggs]
assert len(aggs) == 3
assert all(map(lambda a: isinstance(a, AggResponse), aggs))
def test_aggregations_can_be_retrieved_by_name(agg_response, aggs_search):
a = agg_response.aggs['popular_files']
assert isinstance(a, BucketData)
assert isinstance(a._meta['aggs'], Terms)
assert a._meta['aggs'] is aggs_search.aggs.aggs['popular_files']
def test_bucket_response_can_be_iterated_over(agg_response):
popular_files = agg_response.aggregations.popular_files
buckets = [b for b in popular_files]
assert all(isinstance(b, Bucket) for b in buckets)
assert buckets == popular_files.buckets
def test_bucket_keys_get_deserialized(aggs_data, aggs_search):
class Commit(Document):
info = Object(properties={'committed_date': Date()})
class Index:
name = 'test-commit'
aggs_search = aggs_search.doc_type(Commit)
agg_response = response.Response(aggs_search, aggs_data)
per_month = agg_response.aggregations.per_month
for b in per_month:
assert isinstance(b.key, date)
|
py | 1a432ea65bf52451433d73c6e99305740106cf26 | SITEURL = ""
SITENAME = "pelican-jupyter-test"
PATH = "content"
LOAD_CONTENT_CACHE = False
TIMEZONE = "UTC"
DEFAULT_LANG = "en"
THEME = "notmyidea"
# Plugin config
MARKUP = ("md", "ipynb")
from pelican_jupyter import markup as nb_markup # noqa
PLUGINS = [nb_markup]
IPYNB_MARKUP_USE_FIRST_CELL = True
IGNORE_FILES = [".ipynb_checkpoints"]
|
py | 1a43300865da656e1e370dd883a42b61ccf81b1a | from app import create_app
from flask_script import Manager,Server
# Creating app instance
app = create_app('development')
manager = Manager(app)
manager.add_command('server',Server)
if __name__ == '__main__':
manager.run()
|
py | 1a4330d2e5dad80bb6cec72d4b966a8f73b03e7d | import ctypes
import struct
# 3p
import bson
from bson.codec_options import CodecOptions
from bson.son import SON
# project
from ...ext import net as netx
from ...internal.compat import to_unicode
from ...internal.logger import get_logger
log = get_logger(__name__)
# MongoDB wire protocol commands
# http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
OP_CODES = {
1: "reply",
1000: "msg", # DEV: 1000 was deprecated at some point, use 2013 instead
2001: "update",
2002: "insert",
2003: "reserved",
2004: "query",
2005: "get_more",
2006: "delete",
2007: "kill_cursors",
2010: "command",
2011: "command_reply",
2013: "msg",
}
# The maximum message length we'll try to parse
MAX_MSG_PARSE_LEN = 1024 * 1024
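# The standard wire-protocol header is four little-endian int32s:
# messageLength, requestID, responseTo and opCode (unpacked in parse_msg below).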
header_struct = struct.Struct("<iiii")
class Command(object):
"""Command stores information about a pymongo network command,"""
__slots__ = ["name", "coll", "db", "tags", "metrics", "query"]
def __init__(self, name, db, coll):
self.name = name
self.coll = coll
self.db = db
self.tags = {}
self.metrics = {}
self.query = None
def __repr__(self):
return ("Command(" "name=%s," "db=%s," "coll=%s)") % (self.name, self.db, self.coll)
def parse_msg(msg_bytes):
"""Return a command from a binary mongo db message or None if we shouldn't
trace it. The protocol is documented here:
http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
"""
# NOTE[matt] this is used for queries in pymongo <= 3.0.0 and for inserts
# in up to date versions.
msg_len = len(msg_bytes)
if msg_len <= 0:
return None
header = header_struct.unpack_from(msg_bytes, 0)
(length, req_id, response_to, op_code) = header
op = OP_CODES.get(op_code)
if not op:
log.debug("unknown op code: %s", op_code)
return None
db = None
coll = None
offset = header_struct.size
cmd = None
if op == "query":
# NOTE[matt] inserts, updates and queries can all use this opcode
offset += 4 # skip flags
ns = _cstring(msg_bytes[offset:])
offset += len(ns) + 1 # include null terminator
# note: here coll could be '$cmd' because it can be overridden in the
# query itself (like {'insert':'songs'})
db, coll = _split_namespace(ns)
offset += 8 # skip numberToSkip & numberToReturn
if msg_len <= MAX_MSG_PARSE_LEN:
# FIXME[matt] don't try to parse large messages for performance
# reasons. ideally we'd just peek at the first bytes to get
# the critical info (op type, collection, query, # of docs)
# rather than parse the whole thing. i suspect only massive
# inserts will be affected.
codec = CodecOptions(SON)
spec = next(bson.decode_iter(msg_bytes[offset:], codec_options=codec))
cmd = parse_spec(spec, db)
else:
# let's still note that a command happened.
cmd = Command("command", db, "untraced_message_too_large")
# If the command didn't contain namespace info, set it here.
if not cmd.coll:
cmd.coll = coll
elif op == "msg":
# Skip header and flag bits
offset += 4
# Parse the msg kind
kind = ord(msg_bytes[offset : offset + 1])
offset += 1
# Kinds: https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#sections
# - 0: BSON Object
# - 1: Document Sequence
if kind == 0:
if msg_len <= MAX_MSG_PARSE_LEN:
codec = CodecOptions(SON)
spec = next(bson.decode_iter(msg_bytes[offset:], codec_options=codec))
cmd = parse_spec(spec, db)
else:
# let's still note that a command happened.
cmd = Command("command", db, "untraced_message_too_large")
else:
# let's still note that a command happened.
cmd = Command("command", db, "unsupported_msg_kind")
if cmd:
cmd.metrics[netx.BYTES_OUT] = msg_len
return cmd
def parse_query(query):
"""Return a command parsed from the given mongo db query."""
db, coll = None, None
ns = getattr(query, "ns", None)
if ns:
# version < 3.1 stores the full namespace
db, coll = _split_namespace(ns)
else:
# version >= 3.1 stores the db and coll separately
coll = getattr(query, "coll", None)
db = getattr(query, "db", None)
# pymongo < 3.1 _Query does not have a name field, so default to 'query'
cmd = Command(getattr(query, "name", "query"), db, coll)
cmd.query = query.spec
return cmd
def parse_spec(spec, db=None):
"""Return a Command that has parsed the relevant detail for the given
pymongo SON spec.
"""
# the first element is the command and collection
items = list(spec.items())
if not items:
return None
name, coll = items[0]
cmd = Command(name, db or spec.get("$db"), coll)
if "ordered" in spec: # in insert and update
cmd.tags["mongodb.ordered"] = spec["ordered"]
if cmd.name == "insert":
if "documents" in spec:
cmd.metrics["mongodb.documents"] = len(spec["documents"])
elif cmd.name == "update":
updates = spec.get("updates")
if updates:
# FIXME[matt] is there ever more than one here?
cmd.query = updates[0].get("q")
elif cmd.name == "delete":
dels = spec.get("deletes")
if dels:
# FIXME[matt] is there ever more than one here?
cmd.query = dels[0].get("q")
return cmd
def _cstring(raw):
"""Return the first null terminated cstring from the buffer."""
return ctypes.create_string_buffer(raw).value
def _split_namespace(ns):
"""Return a tuple of (db, collection) from the 'db.coll' string."""
if ns:
# NOTE[matt] ns is unicode or bytes depending on the client version
# so force cast to unicode
split = to_unicode(ns).split(".", 1)
if len(split) == 1:
raise Exception("namespace doesn't contain period: %s" % ns)
return split
return (None, None)
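# Illustrative doctest-style sketch (added for clarity, not part of the original module): parsing
# the SON spec of an insert command built from standard MongoDB command fields.
# >>> from bson.son import SON
# >>> c = parse_spec(SON([("insert", "songs"), ("ordered", True), ("documents", [{"x": 1}])]), db="music")
# >>> (c.name, c.db, c.coll, c.tags["mongodb.ordered"], c.metrics["mongodb.documents"])
# ('insert', 'music', 'songs', True, 1)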
|
py | 1a4331328d36fb06af8e4cb0a953ed60d313294c | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import encode, cstr, cint, flt, comma_or
import openpyxl
import re
from openpyxl.styles import Font
from openpyxl import load_workbook
from six import StringIO, BytesIO, string_types
ILLEGAL_CHARACTERS_RE = re.compile(r'[\000-\010]|[\013-\014]|[\016-\037]')
# return xlsx file object
def make_xlsx(data, sheet_name, wb=None):
if wb is None:
wb = openpyxl.Workbook(write_only=True)
ws = wb.create_sheet(sheet_name, 0)
row1 = ws.row_dimensions[1]
row1.font = Font(name='Calibri',bold=True)
for row in data:
clean_row = []
for item in row:
if isinstance(item, string_types) and (sheet_name not in ['Data Import Template', 'Data Export']):
value = handle_html(item)
else:
value = item
if isinstance(item, string_types) and next(ILLEGAL_CHARACTERS_RE.finditer(value), None):
# Remove illegal characters from the string
value = re.sub(ILLEGAL_CHARACTERS_RE, '', value)
clean_row.append(value)
ws.append(clean_row)
xlsx_file = BytesIO()
wb.save(xlsx_file)
return xlsx_file
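# Minimal usage sketch (illustrative; assumes this module's frappe dependencies are importable).
# The "Data Export" sheet name skips the HTML cleanup path, so only openpyxl is exercised here;
# the output file name is a placeholder.
#
#   xlsx_file = make_xlsx([["Name", "Qty"], ["Apples", 5]], "Data Export")
#   with open("/tmp/report.xlsx", "wb") as f:
#       f.write(xlsx_file.getvalue())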
def handle_html(data):
# return if no html tags found
data = frappe.as_unicode(data)
if '<' not in data:
return data
if '>' not in data:
return data
from html2text import unescape, HTML2Text
h = HTML2Text()
h.unicode_snob = True
h = h.unescape(data or "")
obj = HTML2Text()
obj.ignore_links = True
obj.body_width = 0
value = obj.handle(h)
value = ", ".join(value.split(' \n'))
value = " ".join(value.split('\n'))
value = ", ".join(value.split('# '))
return value
def read_xlsx_file_from_attached_file(file_id=None, fcontent=None, filepath=None):
if file_id:
from frappe.utils.file_manager import get_file_path
filename = get_file_path(file_id)
elif fcontent:
from io import BytesIO
filename = BytesIO(fcontent)
elif filepath:
filename = filepath
else:
return
rows = []
wb1 = load_workbook(filename=filename, read_only=True, data_only=True)
ws1 = wb1.active
for row in ws1.iter_rows():
tmp_list = []
for cell in row:
tmp_list.append(cell.value)
rows.append(tmp_list)
return rows
|
py | 1a433198aaa75db822e600d84b7deeb9809a46c8 |
# coding: utf-8
# # Nengo Example: A Single Neuron
# This demo shows you how to construct and manipulate a single leaky integrate-and-fire (LIF) neuron. The LIF neuron is a simple, standard neuron model, and here it resides inside a neural population, even though there is only one neuron.
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
import nengo
# ##Step 1: Create the Neuron
# In[ ]:
from nengo.dists import Uniform
model = nengo.Network(label='A Single Neuron')
with model:
neuron = nengo.Ensemble(1, dimensions=1, # Represent a scalar
                        intercepts=Uniform(-.5, -.5), # Set intercept to -0.5
max_rates=Uniform(100, 100), # Set the maximum firing rate of the neuron to 100hz
                        encoders=[[1]]) # Sets the neuron's firing rate to increase for positive input
# ## Step 2: Provide Input to the Model
# Create an input node. The original example drives the neuron with a cosine wave; here a constant input of 240 is used instead.
# In[ ]:
with model:
    cos = nengo.Node(240)  # originally: lambda t: np.cos(8 * t)
# ##Step 3: Connect the Network Elements
# In[ ]:
with model:
# Connect the input signal to the neuron
nengo.Connection(cos, neuron)
# ##Step 4: Add Probes
# Anything that is probed will collect the data it produces over time, allowing us to analyze and visualize it later.
# In[ ]:
with model:
cos_probe = nengo.Probe(cos) # The original input
spikes = nengo.Probe(neuron.neurons) # The raw spikes from the neuron
voltage = nengo.Probe(neuron.neurons, 'voltage') # Subthreshold soma voltage of the neuron
filtered = nengo.Probe(neuron, synapse=0.01) # Spikes filtered by a 10ms post-synaptic filter
# ## Step 5: Run the Model
# In[ ]:
sim = nengo.Simulator(model) # Create the simulator
sim.run(1) # Run it for 1 seconds
# ##Step 6: Plot the Results
# In[ ]:
# Plot the decoded output of the ensemble
plt.plot(sim.trange(), sim.data[filtered])
plt.plot(sim.trange(), sim.data[cos_probe])
plt.xlim(0, 1)
# Plot the spiking output of the ensemble
from nengo.utils.matplotlib import rasterplot
plt.figure(figsize=(10, 8))
plt.subplot(221)
rasterplot(sim.trange(), sim.data[spikes])
plt.ylabel("Neuron")
plt.xlim(0, 1)
# Plot the soma voltages of the neurons
plt.subplot(222)
plt.plot(sim.trange(), sim.data[voltage][:,0], 'r')
plt.xlim(0, 1);
# The top graph shows the input signal in green and the filtered output spikes from the single neuron population in blue. The spikes (that are filtered) from the neuron are shown in the bottom graph on the left. On the right are the subthreshold voltages for the neuron.
import pylab
pylab.show()
|
py | 1a4331af636aea1c8c41681ae42a6b0bc5b1903a | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
import spack.repo
import spack.paths
@pytest.fixture()
def extra_repo(tmpdir_factory):
repo_namespace = 'extra_test_repo'
repo_dir = tmpdir_factory.mktemp(repo_namespace)
repo_dir.ensure('packages', dir=True)
with open(str(repo_dir.join('repo.yaml')), 'w') as f:
f.write("""
repo:
namespace: extra_test_repo
""")
return spack.repo.Repo(str(repo_dir))
def test_repo_getpkg(mutable_mock_repo):
mutable_mock_repo.get('a')
mutable_mock_repo.get('builtin.mock.a')
def test_repo_multi_getpkg(mutable_mock_repo, extra_repo):
mutable_mock_repo.put_first(extra_repo)
mutable_mock_repo.get('a')
mutable_mock_repo.get('builtin.mock.a')
def test_repo_multi_getpkgclass(mutable_mock_repo, extra_repo):
mutable_mock_repo.put_first(extra_repo)
mutable_mock_repo.get_pkg_class('a')
mutable_mock_repo.get_pkg_class('builtin.mock.a')
def test_repo_pkg_with_unknown_namespace(mutable_mock_repo):
with pytest.raises(spack.repo.UnknownNamespaceError):
mutable_mock_repo.get('unknown.a')
def test_repo_unknown_pkg(mutable_mock_repo):
with pytest.raises(spack.repo.UnknownPackageError):
mutable_mock_repo.get('builtin.mock.nonexistentpackage')
def test_repo_anonymous_pkg(mutable_mock_repo):
with pytest.raises(spack.repo.UnknownPackageError):
mutable_mock_repo.get('+variant')
@pytest.mark.maybeslow
def test_repo_last_mtime():
latest_mtime = max(os.path.getmtime(p.module.__file__)
for p in spack.repo.path.all_packages())
assert spack.repo.path.last_mtime() == latest_mtime
def test_repo_invisibles(mutable_mock_repo, extra_repo):
with open(os.path.join(extra_repo.root, 'packages', '.invisible'), 'w'):
pass
extra_repo.all_package_names()
|
py | 1a4331de4217ab350d887cf2480fff28364727a7 | # Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Shared front-end analyzer specific presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import imp
import os.path
import subprocess
def runSmokeTest(input_api, output_api):
hasChangedFiles = False
for git_file in input_api.AffectedTextFiles():
filename = git_file.AbsoluteLocalPath()
if filename.endswith(".dart"):
hasChangedFiles = True
break
if hasChangedFiles:
local_root = input_api.change.RepositoryRoot()
utils = imp.load_source('utils',
os.path.join(local_root, 'tools', 'utils.py'))
dart = os.path.join(utils.CheckedInSdkPath(), 'bin', 'dart')
smoke_test = os.path.join(local_root, 'pkg', '_fe_analyzer_shared',
'tool', 'smoke_test_quick.dart')
windows = utils.GuessOS() == 'win32'
if windows:
dart += '.exe'
if not os.path.isfile(dart):
print('WARNING: dart not found: %s' % dart)
return []
if not os.path.isfile(smoke_test):
print('WARNING: _fe_analyzer_shared smoke test not found: %s' %
smoke_test)
return []
args = [dart, smoke_test]
process = subprocess.Popen(
args, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
outs, _ = process.communicate()
if process.returncode != 0:
return [
output_api.PresubmitError(
'_fe_analyzer_shared smoke test failure(s):',
long_text=outs)
]
return []
def CheckChangeOnCommit(input_api, output_api):
return runSmokeTest(input_api, output_api)
def CheckChangeOnUpload(input_api, output_api):
return runSmokeTest(input_api, output_api)
|
py | 1a43342c81337babc9c4fe55ce0034f11e143e54 | """
This file is part of the accompanying code to our manuscript:
Kratzert, F., Klotz, D., Shalev, G., Klambauer, G., Hochreiter, S., Nearing, G., "Benchmarking
a Catchment-Aware Long Short-Term Memory Network (LSTM) for Large-Scale Hydrological Modeling".
submitted to Hydrol. Earth Syst. Sci. Discussions (2019)
You should have received a copy of the Apache-2.0 license along with the code. If not,
see <https://opensource.org/licenses/Apache-2.0>
"""
import json
import pickle
from collections import defaultdict
from pathlib import PosixPath
from typing import Callable, Dict, List, Tuple
import numpy as np
import tqdm
import xarray
from scipy.stats import wilcoxon
def get_run_dirs(root_dir: PosixPath, model: str, loss: str) -> List:
"""Get all folders that are trained for a specific model configuration
Parameters
----------
root_dir : PosixPath
Path to the folder containing all model runs.
model : str
One of ['ealstm', 'lstm', 'lstm_no_static'], defining the model type to find.
loss : str
One of ['NSELoss', 'MSELoss'], defining the loss function that the model was trained for.
Returns
-------
List
List of PosixPaths, where each path points to the folder of one model run.
Raises
------
ValueError
If an invalid model type was passed.
ValueError
If an invalid loss type was passed.
RuntimeError
If root directory contains no subfolder.
"""
valid_models = ["ealstm", "lstm", "lstm_no_static"]
if not model in valid_models:
raise ValueError(f"`model` must be one of {valid_models}")
valid_loss = ['MSELoss', 'NSELoss']
if not loss in valid_loss:
raise ValueError(f"`loss` must be one of {valid_loss}")
folders = list(root_dir.glob('*/'))
if len(folders) == 0:
raise RuntimeError(f"No subfolders found in {root_dir}")
run_dirs = []
for folder in folders:
if folder.is_dir():
with open(folder / "cfg.json", "r") as fp:
cfg = json.load(fp)
if (model == "ealstm") and (not cfg["concat_static"]) and (not cfg["no_static"]):
if (loss == "NSELoss") and (not cfg["use_mse"]):
run_dirs.append(folder)
elif (loss == "MSELoss") and (cfg["use_mse"]):
run_dirs.append(folder)
else:
pass
if (model == "lstm") and (cfg["concat_static"]) and (not cfg["no_static"]):
if (loss == "NSELoss") and (not cfg["use_mse"]):
run_dirs.append(folder)
elif (loss == "MSELoss") and (cfg["use_mse"]):
run_dirs.append(folder)
else:
pass
if (model == "lstm_no_static") and (cfg["no_static"]):
if (loss == "NSELoss") and (not cfg["use_mse"]):
run_dirs.append(folder)
elif (loss == "MSELoss") and (cfg["use_mse"]):
run_dirs.append(folder)
else:
pass
return run_dirs
def eval_benchmark_models(netcdf_folder: PosixPath, func: Callable) -> dict:
"""Evaluate benchmark models on specific metric function.
Parameters
----------
netcdf_folder : PosixPath
Directory, containing basin-wise netcdf files, which contain the benchmark model simulations
func : Callable
The metric function to evaluate. Must satisfy the func(obs, sim) convention.
Returns
-------
dict
Dictionary, containing the metric values of each basin and each benchmark model.
"""
nc_files = list(netcdf_folder.glob('*.nc'))
benchmark_models = defaultdict(dict)
for nc_file in tqdm.tqdm(nc_files):
basin = nc_file.name[:8]
xr = xarray.open_dataset(nc_file)
for key in xr.keys():
if key != 'QObs':
obs = xr['QObs'].values
sim = xr[key].values
sim = sim[obs >= 0]
obs = obs[obs >= 0]
value = func(obs, sim)
if np.isnan(value):
print(f"{key}: {nc_file}")
else:
benchmark_models[key][basin] = value
return benchmark_models
def eval_lstm_models(run_dirs: List, func: Callable) -> dict:
"""Evaluate LSTM outputs on specific metric function.
Returns the metric for each basin in each seed, as well as the results of the ensemble mean.
Parameters
----------
run_dirs : List
List of PosixPaths pointing to the different model directories.
func : Callable
The metric function to evaluate. Must satisfy the func(obs, sim) convention.
Returns
-------
dict
Dictionary, containing the metric value for each basin of each random seed, as well as the
ensemble mean.
"""
single_models = {}
model_ensemble = defaultdict(dict)
for run_dir in tqdm.tqdm(run_dirs):
check_eval_file = list(run_dir.glob("*.p"))
if check_eval_file:
eval_file = check_eval_file[0]
parts = eval_file.name.split('_')
seed = parts[-1][:-2]
single_models[seed] = {}
with eval_file.open("rb") as fp:
data = pickle.load(fp)
for basin, df in data.items():
obs = df["qobs"].values
sim = df["qsim"].values
sim = sim[obs >= 0]
obs = obs[obs >= 0]
single_models[seed][basin] = func(obs, sim)
if basin not in model_ensemble.keys():
model_ensemble[basin]["df"] = df
else:
model_ensemble[basin]["df"]["qsim"] += df["qsim"]
ensemble_nse = {}
for basin, data in model_ensemble.items():
obs = data["df"]["qobs"].values
sim = data["df"]["qsim"].values / len(single_models.keys())
sim = sim[obs >= 0]
obs = obs[obs >= 0]
ensemble_nse[basin] = func(obs, sim)
single_models["ensemble"] = ensemble_nse
return single_models
def get_pvals(metrics: dict, model1: str, model2: str) -> Tuple[float, float]:
    """Compute Wilcoxon signed-rank p-values between two models.
Parameters
----------
metrics : dict
Dictionary, containing the metric values of both models for all basins.
model1 : str
String, defining the first model to take. Must be a key in `metrics`
model2 : str
String, defining the second model to take. Must be a key in `metrics`
Returns
-------
    p_val_single : float
        P-value of the Wilcoxon test between the mean per-basin performances of both models.
    p_val_ensemble : float
        P-value of the Wilcoxon test between the ensemble means of both models.
"""
# p-values between mean performance per basin of both models
metric_model1 = get_mean_basin_performance(metrics, model1)
metric_model2 = get_mean_basin_performance(metrics, model2)
_, p_val_single = wilcoxon(list(metric_model1.values()), list(metric_model2.values()))
# p-value between ensemble means
_, p_val_ensemble = wilcoxon(list(metrics[model1]["ensemble"].values()),
list(metrics[model2]["ensemble"].values()))
return p_val_single, p_val_ensemble
def get_mean_basin_performance(metrics: dict, model: str) -> Dict:
"""Get the mean performance per basin for a given model
Parameters
----------
metrics : dict
Dictionary containing all evaluation metrics
model : str
Model identifier string
Returns
-------
Dict
Dictionary containing for each basin a key and the value is the mean performance.
"""
seeds = [k for k in metrics[model].keys() if k != "ensemble"]
metric = defaultdict(list)
for seed in seeds:
for basin, nse in metrics[model][seed].items():
metric[basin].append(nse)
return {basin: np.mean(values) for basin, values in metric.items()}
def get_cohens_d(values1: List, values2: List) -> float:
"""Calculate Cohen's Effect size
Parameters
----------
values1 : List
List of model performances of model 1
values2 : List
List of model performances of model 2
Returns
-------
float
Cohen's d
"""
s = np.sqrt(((len(values1) - 1) * np.var(values1) + (len(values2) - 1) * np.var(values2)) /
(len(values1) + len(values2) - 2))
d = (np.abs(np.mean(values1) - np.mean(values2))) / s
return d
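# Worked example (illustrative, not from the original evaluation): two small samples of NSE values
# with means 0.70 and 0.60. Note that np.var defaults to the population variance (ddof=0), so this
# d is slightly larger than the textbook value computed from sample variances.
# >>> round(get_cohens_d([0.68, 0.70, 0.72], [0.58, 0.60, 0.62]), 1)
# 6.1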
|
py | 1a4334457f2a01936cdf475e6bf761bd93a963fc | # AutoTransform
# Large scale, component based code modification library
#
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2022-present Nathan Rockenbach <http://github.com/nathro>
# @black_format
"""The implementation for the ChangeStateCondition."""
from __future__ import annotations
from typing import ClassVar
from autotransform.change.base import Change, ChangeState
from autotransform.step.condition.base import Condition, ConditionName
from autotransform.step.condition.comparison import ComparisonType, compare
class ChangeStateCondition(Condition):
"""A condition which checks the ChangeState against the state supplied using the supplied
    comparison. Note: only equals and not equals are valid, all others will result in an error.
Attributes:
comparison (ComparisonType): The type of comparison to perform.
state (ChangeState): The state to compare against.
name (ClassVar[ConditionName]): The name of the Component.
"""
comparison: ComparisonType
state: ChangeState
name: ClassVar[ConditionName] = ConditionName.CHANGE_STATE
@staticmethod
def get_type() -> ConditionName:
"""Used to map Condition components 1:1 with an enum, allowing construction from JSON.
Returns:
ConditionType: The unique type associated with this Condition.
"""
return ConditionName.CHANGE_STATE
def check(self, change: Change) -> bool:
"""Checks whether the Change's state passes the comparison.
Args:
change (Change): The Change the Condition is checking.
Returns:
bool: Whether the Change passes the Condition.
"""
assert self.comparison in [
ComparisonType.EQUAL,
ComparisonType.NOT_EQUAL,
], "ChangeStateCondition may only use equal or not_equal comparison"
return compare(change.get_state(), self.state, self.comparison)
|
py | 1a4336745e58e6c983df3d4633614c6baaed9920 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy
import numba
import time
from MiniFramework.ConvWeightsBias import *
from MiniFramework.ConvLayer import *
from MiniFramework.HyperParameters_4_2 import *
def calculate_output_size(input_h, input_w, filter_h, filter_w, padding, stride=1):
output_h = (input_h - filter_h + 2 * padding) // stride + 1
output_w = (input_w - filter_w + 2 * padding) // stride + 1
return (output_h, output_w)
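# Worked example (illustrative): a 28x28 input with a 3x3 filter, padding 1 and stride 1 gives
# output_h = (28 - 3 + 2*1) // 1 + 1 = 28, so calculate_output_size(28, 28, 3, 3, 1) == (28, 28)
# and the spatial size is preserved.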
def test_performance():
batch_size = 64
params = HyperParameters_4_2(
0.1, 1, batch_size,
net_type=NetType.MultipleClassifier,
init_method=InitialMethod.Xavier)
stride = 1
padding = 1
fh = 3
fw = 3
input_channel = 3
output_channel = 4
iw = 28
ih = 28
    # 64 input images of 3 x 28 x 28 (mnist-like)
x = np.random.randn(batch_size, input_channel, iw, ih)
c1 = ConvLayer((input_channel,iw,ih), (output_channel,fh,fw), (stride, padding), params)
c1.initialize("test", "test", False)
# dry run
for i in range(5):
f1 = c1.forward_numba(x)
delta_in = np.ones((f1.shape))
b1, dw1, db1 = c1.backward_numba(delta_in, 1)
# run
s1 = time.time()
for i in range(100):
f1 = c1.forward_numba(x)
b1, dw1, db1 = c1.backward_numba(delta_in, 1)
e1 = time.time()
print("method numba:", e1-s1)
# dry run
for i in range(5):
f2 = c1.forward_img2col(x)
b2, dw2, db2 = c1.backward_col2img(delta_in, 1)
# run
s2 = time.time()
for i in range(100):
f2 = c1.forward_img2col(x)
b2, dw2, db2 = c1.backward_col2img(delta_in, 1)
e2 = time.time()
print("method img2col:", e2-s2)
print("compare correctness of method 1 and method 2:")
print("forward:", np.allclose(f1, f2, atol=1e-7))
print("backward:", np.allclose(b1, b2, atol=1e-7))
print("dW:", np.allclose(dw1, dw2, atol=1e-7))
print("dB:", np.allclose(db1, db2, atol=1e-7))
if __name__ == '__main__':
test_performance()
|
py | 1a4337b387166d7b49401a0058dc1319b208ff85 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
import random
import re
import time
import cv2
import numpy as np
from PIL import Image
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from app import get_random_float
from app.slider import base64_to_image
"""
JD registration page (slider captcha) helper.
"""
class JD_Register(object):
def __init__(self, url, username, pwd=''):
super(JD_Register, self).__init__()
        # Target URL
self.url = url
options = ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-automation'])
self.driver = webdriver.Chrome(options=options)
self.wait = WebDriverWait(self.driver, 10)
        # Account info
self.username = username
self.password = pwd
        # Temporary paths for the downloaded captcha images
self.target_path = "./static/temp/target_reg.png"
self.template_path = "./static/temp/template_reg.png"
        # Zoom ratio between the page and the downloaded image
self.zoom = 1
def open(self, url=None):
self.driver.get(url if url else self.url)
def close(self):
self.driver.close()
def refresh(self):
self.driver.refresh()
def main(self):
"""
        Program entry point.
:return:
"""
print('是否打开页面?y:是;其它:跳过;')
is_open = input()
if is_open and is_open.lower() == 'y':
self.open()
print('是否开始程序?y:是;其它:退出;')
is_star = input()
if is_star and is_star.lower() == 'y':
self._init()
self._crack_slider()
def _init(self):
"""
        Log in: fill in the phone number and click, which triggers the slider captcha.
:return:
"""
print("填写账号")
input_ele = self.driver.find_element_by_id('form-phone')
input_ele.clear()
# username
time.sleep(random.uniform(0.1, 0.5))
input_ele.send_keys(self.username[0:3])
time.sleep(random.uniform(0.5, 0.8))
input_ele.send_keys(self.username[3:])
print("点击登录")
time.sleep(random.uniform(0.2, 0.8)) #
login_ele = self.driver.find_element_by_xpath('//*[@id="step1-wrap"]/div[2]/div[1]')
ActionChains(self.driver).move_to_element(login_ele).perform()
ActionChains(self.driver).move_by_offset(12, 5).perform()
login_ele.click()
    # Slider
def _crack_slider(self):
"""
        Crack the slider captcha.
:return:
"""
        # Fetch the captcha images
pic_success = self._get_pic()
if pic_success:
            # Template matching
target = cv2.imread(self.target_path)
template = cv2.imread(self.template_path)
distance = self._match_templet(target, template)
print("位移距离 distance = %d" % distance)
            # Build the drag trajectory
tracks = self._get_tracks3(distance * self.zoom)
            # Drag the slider
self._slider_action(tracks)
        # Check the result and decide whether to retry
print('是否继续测试?y:是;其它:退出')
is_go_on = input()
if is_go_on and is_go_on.lower() == 'y':
print("开始下一次尝试")
return self._crack_slider()
else:
return False
def _get_pic(self):
"""
        Download the captcha images to local files.
:return:
"""
print("查找缺口图片")
time.sleep(1)
target = self.wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="slideAuthCode"]/div/div[1]/div[2]/div[1]/img')))
template = self.wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="slideAuthCode"]/div/div[1]/div[2]/div[2]/img')))
if target and template:
print("开始下载图片")
target_base64 = target.get_attribute('src')
template_base64 = template.get_attribute('src')
target_base64_str = re.sub(r'data:[a-z]*/[a-z]*;base64,', '', target_base64)
template_base64_str = re.sub(r'data:[a-z]*/[a-z]*;base64,', '', template_base64)
# save
base64_to_image(target_base64_str, self.target_path)
base64_to_image(template_base64_str, self.template_path)
time.sleep(1)
# zoom
local_img = Image.open(self.target_path)
size_loc = local_img.size
self.zoom = 364 / int(size_loc[0])
print("计算缩放比例 zoom = %f" % round(self.zoom, 4))
return True
else:
print("未找到缺口图片")
return False
def _slider_action(self, tracks):
"""
        Drag the slider along the given tracks.
:return:
"""
print("开始移动滑块")
        # Press and hold the slider
slider = self.wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="slideAuthCode"]/div/div[2]/div[3]')))
if slider:
ActionChains(self.driver).click_and_hold(slider).perform()
            # Forward moves
for track in tracks['forward_tracks']:
yoffset_random = random.uniform(-2, 4)
ActionChains(self.driver).move_by_offset(xoffset=track, yoffset=yoffset_random).perform()
time.sleep(random.uniform(0.06, 0.5))
            # Backward (correction) moves
for back_tracks in tracks['back_tracks']:
yoffset_random = random.uniform(-2, 2)
ActionChains(self.driver).move_by_offset(xoffset=back_tracks, yoffset=yoffset_random).perform()
            # Final jitter
ActionChains(self.driver).move_by_offset(
xoffset=get_random_float(0, -1.67),
yoffset=get_random_float(-1, 1)
).perform()
ActionChains(self.driver).move_by_offset(
xoffset=get_random_float(0, 1.67),
yoffset=get_random_float(-1, 1)
).perform()
time.sleep(get_random_float(0.2, 0.6))
ActionChains(self.driver).release().perform()
print("滑块移动成功")
return True
else:
print("未找到滑块")
return False
    # test / verification helpers
def _match_profile(self, image_path):
"""
        Locate the gap position via contour detection.
        :param image_path: image that contains the gap
:return:
"""
image = cv2.imread(image_path)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
# canny = cv2.Canny(blurred, 200, 400)
canny = cv2.Canny(blurred, 50, 370)
cv2.imshow('image2', blurred)
cv2.imshow('image3', canny)
cv2.imshow('image4', image)
"""
        findContours returns the processed image, the contour point sets, and the hierarchy indices of each contour level.
"""
binary, contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# binary, contours, hierarchy = cv2.findContours(canny, 3, cv2.CHAIN_APPROX_SIMPLE)
for i, contour in enumerate(contours):
M = cv2.moments(contour)
if M['m00'] == 0:
cx = cy = 0
else:
cx, cy = M['m10'] / M['m00'], M['m01'] / M['m00']
            # Contour filtering
if 20 < cv2.contourArea(contour) < 2000 and 50 < cv2.arcLength(contour, True) < 350:
# if cx < 400:
# continue
                x, y, w, h = cv2.boundingRect(contour)  # bounding rectangle
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow('image1', image)
print("选择的值 :area = {}, length = {}, cx = {}, cy = {}".format(
cv2.contourArea(contour),
cv2.arcLength(contour, True),
cx,
cy
))
print("选择的值 :x = {}, y = {}, w = {}, h = {}".format(x, y, w, h))
cv2.imshow('image1-1', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return 0
def _match_templet(self, img_target, img_template):
"""
        Template matching (used to locate the gap).
        :param img_target: background image containing the gap
        :param img_template: slider image of the gap piece
        :return: x-axis distance of the gap position
"""
print("图片缺口模板匹配")
        # Pre-process the slider image
        tpl = self.__handle_slider_img(img_template)  # the main source of error is the slider's white background
# cv2.imshow("template", tpl)
        # Gaussian blur
blurred = cv2.GaussianBlur(img_target, (3, 3), 0)
# cv2.imshow("blurred2", blurred)
        # Convert to grayscale
gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
# cv2.imshow("gray2", gray)
width, height = tpl.shape[:2]
        # Binarization (for JD the binarized result is not great)
# ret, target = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
# ret, target = cv2.threshold(gray, 110, 255, cv2.THRESH_BINARY)
# cv2.imshow("target", target)
        # Template matching on the binarized image
# result = cv2.matchTemplate(target, tpl, cv2.TM_CCOEFF_NORMED) # 使用二值化图片
        # Template matching on the grayscale image
        result = cv2.matchTemplate(gray, tpl, cv2.TM_CCOEFF_NORMED)  # use the grayscale image
print("result = {}".format(len(np.where(result >= 0.5)[0])))
        # Find the location of the best match in the result array
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
left_up = max_loc
right_down = (left_up[0] + height, left_up[1] + width)
cv2.rectangle(img_target, left_up, right_down, (7, 279, 151), 2)
print('匹配结果区域起点x坐标为:%d' % max_loc[0])
# cv2.imshow('dectected', img_target)
return left_up[0]
def __handle_slider_img(self, image):
"""
        Binarize the slider image.
        :param image: OpenCV image object
:return:
"""
        kernel = np.ones((8, 8), np.uint8)  # kernel for removing foreground noise from the slider
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
        # Gray out the background
width, heigth = gray.shape
for h in range(heigth):
for w in range(width):
if gray[w, h] == 0:
gray[w, h] = 96
# cv2.imshow('gray', gray)
        # Exclude the background
binary = cv2.inRange(gray, 96, 96)
        res = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)  # opening removes white noise
# cv2.imshow('res1', res)
return res
def _get_gap(self, img1, img2):
"""
        Get the gap offset.
        :param img1: image without the gap
        :param img2: image with the gap
:return:
"""
left = 68
for i in range(left, img1.size[0]):
for j in range(img1.size[1]):
if not self.__is_pixel_equal(img1, img2, i, j):
left = i
return left
return left
def __is_pixel_equal(self, img1, img2, x, y):
"""
        Check whether two pixels are (approximately) equal.
:param img1:
:param img2:
:param x:
:param y:
:return:
"""
        # Sample the pixel from both images
pix1 = img1.load()[x, y]
pix2 = img2.load()[x, y]
threshold = 60
        if (abs(pix1[0] - pix2[0]) < threshold and abs(pix1[1] - pix2[1]) < threshold and
                abs(pix1[2] - pix2[2]) < threshold):
return True
else:
return False
def _get_cookie(self):
cookie_items = self.driver.get_cookies()
ck_dict = {}
for cookie in cookie_items:
ck_dict[cookie['name']] = cookie['value']
print("cookie = %s" % ck_dict)
self._save_to_file(json.dumps(ck_dict, separators=(',', ':'), ensure_ascii=False))
# self.driver.quit()
def _save_to_file(self, str_data):
file = None
try:
file = open("../static/temp/cookie.txt", "w")
file.write(str_data)
except:
print("保存cookie异常")
finally:
if file:
file.close()
    # ---- drag trajectory calculation: start ----
def _get_tracks0(self, distance):
"""
        Build the move trajectory (variant 0) from the offset.
        :param distance: offset
        :return: move trajectory
"""
trace = []
mid = distance * 3 / 5
        # initial position, initial velocity, time step
current, v, t = 0, 0, 0.2
distance += 20
while current < distance:
if current < mid:
a = 2
else:
a = -3
s = v * t + 0.5 * a * (t ** 2)
v = v + a * t
current += s
trace.append(round(s))
back_tracks = [-3, -3, -2, -2, -2, -2, -2, -1, -1, -1]
return {'forward_tracks': trace, 'back_tracks': back_tracks}
def _get_tracks1(self, distance):
"""
        Build the move trajectory (variant 1) from the offset.
        :param distance: offset
        :return: move trajectory
"""
trace = []
mid = distance * round(random.uniform(3, 4), 4) / 5
        # initial position, initial velocity, time step
current, v, t = 0, 500, 0.005
distance += 20
while current < distance:
if current < mid:
a = random.uniform(2.4, 2.8)
else:
a = random.uniform(-3, -2)
s = v * t + 0.5 * a * (t ** 2)
v = v + a * t
current += s
trace.append(round(s))
back_tracks = [-3, -3, -3, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1]
return {'forward_tracks': trace, 'back_tracks': back_tracks}
def _get_tracks3(self, distance):
"""
        Build the move trajectory (variant 3) from the offset.
        :param distance: offset
        :return: move trajectory
"""
track = []
mid1 = round(distance * random.uniform(0.1, 0.2))
mid2 = round(distance * random.uniform(0.65, 0.76))
mid3 = round(distance * random.uniform(0.84, 0.88))
        # initial position, initial velocity, time step
current, v, t = 0, 0, 0.2
distance = round(distance)
while current < distance:
            # four acceleration phases
if current < mid1:
a = random.randint(10, 15)
elif current < mid2:
a = random.randint(30, 40)
elif current < mid3:
a = -70
else:
a = random.randint(-25, -18)
            # initial velocity v0
            v0 = v
            # current velocity v = v0 + a*t
v = v0 + a * t
v = v if v >= 0 else 0
move = v0 * t + 1 / 2 * a * (t ** 2)
move = round(move if move >= 0 else 1)
            # current displacement
            current += move
            # append to the trajectory
            track.append(move)
print("current={}, distance={}".format(current, distance))
        # overshoot correction
back_tracks = []
out_range = distance - current
if out_range < -8:
sub = int(out_range + 8)
back_tracks = [-1, sub, -3, -1, -1, -1, -1]
elif out_range < -2:
sub = int(out_range + 3)
back_tracks = [-1, -1, sub]
print("forward_tracks={}, back_tracks={}".format(track, back_tracks))
return {'forward_tracks': track, 'back_tracks': back_tracks}
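    # Quick sanity check (illustrative, not part of the original script): _get_tracks3 never touches
    # instance state, so it can be exercised without opening a browser by passing None for self; the
    # forward and backward steps land within a few pixels of the requested distance.
    # >>> t = JD_Register._get_tracks3(None, 120)
    # >>> 0 <= sum(t['forward_tracks']) + sum(t['back_tracks']) - 120 <= 8
    # True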
def _get_tracks4(self, distance):
"""
        Simulate a manual drag to compute the move trajectory from the offset.
        :param distance: offset
        :return: move trajectory
"""
        # move trajectory
        tracks = []
        # current displacement
        current = 0
        # deceleration threshold
        mid = distance * 4 / 5
        # time step
        t = 0.2
        # initial velocity
        v = 0
while current < distance:
if current < mid:
a = random.uniform(2, 5)
else:
a = -(random.uniform(12.5, 13.5))
v0 = v
v = v0 + a * t
x = v0 * t + 1 / 2 * a * t * t
current += x
if 0.6 < current - distance < 1:
x = x - 0.53
tracks.append(round(x, 2))
elif 1 < current - distance < 1.5:
x = x - 1.4
tracks.append(round(x, 2))
elif 1.5 < current - distance < 3:
x = x - 1.8
tracks.append(round(x, 2))
else:
tracks.append(round(x, 2))
print(sum(tracks))
return {'forward_tracks': tracks, 'back_tracks': []}
    # ---- drag trajectory calculation: end ----
if __name__ == '__main__':
c = JD_Register(url='https://reg.jd.com/p/regPage', username='15812344455')
c.main()
|
py | 1a4337b4b20ccd6f624ece59bcf36e2436b4ca06 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# This scaffolding model makes your app work on Google App Engine too
# File is released under public domain and you can use without limitations
# -------------------------------------------------------------------------
db = DAL('sqlite://storage.sqlite', migrate=True)
from gluon.tools import *
auth = Auth(db)
auth.define_tables()
crud = Crud(db)
db.define_table('page',
Field('title'),
Field('body', 'text'),
Field('created_on', 'datetime', default=request.now),
Field('created_by', 'reference auth_user', default=auth.user_id),
format='%(title)s')
db.define_table('post',
Field('page_id', 'reference page'),
Field('body', 'text'),
Field('created_on', 'datetime', default=request.now),
Field('created_by', 'reference auth_user', default=auth.user_id))
db.define_table('document',
Field('page_id', 'reference page'),
Field('name'),
Field('file', 'upload'),
Field('created_on', 'datetime', default=request.now),
Field('created_by', 'reference auth_user', default=auth.user_id),
format='%(name)s')
db.page.title.requires = IS_NOT_IN_DB(db, 'page.title')
db.page.body.requires = IS_NOT_EMPTY()
db.page.created_by.readable = db.page.created_by.writable = False
db.page.created_on.readable = db.page.created_on.writable = False
db.post.body.requires = IS_NOT_EMPTY()
db.post.page_id.readable = db.post.page_id.writable = False
db.post.created_by.readable = db.post.created_by.writable = False
db.post.created_on.readable = db.post.created_on.writable = False
db.document.name.requires = IS_NOT_IN_DB(db, 'document.name')
db.document.page_id.readable = db.document.page_id.writable = False
db.document.created_by.readable = db.document.created_by.writable = False
db.document.created_on.readable = db.document.created_on.writable = False
if request.global_settings.web2py_version < "2.14.1":
    raise HTTP(500, "Requires web2py 2.14.1 or newer")
# -------------------------------------------------------------------------
# if SSL/HTTPS is properly configured and you want all HTTP requests to
# be redirected to HTTPS, uncomment the line below:
# -------------------------------------------------------------------------
# request.requires_https()
# -------------------------------------------------------------------------
# app configuration made easy. Look inside private/appconfig.ini
# -------------------------------------------------------------------------
from gluon.contrib.appconfig import AppConfig
# -------------------------------------------------------------------------
# once in production, remove reload=True to gain full speed
# -------------------------------------------------------------------------
myconf = AppConfig(reload=True)
if not request.env.web2py_runtime_gae:
# ---------------------------------------------------------------------
# if NOT running on Google App Engine use SQLite or other DB
# ---------------------------------------------------------------------
db = DAL(myconf.get('db.uri'),
pool_size=myconf.get('db.pool_size'),
migrate_enabled=myconf.get('db.migrate'),
check_reserved=['all'])
else:
# ---------------------------------------------------------------------
# connect to Google BigTable (optional 'google:datastore://namespace')
# ---------------------------------------------------------------------
db = DAL('google:datastore+ndb')
# ---------------------------------------------------------------------
# store sessions and tickets there
# ---------------------------------------------------------------------
session.connect(request, response, db=db)
# ---------------------------------------------------------------------
# or store session in Memcache, Redis, etc.
# from gluon.contrib.memdb import MEMDB
# from google.appengine.api.memcache import Client
# session.connect(request, response, db = MEMDB(Client()))
# ---------------------------------------------------------------------
# -------------------------------------------------------------------------
# by default give a view/generic.extension to all actions from localhost
# none otherwise. a pattern can be 'controller/function.extension'
# -------------------------------------------------------------------------
response.generic_patterns = ['*'] if request.is_local else []
# -------------------------------------------------------------------------
# choose a style for forms
# -------------------------------------------------------------------------
response.formstyle = myconf.get('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.get('forms.separator') or ''
# -------------------------------------------------------------------------
# (optional) optimize handling of static files
# -------------------------------------------------------------------------
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
# -------------------------------------------------------------------------
# (optional) static assets folder versioning
# -------------------------------------------------------------------------
# response.static_version = '0.0.0'
# -------------------------------------------------------------------------
# Here is sample code if you need for
# - email capabilities
# - authentication (registration, login, logout, ... )
# - authorization (role based authorization)
# - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
# - old style crud actions
# (more options discussed in gluon/tools.py)
# -------------------------------------------------------------------------
from gluon.tools import Auth, Service, PluginManager
# host names must be a list of allowed host names (glob syntax allowed)
auth = Auth(db, host_names=myconf.get('host.names'))
service = Service()
plugins = PluginManager()
# -------------------------------------------------------------------------
# create all tables needed by auth if not custom tables
# -------------------------------------------------------------------------
auth.define_tables(username=False, signature=False)
# -------------------------------------------------------------------------
# configure email
# -------------------------------------------------------------------------
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else myconf.get('smtp.server')
mail.settings.sender = myconf.get('smtp.sender')
mail.settings.login = myconf.get('smtp.login')
mail.settings.tls = myconf.get('smtp.tls') or False
mail.settings.ssl = myconf.get('smtp.ssl') or False
# -------------------------------------------------------------------------
# configure auth policy
# -------------------------------------------------------------------------
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
# -------------------------------------------------------------------------
# Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
# 'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
#
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
# >>> for row in rows: print row.id, row.myfield
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# after defining tables, uncomment below to enable auditing
# -------------------------------------------------------------------------
# auth.enable_record_versioning(db)
|
py | 1a43380e4e5340b23ba78bb0e6fc73c5e5e69589 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from abc import ABCMeta, abstractmethod
from typing import Any, TYPE_CHECKING, Union
from pandas.api.types import CategoricalDtype
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
FractionalType,
IntegralType,
MapType,
NumericType,
StringType,
StructType,
TimestampType,
)
import pyspark.sql.types as types
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.typedef import Dtype
if TYPE_CHECKING:
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
def is_valid_operand_for_numeric_arithmetic(
operand: Any,
*,
allow_bool: bool = True
) -> bool:
"""Check whether the operand is valid for arithmetic operations against numerics."""
if isinstance(operand, numbers.Number) and not isinstance(operand, bool):
return True
elif isinstance(operand, IndexOpsMixin):
if isinstance(operand.dtype, CategoricalDtype):
return False
else:
return isinstance(operand.spark.data_type, NumericType) or (
allow_bool and isinstance(operand.spark.data_type, BooleanType))
else:
return False
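# Illustrative behaviour note (added for clarity): plain Python numbers qualify while bools and
# strings do not, e.g. is_valid_operand_for_numeric_arithmetic(1.5) is True, but
# is_valid_operand_for_numeric_arithmetic(True) and is_valid_operand_for_numeric_arithmetic("a")
# are both False; pandas-on-Spark operands are instead checked against their Spark data type.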
def transform_boolean_operand_to_numeric(operand: Any, spark_type: types.DataType) -> Any:
"""Transform boolean operand to the given numeric spark_type.
Return the transformed operand if the operand is a boolean IndexOpsMixin,
otherwise return the original operand.
"""
if isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType):
return operand.spark.transform(lambda scol: scol.cast(spark_type))
else:
return operand
class DataTypeOps(object, metaclass=ABCMeta):
"""The base class for binary operations of pandas-on-Spark objects (of different data types)."""
def __new__(cls, dtype: Dtype, spark_type: DataType):
from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps
from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
from pyspark.pandas.data_type_ops.date_ops import DateOps
from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps
from pyspark.pandas.data_type_ops.num_ops import (
IntegralOps,
FractionalOps,
)
from pyspark.pandas.data_type_ops.string_ops import StringOps
if isinstance(dtype, CategoricalDtype):
return object.__new__(CategoricalOps)
elif isinstance(spark_type, FractionalType):
return object.__new__(FractionalOps)
elif isinstance(spark_type, IntegralType):
return object.__new__(IntegralOps)
elif isinstance(spark_type, StringType):
return object.__new__(StringOps)
elif isinstance(spark_type, BooleanType):
return object.__new__(BooleanOps)
elif isinstance(spark_type, TimestampType):
return object.__new__(DatetimeOps)
elif isinstance(spark_type, DateType):
return object.__new__(DateOps)
elif isinstance(spark_type, BinaryType):
return object.__new__(BinaryOps)
elif isinstance(spark_type, ArrayType):
return object.__new__(ArrayOps)
elif isinstance(spark_type, MapType):
return object.__new__(MapOps)
elif isinstance(spark_type, StructType):
return object.__new__(StructOps)
else:
raise TypeError("Type %s was not understood." % dtype)
def __init__(self, dtype: Dtype, spark_type: DataType):
self.dtype = dtype
self.spark_type = spark_type
@property
@abstractmethod
def pretty_name(self) -> str:
raise NotImplementedError()
def add(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def sub(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def mul(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def truediv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def floordiv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def mod(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def pow(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def radd(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def rsub(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def rmul(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def rtruediv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def rfloordiv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def rmod(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def rpow(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
|
py | 1a4338c5ac453a64f90be051476c5c9a80ae6195 | from model import MusicTransformer
from custom.layers import *
from custom import callback
import params as par
from tensorflow.python.keras.optimizer_v2.adam import Adam
from data import Data
import utils
import argparse
import datetime
import sys
tf.executing_eagerly()
parser = argparse.ArgumentParser()
parser.add_argument('--l_r', default=None, help='learning rate', type=float)
parser.add_argument('--batch_size', default=2, help='batch size', type=int)
parser.add_argument('--pickle_dir', default='music', help='dataset path')
parser.add_argument('--max_seq', default=2048, help='max sequence length', type=int)
parser.add_argument('--epochs', default=100, help='number of epochs', type=int)
parser.add_argument('--load_path', default=None, help='model load path', type=str)
parser.add_argument('--save_path', default="result/0722", help='model save path')
parser.add_argument('--is_reuse', default=False)
parser.add_argument('--multi_gpu', default=True)
args = parser.parse_args()
# set arguments
l_r = args.l_r
batch_size = args.batch_size
pickle_dir = args.pickle_dir
max_seq = args.max_seq
epochs = args.epochs
is_reuse = args.is_reuse
load_path = args.load_path
save_path = args.save_path
multi_gpu = args.multi_gpu
# load data
dataset = Data('dataset/processed')
print(dataset)
# load model
learning_rate = callback.CustomSchedule(par.embedding_dim) if l_r is None else l_r
opt = Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
# define model
mt = MusicTransformer(
embedding_dim=256,
vocab_size=par.vocab_size,
num_layer=6,
max_seq=max_seq,
dropout=0.2,
debug=False, loader_path=load_path)
mt.compile(optimizer=opt, loss=callback.transformer_dist_train_loss)
# define tensorboard writer
current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
train_log_dir = 'logs/gradient_tape/'+current_time+'/train'
eval_log_dir = 'logs/gradient_tape/'+current_time+'/eval'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
eval_summary_writer = tf.summary.create_file_writer(eval_log_dir)
# Train Start
idx = 0
for e in range(epochs):
mt.reset_metrics()
for b in range(len(dataset.files) // batch_size):
try:
batch_x, batch_y = dataset.seq2seq_batch(batch_size, max_seq)
except:
continue
result_metrics = mt.train_on_batch(batch_x, batch_y)
if b % 100 == 0:
eval_x, eval_y = dataset.seq2seq_batch(batch_size, max_seq, 'eval')
eval_result_metrics, weights = mt.evaluate(eval_x, eval_y)
mt.save(save_path)
with train_summary_writer.as_default():
tf.summary.scalar('loss', result_metrics[0], step=idx)
tf.summary.scalar('accuracy', result_metrics[1], step=idx)
for i, weight in enumerate(weights):
with tf.name_scope("layer_%d" % i):
with tf.name_scope("_w0"):
utils.attention_image_summary(weight[0])
with tf.name_scope("_w1"):
utils.attention_image_summary(weight[1])
with eval_summary_writer.as_default():
tf.summary.scalar('loss', eval_result_metrics[0], step=idx)
tf.summary.scalar('accuracy', eval_result_metrics[1], step=idx)
idx += 1
print('\n====================================================')
print('Epoch/Batch: {}/{}'.format(e, b))
print('Train >>>> Loss: {:6.6}, Accuracy: {}'.format(result_metrics[0], result_metrics[1]))
print('Eval >>>> Loss: {:6.6}, Accuracy: {}'.format(eval_result_metrics[0], eval_result_metrics[1]))
|
py | 1a43398b8905afc68ba608ab0f675a727275ebfc | # coding: utf-8
import pprint
import re
import six
class ListApisBindedToSignatureKeyV2Request:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'sign_id': 'str',
'env_id': 'str',
'api_id': 'str',
'api_name': 'str',
'group_id': 'str',
'offset': 'int',
'limit': 'int'
}
attribute_map = {
'instance_id': 'instance_id',
'sign_id': 'sign_id',
'env_id': 'env_id',
'api_id': 'api_id',
'api_name': 'api_name',
'group_id': 'group_id',
'offset': 'offset',
'limit': 'limit'
}
def __init__(self, instance_id=None, sign_id=None, env_id=None, api_id=None, api_name=None, group_id=None, offset=None, limit=None):
"""ListApisBindedToSignatureKeyV2Request - a model defined in huaweicloud sdk"""
self._instance_id = None
self._sign_id = None
self._env_id = None
self._api_id = None
self._api_name = None
self._group_id = None
self._offset = None
self._limit = None
self.discriminator = None
self.instance_id = instance_id
self.sign_id = sign_id
if env_id is not None:
self.env_id = env_id
if api_id is not None:
self.api_id = api_id
if api_name is not None:
self.api_name = api_name
if group_id is not None:
self.group_id = group_id
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
@property
def instance_id(self):
"""Gets the instance_id of this ListApisBindedToSignatureKeyV2Request.
:return: The instance_id of this ListApisBindedToSignatureKeyV2Request.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListApisBindedToSignatureKeyV2Request.
:param instance_id: The instance_id of this ListApisBindedToSignatureKeyV2Request.
:type: str
"""
self._instance_id = instance_id
@property
def sign_id(self):
"""Gets the sign_id of this ListApisBindedToSignatureKeyV2Request.
:return: The sign_id of this ListApisBindedToSignatureKeyV2Request.
:rtype: str
"""
return self._sign_id
@sign_id.setter
def sign_id(self, sign_id):
"""Sets the sign_id of this ListApisBindedToSignatureKeyV2Request.
:param sign_id: The sign_id of this ListApisBindedToSignatureKeyV2Request.
:type: str
"""
self._sign_id = sign_id
@property
def env_id(self):
"""Gets the env_id of this ListApisBindedToSignatureKeyV2Request.
:return: The env_id of this ListApisBindedToSignatureKeyV2Request.
:rtype: str
"""
return self._env_id
@env_id.setter
def env_id(self, env_id):
"""Sets the env_id of this ListApisBindedToSignatureKeyV2Request.
:param env_id: The env_id of this ListApisBindedToSignatureKeyV2Request.
:type: str
"""
self._env_id = env_id
@property
def api_id(self):
"""Gets the api_id of this ListApisBindedToSignatureKeyV2Request.
:return: The api_id of this ListApisBindedToSignatureKeyV2Request.
:rtype: str
"""
return self._api_id
@api_id.setter
def api_id(self, api_id):
"""Sets the api_id of this ListApisBindedToSignatureKeyV2Request.
:param api_id: The api_id of this ListApisBindedToSignatureKeyV2Request.
:type: str
"""
self._api_id = api_id
@property
def api_name(self):
"""Gets the api_name of this ListApisBindedToSignatureKeyV2Request.
:return: The api_name of this ListApisBindedToSignatureKeyV2Request.
:rtype: str
"""
return self._api_name
@api_name.setter
def api_name(self, api_name):
"""Sets the api_name of this ListApisBindedToSignatureKeyV2Request.
:param api_name: The api_name of this ListApisBindedToSignatureKeyV2Request.
:type: str
"""
self._api_name = api_name
@property
def group_id(self):
"""Gets the group_id of this ListApisBindedToSignatureKeyV2Request.
:return: The group_id of this ListApisBindedToSignatureKeyV2Request.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this ListApisBindedToSignatureKeyV2Request.
:param group_id: The group_id of this ListApisBindedToSignatureKeyV2Request.
:type: str
"""
self._group_id = group_id
@property
def offset(self):
"""Gets the offset of this ListApisBindedToSignatureKeyV2Request.
:return: The offset of this ListApisBindedToSignatureKeyV2Request.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListApisBindedToSignatureKeyV2Request.
:param offset: The offset of this ListApisBindedToSignatureKeyV2Request.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListApisBindedToSignatureKeyV2Request.
:return: The limit of this ListApisBindedToSignatureKeyV2Request.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListApisBindedToSignatureKeyV2Request.
:param limit: The limit of this ListApisBindedToSignatureKeyV2Request.
:type: int
"""
self._limit = limit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListApisBindedToSignatureKeyV2Request):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a433b17c5ad2b722d7ff2c0b3e68e7f28986014 | import datetime
from discord import utils
from discord.ext import commands
class VocalSalonSystem(commands.Cog):
""" VocalSalonSystem() -> Represent the creation of vocal custom with anyone ! """
def __init__(self,bot):
self.bot = bot
async def create_vocal(self,database,guild,member):
""" create_vocal() -> Create a channel when the member as joined "Crée un salon" """
category = utils.get(guild.categories,id=int(database["category_id"]))
# Create and get the new vocal channel
new_channel = await guild.create_voice_channel(f"{member.name}'s Channel.",bitrate=64000,category=category)
# Log
print(f"[{datetime.datetime.today().date()}] L'utilisateur {member.name} à crée un salon dans {guild.name} !")
        # Position the new channel at the bottom of the category, then move the member into it
await new_channel.edit(position=len(category.voice_channels)+1)
await member.move_to(new_channel)
async def delete_vocal(self,before,member):
""" delete_vocal() -> Delete a channel when the member as leave your channel """
# If 0 as in channel
if before.channel is not None:
if len(before.channel.members) == 0:
# Log
print(f"[{datetime.datetime.today().date()}] Le salon de {member.name} à été supprimé dans {member.guild.name} !")
return await before.channel.delete()
@commands.Cog.listener()
async def on_voice_state_update(self,member,before,after):
for database in self.bot.guilds_data[str(member.guild.id)]["channels"]:
if database["function"].count("create_private_vocal") == 1:
if after.channel is not None:
if int(after.channel.id) == int(database["channel_id"]):
return await self.create_vocal(database,member.guild,member)
if after.channel is None:
if int(before.channel.id) != int(database["channel_id"]):
return await self.delete_vocal(before,member)
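# A cog like this is normally registered from a setup hook, e.g.
#   def setup(bot):
#       bot.add_cog(VocalSalonSystem(bot))
# Registration is assumed to happen elsewhere; it is not shown in this file.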
|
py | 1a433b707661ce926acf1ea4c3ce24cdc5136851 | from django.shortcuts import render, get_object_or_404, redirect
from django.core.paginator import Paginator
from .choices import gender_choices, age_choices, size_choices
from .logic.pets_logic import delete_pet
from .models import Pet
def index(request):
queryset_list = Pet.objects.order_by(
'-list_date').filter(is_published=True)
# Keywords
if 'keywords' in request.GET:
keywords = request.GET['keywords']
if keywords:
queryset_list = queryset_list.filter(
description__icontains=keywords)
# City
if 'city' in request.GET:
city = request.GET['city']
if city:
queryset_list = queryset_list.filter(city__iexact=city)
# Gender
if 'gender' in request.GET:
gender = request.GET['gender']
if gender:
queryset_list = queryset_list.filter(gender__iexact=gender)
# Age
if 'age' in request.GET:
age = request.GET['age']
if age:
queryset_list = queryset_list.filter(age__lte=age)
# Size
if 'size' in request.GET:
size = request.GET['size']
if size:
queryset_list = queryset_list.filter(size__iexact=size)
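    # Example of a (hypothetical) query string covered by the filters above:
    #   /?keywords=friendly&city=Bogota&gender=Male&age=3&size=Small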
paginator = Paginator(queryset_list, 6)
page = request.GET.get('page')
paged_pets = paginator.get_page(page)
context = {
'pets': paged_pets,
'gender_choices': gender_choices,
'age_choices': age_choices,
'size_choices': size_choices,
'values': request.GET
}
return render(request, 'pets/pets.html', context)
def pet(request, pet_id):
if request.GET.get('borrarpet_'+str(pet_id)):
delete_pet(pet_id)
return redirect("/")
else:
pet = get_object_or_404(Pet, pk=pet_id)
context = {
'pet': pet
}
return render(request, 'pets/pet.html', context)
|
py | 1a433b83869d5164e1c86104cc716fcaa87db39b | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for exploration-related statistics."""
__author__ = 'Sean Lip'
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_domain
from core.domain import stats_jobs
from core.platform import models
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
import feconf
IMPROVE_TYPE_DEFAULT = 'default'
IMPROVE_TYPE_INCOMPLETE = 'incomplete'
def get_top_unresolved_answers_for_default_rule(exploration_id, state_name):
return {
answer: count for (answer, count) in
stats_domain.StateRuleAnswerLog.get(
exploration_id, state_name, exp_domain.DEFAULT_RULESPEC_STR
).get_top_answers(3)
}
def get_state_rules_stats(exploration_id, state_name):
"""Gets statistics for the answer groups and rules of this state.
Returns:
A dict, keyed by the string '{HANDLER_NAME}.{RULE_STR}', whose
values are the corresponding stats_domain.StateRuleAnswerLog
instances.
"""
exploration = exp_services.get_exploration_by_id(exploration_id)
state = exploration.states[state_name]
# TODO(bhenning): Everything is handler name submit; therefore, it is
# pointless and should be removed.
_OLD_SUBMIT_HANDLER_NAME = 'submit'
rule_keys = []
for group in state.interaction.answer_groups:
for rule in group.rule_specs:
rule_keys.append((
_OLD_SUBMIT_HANDLER_NAME, rule.stringify_classified_rule()))
if state.interaction.default_outcome:
rule_keys.append((
_OLD_SUBMIT_HANDLER_NAME, exp_domain.DEFAULT_RULESPEC_STR))
answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
exploration_id, [{
'state_name': state_name,
'rule_str': rule_key[1]
} for rule_key in rule_keys])
results = {}
for ind, answer_log in enumerate(answer_logs):
results['.'.join(rule_keys[ind])] = {
'answers': answer_log.get_top_answers(5),
'rule_hits': answer_log.total_answer_count
}
return results
def get_state_improvements(exploration_id, exploration_version):
"""Returns a list of dicts, each representing a suggestion for improvement
to a particular state.
"""
ranked_states = []
exploration = exp_services.get_exploration_by_id(exploration_id)
state_names = exploration.states.keys()
default_rule_answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
exploration_id, [{
'state_name': state_name,
'rule_str': exp_domain.DEFAULT_RULESPEC_STR
} for state_name in state_names])
statistics = stats_jobs.StatisticsAggregator.get_statistics(
exploration_id, exploration_version)
state_hit_counts = statistics['state_hit_counts']
for ind, state_name in enumerate(state_names):
total_entry_count = 0
no_answer_submitted_count = 0
if state_name in state_hit_counts:
total_entry_count = (
state_hit_counts[state_name]['total_entry_count'])
no_answer_submitted_count = state_hit_counts[state_name].get(
'no_answer_count', 0)
if total_entry_count == 0:
continue
threshold = 0.2 * total_entry_count
default_rule_answer_log = default_rule_answer_logs[ind]
default_count = default_rule_answer_log.total_answer_count
eligible_flags = []
state = exploration.states[state_name]
if (default_count > threshold and
state.interaction.default_outcome is not None and
state.interaction.default_outcome.dest == state_name):
eligible_flags.append({
'rank': default_count,
'improve_type': IMPROVE_TYPE_DEFAULT})
if no_answer_submitted_count > threshold:
eligible_flags.append({
'rank': no_answer_submitted_count,
'improve_type': IMPROVE_TYPE_INCOMPLETE})
if eligible_flags:
eligible_flags = sorted(
eligible_flags, key=lambda flag: flag['rank'], reverse=True)
ranked_states.append({
'rank': eligible_flags[0]['rank'],
'state_name': state_name,
'type': eligible_flags[0]['improve_type'],
})
return sorted(
[state for state in ranked_states if state['rank'] != 0],
key=lambda x: -x['rank'])
def get_versions_for_exploration_stats(exploration_id):
"""Returns list of versions for this exploration."""
return stats_models.ExplorationAnnotationsModel.get_versions(
exploration_id)
def get_exploration_stats(exploration_id, exploration_version):
"""Returns a dict with state statistics for the given exploration id.
Note that exploration_version should be a string.
"""
exploration = exp_services.get_exploration_by_id(exploration_id)
exp_stats = stats_jobs.StatisticsAggregator.get_statistics(
exploration_id, exploration_version)
last_updated = exp_stats['last_updated']
state_hit_counts = exp_stats['state_hit_counts']
return {
'improvements': get_state_improvements(
exploration_id, exploration_version),
'last_updated': last_updated,
'num_completions': exp_stats['complete_exploration_count'],
'num_starts': exp_stats['start_exploration_count'],
'state_stats': {
state_name: {
'name': state_name,
'firstEntryCount': (
state_hit_counts[state_name]['first_entry_count']
if state_name in state_hit_counts else 0),
'totalEntryCount': (
state_hit_counts[state_name]['total_entry_count']
if state_name in state_hit_counts else 0),
} for state_name in exploration.states
},
}
|
py | 1a433b88809da6b95cbafb6574d53a7c001a9942 | import json
import uuid
from pathlib import Path
def load_json_template(name):
script_dir = Path(__file__).parent
json_path = (script_dir / "json" / f"{name}.json").resolve()
with open(json_path, 'r') as json_file:
template = json_file.read()
return json.loads(template)
def create_minio_connection(address, key, secret_key):
obj = load_json_template("minio_connection")
obj["url"] = address
obj["key"] = key
obj["secretKey"] = secret_key
return obj
def create_minio_input(object_name, bucket_name, minio_connection):
obj = load_json_template("minio_input")
obj["objectName"] = object_name
obj["bucketName"] = bucket_name
obj["minIOConnection"] = minio_connection
return obj
def create_binder_execution(input_list, memory=1000):
obj = load_json_template("binder_execution")
obj["executionIdentifier"] = str(uuid.uuid4())
for requirement in obj["requirements"]:
if requirement["type"] == "ConfigurationRequirementRelationalInput":
requirement["settings"] = input_list
obj["memory"] = str(memory)
return obj
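# Minimal composition sketch (hypothetical endpoint, credentials and object names),
# showing how the helpers above are meant to fit together:
#   connection = create_minio_connection("http://localhost:9000", "accessKey", "secretKey")
#   source = create_minio_input("table.csv", "datasets", connection)
#   execution = create_binder_execution([source], memory=2000)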
def convert_minio_input_to_execution(obj_minio_input):
exec_minio_input = load_json_template("execution_minio_input")
exec_minio_connection = load_json_template("execution_minio_connection")
obj_minio_connection = obj_minio_input["minIOConnection"]
exec_minio_connection["url"] = obj_minio_connection["url"]
exec_minio_connection["key"] = obj_minio_connection["key"]
exec_minio_connection["secretKey"] = obj_minio_connection["secretKey"]
exec_minio_input["object"] = obj_minio_input["objectName"]
exec_minio_input["bucket"] = obj_minio_input["bucketName"]
exec_minio_input["minIOConnection"] = exec_minio_connection
return exec_minio_input |
py | 1a433b9a28fa01c2e81833df084b4c3ae7304f66 | import re
import sys
from ..specfile.helpers import detect_specfile, get_source_urls, detect_github_tag_prefix, get_current_version, get_url
from urllib.parse import urlparse
from typing import Optional
import requests
RE_GITHUB_PATH_REPO = re.compile('^/([^/]+/[^/]+)/?')
RE_GIT_COMMIT = re.compile('^[a-f0-9]{40}$')
def detect_previous_version(changes):
for line in changes:
previous_version_match = re.match('^- +(?:version )?update(?: to)?(?: version)? ([0-9.]+)', line, re.IGNORECASE)
if previous_version_match:
previous_version = previous_version_match[1]
break
else:
sys.exit("Could not determine the last mentioned version from the changes file.")
return previous_version
def get_changelog_from_github(previous_version: str, current_version: Optional[str] = None) -> dict:
"""
First, get the GitHub URL by interpreting the Source tags and the URL tag.
Then, detect the tag-prefix.
At the end, download the diff.
"""
specfilename = detect_specfile()
if not current_version:
current_version = get_current_version(specfilename=specfilename)
urls = get_source_urls(specfilename=specfilename)
for url in urls:
parsed = urlparse(url)
if parsed.hostname == 'github.com' and 'archive' in parsed.path:
repo_path = RE_GITHUB_PATH_REPO.match(parsed.path).group(1)
tag_prefix = detect_github_tag_prefix(specfilename=specfilename)
break
else:
url = get_url(specfilename=specfilename)
parsed = urlparse(url)
if parsed.hostname == 'github.com':
repo_path = RE_GITHUB_PATH_REPO.match(parsed.path).group(1)
tags = requests.get(f'https://api.github.com/repos/{repo_path}/tags')
tags.raise_for_status()
if tags.json()[0]['name'].startswith('v'):
tag_prefix = 'v'
else:
tag_prefix = ''
else:
            sys.exit('Could not find a GitHub URL in the Source tags or the URL tag.')
if not RE_GIT_COMMIT.match(current_version):
current_version = tag_prefix + current_version
url = f'https://api.github.com/repos/{repo_path}/compare/{tag_prefix}{previous_version}...{current_version}'
print(f'Downloading from: {url}', file=sys.stderr)
compare = requests.get(url)
compare.raise_for_status()
return compare.json()
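# Usage sketch (hypothetical version numbers); the GitHub compare API response carries
# the individual commits, so their messages can be summarised like this:
#   changelog = get_changelog_from_github("1.2.0", "1.3.0")
#   for c in changelog.get("commits", []):
#       print(c["commit"]["message"].splitlines()[0])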
|
py | 1a433bef64abecde07e23271a447f3ea63dd03f4 | # Created by Kelvin_Clark on 2/1/2022, 1:43 PM
from typing import List, Optional
from fastapi import APIRouter, Depends
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from starlette.status import HTTP_201_CREATED
from app.api.dependencies.oauth import get_admin_system_user, get_current_user
from app.data import get_sync_session, get_async_session
from app.data.schema.pydantic.project import ProjectOut, ProjectIn, ProjectSummary, ProjectStat
from app.data.schema.pydantic.user import UserOut
from app.data.usecases.getters.get_project import (get_latest_projects, get_projects_summary, get_projects,
get_project_stats)
from app.data.usecases.insert.insert_project import insert_project
router = APIRouter(prefix="/project", tags=["Projects"])
@router.post("/add", response_model=ProjectOut, status_code=HTTP_201_CREATED)
async def create_project(project: ProjectIn, session: Session = Depends(get_sync_session),
_: UserOut = Depends(get_admin_system_user)):
project = insert_project(session=session, project=project)
return project
@router.get("/", response_model=List[ProjectOut])
async def get_projects__(page_number: Optional[int] = 0, session: AsyncSession = Depends(get_async_session),
_: UserOut = Depends(get_current_user)):
projects = await get_projects(session=session, page_number=page_number)
return projects
@router.get("/latest", response_model=List[ProjectOut])
async def _get_latest_projects(count: Optional[int] = 5, _: UserOut = Depends(get_current_user),
session: AsyncSession = Depends(get_async_session)):
projects = await get_latest_projects(session=session, count=count)
return projects
@router.get("/stats", response_model=ProjectStat)
async def _get_project_stat(project_id: int, session: AsyncSession = Depends(get_async_session),
_: UserOut = Depends(get_current_user)):
project_stat = await get_project_stats(session=session, project_id=project_id)
return project_stat
@router.get("/summary", response_model=ProjectSummary)
async def _get_project_summary(_: UserOut = Depends(get_current_user),
session: AsyncSession = Depends(get_async_session)):
summary = await get_projects_summary(session=session)
return summary
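# Note: this module only declares the endpoints; the router is presumably mounted on the
# main FastAPI application elsewhere, e.g. app.include_router(router).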
|
py | 1a433c2509f8ab387689a6f1e06adece3f9d0142 | class Solution:
"""
@param A : a list of integers
@param target : an integer to be inserted
@return : an integer
"""
def searchInsert(self, A, target):
if not A:
return 0
lo, hi = 0, len(A)-1
while lo <= hi:
mid = lo + (hi-lo)//2
val = A[mid]
if val == target:
return mid
elif val < target:
lo = mid + 1
else:
hi = mid - 1
return lo |
py | 1a433dca6047611dca2a32251b66533ced83a015 | import os
import bpy
from .pbr_utils import PbrSettings
from . import pman
from . import operators
class PandaButtonsPanel:
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
COMPAT_ENGINES = {'PANDA'}
@classmethod
def poll(cls, context):
return context.scene.render.engine in cls.COMPAT_ENGINES
class PandaRender_PT_project(PandaButtonsPanel, bpy.types.Panel):
bl_label = "Project Settings"
bl_context = "render"
def draw_with_config(self, context, _config):
layout = self.layout
project_settings = context.scene.panda_project
layout.prop(project_settings, 'project_name')
layout.prop(project_settings, 'renderer')
layout.prop(project_settings, 'pbr_materials')
layout.prop(project_settings, 'python_binary')
layout.operator(operators.UpdateProject.bl_idname)
def draw_no_config(self, _context):
layout = self.layout
layout.label(text="No config file detected")
def draw(self, context):
confdir = os.path.dirname(bpy.data.filepath) if bpy.data.filepath else None
if pman.config_exists(confdir):
self.draw_with_config(context, pman.get_config(confdir))
else:
self.draw_no_config(context)
layout = self.layout
layout.operator(operators.CreateProject.bl_idname)
layout.operator(operators.SwitchProject.bl_idname)
class PandaRender_PT_build(PandaButtonsPanel, bpy.types.Panel):
bl_label = "Build Settings"
bl_context = "render"
@classmethod
def poll(cls, context):
confdir = os.path.dirname(bpy.data.filepath) if bpy.data.filepath else None
return PandaButtonsPanel.poll(context) and pman.config_exists(confdir)
def draw(self, context):
layout = self.layout
project_settings = context.scene.panda_project
layout.prop(project_settings, 'asset_dir')
layout.prop(project_settings, 'export_dir')
layout.operator(operators.BuildProject.bl_idname)
class PandaRender_PT_run(PandaButtonsPanel, bpy.types.Panel):
bl_label = "Run Settings"
bl_context = "render"
@classmethod
def poll(cls, context):
confdir = os.path.dirname(bpy.data.filepath) if bpy.data.filepath else None
return PandaButtonsPanel.poll(context) and pman.config_exists(confdir)
def draw(self, context):
layout = self.layout
project_settings = context.scene.panda_project
layout.prop(project_settings, 'auto_save')
layout.prop(project_settings, 'auto_build')
layout.operator(operators.RunProject.bl_idname)
class Panda_PT_context_material(PandaButtonsPanel, bpy.types.Panel):
bl_label = ""
bl_context = "material"
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
return (context.material or context.object) and PandaButtonsPanel.poll(context)
def draw(self, context):
layout = self.layout
mat = context.material
ob = context.object
slot = context.material_slot
space = context.space_data
is_sortable = len(ob.material_slots) > 1
if ob:
rows = 1
if is_sortable:
rows = 4
row = layout.row()
row.template_list("MATERIAL_UL_matslots", "", ob, "material_slots", ob, "active_material_index", rows=rows)
col = row.column(align=True)
col.operator("object.material_slot_add", icon='ZOOMIN', text="")
col.operator("object.material_slot_remove", icon='ZOOMOUT', text="")
col.menu("MATERIAL_MT_specials", icon='DOWNARROW_HLT', text="")
if is_sortable:
col.separator()
col.operator("object.material_slot_move", icon='TRIA_UP', text="").direction = 'UP'
col.operator("object.material_slot_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
if ob.mode == 'EDIT':
row = layout.row(align=True)
row.operator("object.material_slot_assign", text="Assign")
row.operator("object.material_slot_select", text="Select")
row.operator("object.material_slot_deselect", text="Deselect")
split = layout.split(percentage=0.65)
if ob:
split.template_ID(ob, "active_material", new="material.new")
row = split.row()
if slot:
row.prop(slot, "link", text="")
else:
row.label()
elif mat:
split.template_ID(space, "pin_id")
split.separator()
class PandaMaterial_PT_basic(PandaButtonsPanel, bpy.types.Panel):
bl_label = "Basic Material"
bl_context = "material"
@classmethod
def poll(cls, context):
return context.material and PandaButtonsPanel.poll(context)
def draw(self, context):
layout = self.layout
mat = context.material
layout.label(text="Diffuse:")
split = layout.split()
col = split.column()
col.prop(mat, "diffuse_color", text="")
col = split.column()
col.prop(mat, "diffuse_intensity", text="Intensity")
layout.label(text="Specular:")
split = layout.split()
col = split.column()
col.prop(mat, "specular_color", text="")
col = split.column()
col.prop(mat, "specular_intensity", text="Intensity")
layout.prop(mat, "specular_hardness")
layout.prop(mat, "emit", text="Emit")
layout.prop(mat, "ambient", text="Ambient")
class PandaCamera_PT_lens(PandaButtonsPanel, bpy.types.Panel):
bl_label = "Lens"
bl_context = "data"
@classmethod
def poll(cls, context):
return context.camera and PandaButtonsPanel.poll(context)
def draw(self, context):
layout = self.layout
camera = context.camera
layout.prop(camera, "type", text="")
if camera.type == "PERSP":
split = layout.split()
col = split.column()
col.prop(camera, "lens")
col = split.column()
col.prop(camera, "lens_unit", text="")
elif camera.type == "ORTHO":
layout.prop(camera, "ortho_scale")
else:
layout.label("Not supported")
class PandaPhysics_PT_add(PandaButtonsPanel, bpy.types.Panel):
bl_label = ""
bl_options = {'HIDE_HEADER'}
bl_context = "physics"
@classmethod
def poll(cls, context):
return PandaButtonsPanel.poll(context) and context.object
def draw(self, context):
layout = self.layout
obj = context.object
if obj.rigid_body:
layout.operator('rigidbody.object_remove', text="Remove Rigid Body Physics")
else:
layout.operator('rigidbody.object_add', text="Add Rigid Body Physics")
def get_panels():
panels = [
"DATA_PT_camera_display",
"DATA_PT_camera_safe_areas",
"DATA_PT_context_lamp",
"DATA_PT_lamp",
"DATA_PT_context_mesh",
"DATA_PT_normals",
"DATA_PT_texture_space",
"DATA_PT_vertex_groups",
"DATA_PT_shape_keys",
"DATA_PT_uv_texture",
"DATA_PT_vertex_colors",
"DATA_PT_customdata",
"WORLD_PT_preview",
"WORLD_PT_world",
"TEXTURE_PT_context_texture",
"TEXTURE_PT_preview",
"TEXTURE_PT_colors",
"TEXTURE_PT_image",
"TEXTURE_PT_image_sampling",
"TEXTURE_PT_image_mapping",
"TEXTURE_PT_mapping",
"TEXTURE_PT_influence",
"PHYSICS_PT_rigid_body",
"PHYSICS_PT_rigid_body_collisions",
]
return [getattr(bpy.types, p) for p in panels if hasattr(bpy.types, p)]
def register():
for panel in get_panels():
panel.COMPAT_ENGINES.add('PANDA')
if not hasattr(bpy.types.Material, 'pbr_export_settings'):
bpy.types.Material.pbr_export_settings = bpy.props.PointerProperty(type=PbrSettings)
def unregister():
for panel in get_panels():
if 'PANDA' in panel.COMPAT_ENGINES:
panel.COMPAT_ENGINES.remove('PANDA')
if hasattr(bpy.types.Material, 'pbr_export_settings'):
del bpy.types.Material.pbr_export_settings
|
py | 1a433e8c1877f5a37d7775f7537a76c657bccaf6 | # -*- coding: utf-8 -*-
info = {
"%spellout-cardinal": {
"0": "သုည;",
"1": "တစ်;",
"2": "နှစ်;",
"3": "သုံး;",
"4": "လေး;",
"5": "ငါး;",
"6": "ခြောက်;",
"7": "ခုနှစ်;",
"8": "ရှစ်;",
"9": "ကိုး;",
"10": "ဆယ်;",
"(11, 19)": "ဆယ့်[>>];",
"(20, 99)": "<<ဆယ်[>>];",
"100": "<<ရာ;",
"(101, 999)": "<<ရာ့[>>];",
"1000": "<<ထောင်;",
"(1001, 9999)": "<<ထောင့်[>>];",
"(10000, 99999)": "<<သောင်း[>>];",
"(100000, 999999)": "<<သိန်း[>>];",
"(1000000, 9999999)": "<<သန်း[>>];",
"(10000000, 99999999999999)": "<<ကုဋေ[>>];",
"(100000000000000, 999999999999999999)": "<<ကောဋိ[>>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-numbering": {
"(0, 'inf')": "=%spellout-cardinal=;"
},
"%spellout-numbering-year": {
"(0, 'inf')": "=%spellout-cardinal=;"
},
"%spellout-ordinal": {
"0": "=%spellout-cardinal=;",
"1": "ပထမ;",
"2": "ဒုတိယ;",
"3": "တတိယ;",
"4": "စတုတ္ထ;",
"5": "ပဉ္စမ;",
"6": "ဆဋ္ဌမ;",
"7": "သတ္တမ;",
"8": "အဋ္ဌမ;",
"9": "နဝမ;",
"10": "ဒသမ;",
"(11, 'inf')": "=%spellout-cardinal=;"
}
} |