the-stack_0_6961
from django.contrib import admin
from django.utils.html import format_html
from .models import File
@admin.register(File)
class FileAdmin(admin.ModelAdmin):
view_on_site = False
raw_id_fields = ('version',)
list_display = ('__str__', 'addon_slug', 'addon_guid')
search_fields = (
'^version__addon__guid',
'^version__addon__slug',
)
list_select_related = ('version__addon',)
readonly_fields = (
'id',
'created',
'file_download_url',
)
fieldsets = (
(
None,
{
'fields': (
'id',
'created',
'version',
'filename',
'size',
'hash',
'original_hash',
'status',
'file_download_url',
'manifest_version',
)
},
),
(
'Details',
{
'fields': ('cert_serial_num', 'original_status'),
},
),
(
'Flags',
{
'fields': (
'strict_compatibility',
'is_signed',
'is_experiment',
'is_mozilla_signed_extension',
)
},
),
)
def addon_slug(self, instance):
return instance.addon.slug
def addon_guid(self, instance):
return instance.addon.guid
def file_download_url(self, instance):
return format_html(
'<a href="{}">Download file</a>', instance.get_absolute_url(attachment=True)
)
file_download_url.short_description = 'Download this file'
file_download_url.allow_tags = True
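# Note: `allow_tags` was deprecated in Django 1.9 and removed in Django 2.0; format_html()
# already returns a safe string, so the line above is only needed on very old Django versions.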
the-stack_0_6963
from django.db import models
# Create your models here.
class HumenManage(models.Manager):
def create_girl(self,name):
res = Humen.objects.create(
name = name,
age = 18,
money = 1
)
return res
class Humen(models.Model):
name = models.CharField(
max_length=30,
unique=True
)
age = models.IntegerField(
default=1
)
money = models.IntegerField(
default=0
)
# define the objects attribute
# my_objects = models.Manager()
objects = models.Manager()
# instantiate a HumenManage object
new_objects = HumenManage()
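# Usage sketch for the custom manager (illustrative, not part of the original file;
# assumes migrations are applied and this runs in a Django shell):
#   girl = Humen.new_objects.create_girl("Alice")   # created with age=18, money=1
#   everyone = Humen.objects.all()                  # the default manager still works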
the-stack_0_6965
#!/usr/bin/env python3
#coding:utf8
from sanic.log import logger
# Global debug switch; this field is ignored automatically if the app is started via the sanic CLI
# Run from the command line:
# https://sanic.readthedocs.io/en/latest/sanic/deploying.html
# python3 -m sanic app.app --host=0.0.0.0 --port=9000 --worker=1 --debug
DEBUG_MODE=True
def create_app():
global DEBUG_MODE
import os
import logging
from sanic import Sanic
from sanic.response import json
from sanic.response import text
from api import blueprint as api
from api import views as api_views_base
app = Sanic()
app.blueprint(api)
if __name__ == "__main__":
loglevel = logging.DEBUG if DEBUG_MODE else logging.INFO
logger.setLevel(loglevel)
logger.debug("\tSTARTING DUMMY DEBUG MSG")
logger.info("\tSTARTING DUMMY INFO MSG")
logger.warn("\tSTARTING DUMMY WARN MSG")
logger.error("\tSTARTING DUMMY ERROR MSG")
logger.critical("STARTING DUMMY CRITICAL MSG")
for vclass in api_views_base:
v = vclass.as_view()
url = "/api/%s"%vclass.__url__.lstrip("/")
app.add_route(v, url)
for handler, (rule, router) in app.router.routes_names.items():
logger.info("Route: %s methods: %s name: %s"%(rule, '/'.join(list(router.methods)), router.name))
from base import request_middlewares, response_middlewares
logger.info("Resigtering request middlewares")
for ware in request_middlewares:
app.register_middleware(ware, attach_to="request")
logger.info("Resigtering response middlewares")
for ware in response_middlewares:
app.register_middleware(ware, attach_to="response")
logger.info("Register static dir to %s"%(os.path.realpath("./static")))
app.static('/static', './static')
return app
app = create_app()
if __name__ == "__main__":
app.run(debug=DEBUG_MODE, host="0.0.0.0", port=9000, access_log=True)
the-stack_0_6967
import torch
from .Criterion import Criterion
from .utils import clear
class L1Cost(Criterion):
def __init__(self):
super(L1Cost, self).__init__()
self.output_tensor = torch.Tensor(1)
def updateOutput(self, input, target=None):
assert target is None
if self.output_tensor is None:
self.output_tensor = input.new(1)
self._backend.L1Cost_updateOutput(
self._backend.library_state,
input,
self.output_tensor
)
self.output = self.output_tensor[0]
return self.output
def updateGradInput(self, input, target=None):
assert target is None
self._backend.L1Cost_updateGradInput(
self._backend.library_state,
input,
None,
self.gradInput
)
return self.gradInput
def clearState(self):
clear(self, 'output_tensor')
return super(L1Cost, self).clearState()
the-stack_0_6968
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=18
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.rx(1.6147786239451536).on(input_qubit[3])) # number=5
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=8
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.rx(0.666017642561036).on(input_qubit[2])) # number=14
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=7
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=13
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy455.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
the-stack_0_6969
# matrix_determinant practice test: returns the determinant of a square matrix.
# For background on determinants, see: https://www.jianshu.com/p/0fd8ac349b5e
import tensorflow as tf
import numpy as np
# square matrix
data = np.mat([[11.1,12.1],
[21.1,22.1]]);
with tf.Session() as sess:
z = tf.matrix_determinant(data);
print(sess.run(z));
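# Quick check (illustrative note): for the 2x2 matrix above,
# det = 11.1 * 22.1 - 12.1 * 21.1 = 245.31 - 255.31 = -10.0 (up to floating-point error).
# tf.matrix_determinant / tf.Session are TensorFlow 1.x APIs; in TF 2.x the equivalent
# would be tf.linalg.det(data) executed eagerly.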
the-stack_0_6970
import json
source = "Île-de-France Mobilités 04/2019"
no_dataset_id = True
query = [('park_ride', 'yes')]
master_tags = ('amenity',)
max_distance = 800
max_request_boxes = 3
overpass_timeout = 550
def dataset(fileobj):
import codecs
source = json.load(codecs.getreader('utf-8-sig')(fileobj))
#source = json.load(fileobj)
data = []
for el in source:
lat = float(el['geometry']['coordinates'][1])
lon = float(el['geometry']['coordinates'][0])
tags = {
'amenity': 'parking',
'park_ride': 'yes',
'capacity': el['fields']['nb_pl_pr'],
'official_name': el['fields']['nom_pr']
}
data.append(SourcePoint(el['recordid'], lat, lon, tags))
return data
# Example line of the source JSON:
# {
# "datasetid": "parcs-relais-idf",
# "recordid": "fe9680496370980cb966e3bca09793b443915fd8",
# "fields": {
# "www": "www.saint-quentin-en-yvelines.fr",
# "nb_pl_elec": 0.0,
# "nb_pl_pr": 219.0,
# "moa_pr": "CASQY",
# "nom_lda": "Saint-Quentin-en-Yvelines (Gare)",
# "nom_comm": "Montigny-le-Bretonneux",
# "nb_pl_2rm": 0.0,
# "mes_date": "2014-03-24T01:00:00+01:00",
# "mes_annee": 2014.0,
# "nom_gare": "SAINT-QUENTIN-EN-YVELINES (SNCF)",
# "nb_pl_cov": 0.0,
# "label_pr": 1.0,
# "gestion_pr": "Q-Park",
# "nom_pr": "Jol Le Theule",
# "struct_pr": "ouvrage",
# "nom_zdl": "Saint-Quentin-en-Yvelines (Avenue des Prs)",
# "id_ref_lda": 63812.0,
# "id_pr": 35.0,
# "geo_shape": {
# "type": "MultiPoint",
# "coordinates": [
# [2.0439044, 48.78620339998614, 0.0]
# ]
# },
# "id_ref_zdl": 43249.0,
# "nb_pl_pmr": 6.0,
# "adres_pr": "10 Rue Jol le Theule, 78180 Montigny-le-Bretonneux",
# "geo_point_2d": [48.78620339998614, 2.0439044],
# "nb_pl_v": 0.0,
# "insee_t": "78423"
# },
# "geometry": {
# "type": "Point",
# "coordinates": [2.0439044, 48.78620339998614]
# },
# "record_timestamp": "2019-02-19T16:15:48+01:00"
# }
the-stack_0_6971
# cifar10_svm.py
# Support Vector Machine (SVM)
import time
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn import model_selection
from scipy.io import loadmat
from sklearn.svm import SVC, LinearSVC
from sklearn.metrics import hinge_loss
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
def run_svc(svc, title):
# Fit model
start = time.time()
svc.fit(x_train, y_train)
end = time.time()
print("\nModel took %0.2f seconds to train"%(end - start))
# Calculate predictions
start = time.time()
predicted = svc.predict(x_test)
end = time.time()
print("Model took %0.2f seconds to calculate predictions"%(end - start))
# Output results
print('\naccuracy', accuracy_score(y_test, predicted))
print('\nSVM Results for ' + title)
print('\nConfusion Matrix:')
print(confusion_matrix(y_test, predicted))
print('\nClassification Report:', classification_report(y_test, predicted))
#print("Hinge loss", hinge_loss(y_test, predicted))
# Load datasets from file
npzfile = np.load('cifar10.npz')
print(npzfile.files)
x_train = npzfile['x_train']
x_test = npzfile['x_test']
y_train = npzfile['y_train']
y_test = npzfile['y_test']
# Standardize the columns
x_train = x_train / 255
x_test = x_test / 255
# The model cannot deal with a 2D array, so we have to convert it to a 1D array.
x_train_flat = np.empty(shape=[x_train.shape[0]] + [3072], dtype='float32')
for i in range(x_train.shape[0]):
x_train_flat[i,:] = x_train[i,:,:].flatten()
# Flatten x_test array
x_test_flat = np.empty(shape=[x_test.shape[0]] + [3072], dtype='float32')
for i in range(x_test.shape[0]):
x_test_flat[i,:] = x_test[i,:,:].flatten()
x_train = x_train_flat
x_test = x_test_flat
y_train = y_train.ravel()
y_test = y_test.ravel()
print('\n', type(x_train))
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('y_train shape:', y_train.shape)
print('y_test shape:', y_test.shape)
# Linear
svc = SVC(kernel='linear', C=1)
run_svc(svc, 'Linear')
# Radial Basis Function (RBF)
svc = SVC(kernel='rbf', gamma=1, C=1)
run_svc(svc, 'Radial Basis Function (RBF)')
# Polynomial
svc = SVC(kernel='poly', degree=5, C=1)
run_svc(svc, 'Polynomial')
the-stack_0_6972
from infi.clickhouse_orm import migrations
from ee.clickhouse.sql.session_recording_events import SESSION_RECORDING_EVENTS_MATERIALIZED_COLUMN_COMMENTS_SQL
from posthog.client import sync_execute
from posthog.settings import CLICKHOUSE_CLUSTER, CLICKHOUSE_REPLICATION
def create_has_full_snapshot_materialized_column(database):
if CLICKHOUSE_REPLICATION:
sync_execute(
f"""
ALTER TABLE sharded_session_recording_events
ON CLUSTER '{CLICKHOUSE_CLUSTER}'
ADD COLUMN IF NOT EXISTS
has_full_snapshot Int8 MATERIALIZED JSONExtractBool(snapshot_data, 'has_full_snapshot')
"""
)
sync_execute(
f"""
ALTER TABLE session_recording_events
ON CLUSTER '{CLICKHOUSE_CLUSTER}'
ADD COLUMN IF NOT EXISTS
has_full_snapshot Int8
"""
)
else:
sync_execute(
f"""
ALTER TABLE session_recording_events
ON CLUSTER '{CLICKHOUSE_CLUSTER}'
ADD COLUMN IF NOT EXISTS
has_full_snapshot Int8 MATERIALIZED JSONExtractBool(snapshot_data, 'has_full_snapshot')
"""
)
sync_execute(SESSION_RECORDING_EVENTS_MATERIALIZED_COLUMN_COMMENTS_SQL())
operations = [migrations.RunPython(create_has_full_snapshot_materialized_column)]
the-stack_0_6975
# standard modules
import threading
import struct
import os
# blender modules
import bpy
import bmesh
# addon modules
import taichi as ti
import numpy as np
from .engine import mpm_solver
from . import types
from . import particles_io
from . import nodes
WARN_SIM_NODE = 'Node tree must not contain more than 1 "Simulation" node.'
WARN_NOT_SIM_NODE = 'Node tree does not have "Simulation" node.'
mpm_solver.USE_IN_BLENDER = True
IMPORT_NODES = (
'elements_particles_mesh_node',
'elements_particles_system_node'
)
# sim_node - simulation node
def get_cache_folder(operator, sim_node):
# particles socket
par_s = sim_node.outputs['Simulation Data']
cache_nodes = []
has_cache_node = False
if par_s.is_linked:
for link in par_s.links:
# disk cache node
disk = link.to_node
if disk.bl_idname == nodes.ElementsCacheNode.bl_idname:
cache_nodes.append(disk)
if not len(cache_nodes):
operator.is_finishing = True
operator.report(
{'WARNING'},
'Node tree does not have "Cache" node.'
)
return None, has_cache_node
elif len(cache_nodes) > 1:
operator.is_finishing = True
operator.report(
{'WARNING'},
'Node tree must not contain more than 1 "Cache" node.'
)
return None, has_cache_node
else:
cache_node = cache_nodes[0]
has_cache_node = True
folder_raw = cache_node.inputs['Folder'].get_value()[0]
folder = bpy.path.abspath(folder_raw)
return folder, has_cache_node
# get simulation nodes tree object
def get_tree_obj(node_tree):
# simulation nodes tree object
tree = types.Tree()
for node in node_tree.nodes:
if node.bl_idname == 'elements_simulation_node':
tree.sim_nds[node.name] = node
elif node.bl_idname in IMPORT_NODES:
if node.bl_idname == 'elements_particles_system_node':
import_type = 'PAR_SYS'
elif node.bl_idname == 'elements_particles_mesh_node':
import_type = 'PAR_MESH'
node.get_class()
tree.imp_nds[node.name] = node, import_type
elif node.bl_idname == 'elements_cache_node':
tree.cache_nds[node.name] = node
return tree
def create_emitter(operator, solv, emitter, vel):
# source object
src_obj = emitter.source_object
if not src_obj:
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter does not have a source object.'
)
return
obj_name = src_obj.obj_name
obj = bpy.data.objects.get(obj_name)
if not obj:
operator.is_finishing = True
if not obj_name:
operator.report(
{'WARNING'},
'Emitter source object not specified.'
)
else:
operator.report(
{'WARNING'},
'Cannot find emitter source object: "{}".'.format(obj_name)
)
return
if obj.type != 'MESH':
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter source object is not a mesh: "{}".'.format(obj.name)
)
return
if not emitter.material:
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter does not have a material.'
)
return
if not len(obj.data.polygons):
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter source object has no polygons: "{}"'.format(obj.name)
)
return
b_mesh = bmesh.new()
b_mesh.from_mesh(obj.data)
bmesh.ops.triangulate(b_mesh, faces=b_mesh.faces)
# emitter triangles
tris = []
for face in b_mesh.faces:
# triangle
tri = []
# v - bmesh vertex
for v in face.verts:
# final vertex coordinate
v_co = obj.matrix_world @ v.co
tri.extend(v_co)
tris.append(tri)
b_mesh.clear()
tris = np.array(tris, dtype=np.float32)
# material type
mat = emitter.material.typ
# taichi material
ti_mat = mpm_solver.MPMSolver.materials.get(mat, None)
if ti_mat is None:
assert False, mat
# emitter particles color
red = int(emitter.color[0].r * 255) << 16
green = int(emitter.color[0].g * 255) << 8
blue = int(emitter.color[0].b * 255)
color = red | green | blue
# add emitter
solv.add_mesh(
triangles=tris,
material=ti_mat,
color=color,
velocity=vel,
emmiter_id=operator.emitter_indices[emitter]
)
return True
class ELEMENTS_OT_SimulateParticles(bpy.types.Operator):
bl_idname = "elements.simulate_particles"
bl_label = "Simulate"
device: bpy.props.EnumProperty(
name='Device',
default='cpu',
items=(
('gpu', 'GPU', 'Run on GPU, automatically detect backend'),
('cuda', 'CUDA', 'Run on GPU, with the NVIDIA CUDA backend'),
('opengl', 'OpenGL', 'Run on GPU, with the OpenGL backend'),
('metal', 'Metal', 'Run on GPU, with the Apple Metal backend, if you are on macOS'),
('cpu', 'CPU', 'Run on CPU (default)')
)
)
device_memory_fraction: bpy.props.FloatProperty(
name='Device Memory',
default=50.0,
min=10.0,
max=100.0,
subtype='PERCENTAGE'
)
def __init__(self):
self.timer = None
self.thread = None
self.is_runnig = False
self.is_finishing = False
self.event_type = 'DEFAULT'
def create_emitters(self, frame):
for emitter in self.emitters:
if len(emitter.velocity) == 1:
vel = emitter.velocity[0]
else:
vel = emitter.velocity[frame]
if emitter.typ == 'EMITTER':
if emitter.emit_frame[0] == frame:
correct_emmiter = create_emitter(self, self.solv, emitter, vel)
if not correct_emmiter:
return self.cancel(bpy.context)
elif emitter.typ == 'INFLOW':
if type(emitter.enable) == float:
enable = emitter.enable
else:
if len(emitter.enable) == 1:
index = 0
else:
index = frame
enable = bool(int(round(emitter.enable[index], 0)))
if enable:
correct_emmiter = create_emitter(self, self.solv, emitter, vel)
if not correct_emmiter:
return self.cancel(bpy.context)
return True
def save_particles(self, frame, np_x, np_v, np_color, np_material, np_emitters):
if not os.path.exists(self.cache_folder):
os.makedirs(self.cache_folder)
# file name
fname = 'particles_{0:0>6}'.format(frame)
# particle file path
pars_fpath = os.path.join(self.cache_folder, fname)
# particles data
par_data = {
particles_io.POS: np_x,
particles_io.VEL: np_v,
particles_io.COL: np_color,
particles_io.MAT: np_material,
particles_io.EMT: np_emitters,
}
data = particles_io.write_pars_v1(par_data, pars_fpath, fname)
with open(pars_fpath + '.bin', 'wb') as file:
file.write(data)
write_obj = False
if write_obj:
with open(pars_fpath + '.obj', 'w') as f:
for i in range(np_x.shape[0]):
x = np_x[i]
print(f'v {x[0]} {x[1]} {x[2]}', file=f)
def run_sim(self):
# self.frame_end + 1 - this means include the last frame in the range
for frame in range(self.frame_start, self.frame_end + 1, 1):
if self.event_type == 'ESC':
print('STOP SIMULATION')
self.thread = None
self.is_finishing = True
self.cancel(bpy.context)
return
print('Frame: {}'.format(frame))
is_correct = self.create_emitters(frame)
if not is_correct is True:
return self.cancel(bpy.context)
# generate simulation state at t = 0
# particles
pars = self.solv.particle_info()
np_x = pars['position']
np_v = pars['velocity']
np_material = pars['material']
np_color = pars['color']
np_emitters = pars['emitter_ids']
# and then start time stepping
self.solv.step(1 / self.fps)
print(np_x)
self.save_particles(
frame,
np_x,
np_v,
np_color,
np_material,
np_emitters
)
def init_sim(self):
# simulation nodes
sim = []
for node in self.node_tree.nodes:
if node.bl_idname == 'elements_simulation_node':
sim.append(node)
if not len(sim):
self.report({'WARNING'}, WARN_NOT_SIM_NODE)
self.is_finishing = True
return self.cancel(bpy.context)
elif len(sim) > 1:
self.report({'WARNING'}, WARN_SIM_NODE)
self.is_finishing = True
return self.cancel(bpy.context)
else:
inputs = sim[0].inputs
self.scene.elements_frame_start = inputs['Frame Start'].get_value()[0]
self.scene.elements_frame_end = inputs['Frame End'].get_value()[0]
self.is_runnig = True
self.scene.elements_nodes.clear()
tree = get_tree_obj(self.node_tree)
# simulation nodes count
sim_nodes_cnt = len(tree.sim_nds)
if sim_nodes_cnt != 1:
if sim_nodes_cnt > 1:
self.report({'WARNING'}, WARN_SIM_NODE)
self.is_finishing = True
return
sim = list(tree.sim_nds.values())[0]
if not sim:
return self.cancel(bpy.context)
sim.get_class()
# simulation class
cls, _ = self.scene.elements_nodes[sim.name]
self.cache_folder, has_cache_node = get_cache_folder(self, sim)
if not has_cache_node:
return self.cancel(bpy.context)
if not self.cache_folder and has_cache_node:
self.report({'WARNING'}, 'Cache folder not specified')
self.is_finishing = True
return self.cancel(bpy.context)
self.frame_start = cls.frame_start[0]
self.frame_end = cls.frame_end[0]
self.fps = cls.fps[0]
# TODO: list is not implemented
if not cls.solver:
self.report(
{'WARNING'},
'Node tree does not have "MPM Solver" node.'
)
self.is_finishing = True
return {'FINISHED'}
res = cls.solver.resolution[0]
size = cls.solver.size[0]
ti.reset()
arch = getattr(ti, self.device)
mem = self.device_memory_fraction / 100
ti.init(arch=arch, device_memory_fraction=mem)
print(f"Creating simulation of res {res}, size {size}")
solv = mpm_solver.MPMSolver(
(res, res, res),
size=size,
unbounded=True,
use_emitter_id=True
)
solv.set_gravity(tuple(cls.gravity[0]))
self.emitters = cls.emitters
if not self.emitters:
self.report({'WARNING'}, 'Node tree does not have emitters.')
self.is_finishing = True
return self.cancel(bpy.context)
self.emitter_indices = {}
for index, emitter in enumerate(self.emitters):
self.emitter_indices[emitter] = index
if cls.colliders:
for collider in cls.colliders:
direct = collider.direction[0]
if not direct[0] and not direct[1] and not direct[2]:
direct = (0, 0, 1)
frict = collider.friction[0]
if frict < 0:
frict = 0
elif frict > 1:
frict = 1
solv.add_surface_collider(
tuple(collider.position[0]),
tuple(direct),
surface=collider.surface,
friction=frict
)
self.size = size
self.solv = solv
self.run_sim()
def launch_sim(self):
self.thread = threading.Thread(target=self.init_sim, args=())
self.thread.start()
def modal(self, context, event):
if event.type == 'ESC':
self.event_type = 'ESC'
if not self.is_runnig:
self.launch_sim()
if self.is_finishing:
self.cancel(context)
return {'FINISHED'}
return {'PASS_THROUGH'}
def execute(self, context):
self.node_tree = context.space_data.node_tree
self.scene = context.scene
context.window_manager.modal_handler_add(self)
win = context.window
self.timer = context.window_manager.event_timer_add(1.0, window=win)
return {'RUNNING_MODAL'}
def cancel(self, context):
if self.timer:
context.window_manager.event_timer_remove(self.timer)
self.timer = None
self.thread = None
self.is_finishing = True
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_props_dialog(self)
# operators draw function
def op_draw_func(self, context):
if context.space_data.node_tree:
if context.space_data.node_tree.bl_idname == 'elements_node_tree':
self.layout.operator('elements.simulate_particles')
self.layout.operator('elements.stable_render_animation')
class ELEMENTS_OT_StableRenderAnimation(bpy.types.Operator):
bl_idname = 'elements.stable_render_animation'
bl_label = 'Render'
bl_description = 'Stable Render Animation'
@classmethod
def poll(cls, context):
# space data
spc_data = context.space_data
if spc_data.node_tree:
return spc_data.node_tree.bl_idname == 'elements_node_tree'
def execute(self, context):
scn = context.scene
rend = scn.render
rend.image_settings.file_format = 'PNG'
# output folder
out = rend.filepath
for frm in range(scn.frame_start, scn.frame_end + 1):
file_name = '{0:0>4}.png'.format(frm)
file_path = os.path.join(bpy.path.abspath(out), file_name)
if rend.use_overwrite or not os.path.exists(file_path):
print('Render Frame:', frm)
scn.frame_set(frm)
bpy.ops.render.render(animation=False)
for image in bpy.data.images:
if image.type == 'RENDER_RESULT':
image.save_render(file_path, scene=scn)
bpy.data.images.remove(image)
return {'FINISHED'}
operator_classes = [
ELEMENTS_OT_SimulateParticles,
ELEMENTS_OT_StableRenderAnimation
]
def register():
for operator_class in operator_classes:
bpy.utils.register_class(operator_class)
def unregister():
for operator_class in reversed(operator_classes):
bpy.utils.unregister_class(operator_class)
the-stack_0_6976
import sys
import os
print('Welcome User ....')
print('yt : for browsing youtube\n'+
'news : for browsing Google News\n'+
#'run : for running codes on GeeksforGeeks Online IDE\n'+
'shuffle : to shuffle play the songs of your favorite artists\n'+
'play : to shuffle play songs from your playlist\n'+
'artist : to go to your favorite artist menu\n'+
'lyrics : to go to lyrics menu\n'+
'maps : to use Google Maps\n'+
'exit : to exit')
while True:
choice = input('...$ ')
if choice.lower() == 'yt':
os.system("start cmd /C python yt.py")
elif choice.lower() == 'news':
os.system("start cmd /K python driver.py")
elif choice.lower() == 'shuffle':
os.system("start cmd /C python favart.py")
elif choice.lower() == 'play':
os.system("start cmd /C python playlist.py")
elif choice.lower() == 'artist':
os.system("start cmd /C python artist.py")
elif choice.lower() == 'exit':
sys.exit('USER CHOSE TO QUIT')
elif choice.lower() == 'lyrics':
os.system("start cmd /K python lyrics.py")
elif choice.lower() == 'maps':
os.system("start cmd /C python maps.py") |
the-stack_0_6979 | """Testing Deep Learning with Graph Neural Networks."""
import logging
import logging.config
import os
import sys
import matplotlib.pyplot as plt # this is for making the graph
import networkx as nx
import numpy as np
# import pandas as pd
import pygraphviz as pgv # sudo apt install libgraphviz-dev
from gnn.lib.common import CommonHelpers
from gnn.lib.data import DataHelpers, DataObject
from networkx.drawing.nx_agraph import graphviz_layout, write_dot
logging.config.fileConfig(
"logging.conf",
defaults={"logfilename": "training.log"},
disable_existing_loggers=True, # this will prevent modules from writing to our logger
)
logger = logging.getLogger("train")
def main():
"""Testing Deep Learning with Graph Neural Networks."""
data_helper = DataHelpers()
data_obj = DataObject()
common_helper = CommonHelpers()
# load the data files in from datastore
workdir = os.getcwd() + "/dataset/"
logger.info("Using workdir: {}".format(workdir))
created = common_helper.make_directory(
workdir
) # create the working directory if needed
bucket_name = "backend-datastore"
prefix = "test1/" # testing with a top level folder in storage bucket
# common_helper.download_to_local(workdir, bucket_name, prefix) # Make a flag for pulling remote data
data_helper.gather_dotfiles(workdir)
if data_helper.dot_files is None:
logger.info("No data files found.")
sys.exit(1)
for dot in data_helper.dot_files:
logger.info("Processing dot file: {}".format(dot))
this_uuid = dot.split(".")
data_obj = DataObject()
data_obj.my_uuid = this_uuid[0]
##############
# pygraphviz #
##############
gv = data_helper.create_graph(
workdir, dot
) # write the terraform digraph to a dot file
############
# Networkx #
############
options = {"edgecolors": "tab:gray", "node_size": 800, "alpha": 0.9}
G = nx.DiGraph(
gv, name=data_obj.my_uuid, node_color="tab:red", **options
) # Networkx can accept the pygraphviz dot format
nodelist = list(G.nodes(data=True))
# print(nodelist)
print(
"+++++ Sorted nodelist +++++\n", sorted(d for n, d in G.degree())
) # sorted list
# logger.debug(nx.clustering(G)) # cluster list
data_obj.node_count = G.number_of_nodes()
logger.debug("Node count: {}".format(data_obj.node_count))
data_obj.edge_count = G.number_of_edges()
logger.debug("Edge count: {}".format(data_obj.edge_count))
data_obj.density = G.number_of_edges() / (
G.number_of_nodes() * (G.number_of_nodes() - 1)
)
logger.debug(
"Graph density: {}".format(data_obj.density)
) # d (0 ≤ d ≤ 1 ) tells how close a graph is to being "complete"
# diameter D is the largest distance between any two nodes in the graph
data_helper.data_obj_update(
workdir, data_obj
) # update the data file for this graph
##########################################
# convert nx digraph to pandas dataframe #
##########################################
# df = nx.to_pandas_dataframe(DG)
# df = pd.DataFrame.from_dict(dict(G.nodes(data=True)), orient="index")
# print("+++++ Pandas Dataframe Values +++++\n", df.values)
# move this to the draw function
# plt.savefig(workdir + data_obj.my_uuid + ".plt.png")
# plt.show() # use this in Jupyter
####################
# Adjacency Matrix #
####################
A = nx.adjacency_matrix(G) # requires scipy module
# print(am)
# print(A.todense())
# A.setdiag(A.diagonal() * 2)
print("+++++ Adjacency Matrix ++++\n", A)
print("+++++ Dense Adj Matrix +++++\n", A.todense())
####################
# Incidence Matrix #
####################
I = nx.incidence_matrix(G)
print("+++++ Incidence Matrix +++++\n", I)
print("+++++ Dense Incidence Matrix +++++\n", I.todense())
""" Degree Matrix
Adding the inverse of the degree matrix ensures inclusion of root node.
"""
# Laplacian Matrix (L = D - A)
# L = nx.laplacian_matrix(DG)
numpy_recarray = nx.to_numpy_matrix(
G
) # graph adjacency matrix as a NumPy matrix.
AA = np.matrix(numpy_recarray)
X = np.matrix([[i, -i] for i in range(AA.shape[0])], dtype=float)
print(A * X) # apply propagation rule
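# Illustrative sketch (not in the original script): the note above says adding the inverse
# degree matrix keeps the root node in the propagation. A common normalised rule is
# D^-1 * (A + I) * X, where I adds self-loops. The names below are assumptions:
#   A_hat = AA + np.identity(AA.shape[0])                      # adjacency with self-loops
#   D_inv = np.diag(1.0 / np.asarray(A_hat.sum(axis=1)).flatten())
#   print(D_inv @ A_hat @ X)                                   # normalised propagation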
if __name__ == "__main__":
main()
"""
__author__ = 'Franklin'
__version__ = '0.1'
__email__ = 'Franklin <[email protected]>'
"""
the-stack_0_6980
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
epsilon = 1e-8
def whitening(image):
"""Whitening
Normalises image to zero mean and unit variance
Parameters
----------
image : np.ndarray
image to be whitened
Returns
-------
np.ndarray
whitened image
"""
ret = (image - np.mean(image)) / (np.std(image) + epsilon)
return ret
def normalise_zero_one(image):
"""Image normalisation
Normalises image to fit [0, 1] range
Parameters
----------
image : np.ndarray
image to be normalised
Returns
-------
np.ndarray
normalised image
"""
image = image.astype(np.float32)
ret = (image - np.min(image))
ret /= (np.max(image) + epsilon)
return ret
def normalise_one_one(image):
"""Image normalisation
Normalises image to fit [-1, 1] range
Parameters
----------
image : np.ndarray
image to be normalised
Returns
-------
np.ndarray
normalised image
"""
ret = normalise_zero_one(image)
ret *= 2.
ret -= 1.
return ret
def resize_image_with_crop_or_pad(image, img_size=[64,64,64], **kwargs):
"""Image resizing
Resizes image by cropping or padding dimension to fit specified size.
Parameters
----------
image : np.ndarray
image to be resized
img_size : list or tuple
new image size
kwargs
additional arguments to be passed to np.pad
Returns
-------
np.ndarray
resized image
"""
assert isinstance(image, (np.ndarray, np.generic))
assert (image.ndim - 1 == len(img_size) or image.ndim == len(img_size)), \
'Example size doesnt fit image size'
# find image dimensionality
rank = len(img_size)
# create placeholders for new shape
from_indices = [[0, image.shape[dim]] for dim in range(rank)]
to_padding = [[0, 0] for dim in range(rank)]
slicer = [slice(None)] * rank
for i in range(rank):
# for each dimensions find whether it is supposed to be cropped or padded
if image.shape[i] < img_size[i]:
to_padding[i][0] = (img_size[i] - image.shape[i]) // 2
to_padding[i][1] = img_size[i] - image.shape[i] - to_padding[i][0]
else:
from_indices[i][0] = int(np.floor((image.shape[i] - img_size[i]) / 2.))
from_indices[i][1] = from_indices[i][0] + img_size[i]
# create slicer object to crop or leave each dimension
slicer[i] = slice(from_indices[i][0], from_indices[i][1])
# pad the cropped image to extend the missing dimension
return np.pad(image[slicer], to_padding, **kwargs)
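# Usage sketch (illustrative; the shapes below are assumptions, not from the original):
#   img = np.random.rand(80, 96, 64).astype(np.float32)
#   w = whitening(img)                                     # zero mean, unit variance
#   z = normalise_one_one(img)                             # values in [-1, 1]
#   r = resize_image_with_crop_or_pad(img, [64, 64, 64], mode="constant")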
the-stack_0_6981
"""
0123. Best Time to Buy and Sell Stock III
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most two transactions.
Note: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).
Example 1:
Input: [3,3,5,0,0,3,1,4]
Output: 6
Explanation: Buy on day 4 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
Then buy on day 7 (price = 1) and sell on day 8 (price = 4), profit = 4-1 = 3.
Example 2:
Input: [1,2,3,4,5]
Output: 4
Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.
Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are
engaging multiple transactions at the same time. You must sell before buying again.
Example 3:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
"""
from typing import List

class Solution:
def maxProfit(self, prices: List[int]):
buy_1 = buy_2 = float('inf')
pro_1 = pro_2 = 0
for p in prices:
buy_1 = min(buy_1, p)
pro_1 = max(pro_1, p - buy_1)
buy_2 = min(buy_2, p - pro_1)
pro_2 = max(pro_2, p - buy_2)
return pro_2
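# Quick check against the examples above (illustrative, not part of the original):
#   assert Solution().maxProfit([3, 3, 5, 0, 0, 3, 1, 4]) == 6
#   assert Solution().maxProfit([1, 2, 3, 4, 5]) == 4
#   assert Solution().maxProfit([7, 6, 4, 3, 1]) == 0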
the-stack_0_6982
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import numpy as np
from caffe2.python import utils, workspace
from caffe2.quantization.server import dnnlowp_pybind11
from hypothesis import assume
# This function asserts quantized results (output[1:]) are close enough to
# floating point results (output[0]).
# The error bound is derived based on assumption that there's no input
# quantization error.
def check_quantized_results_close(outputs, ref=None, symmetric=False, atol_scale=0.53):
if ref is None:
ref = outputs[0][0]
if ref.size == 0:
return
ref_min = min(np.min(ref), 0)
ref_max = max(np.max(ref), 0)
if symmetric:
ref_scale = 2 * max(abs(ref_max), abs(ref_min)) / 255
else:
ref_scale = (ref_max - ref_min) / 255
# should be divided by 2 in an exact math, but divide by 1.9 here
# considering finite precision in floating-point numbers
atol = ref_scale * atol_scale
for o in outputs[1:]:
np.testing.assert_allclose(o[0], outputs[0][0], atol=atol, rtol=0)
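# Worked example of the bound above (illustrative): for a reference range of [-1, 1] with
# symmetric=True, ref_scale = 2 * 1 / 255 ≈ 0.00784 and atol ≈ 0.00784 * 0.53 ≈ 0.00416.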
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
from itertools import tee
a, b = tee(iterable)
next(b, None)
return zip(a, b)
# Make sure we won't have overflows from vpmaddubsw instruction used in fbgemm
def avoid_vpmaddubsw_overflow_fc(
batch_size, input_channels, output_channels, X, X_min, X_max, W, W_min, W_max
):
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
if x0 * w0 + x1 * w1 < -(1 << 15):
w1_adjusted = (-(1 << 15) - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
elif x0 * w0 + x1 * w1 > (1 << 15) - 1:
w1_adjusted = ((1 << 15) - 1 - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
# Go through the same loop again to double check we don't have any overflow
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
assert -(1 << 15) <= x0 * w0 + x1 * w1 < (1 << 15)
# Make sure we won't have overflows from vpmaddubsw instruction used in
# fbgemm (FIXME: this assumes fbgemm is used only for NHWC and im2col
# is done in a way that input_channels is the fastest moving
# dimension).
#
# strides, pads, kernels, dilations, and sizes should be tuples with the same dimension
# (2 for 2D conv, 3 for 3D conv, and so on)
def avoid_vpmaddubsw_overflow(
strides,
pads,
kernels,
dilations,
sizes,
input_channels,
output_channels,
batch_size,
X,
X_min,
X_max,
W,
W_min,
W_max,
):
ndim = len(sizes)
dkernels = tuple((dilations[i] * (kernels[i] - 1) + 1) for i in range(ndim))
size_cols = tuple(
(sizes[i] + 2 * pads[i] - dkernels[i]) // strides[i] + 1 for i in range(ndim)
)
for out_idx in np.ndindex((batch_size,) + size_cols + (output_channels,)):
b = out_idx[0]
oc = out_idx[-1]
o_spatial = out_idx[1:-1]
for filter_idx1, filter_idx2 in pairwise(
np.ndindex(kernels + (input_channels,))
):
f0 = filter_idx1[:-1]
ic0 = filter_idx1[-1]
f1 = filter_idx2[:-1]
ic1 = filter_idx2[-1]
i0s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f0[i]
for i in range(ndim)
)
i1s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f1[i]
for i in range(ndim)
)
w0 = W[(oc,) + f0 + (ic0,)] - 128 - W_min
w1 = W[(oc,) + f1 + (ic1,)] - 128 - W_min
if all(0 <= i0s[i] < sizes[i] for i in range(ndim)):
x0 = X[(b,) + i0s + (ic0,)] - X_min
else:
# padding
x0 = -X_min
if all(0 <= i1s[i] < sizes[i] for i in range(ndim)):
x1 = X[(b,) + i1s + (ic1,)] - X_min
else:
# padding
x1 = -X_min
if x0 * w0 + x1 * w1 < -(1 << 15):
w1_adjusted = (-(1 << 15) - float(x0) * w0) / x1
W[(oc,) + f1 + (ic1,)] = int(w1_adjusted) + 128 + W_min
elif x0 * w0 + x1 * w1 >= (1 << 15):
w1_adjusted = ((1 << 15) - 1 - float(x0) * w0) / x1
W[(oc,) + f1 + (ic1,)] = int(w1_adjusted) + 128 + W_min
# Go through the same loop again to double check we don't have any overflow
for out_idx in np.ndindex((batch_size,) + size_cols + (output_channels,)):
b = out_idx[0]
oc = out_idx[-1]
o_spatial = out_idx[1:-1]
for filter_idx1, filter_idx2 in pairwise(
np.ndindex(kernels + (input_channels,))
):
f0 = filter_idx1[:-1]
ic0 = filter_idx1[-1]
f1 = filter_idx2[:-1]
ic1 = filter_idx2[-1]
i0s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f0[i]
for i in range(ndim)
)
i1s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f1[i]
for i in range(ndim)
)
w0 = W[(oc,) + f0 + (ic0,)] - 128 - W_min
w1 = W[(oc,) + f1 + (ic1,)] - 128 - W_min
if all(0 <= i0s[i] < sizes[i] for i in range(ndim)):
x0 = X[(b,) + i0s + (ic0,)] - X_min
else:
# padding
x0 = -X_min
if all(0 <= i1s[i] < sizes[i] for i in range(ndim)):
x1 = X[(b,) + i1s + (ic1,)] - X_min
else:
# padding
x1 = -X_min
assert -(1 << 15) <= x0 * w0 + x1 * w1 < (1 << 15)
# strides, pads, kernels, dilations, and sizes should be tuples with the same dimension
# (2 for 2D conv, 3 for 3D conv, and so on)
def generate_convnd_inputs(
strides,
pads,
kernels,
dilations,
sizes,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization=False,
preserve_activation_sparsity=False,
preserve_weight_sparsity=False,
):
dim = len(sizes)
assume(all(len(a) == dim for a in [strides, pads, kernels, dilations]))
assume(all(sizes[d] >= dilations[d] * (kernels[d] - 1) + 1 for d in range(dim)))
input_channels = input_channels_per_group * group
output_channels = output_channels_per_group * group
depthwise_convolution = (
input_channels_per_group == 1 and output_channels_per_group == 1
)
assert input_channels > 1
assert output_channels > 1
# X and W have scale 1, so exactly represented after quantization
X_min = 0 if preserve_activation_sparsity else -77
X_max = X_min + 255
X_range = X_max - X_min
if depthwise_convolution and groupwise_quantization:
# For depthwise convolution, it's not enough to set input channel 0
# to all X_min to avoid overflow from vpmaddubsw
X_range /= 2
X = np.round(
np.random.rand(*((batch_size,) + tuple(sizes) + (input_channels,))) * X_range
+ X_min
)
X = X.astype(np.float32)
if (
batch_size != 0
and depthwise_convolution
and groupwise_quantization
and not preserve_activation_sparsity
):
# Put X_max in a position not to be paired with any padded value.
# Put X_min to all positions that can be paired with the X_max value.
#
# This is an example of a pattern for 3x3x3
# . . . . .
# . . . . .
# . . . . .
# . . . . .
# . . . . min
#
# . . . . .
# . . . . min
# . min max min .
# min . . . .
# . . . . .
#
# min . . . .
# . . . . .
# . . . . .
# . . . . .
# . . . . .
# Make sure we have enough dimension
assert X.shape[1] >= 3
assert all(X.shape[d + 1] >= kernels[d] + 2 for d in range(1, dim))
# Take subtensor we want to manipulate
X_sub = X[(0,) * (X.ndim - dim - 1) + (slice(None),) * dim + (0,)]
# Put X_max in the middle of the subtensor
X_sub[(1,) + tuple(kernels[d] // 2 + 1 for d in range(1, dim))] = X_max
# Put X_min to the positions that can be paired with X_max across
# the slowest moving dimension
X_sub[[[0, 2]] + [[kernels[d] + 1, 0] for d in range(1, dim)]] = X_min
# Put X_min to other positions that can be paired with X_max
for d1 in range(1, dim):
X_sub[
[[1]]
+ [[kernels[d2] // 2 + 1] for d2 in range(1, d1)]
+ [[kernels[d1] // 2, kernels[d1] // 2 + 2]]
+ [[kernels[d2] + 1, 0] for d2 in range(d1 + 1, dim)]
] = X_min
else:
# input channel 0 is all X_min to avoid overflow from vpmaddubsw when
# multiplied with W_min and W_max
X[..., 0] = X_min
if batch_size != 0:
X[(0,) * (X.ndim - 1) + (1,)] = X_max
if preserve_weight_sparsity:
W_min = -128
W_max = 100
else:
W_min = -100
W_max = W_min + 255
W = np.round(
np.random.rand(
*((output_channels,) + tuple(kernels) + (input_channels_per_group,))
)
* (W_max - W_min)
+ W_min
)
W = W.astype(np.float32)
if groupwise_quantization:
for g in range(group):
W[(g * output_channels_per_group,) + (0,) * (W.ndim - 1)] = W_min
if depthwise_convolution:
W[(g * output_channels_per_group, 1) + (0,) * (W.ndim - 2)] = W_max
else:
assert output_channels_per_group > 1
W[(g * output_channels_per_group + 1,) + (0,) * (W.ndim - 1)] = W_max
# Make sure each group has different ranges to really see the effect
# of group-wise quantization.
if not preserve_weight_sparsity:
W[
g * output_channels_per_group : (g + 1) * output_channels_per_group,
] += g
else:
W[(0,) + (0,) * (W.ndim - 1)] = W_min
W[(1,) + (0,) * (W.ndim - 1)] = W_max
different_range_per_group = groupwise_quantization and not preserve_weight_sparsity
for g in range(group):
avoid_vpmaddubsw_overflow(
strides,
pads,
kernels,
dilations,
sizes,
input_channels_per_group,
output_channels_per_group,
batch_size,
X[..., g * input_channels_per_group : (g + 1) * input_channels_per_group],
X_min,
X_max,
W[g * output_channels_per_group : (g + 1) * output_channels_per_group,],
W_min + (g if different_range_per_group else 0),
W_max + (g if different_range_per_group else 0),
)
if order == "NCHW":
X = utils.NHWC2NCHW(X)
W = utils.NHWC2NCHW(W)
b = np.random.randn(output_channels).astype(np.float32)
return X, W, b
def generate_conv_inputs(
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization=False,
preserve_activation_sparsity=False,
preserve_weight_sparsity=False,
):
return generate_convnd_inputs(
(stride,) * 2,
(pad,) * 2,
(kernel,) * 2,
(dilation,) * 2,
(size,) * 2,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization,
preserve_activation_sparsity,
preserve_weight_sparsity,
)
def run_conv_or_fc(
test_case,
init_net,
net,
X,
W,
b,
op_type,
engine,
order,
gc,
outputs,
scale=None,
zero_point=None,
):
if order:
# Conv
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
else:
# FC
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
# We run DNNLOWP ops multiple times to test their first runs that
# do caching so exercises different code paths from the subsequent
# runs
# self.ws.run re-creates operator every time so this test covers
# cases when we have multiple nets sharing the same workspace
test_case.ws.create_blob("X").feed(X, device_option=gc)
test_case.ws.create_blob("W").feed(W, device_option=gc)
test_case.ws.create_blob("b").feed(b, device_option=gc)
if scale is not None and zero_point is not None:
with workspace.WorkspaceGuard(test_case.ws):
dnnlowp_pybind11.CreateInt8QuantParamsBlob(
"quant_param", float(scale), int(zero_point)
)
if init_net:
test_case.ws.run(init_net)
for i in range(1 if engine == "" else 2):
test_case.ws.run(net)
Y = test_case.ws.blobs["Y"].fetch()
if order:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine, order=order))
else:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine))
# workspace.CreateNet + workspace.RunNet reuses the same operator
if engine != "":
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
if scale is not None and zero_point is not None:
dnnlowp_pybind11.CreateInt8QuantParamsBlob(
"quant_param", float(scale), int(zero_point)
)
if init_net:
workspace.RunNetOnce(init_net)
workspace.CreateNet(net)
for i in range(2):
workspace.RunNet(net)
Y = workspace.FetchBlob("Y")
if order:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine, order=order))
else:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine))
the-stack_0_6985
#!/usr/bin/env python
# Assume python 2.6 or 2.7
import glob
import os
import subprocess
## Simple test runner.
# -- config -----------------------
# Absolute path pointing to your cloned git repo of https://github.com/KhronosGroup/glTF-Sample-Models
sample_model_dir = "/home/syoyo/work/glTF-Sample-Models"
base_model_dir = os.path.join(sample_model_dir, "2.0")
# Include `glTF-Draco` when you build `loader_example` with draco support.
kinds = [ "glTF", "glTF-Binary", "glTF-Embedded", "glTF-MaterialsCommon"]
# ---------------------------------
failed = []
success = []
def run(filename):
print("Testing: " + filename)
cmd = ["./loader_example", filename]
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
except:
print("Failed to execute: ", cmd)
raise
if p.returncode != 0:
failed.append(filename)
print(stdout)
print(stderr)
else:
success.append(filename)
def test():
for d in os.listdir(base_model_dir):
p = os.path.join(base_model_dir, d)
if os.path.isdir(p):
for k in kinds:
targetDir = os.path.join(p, k)
g = glob.glob(targetDir + "/*.gltf") + glob.glob(targetDir + "/*.glb")
for gltf in g:
run(gltf)
def main():
test()
print("Success : {0}".format(len(success)))
print("Failed : {0}".format(len(failed)))
for fail in failed:
print("FAIL: " + fail)
if __name__ == '__main__':
main()
the-stack_0_6986
import os
import time
import hashlib
import argparse
import pandas as pd
import subprocess
def unmount_SDs(sd_prefix):
'''
Unmount all disks named with matching prefix
Inputs:
sd_prefix: user-specified list of sd card prefixes to use
'''
cwd = os.getcwd()
filename = "SDlist.txt"
for i in range(len(sd_prefix)):
cmd = "diskutil list | grep " + sd_prefix[i] + " > " + cwd + "/" + filename
subprocess.call(cmd,shell=True) # get disk info for mounted SDs with specified prefix using diskutil
if os.stat(filename).st_size != 0: # trying to read an empty file will throw an error
lst = pd.read_csv(filename, header=None, delim_whitespace=True) # strip out disk name(s) (ie /dev/disk2) and associated SD name(s)
lst.columns = ["0", "format", "name", "size", "size-units", "disk"]
disks = lst["disk"].values
names = lst["name"].values
for i in range(len(disks)):
cmd = "diskutil unmountDisk /dev/" + disks[i][0:-2] # 'diskutil list' actually saves name like "disk2s2", so strip off last two characters
subprocess.call(cmd, shell=True)
cmd = "rm " + cwd + "/" + filename
subprocess.call(cmd,shell=True) # delete SD reference file when done
def getlocalfile_md5(fname):
'''
Calculate the MD5 hash of a file to be used to check successful transfer
Inputs:
fname: filename to check
'''
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def copyfile_local(fname, srcpath, dstpath, delete_choice):
'''
Copy a file from a disk to a desired local destination, confirm proper data transfer, then delete original if desired
Inputs:
fname: file to transfer
srcpath: the path to the location of the file to transfer
dstpath: the path to the desired copy destination location
delete_choice: boolean decision to delete or keep the file in its original location after copy
'''
if not fname.startswith("."):
fname_safe = fname.replace(" ", "_") # replace whitespace in filename with underscores, if there.
if fname_safe != fname:
cmd = "mv " + srcpath + "/'" + fname + "' " + srcpath + "/" + fname_safe
os.system(cmd)
fname = fname_safe
copied = dstpath + "/" + fname
cmd = "cp -p " + srcpath + "/" + fname + " " + dstpath
os.system(cmd)
md5local = getlocalfile_md5(srcpath + '/' + fname) # get hash for original file
md5sd = getlocalfile_md5(copied) # get hash for new local copy
if not (md5local == md5sd):
print("Oh no! Hash test failed for " + dstpath + "/" + fname + ". Trying again.") # if copy doesn't match original, try again
copyfile_local(fname, srcpath, dstpath, delete_choice)
else:
if delete_choice: # delete file from sd if user specified to
os.remove(srcpath + "/" + fname)
def transfer_folder_contents(dst_path, sd_src_path, delete_choice):
'''
Transfer the entire contents of a folder to local storage
Inputs:
dst_path: the path to the desired copy destination location
sd_src_path: the path to the disk location of the folder to transfer
delete_choice: boolean decision to delete or keep file(s) in their original location after copy
'''
if not os.path.isdir(dst_path):
os.makedirs(dst_path, mode=0o777) # make directory with folders inside for each disk (or subdirectory)
files = os.listdir(path=sd_src_path)
for file in files:
if not file.startswith("."): # ignore hidden files
if os.path.isdir(sd_src_path + "/" + file): #recursively copy nested folders
local_subpath = dst_path + "/" + file
sd_subpath = sd_src_path + "/" + file
transfer_folder_contents(local_subpath, sd_subpath, delete_choice)
else: # bottom of the line. Copy file
copyfile_local(file, sd_src_path, dst_path, delete_choice)
def get_disks(sd_prefix, sd_mount):
'''
Get list of disks matching prefix
Inputs:
sd_prefix: user-specified list of sd card prefixes to use
sd_mount: mount point for SD cards
Returns a list of disks matching prefix
'''
# Account for naming errors (Kitzes Lab convention)
if sd_prefix[0] == "MSD":
sd_prefix.extend(["MS", "MD", "DMS", "DSM", "SDM", "SMD"])
# Get list of disks matching prefix
disks = os.listdir(path=sd_mount) # SD cards mount to /Volumes on Mac
matching_disks = [disk for disk in disks if disk.startswith(tuple(sd_prefix))]
if args.local or args.globus:
print(" Transferring files from " + str(len(matching_disks)) + " disks:\n")
return matching_disks
def local_transfer(sd_prefix, sd_mount, local_path, delete_choice, reformat_choice, unmount_choice):
'''
Initiate a local transfer from all SD cards meeting specs
Inputs:
sd_prefix: user-specified list of sd card prefixes to use
sd_mount: mount point for SD cards
local_path: destination for local copy
delete_choice: boolean decision whether to delete file(s) in original location after copy
reformat_choice: boolean decision whether to reformat cards after copy
unmount_choice: boolean decision wether to unmount cards after copy
'''
# Get list of disks matching prefixes
matching_disks = get_disks(sd_prefix, sd_mount)
# Transfer contents of all matching disks
for disk in matching_disks:
folder_name = str(disk)
sd_fullpath = sd_mount + "/" + disk
local_fullpath = os.path.join(local_path, folder_name)
transfer_folder_contents(local_fullpath, sd_fullpath, delete_choice)
print(" Files from " + disk + " copied to " + local_fullpath + ".")
if reformat_choice:
reformat_SDs_FAT32(matching_disks, sd_mount)
if not reformat_choice and unmount_choice:
unmount_SDs(matching_disks)
def globus_upload(sd_p, sd_mount, upload_dir, delete_choice, reformat_choice):
'''
Initiate a Globus transfer from all SD cards meeting specs
Inputs:
sd_p: user-specified list of sd card prefixes to use
sd_mount: mount point for SD cards
upload_dir: upload destination in Globus filesystem
delete_choice: boolean decision whether to delete file(s) in original location after copy
reformat_choice: boolean decision whether to reformat cards after copy
'''
import globus_sdk
#only import if user needs - this will slow things down very slightly for Globus users, but save time for local users
CLIENT_ID = "" # app
MYENDPOINT_ID = "" # UUID
DTN_ID = "" # dtn
client = globus_sdk.NativeAppAuthClient(CLIENT_ID)
client.oauth2_start_flow(refresh_tokens=True)
authorize_url = client.oauth2_get_authorize_url()
print('Please go to this URL and login: {0}'.format(authorize_url))
get_input = getattr(__builtins__, 'raw_input', input) # get correct input() fn to be compatible with Python2 or 3
auth_code = get_input("Please enter the code you get after login here: ").strip()
token_response = client.oauth2_exchange_code_for_tokens(auth_code)
globus_auth_data = token_response.by_resource_server["auth.globus.org"]
globus_transfer_data = token_response.by_resource_server["transfer.api.globus.org"]
# most specifically, you want these tokens as strings
AUTH_TOKEN = globus_auth_data["access_token"]
TRANSFER_TOKEN = globus_transfer_data["access_token"]
# a GlobusAuthorizer is an auxiliary object we use to wrap the token. In more
# advanced scenarios, other types of GlobusAuthorizers give us expressive power
authorizer = globus_sdk.AccessTokenAuthorizer(TRANSFER_TOKEN)
tc = globus_sdk.TransferClient(authorizer=authorizer)
tdata = globus_sdk.TransferData(tc, MYENDPOINT_ID, DTN_ID, label="", sync_level="checksum",preserve_timestamp=True,verify_checksum=True)
upload_dir = "~/" + upload_dir #f"~/{upload_dir}"
tc.operation_mkdir(DTN_ID, path=upload_dir) # new directory in ~/ibwo for each SD
# you will error out if you specified a directory that already exists
# Get list of disks matching prefixes
matching_disks = get_disks(sd_p, sd_mount)
# Upload contents of all matching disks
for disk in matching_disks:
new_folder = upload_dir + "/" + str(disk)
sd_fullpath = sd_mount + "/" + str(disk)
tc.operation_mkdir(DTN_ID, path=new_folder) # new directory in indicated directory for each SD
files = os.listdir(path=sd_fullpath)
for file in files:
if not file.startswith("."): #ignore hidden files
if os.path.isdir(sd_fullpath + "/" + file): # recursively copy nested folders
tdata.add_item( sd_fullpath + "/" + file, new_folder + "/" + file, recursive=True)
else:
tdata.add_item(sd_fullpath + "/" + file, new_folder + "/" + file) # copy from SD to new Globus dir
transfer_result = tc.submit_transfer(tdata)
print("Globus task_id =", transfer_result["task_id"])
# not sure if it is safe to reformat right now, when globus transfer has been initiated but not necessarily completed.
# if(reformat_choice):
# reformat_SDs_FAT32(sd_prefix)
def reformat_SDs_FAT32(sd_prefix, sd_mount):
'''
Reformat disks matching prefix to FAT32 format (and delete all contents)
Inputs:
sd_prefix: user-specified list of sd card prefixes to use
sd_mount: mount point for SD cards
'''
print("\n Reformatting SD cards.\n---")
cwd = os.getcwd()
filename = "SDlist.txt"
for i in range(len(sd_prefix)):
cmd = "diskutil list | grep " + sd_prefix[i] + " > " + cwd + "/" + filename
subprocess.call(cmd,shell=True) # get disk info for mounted SDs with specified prefix using diskutil
if os.stat(filename).st_size != 0: # trying to read an empty file will throw an error
lst = pd.read_csv(filename, header=None, delim_whitespace=True) # strip out disk name(s) (ie /dev/disk2) and associated SD name(s)
lst.columns = ["0", "format", "name", "size", "size-units", "disk"]
disks = lst["disk"].values
names = lst["name"].values
for i in range(len(disks)): # reformat cards to clean FAT32 with original names
cmd = "diskutil eraseDisk FAT32 " + names[i] + " MBRFormat /dev/" + disks[i][0:-2] # 'diskutil list' actually saves name like "disk2s2", so strip off last two characters
subprocess.call(cmd, shell=True)
if not args.unmount:
cmd = "diskutil mountDisk /dev/" + disks[i][0:-2]
subprocess.call(cmd,shell=True)
print("---")
cmd = "rm " + cwd + "/" + filename
subprocess.call(cmd,shell=True) # delete SD reference file when done
if args.unmount:
matching_disks = get_disks(sd_prefix, sd_mount)
unmount_SDs(matching_disks)
###################################################################################### MAIN
start = time.time()
donemsg = 1
local = 1
parser = argparse.ArgumentParser(description="Transfer files from SD card(s) to local storage or Globus cloud storage, and/or delete data or reformat SD card(s).")
parser.add_argument("-p", "--prefix", nargs='+', required=True, help="Prefix(es) of all your SD cards' names. Enter multiple prefixes separated by spaces to indicate a range of prefixed names. [Required]")
parser.add_argument("-m", "--mountPath", default='/Volumes', help ="The path to where SD cards mount on this computer (defaults to Mac's mountpoint: /Volumes). [Optional]")
parser.add_argument("-l", "--local", help="New local directory name (with path) to save data to. [Required for local transfer]")
parser.add_argument("-g", "--globus", help="New directory name (with absolute path) in your Globus filesystem to upload data to.[Required for local Globus transfer]")
parser.add_argument("-d", "--delete", action='store_true', help="Delete files from SD cards after transfer and confirmation are complete. Files are only deleted if this flag is included. [Optional]")
parser.add_argument("-r", "--reformat", action='store_true', help="Reformat SD card to FAT32, maintaining its name. WARNING: all data will be deleted during reformat, even if you didn't specify the -d flag (defaults to not reformat). To reformat but not transfer any data, use -l 0 -g 0 -r. [Optional]")
parser.add_argument("-u", "--unmount", action='store_true', help="Unmount SD cards from your computer after done with local copy or reformat. Don't use this for Globus upload! [Optional]")
parser.add_argument("-y", "--yes", action='store_true', help="Include this flag if you want to force deletion or reformatting without typing Y in the menu [Optional]")
args = parser.parse_args()
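# Example invocations (illustrative; the script name and paths are assumptions):
#   python sd_transfer.py -p MSD -l ~/field_data/deployment_01 -d -u
#   python sd_transfer.py -p MSD -m /Volumes -r -y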
print(" SD prefix(es): ")
for i in args.prefix:
print(" " + i)
print(" SD mount path: " + args.mountPath)
# Print delete & reformatting message - make sure they're serious about deleting data off cards
if args.delete:
if not args.yes:
tmp = input("\n Please confirm (Y/N) that you want to delete all files from the SD cards after transfer is done:\n >>> ")
if tmp == "Y" or tmp == "y":
print(" Great! Just making sure.\n")
time.sleep(2)
else:
print(" Ok! Continuing with copy, but files will NOT be deleted.\n")
args.delete = False
time.sleep(2)
else:
print(" Deleting data after transfer complete.\n")
time.sleep(2)
if args.reformat:
sd_prefix = args.prefix
if sd_prefix[0] == "MSD":
sd_prefix.extend(["MDS", "DMS", "DSM", "SDM", "SMD"]) # account for naming errors
if not args.yes:
tmp = input("\n Please confirm (Y/N) that you want to reformat and delete all files from the SD cards after transfer is done (if any):\n >>> ")
if tmp == 'Y' or tmp == 'y':
print(" Great! Just making sure.\n")
time.sleep(2)
else:
print("Ok! Continuing with copy, but SD cards will NOT be reformatted.\n")
time.sleep(2)
args.reformat = False
else:
print(" Reformatting SD cards after transfer complete (if any).")
time.sleep(2)
if args.globus:
print(" Ignoring -r (reformat) flag - run again after Globus Upload is complete to ensure data isn't deleted before it istransferred.\n.")
if (not args.globus) and (not args.local): #initiate reformat if no transfer happening
        reformat_SDs_FAT32(sd_prefix, args.mountPath)
# Initiate local transfer
if args.local:
print(" Saving to local directory: " + args.local)
local_transfer(args.prefix, args.mountPath, args.local, args.delete, args.reformat, args.unmount)
# Initiate Globus transfer
if args.globus:
local = 0
print(" Uploading to directory " + args.globus + " on Globus.")
tmp = input('\n Please confirm (Y/N) that you want to begin a Globus transfer, and have already updated the python script to include your Globus IDs (see README)\n >>> ')
if tmp == "Y" or tmp == "y":
globus_upload(args.prefix, args.mountPath, args.globus, args.delete, args.reformat)
else:
donemsg = 0
print(" Exiting.")
# 'Peace out'
if donemsg:
if not local:
print("\n Globus transfer initiated.\n")
if args.local:
print("\n Done with local transfer! Executed in " + str(time.time()-start) + " seconds\n")
print("\n Done with reformatting!\n")
|
the-stack_0_6987 | import struct
from io import BytesIO
from ot_types import *
#inline below:
# import ot_table
# from ot_font import OTFont, TableRecord
# from ot_file import calcCheckSum
class Table_head:
_expectedTag = "head"
# head v1.0 format
_head_version = ">2H"
_head_version_size = struct.calcsize(_head_version)
_head_1_0_format = ">2H4s2L2H2q" + "4h" + "2H" + "3h"
""" Structure:
(big endian) >
majorVersion uint16 H
minorVersion uint16 H
fontRevision Fixed 4s
checkSumAdjustment uint32 L
magicNumber uint32 L
flags uint16 H
unitsPerEm uint16 H
created LONGDATETIME q
modified LONGDATETIME q
xMin int16 h
yMin int16 h
xMax int16 h
yMax int16 h
macStyle uint16 H
lowestRecPPEM uint16 H
fontDirectionHint int16 h
indexToLocFormat int16 h
glyphDataFormat int16 h
"""
_head_1_0_size = struct.calcsize(_head_1_0_format)
_head_1_x_checkSumAdjustment_offset = struct.calcsize(">2H4s")
_head_1_0_fields = (
"majorVersion",
"minorVersion",
"fontRevision",
"checkSumAdjustment",
"magicNumber",
"flags",
"unitsPerEm",
"created",
"modified",
"xMin",
"yMin",
"xMax",
"yMax",
"macStyle",
"lowestRecPPEM",
"fontDirectionHint",
"indexToLocFormat",
"glyphDataFormat"
)
_head_1_0_defaults = (
1, # majorVersion
0, # minorVersion
b'\x00\x00\x00\x00', # fontRevision
0, # checkSumAdjustment
0x5F0F3CF5, # magicNumber
0, # flags
2048, # unitsPerEm
0, # created
0, # modified
0, # xMin
0, # yMin
0, # xMax
0, # yMax
0, # macStyle
0, # lowestRecPPEM
0, # fontDirectionHint
0, # indexToLocFormat
0 # glyphDataFormat
)
def __init__(self):
self.tableTag = Tag(self._expectedTag)
@staticmethod
def createNew_head():
"""Creates a new version 1.0 hhea table with default values."""
head = Table_head()
for k, v in zip(head._head_1_0_fields, head._head_1_0_defaults):
if k != "fontRevision":
setattr(head, k, v)
head.fontRevision = Fixed(head._head_1_0_defaults[2])
return head
@staticmethod
def tryReadFromFile(parentFont, tableRecord):
"""Returns a Table_head constructed from data in fileBytes.
Exceptions may be raised if tableRecord.tableTag doesn't match,
or if tableRecord.offset or .length do not fit within the file.
"""
head = Table_head()
from ot_font import OTFont, TableRecord
if not (isinstance(parentFont, OTFont) and isinstance(tableRecord, TableRecord)):
raise Exception()
import ot_table
ot_table.ValidateTableTag(tableRecord, head._expectedTag)
head.parentFont = parentFont
head.tableRecord = tableRecord
# get file bytes, then validate offset/length are in file bounds
fileBytes = parentFont.fileBytes
offsetInFile = tableRecord.offset
ot_table.ValidateOffsetAndLength(
len(fileBytes), offsetInFile, tableRecord.length
)
# get the table bytes: since offset length are in bounds, can get the expected length
tableBytes = fileBytes[offsetInFile : offsetInFile + tableRecord.length]
# check the version
if len(tableBytes) < head._head_version_size:
raise OTCodecError("The table lenght is wrong: can't even read the version.")
vals = struct.unpack(head._head_version, tableBytes[:head._head_version_size])
head.majorVersion, head.minorVersion = vals
if head.majorVersion != 1:
raise OTCodecError(f"Unsupported table version: {hhea.majorVersion}.{hhea.minorVersion}")
if len(tableBytes) < head._head_1_0_size:
raise OTCodecError(f"Can't read the version {hhea.majorVersion}.{hhea.minorVersion} hhea table: the table is too short.")
# unpack
vals = struct.unpack(head._head_1_0_format, tableBytes)
head.fontRevision = Fixed(vals[2])
for k, v in zip(head._head_1_0_fields[3:], vals[3:]):
setattr(head, k, v)
# calculate checksum
# Note: a special calculation is required for the head table. We need
# to make sure to include pad bytes from the file.
padded_length = (tableRecord.length + 3) - (tableRecord.length + 3) % 4
tableBytes = fileBytes[offsetInFile : offsetInFile + padded_length]
head.calculatedCheckSum = head._calcHeadCheckSum(tableBytes)
# Calculating the checkSumAdjustment for the font is somewhat costly,
# so don't do it up front; leave it until it's needed.
# head.calculatedCheckSumAdjustment = head._calcCheckSumAdjustment(parentFont)
return head
# End of tryReadFromFile
@staticmethod
def _calcHeadCheckSum(headBytes:bytes):
"""Calculates a checksum for the head table based on the provided data.
Can be called for a head table read from a file or a new head table
created in memory. A version 1.x table is assumed. The length of the
data should be a multiple of four. If not, the checksum will be
calculated after padding with null bytes. If the data is read from a
file, you should include padding bytes from the file.
"""
assert isinstance(headBytes, (bytearray, bytes, memoryview))
assert len(headBytes) >= Table_head._head_1_0_size
"""
The 'head' table requires special handling for calculating a checksum. The
process also involves the head.checksumAdjustment field.
From OT spec (v1.8.3) font file regarding TableRecord.checkSum for 'head':
To calculate the checkSum for the 'head' table which itself includes the
checkSumAdjustment entry for the entire font, do the following:
1. Set the checkSumAdjustment to 0.
2. Calculate the checksum for all the tables including the 'head' table
and enter that value into the table directory.
NOTE: This wording is unclear and can be misleading. The TableRecord.checkSum
for 'head' is calculated using the modified 'head' data only, not the rest of
the file.
From OT spec 'head' table regarding checkSumAdjustment:
To compute it: set it to 0, sum the entire font as uint32,
then store 0xB1B0AFBA - sum.
If the font is used as a component in a font collection file, the value
of this field will be invalidated by changes to the file structure and
font table directory, and must be ignored.
If in a TTC, ignore all that and just set both calculated values to 0.
"""
headCopy = bytearray(headBytes)
headCopy[Table_head._head_1_x_checkSumAdjustment_offset
: Table_head._head_1_x_checkSumAdjustment_offset + 4] = [0,0,0,0]
from ot_file import calcCheckSum
return calcCheckSum(headCopy)
# End _calcHeadCheckSum
def calcCheckSumAdjustment(self):
"""Calculates the checkSumAdjustment for the font containing the head
table. If the font is within a TTC, returns 0.
The checkSumAdjustment value is returned. No font data is changed.
The head table must have a parentFont attribute set to an OTFont
object, and that OT font must have the fileBytes attribute set to a
byte sequence containing the font data. This should only be called for
font data read from a file or for a complete font created in memory.
"""
from ot_font import OTFont, TableRecord
assert hasattr(self, "parentFont")
font = self.parentFont
assert isinstance(font, OTFont)
assert hasattr(font, "fileBytes")
# If within TTC, just return 0
if font.isWithinTtc:
return 0
# get the head TableRecord
head_rec = font.offsetTable.tryGetTableRecord("head")
if head_rec is None:
return None
# To calculate checkSumAdjustment, the font file must be modified by
# setting head.checkSumAdjustment to 0. A checksum is calculated for
# the entire font file with that modification. After computing the
        # file checksum, the difference from 0xB1B0AFBA is taken.
# https://docs.microsoft.com/en-us/typography/opentype/spec/otff#calculating-checksums
#
# To avoid copying the entire font data to make a small change, the
# file checksum can be computed sequentially on three segments:
#
# 1) data before the modified head table (not copied)
# 2) continue with a modified copy of the head table
# 3) continue with the remainder (not copied)
#
# A memoryview will be used to avoid copying.
fontBytesView = memoryview(font.fileBytes)
# All tables offsets (from start of file) are expected to be multiples
# of 4, though that might not be true in some fonts. Checksums must be
# calculated on 4-byte increments. Determine if we need to work around
# any such quirk.
phase = 4 - (head_rec.offset % 4) if (head_rec.offset % 4) != 0 \
else (head_rec.offset % 4)
# phase is the number of extra bytes from the start of the head table
# to include in the first segment
from ot_file import calcCheckSum
# get checksum for the first segment
first_segment_length = head_rec.offset + phase
assert first_segment_length % 4 == 0
first_segment = fontBytesView[:first_segment_length]
checksum = calcCheckSum(first_segment)
# For the second segment, use 12 bytes after the end of the first
# segment, which will include the head.checkSumAdjustment member.
# Get a copy and clear the checkSumAdjustment.
second_segment = bytearray(fontBytesView[first_segment_length : first_segment_length + 12])
csa_offset = Table_head._head_1_x_checkSumAdjustment_offset - phase
second_segment[csa_offset : csa_offset + 4] = [0,0,0,0]
# continue the checksum with the modified second segment
checksum = calcCheckSum(second_segment, leftPrior= checksum)
# finish the checksum with the third segment
third_segment = fontBytesView[first_segment_length + 12:]
checksum = calcCheckSum(third_segment, leftPrior= checksum)
return 0xB1B0AFBA - checksum
# End of _calcCheckSumAdjustment
# End of class Table_head
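# Illustrative sketch (not part of the original module): the checksum used above
# treats the data as big-endian uint32 words and sums them modulo 2**32, padding
# with zero bytes up to a 4-byte boundary. The real routine lives in
# ot_file.calcCheckSum; this stand-alone version only shows the arithmetic that
# _calcHeadCheckSum and calcCheckSumAdjustment rely on.
def _illustrative_checksum(data) -> int:
    padded = bytes(data) + b"\x00" * (-len(data) % 4)
    total = 0
    for i in range(0, len(padded), 4):
        total = (total + int.from_bytes(padded[i:i + 4], "big")) & 0xFFFFFFFF
    return total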
|
the-stack_0_6988 | import numpy as np
import pandas as pd
from scipy.sparse import issparse
from . ItClust import transfer_learning_clf
from . calculate_adj import distance
from . calculate_adj import calculate_adj_matrix
from . utils import find_l
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class SpaDecon(object):
def __init__(self):
super(SpaDecon, self).__init__()
def deconvolution(self, source_data, target_data, histology_image=None, spatial_locations = None, p=0.5, histology = True, spatial = True, adj_matrix=None, adj = False, technology = 'Visium'):
if technology=='Visium':
threshold = 1/30
elif technology=='ST':
threshold = 1/200
if issparse(target_data.X):
target_data.X=target_data.X.A
target_data.var_names=[i.upper() for i in list(target_data.var_names)]
target_data.var["genename"]=target_data.var.index.astype("str")
if adj:
self.adj = adj_matrix
l = find_l(p, [i*0.1 for i in range(1,20)], self.adj)
adj_sub=np.exp(-1*self.adj/(2*(l**2)))
target_data.X=np.matmul(adj_sub,target_data.X)
elif spatial:
target_data.obs["x1"] = np.array(spatial_locations[1])
target_data.obs["x2"] = np.array(spatial_locations[2])
target_data.obs["x3"] = np.array(spatial_locations[3])
target_data.obs["x4"] = np.array(spatial_locations[4])
target_data.obs["x5"] = np.array(spatial_locations[5])
target_data=target_data[target_data.obs["x1"]==1]
adj=calculate_adj_matrix(x=target_data.obs["x2"].tolist(),y=target_data.obs["x3"].to_list(), x_pixel=target_data.obs["x4"].to_list(), y_pixel=target_data.obs["x5"].to_list(), image=histology_image, histology = histology)
#self.adj = adj
l = find_l(p, [i*0.1 for i in range(1,20)], adj)
adj_sub=np.exp(-1*adj/(2*(l**2)))
target_data.X=np.matmul(adj_sub,target_data.X)
del adj
clf=transfer_learning_clf()
clf.fit(source_data, target_data, tol = [0.01], threshold = threshold)
type_pred = clf.predict(write=False)
spad_props = type_pred[1]
# spad_props.columns = [i[0] for i in type_pred[2].values()]
spad_props.columns = clf.celltypes_final
spad_props.index = [i[0:len(i)-7] for i in spad_props.index]
self.props = spad_props
return spad_props
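# Minimal usage sketch (hypothetical file names, not part of the original module).
# source_data and target_data are AnnData objects, as assumed by deconvolution(),
# and spatial_locations is a DataFrame whose columns 1-5 hold the in-tissue flag,
# array coordinates and pixel coordinates read by the spatial branch above:
#
#   import scanpy as sc
#   import pandas as pd
#   from matplotlib.image import imread
#
#   source = sc.read_h5ad("scrna_reference.h5ad")
#   target = sc.read_h5ad("spatial_counts.h5ad")
#   locations = pd.read_csv("tissue_positions_list.csv", header=None)
#   image = imread("histology.jpg")
#
#   props = SpaDecon().deconvolution(source, target, histology_image=image,
#                                    spatial_locations=locations, technology="Visium")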
|
the-stack_0_6993 | from __future__ import print_function, unicode_literals, division
import os
import re
import codecs
import platform
import logging
from subprocess import check_output
from tempfile import mkdtemp
from functools import partial
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
from pyrouge.utils import log
from pyrouge.utils.file_utils import verify_dir
REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}",
"-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'}
def clean(x):
return re.sub(
r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''",
lambda m: REMAP.get(m.group()), x)
class DirectoryProcessor:
@staticmethod
def process(input_dir, output_dir, function):
"""
        Apply function to all files in input_dir and save the resulting output
files in output_dir.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = log.get_global_console_logger()
logger.info("Processing files in {}.".format(input_dir))
input_file_names = os.listdir(input_dir)
for input_file_name in input_file_names:
input_file = os.path.join(input_dir, input_file_name)
with codecs.open(input_file, "r", encoding="UTF-8") as f:
input_string = f.read()
output_string = function(input_string)
output_file = os.path.join(output_dir, input_file_name)
with codecs.open(output_file, "w", encoding="UTF-8") as f:
f.write(clean(output_string.lower()))
logger.info("Saved processed files to {}.".format(output_dir))
class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
                'SL.P.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, temp_dir=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
self.temp_dir = temp_dir
self.log = log.get_global_console_logger()
self.log.setLevel(logging.WARNING)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
        Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
def sent_split_to_string(s): return "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
        text: The text to convert, containing one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
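    # Illustrative example (assumed, not taken from the original source): calling
    #   Rouge155.convert_text_to_rouge_format("First sentence.\nSecond sentence.")
    # returns an HTML page whose body holds one numbered anchor per input line,
    #   <a name="1">[1]</a> <a href="#1" id=1>First sentence.</a>
    #   <a name="2">[2]</a> <a href="#2" id=2>Second sentence.</a>
    # which is the one-sentence-per-element layout ROUGE-1.5.5 expects.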
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = [model_filename_pattern.replace('#ID#', id)]
# model_filenames = Rouge155.__get_model_filenames_for_id(
# id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp(dir=self.temp_dir)
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
# 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
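    # Illustrative example (assumed, not taken from the original source): a line such as
    #   "1 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)"
    # contributes the entries
    #   results["rouge_1_recall"] = 0.02632
    #   results["rouge_1_recall_cb"] = 0.02632
    #   results["rouge_1_recall_ce"] = 0.02632
    # i.e. one key per ROUGE type and measure, plus its confidence-interval bounds.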
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
        Verify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp(dir=self.temp_dir)
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
# '-2',
# '-1',
# '-U',
'-m',
# '-v',
'-r', 1000,
'-n', 2,
# '-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
if __name__ == "__main__":
import argparse
from utils.argparsers import rouge_path_parser
parser = argparse.ArgumentParser(parents=[rouge_path_parser])
args = parser.parse_args()
rouge = Rouge155(args.rouge_home)
rouge.save_home_dir()
|
the-stack_0_6994 | from __future__ import print_function
import pprint
from collections import OrderedDict
from nose.tools import assert_equal
from tools import unit
from xdress.doxygen import class_docstr, func_docstr
car_dict = {'file_name': 'Cars.h',
'kls_name': 'util::Car',
'members': {'methods': ['Car',
'Car',
'Car',
'navigate',
'traffic',
'isValid',
'~Car'],
'variables': ['nwheels', 'maxrpm', 'maxspeed', 'manufacturer']},
'namespace': 'util',
'protected-attrib': {'manufacturer': {'briefdescription': '',
'definition': 'str util::Car::manufacturer',
'detaileddescription': 'The manufacturer of the car. This could be anything from Saturn to Porche. ',
'type': 'str'},
'maxrpm': {'briefdescription': '',
'definition': 'double util::Car::maxrmp',
'detaileddescription': 'The maximum rmp this car can attain',
'type': 'double'},
'maxspeed': {'briefdescription': 'The top speed of the car',
'definition': 'double util::Car::maxspeed',
'detaileddescription': '',
'type': 'double'},
'nwheels': {'briefdescription': 'The number of wheels on the car. ',
'definition': 'uint util::Car::nwheels',
'detaileddescription': '',
'type': 'uint'}},
'public-func': {'Car': {'arg_string': '()',
'args': None,
'briefdescription': 'Default constructor. ',
'definition': 'util::Car::Car',
'detaileddescription': 'A very simple car class that can do the basics. This car can navigate, get a traffic report, and verify that it is indeed a valid car. ',
'ret_type': None},
'Car1': {'arg_string': '(const Car &other)',
'args': OrderedDict({'other': {'type': 'const '}}),
'briefdescription': 'Copy constructor. This literally makes a clone of the Car that is passed in.',
'definition': 'util::Car::Car',
'detaileddescription': '',
'ret_type': None},
'Car2': {'arg_string': '(uint nwheels, str manufacturer)',
'args': OrderedDict({'manufacturer': {'type': 'str'},
'nwheels': {'type': 'uint'}}),
'briefdescription': '',
'definition': 'util::Car::Car',
'detaileddescription': 'Construct a car by specifying how many wheels it should have and who the manufacturer is.',
'ret_type': None},
'isValid': {'arg_string': '()',
'args': None,
'briefdescription': 'Checks if the object is really a car. Basically sees that is has all the components of a car.',
'definition': 'bool util::Car::isValid',
'detaileddescription': '',
'ret_type': 'bool'},
'navigate': {'arg_string': '(str where, float32 howFast, Date when)',
'args': OrderedDict([('where', {'type': 'str'}),
('howFast', {'type': 'float32'}),
('when', {'type': 'Date'}),
]),
'briefdescription': 'Has the car drive to a specified location',
'definition': 'std::vector< int32> util::Car::navigate',
'detaileddescription': '',
'ret_type': 'std::vector< uint32 >'},
'traffic': {'arg_string': '(std::vector< int32 > &coord) const',
'args': OrderedDict({'coord': {'type': 'std::vector< unit32 > const &'}}),
'briefdescription': '',
'definition': 'str util::Car::traffic',
'detaileddescription': 'Check the traffic at a given location. The input parameter is a vector of integers specifying the latitude and longitude of the position where the traffic should be checked.',
'ret_type': 'str'},
'~Car': {'arg_string': '()',
'args': None,
'briefdescription': 'A destructor. ',
'definition': 'hbs::Car::~Car',
'detaileddescription': '',
'ret_type': None}}}
@unit
def test_classdocstr():
exp = \
"""A very simple car class that can do the basics. This car can
navigate, get a traffic report, and verify that it is indeed a valid
car.
Attributes
----------
nwheels (uint) : The number of wheels on the car.
maxrpm (double) : The maximum rmp this car can attain
maxspeed (double) : The top speed of the car
manufacturer (str) : The manufacturer of the car. This could be
anything from Saturn to Porche.
Methods
-------
Car
~Car
isValid
navigate
traffic
Notes
-----
This class was defined in Cars.h
The class is found in the "util" namespace
"""
actual = class_docstr(car_dict)
print('-------- Expected Class docstring --------')
print(exp)
print('-------- Actual Class docstring --------')
print(actual)
# Strip whitespace before testing b/c editor config
assert_equal(exp.strip(), actual.strip())
@unit
def test_funcdocstr():
exp = \
"""Has the car drive to a specified location
Parameters
----------
where : str
howFast : float32
when : Date
Returns
-------
res1 : std::vector< uint32 >
"""
actual = func_docstr(car_dict['public-func']['navigate'], is_method=True)
print('-------- Expected Class docstring --------')
print(exp)
print('-------- Actual Class docstring --------')
print(actual)
# Strip whitespace before testing b/c editor config
assert_equal(exp.strip(), actual.strip())
|
the-stack_0_6995 | from tqdm import tqdm
import torch
from .utils import get_cosine_schedule
from . import mcmc
import math
from .exp_utils import evaluate_model
class SGLDRunner:
def __init__(self, model, dataloader, dataloader_test, epochs_per_cycle, warmup_epochs,
sample_epochs, learning_rate=1e-2, skip=1, metrics_skip=1,
temperature=1., data_mult=1., momentum=0., sampling_decay=True,
grad_max=1e6, cycles=1, precond_update=None,
metrics_saver=None, model_saver=None, reject_samples=False):
"""Stochastic Gradient Langevin Dynamics for posterior sampling.
On calling `run`, this class runs SGLD for `cycles` sampling cycles. In
each cycle, there are 3 phases: descent, warmup and sampling. The cycle
lasts for `epochs_per_cycle` epochs in total, and the warmup and
sampling phases last for `warmup_epochs` and `sample_epochs` epochs
respectively.
The descent phase performs regular gradient descent with momentum, i.e.
SGLD with temperature=0. The warmup phase raises the temperature to 1.
During the sample phase, samples get stored.
The learning rate keep decreasing all throughout the cycle following a
cosine function, from learning_rate=1 at the beginning to
learning_rate=0 at the end.
The preconditioner gets updated every `precond_update` epochs,
regardless of the phase in the cycle.
Args:
            model (torch.nn.Module, PriorMixin): BNN model to sample from
            dataloader: Training set DataLoader; len(dataloader.dataset) * data_mult gives the effective number of data points
warmup_epochs (int): Number of epochs per cycle for warming up the Markov chain, at the beginning.
sample_epochs (int): Number of epochs per cycle where the samples are kept, at the end.
learning_rate (float): Initial learning rate
skip (int): Number of samples to skip between saved samples during the sampling phase. Sometimes called "thinning".
metrics_skip (int): Number of samples to skip between saved metrics of the sampler
temperature (float): Temperature for tempering the posterior
data_mult (float): Effective replication of each datapoint (which is the usual approach to tempering in VI).
momentum (float): Momentum decay parameter for SGLD
sampling_decay (bool): Flag to control whether the learning rate should decay during sampling
grad_max (float): maximum absolute magnitude of an element of the gradient
cycles (int): Number of warmup and sampling cycles to perform
precond_update (int): Number of steps after which the preconditioner should be updated. None disables the preconditioner.
metrics_saver : HDF5Metrics to log metric with a certain name and value
"""
self.model = model
self.dataloader = dataloader
self.dataloader_test = dataloader_test
assert warmup_epochs >= 0
assert sample_epochs >= 0
assert epochs_per_cycle >= warmup_epochs + sample_epochs
self.epochs_per_cycle = epochs_per_cycle
self.descent_epochs = epochs_per_cycle - warmup_epochs - sample_epochs
self.warmup_epochs = warmup_epochs
self.sample_epochs = sample_epochs
self.skip = skip
self.metrics_skip = metrics_skip
        # num_samples (int): Number of samples recorded per cycle
self.num_samples = sample_epochs // skip
assert sample_epochs % skip == 0
self.learning_rate = learning_rate
self.temperature = temperature
self.eff_num_data = len(dataloader.dataset) * data_mult
self.momentum = momentum
self.sampling_decay = sampling_decay
self.grad_max = grad_max
self.cycles = cycles
self.precond_update = precond_update
self.metrics_saver = metrics_saver
self.model_saver = model_saver
if model_saver is None:
self._samples = {
name: torch.zeros(torch.Size([self.num_samples*cycles])+p_or_b.shape, dtype=p_or_b.dtype)
for name, p_or_b in model.state_dict().items()}
self._samples["steps"] = torch.zeros(torch.Size([self.num_samples*cycles]), dtype=torch.int64)
self.param_names, self._params = zip(*model.named_parameters())
self.reject_samples = reject_samples
def _make_optimizer(self, params):
assert self.reject_samples is False, "SGLD cannot reject samples"
return mcmc.SGLD(
params=params,
lr=self.learning_rate, num_data=self.eff_num_data,
momentum=self.momentum, temperature=self.temperature)
def _make_scheduler(self, optimizer):
if self.sampling_decay is True or self.sampling_decay == "cosine":
schedule = get_cosine_schedule(
len(self.dataloader) * self.epochs_per_cycle)
return torch.optim.lr_scheduler.LambdaLR(
optimizer=optimizer, lr_lambda=schedule)
elif self.sampling_decay is False or self.sampling_decay == "stairs":
return torch.optim.lr_scheduler.StepLR(
optimizer, 150*len(self.dataloader), gamma=0.1)
elif self.sampling_decay == "flat":
# No-op scheduler
return torch.optim.lr_scheduler.StepLR(optimizer, 2**30, gamma=1.0)
raise ValueError(f"self.sampling_decay={self.sampling_decay}")
def run(self, progressbar=False):
"""
Runs the sampling on the model.
Args:
x (torch.tensor): Training input data
y (torch.tensor): Training labels
progressbar (bool): Flag that controls whether a progressbar is printed
"""
self.optimizer = self._make_optimizer(self._params)
self.optimizer.sample_momentum()
self.scheduler = self._make_scheduler(self.optimizer)
self.metrics_saver.add_scalar("test/log_prob", math.nan, step=-1)
self.metrics_saver.add_scalar("test/acc", math.nan, step=-1)
def _is_sampling_epoch(_epoch):
_epoch = _epoch % self.epochs_per_cycle
sampling_epoch = _epoch - (self.descent_epochs + self.warmup_epochs)
return (0 <= sampling_epoch) and (sampling_epoch % self.skip == 0)
step = -1 # used for `self.metrics_saver.add_scalar`, must start at 0 and never reset
postfix = {}
for cycle in range(self.cycles):
if progressbar:
epochs = tqdm(range(self.epochs_per_cycle), position=0,
leave=True, desc=f"Cycle {cycle}, Sampling", mininterval=2.0)
else:
epochs = range(self.epochs_per_cycle)
for epoch in epochs:
for g in self.optimizer.param_groups:
g['temperature'] = 0. if epoch < self.descent_epochs else self.temperature
for i, (x, y) in enumerate(self.dataloader):
step += 1
store_metrics = (
i == 0 # The start of an epoch
or step % self.metrics_skip == 0)
initial_step = (
step == 0 # The very first step
or
# This is the first step after a sampling epoch
(i == 0 and _is_sampling_epoch(epoch-1)))
loss, acc, delta_energy = self.step(
step, x.to(self._params[0].device).detach(), y.to(self._params[0].device).detach(),
store_metrics=store_metrics,
initial_step=initial_step)
if progressbar and store_metrics:
postfix["train/loss"] = loss.item()
postfix["train/acc"] = acc.item()
if delta_energy is not None:
postfix["Δₑ"] = delta_energy
epochs.set_postfix(postfix, refresh=False)
if self.precond_update is not None and epoch % self.precond_update == 0:
self.optimizer.update_preconditioner()
state_dict = self.model.state_dict()
if _is_sampling_epoch(epoch):
self._save_sample(state_dict, cycle, epoch, step)
results = self._evaluate_model(state_dict, step)
if progressbar:
postfix.update(results)
epochs.set_postfix(postfix, refresh=False)
# Important to put here because no new metrics are added
                # Write metrics to disk every 10 seconds
self.metrics_saver.flush(every_s=10)
# Save metrics for the last sample
(x, y) = next(iter(self.dataloader))
self.step(step+1,
x.to(self._params[0].device),
y.to(self._params[0].device),
store_metrics=True, initial_step=_is_sampling_epoch(-1))
def _save_sample(self, state_dict, cycle, epoch, step):
# TODO: refactor this into two `model_saver` classes
sampling_epoch = epoch - (self.descent_epochs + self.warmup_epochs)
if self.model_saver is None:
for name, param in state_dict.items():
self._samples[name][(self.num_samples*cycle)+(sampling_epoch//self.skip)] = param
else:
self.model_saver.add_state_dict(state_dict, step)
self.model_saver.flush()
def _evaluate_model(self, state_dict, step):
if len(self.dataloader_test) == 0:
return {}
self.model.eval()
state_dict = {k: v.unsqueeze(0) for k, v in state_dict.items()}
results = evaluate_model(
self.model, self.dataloader_test, state_dict,
likelihood_eval=True, accuracy_eval=True, calibration_eval=False)
self.model.train()
results = {"test/loss": -results["lp_last"],
"test/acc": results["acc_last"]}
for k, v in results.items():
self.metrics_saver.add_scalar(k, v, step)
return results
def _model_potential_and_grad(self, x, y):
self.optimizer.zero_grad()
loss, log_prior, potential, accs_batch, _ = self.model.split_potential_and_acc(x, y, self.eff_num_data)
potential.backward()
for p in self.optimizer.param_groups[0]["params"]:
p.grad.clamp_(min=-self.grad_max, max=self.grad_max)
if torch.isnan(potential).item():
raise ValueError("Potential is NaN")
return loss, log_prior, potential, accs_batch.mean()
def step(self, i, x, y, store_metrics, lr_decay=True, initial_step=False):
"""
Perform one step of SGLD on the model.
Args:
x (torch.Tensor): Training input data
y (torch.Tensor): Training labels
lr_decay (bool): Flag that controls whether the learning rate should decay after this step
Returns:
loss (float): The current loss of the model for x and y
"""
loss, log_prior, potential, acc = self._model_potential_and_grad(x, y)
self.optimizer.step(calc_metrics=store_metrics)
lr = self.optimizer.param_groups[0]["lr"]
if lr_decay:
self.scheduler.step()
if store_metrics:
# The metrics are valid for the previous step.
self.store_metrics(i=i-1, loss=loss.item(), log_prior=log_prior.item(),
potential=potential.item(), acc=acc.item(), lr=lr,
corresponds_to_sample=initial_step)
return loss, acc, None
def get_samples(self):
"""
Returns the acquired SGLD samples from the last run.
Returns:
samples (dict): Dictionary of torch.tensors with num_samples*cycles samples for each parameter of the model
"""
if self.model_saver is None:
return {k: v for (k, v) in self._samples.items() if k != "steps"}
return self.model_saver.load_samples(keep_steps=False)
def store_metrics(self, i, loss, log_prior, potential, acc, lr,
corresponds_to_sample: bool,
delta_energy=None, total_energy=None, rejected=None):
est_temperature_all = 0.
est_config_temp_all = 0.
all_numel = 0
add_scalar = self.metrics_saver.add_scalar
for n, p in zip(self.param_names, self.optimizer.param_groups[0]["params"]):
state = self.optimizer.state[p]
add_scalar("preconditioner/"+n, state["preconditioner"], i)
add_scalar("est_temperature/"+n, state["est_temperature"], i)
add_scalar("est_config_temp/"+n, state["est_config_temp"], i)
est_temperature_all += state["est_temperature"] * p.numel()
est_config_temp_all += state["est_config_temp"] * p.numel()
all_numel += p.numel()
add_scalar("est_temperature/all", est_temperature_all / all_numel, i)
add_scalar("est_config_temp/all", est_config_temp_all / all_numel, i)
temperature = self.optimizer.param_groups[0]["temperature"]
add_scalar("temperature", temperature, i)
add_scalar("loss", loss, i)
add_scalar("acc", acc, i)
add_scalar("log_prior", log_prior, i)
add_scalar("potential", potential, i)
add_scalar("lr", lr, i)
add_scalar("acceptance/is_sample", int(corresponds_to_sample), i)
if delta_energy is not None:
add_scalar("delta_energy", delta_energy, i)
add_scalar("total_energy", total_energy, i)
if rejected is not None:
add_scalar("acceptance/rejected", int(rejected), i)
class VerletSGLDRunner(SGLDRunner):
def _make_optimizer(self, params):
return mcmc.VerletSGLD(
params=params,
lr=self.learning_rate, num_data=self.eff_num_data,
momentum=self.momentum, temperature=self.temperature)
def step(self, i, x, y, store_metrics, lr_decay=True, initial_step=False):
loss, log_prior, potential, acc = self._model_potential_and_grad(x, y)
lr = self.optimizer.param_groups[0]["lr"]
rejected = None
delta_energy = None
if i == 0:
# The very first step
if isinstance(self.optimizer, mcmc.HMC):
# momentum should be sampled already, but it does not hurt to
# sample again.
self.optimizer.sample_momentum()
self.optimizer.initial_step(
calc_metrics=True, save_state=self.reject_samples)
if self.reject_samples:
rejected = False # the first sample is what we have.
elif initial_step:
# Calculate metrics using the possible sample's parameter (which is
# not modified), its gradient, and the new momentum as updated by
# `final_step`.
self.optimizer.final_step(calc_metrics=True)
delta_energy = self.optimizer.delta_energy(self._initial_potential, potential)
if self.reject_samples:
rejected, _ = self.optimizer.maybe_reject(delta_energy)
# The first step of an epoch, but not the very first
if isinstance(self.optimizer, mcmc.HMC):
self.optimizer.sample_momentum()
self.optimizer.initial_step(
calc_metrics=False, save_state=self.reject_samples)
else:
# Any intermediate step
self.optimizer.step(calc_metrics=store_metrics)
if i == 0:
# Very first step
store_metrics = True
total_energy = delta_energy = self.optimizer.delta_energy(0., 0.)
self._initial_potential = potential.item()
self._total_energy = 0.
elif initial_step:
# First step of an epoch
store_metrics = True
self._initial_potential = potential.item()
self._total_energy += delta_energy
total_energy = self._total_energy
else:
# Any step
if store_metrics:
delta_energy = self.optimizer.delta_energy(self._initial_potential, loss)
total_energy = self._total_energy + delta_energy
if store_metrics:
# The metrics are valid for the previous step.
self.store_metrics(i=i-1, loss=loss.item(), log_prior=log_prior.item(),
potential=potential.item(), acc=acc.item(), lr=lr,
delta_energy=delta_energy,
total_energy=total_energy, rejected=rejected,
corresponds_to_sample=initial_step)
if lr_decay:
self.scheduler.step()
return loss, acc, delta_energy
class HMCRunner(VerletSGLDRunner):
def _make_optimizer(self, params):
assert self.temperature == 1.0, "HMC only implemented for temperature=1."
assert self.momentum == 1.0, "HMC only works with momentum=1."
assert self.descent_epochs == 0, "HMC not implemented for descent epochs with temp=0."
return mcmc.HMC(
params=params,
lr=self.learning_rate, num_data=self.eff_num_data)
|
the-stack_0_6997 | """sneh_figma_test_app_22681 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Sneh_Figma_Test_App"
admin.site.site_title = "Sneh_Figma_Test_App Admin Portal"
admin.site.index_title = "Sneh_Figma_Test_App Admin"
# swagger
api_info = openapi.Info(
title="Sneh_Figma_Test_App API",
default_version="v1",
description="API documentation for Sneh_Figma_Test_App App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
the-stack_0_6999 | """
This module contains the classes for Nodes, Arcs used in the optimizer with mnetgen format,
for the solvers.
"""
from typing import List
class Arc:
def __init__(
self,
name: int,
from_node: int,
to_node: int,
commodity: int,
cost: float,
capacity: int,
mutual_capacity_id: int,
):
self.name = name
self.from_node = from_node
self.to_node = to_node
self.commodity = commodity
self.cost = cost
self.capacity = capacity
self.mutual_capacity_id = mutual_capacity_id
def __repr__(self):
return self.__str__()
def __str__(self):
return "Arc(%s,%s,%s,%s,%s,%s,%s)" % (
self.name,
self.from_node,
self.to_node,
self.commodity,
self.cost,
self.capacity,
self.mutual_capacity_id,
)
class Node:
def __init__(self, node_id: int, commodity: int, supply: int):
self.node_id = node_id
self.commodity = commodity
self.supply = supply
def __repr__(self):
return self.__str__()
def __str__(self):
return "Node(%s,%s,%s)" % (self.node_id, self.commodity, self.supply)
class MutualCapacity:
def __init__(self, mutual_capacity_id: int, capacity: int):
self.mutual_capacity_id = mutual_capacity_id
self.capacity = capacity
class MnetgenFormatWriter:
__SEP = "\t"
def __init__(
self, nodes: List[Node], arcs: List[Arc], capacities: List[MutualCapacity]
):
self.nodes = nodes
self.arcs = arcs
self.capacities = capacities
def write(self, dir: str, filename: str):
arc_lines = self.__arc_lines()
node_lines = self.__node_lines()
mutual_capacity_lines = self.__mutual_capacity_lines()
summary_lines = self.__nod_lines()
self.__write_lines(arc_lines, f"{dir}/{filename}.arc")
self.__write_lines(node_lines, f"{dir}/{filename}.sup")
self.__write_lines(mutual_capacity_lines, f"{dir}/{filename}.mut")
self.__write_lines(summary_lines, f"{dir}/{filename}.nod")
# Arc file (*.arc):
#
# < arc name > , < from node > , < to node > , < commodity > , < cost > ,
# < capacity > , < mutual capacity pointer >
def __arc_lines(self):
SEP = self.__SEP
arc_lines = []
for a in self.arcs:
arc_lines.append(
f"{a.name}{SEP}{a.from_node}{SEP}{a.to_node}{SEP}{a.commodity}{SEP}{a.cost}{SEP}{a.capacity}{SEP}{a.mutual_capacity_id}"
)
return arc_lines
# Node supply file (*.nod if FOUR_F == 0, *.sup otherwise):
#
# < node > , < commodity > , < supply >
def __node_lines(self):
SEP = self.__SEP
node_lines = []
for n in self.nodes:
node_lines.append(f"{n.node_id}{SEP}{n.commodity}{SEP}{n.supply}")
return node_lines
# Mutual capacity file (*.mut):
#
# < mutual capacity pointer > , < mutual capacity >
def __mutual_capacity_lines(self):
SEP = self.__SEP
mc_lines = []
for mc in self.capacities:
mc_lines.append(f"{mc.mutual_capacity_id}{SEP}{mc.capacity}")
return mc_lines
def __write_lines(self, ds: List[str], _writedir: str):
with open(_writedir, "w+") as f:
for i, line in enumerate(ds):
if i != len(ds) - 1:
f.write("%s\n" % line)
else:
f.write("%s" % line)
def __nod_lines(self):
SEP = self.__SEP
commodities = len(set([node.commodity for node in self.nodes]))
nodes = len(self.nodes)
arcs = len(self.arcs)
capacitated = sum(
[1 for arc in self.arcs if arc.mutual_capacity_id != 0]
) # first bundle is for uncapacitateds.
nod_line = f"{commodities}{SEP}{nodes}{SEP}{arcs}{SEP}{capacitated}"
print(f"nod_line {nod_line}")
return [nod_line]
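# Minimal usage sketch (illustrative values, not part of the original module): build a
# one-commodity, two-node, single-arc instance and write it out in mnetgen format.
if __name__ == "__main__":
    import tempfile
    nodes = [Node(1, 1, 10), Node(2, 1, -10)]
    arcs = [Arc(1, 1, 2, 1, cost=2.5, capacity=10, mutual_capacity_id=0)]
    capacities = [MutualCapacity(0, 10)]
    writer = MnetgenFormatWriter(nodes, arcs, capacities)
    # writes example.arc, example.sup, example.mut and example.nod
    writer.write(tempfile.mkdtemp(), "example")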
|
the-stack_0_7001 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_HTTPError,
)
from ..utils import (
USER_AGENTS,
ExtractorError,
int_or_none,
unified_strdate,
remove_end,
update_url_query,
)
class DPlayIE(InfoExtractor):
_VALID_URL = r'https?://(?P<domain>www\.dplay\.(?:dk|se|no))/[^/]+/(?P<id>[^/?#]+)'
_TESTS = [{
# non geo restricted, via secure api, unsigned download hls URL
'url': 'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/',
'info_dict': {
'id': '3172',
'display_id': 'season-1-svensken-lar-sig-njuta-av-livet',
'ext': 'mp4',
'title': 'Svensken lär sig njuta av livet',
'description': 'md5:d3819c9bccffd0fe458ca42451dd50d8',
'duration': 2650,
'timestamp': 1365454320,
'upload_date': '20130408',
'creator': 'Kanal 5 (Home)',
'series': 'Nugammalt - 77 händelser som format Sverige',
'season_number': 1,
'episode_number': 1,
'age_limit': 0,
},
}, {
# geo restricted, via secure api, unsigned download hls URL
'url': 'http://www.dplay.dk/mig-og-min-mor/season-6-episode-12/',
'info_dict': {
'id': '70816',
'display_id': 'season-6-episode-12',
'ext': 'mp4',
'title': 'Episode 12',
'description': 'md5:9c86e51a93f8a4401fc9641ef9894c90',
'duration': 2563,
'timestamp': 1429696800,
'upload_date': '20150422',
'creator': 'Kanal 4 (Home)',
'series': 'Mig og min mor',
'season_number': 6,
'episode_number': 12,
'age_limit': 0,
},
}, {
# geo restricted, via direct unsigned hls URL
'url': 'http://www.dplay.no/pga-tour/season-1-hoydepunkter-18-21-februar/',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
domain = mobj.group('domain')
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r'data-video-id=["\'](\d+)', webpage, 'video id')
info = self._download_json(
'http://%s/api/v2/ajax/videos?video_id=%s' % (domain, video_id),
video_id)['data'][0]
title = info['title']
PROTOCOLS = ('hls', 'hds')
formats = []
def extract_formats(protocol, manifest_url):
if protocol == 'hls':
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id=protocol, fatal=False)
# Sometimes final URLs inside m3u8 are unsigned, let's fix this
# ourselves. Also fragments' URLs are only served signed for
# Safari user agent.
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(manifest_url).query)
for m3u8_format in m3u8_formats:
m3u8_format.update({
'url': update_url_query(m3u8_format['url'], query),
'http_headers': {
'User-Agent': USER_AGENTS['Safari'],
},
})
formats.extend(m3u8_formats)
elif protocol == 'hds':
formats.extend(self._extract_f4m_formats(
manifest_url + '&hdcore=3.8.0&plugin=flowplayer-3.8.0.0',
video_id, f4m_id=protocol, fatal=False))
domain_tld = domain.split('.')[-1]
if domain_tld in ('se', 'dk', 'no'):
for protocol in PROTOCOLS:
# Providing dsc-geo allows to bypass geo restriction in some cases
self._set_cookie(
'secure.dplay.%s' % domain_tld, 'dsc-geo',
json.dumps({
'countryCode': domain_tld.upper(),
'expiry': (time.time() + 20 * 60) * 1000,
}))
stream = self._download_json(
'https://secure.dplay.%s/secure/api/v2/user/authorization/stream/%s?stream_type=%s'
% (domain_tld, video_id, protocol), video_id,
'Downloading %s stream JSON' % protocol, fatal=False)
if stream and stream.get(protocol):
extract_formats(protocol, stream[protocol])
# The last resort is to try direct unsigned hls/hds URLs from info dictionary.
# Sometimes this does work even when secure API with dsc-geo has failed (e.g.
# http://www.dplay.no/pga-tour/season-1-hoydepunkter-18-21-februar/).
if not formats:
for protocol in PROTOCOLS:
if info.get(protocol):
extract_formats(protocol, info[protocol])
self._sort_formats(formats)
subtitles = {}
for lang in ('se', 'sv', 'da', 'nl', 'no'):
for format_id in ('web_vtt', 'vtt', 'srt'):
subtitle_url = info.get('subtitles_%s_%s' % (lang, format_id))
if subtitle_url:
subtitles.setdefault(lang, []).append({'url': subtitle_url})
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': info.get('video_metadata_longDescription'),
'duration': int_or_none(info.get('video_metadata_length'), scale=1000),
'timestamp': int_or_none(info.get('video_publish_date')),
'creator': info.get('video_metadata_homeChannel'),
'series': info.get('video_metadata_show'),
'season_number': int_or_none(info.get('season')),
'episode_number': int_or_none(info.get('episode')),
'age_limit': int_or_none(info.get('minimum_age')),
'formats': formats,
'subtitles': subtitles,
}
class DPlayItIE(InfoExtractor):
_VALID_URL = r'https?://it\.dplay\.com/[^/]+/[^/]+/(?P<id>[^/?#]+)'
_GEO_COUNTRIES = ['IT']
_TEST = {
'url': 'http://it.dplay.com/nove/biografie-imbarazzanti/luigi-di-maio-la-psicosi-di-stanislawskij/',
'md5': '2b808ffb00fc47b884a172ca5d13053c',
'info_dict': {
'id': '6918',
'display_id': 'luigi-di-maio-la-psicosi-di-stanislawskij',
'ext': 'mp4',
'title': 'Biografie imbarazzanti: Luigi Di Maio: la psicosi di Stanislawskij',
'description': 'md5:3c7a4303aef85868f867a26f5cc14813',
'thumbnail': r're:^https?://.*\.jpe?g',
'upload_date': '20160524',
'series': 'Biografie imbarazzanti',
'season_number': 1,
'episode': 'Luigi Di Maio: la psicosi di Stanislawskij',
'episode_number': 1,
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
info_url = self._search_regex(
r'url\s*[:=]\s*["\']((?:https?:)?//[^/]+/playback/videoPlaybackInfo/\d+)',
webpage, 'video id')
title = remove_end(self._og_search_title(webpage), ' | Dplay')
try:
info = self._download_json(
info_url, display_id, headers={
'Authorization': 'Bearer %s' % self._get_cookies(url).get(
'dplayit_token').value,
'Referer': url,
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 403):
info = self._parse_json(e.cause.read().decode('utf-8'), display_id)
error = info['errors'][0]
if error.get('code') == 'access.denied.geoblocked':
self.raise_geo_restricted(
msg=error.get('detail'), countries=self._GEO_COUNTRIES)
raise ExtractorError(info['errors'][0]['detail'], expected=True)
raise
hls_url = info['data']['attributes']['streaming']['hls']['url']
formats = self._extract_m3u8_formats(
hls_url, display_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id='hls')
series = self._html_search_regex(
r'(?s)<h1[^>]+class=["\'].*?\bshow_title\b.*?["\'][^>]*>(.+?)</h1>',
webpage, 'series', fatal=False)
episode = self._search_regex(
r'<p[^>]+class=["\'].*?\bdesc_ep\b.*?["\'][^>]*>\s*<br/>\s*<b>([^<]+)',
webpage, 'episode', fatal=False)
mobj = re.search(
r'(?s)<span[^>]+class=["\']dates["\'][^>]*>.+?\bS\.(?P<season_number>\d+)\s+E\.(?P<episode_number>\d+)\s*-\s*(?P<upload_date>\d{2}/\d{2}/\d{4})',
webpage)
if mobj:
season_number = int(mobj.group('season_number'))
episode_number = int(mobj.group('episode_number'))
upload_date = unified_strdate(mobj.group('upload_date'))
else:
season_number = episode_number = upload_date = None
return {
'id': info_url.rpartition('/')[-1],
'display_id': display_id,
'title': title,
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'series': series,
'season_number': season_number,
'episode': episode,
'episode_number': episode_number,
'upload_date': upload_date,
'formats': formats,
}
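# --- Illustrative usage sketch (not part of the original module) ---
# Extractors in youtube-dl are normally driven through YoutubeDL rather than
# instantiated directly; roughly (test URL taken from _TESTS above):
#
#   from youtube_dl import YoutubeDL
#   with YoutubeDL() as ydl:
#       info = ydl.extract_info(
#           'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/'
#           'season-1-svensken-lar-sig-njuta-av-livet/', download=False)
#       print(info.get('title'))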
|
the-stack_0_7002 | import os
import sys
import pathlib
import time
import shutil
try:
import pymake
except:
msg = "Error. Pymake package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
raise Exception(msg)
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from simulation import Simulation
from targets import target_dict as target_dict
def get_example_directory(base, fdir, subdir="mf6"):
exdir = None
for root, dirs, files in os.walk(base):
for d in dirs:
if d.startswith(fdir):
exdir = os.path.abspath(os.path.join(root, d, subdir))
break
if exdir is not None:
break
return exdir
# find path to modflow6-testmodels or modflow6-testmodels.git directory
home = os.path.expanduser("~")
print("$HOME={}".format(home))
fdir = "modflow6-testmodels"
exdir = get_example_directory(home, fdir, subdir="mf5to6")
if exdir is None:
p = pathlib.Path(os.getcwd())
home = os.path.abspath(pathlib.Path(*p.parts[:2]))
print("$HOME={}".format(home))
exdir = get_example_directory(home, fdir, subdir="mf5to6")
if exdir is not None:
assert os.path.isdir(exdir)
sfmt = "{:25s} - {}"
def get_mf5to6_models():
"""
Get a list of test models
"""
# list of example files to exclude
exclude = [
"test1ss_ic1",
"test9.5-3layer",
"testmm2",
"testmm3",
"testmmSimple",
"testps3a",
"testTwri",
"testTwrip",
"test028_sfr_simple",
]
# write a summary of the files to exclude
print("list of tests to exclude:")
for idx, ex in enumerate(exclude):
print(" {}: {}".format(idx + 1, ex))
# build list of directories with valid example files
if exdir is not None:
dirs = [
d for d in os.listdir(exdir) if "test" in d and d not in exclude
]
# sort in numerical order for case sensitive os
dirs = sorted(dirs, key=lambda v: (v.upper(), v[0].islower()))
else:
dirs = []
# determine if only a selection of models should be run
select_dirs = None
select_packages = None
for idx, arg in enumerate(sys.argv):
if arg.lower() == "--sim":
if len(sys.argv) > idx + 1:
select_dirs = sys.argv[idx + 1 :]
break
elif arg.lower() == "--pak":
if len(sys.argv) > idx + 1:
select_packages = sys.argv[idx + 1 :]
select_packages = [item.upper() for item in select_packages]
break
# determine if the selection of model is in the test models to evaluate
if select_dirs is not None:
found_dirs = []
for d in select_dirs:
if d in dirs:
found_dirs.append(d)
dirs = found_dirs
if len(dirs) < 1:
msg = "Selected models not available in test"
print(msg)
# determine if the specified package(s) is in the test models to evaluate
if select_packages is not None:
found_dirs = []
for d in dirs:
pth = os.path.join(exdir, d)
namefiles = pymake.get_namefiles(pth)
ftypes = []
for namefile in namefiles:
for pak in select_packages:
ftype = pymake.get_entries_from_namefile(
namefile, ftype=pak
)
for t in ftype:
if t[1] is not None:
if t[1] not in ftypes:
ftypes.append(t[1].upper())
if len(ftypes) > 0:
ftypes = [item.upper() for item in ftypes]
for pak in select_packages:
if pak in ftypes:
found_dirs.append(d)
break
dirs = found_dirs
if len(dirs) < 1:
msg = "Selected packages not available ["
for idx, pak in enumerate(select_packages):
msg += "{}".format(pak)
if idx + 1 < len(select_packages):
msg += ", "
msg += "]"
print(msg)
return dirs
def run_mf5to6(sim):
"""
Run the MODFLOW 6 simulation and compare to existing head file or
appropriate MODFLOW-2005, MODFLOW-NWT, MODFLOW-USG, or MODFLOW-LGR run.
"""
src = os.path.join(exdir, sim.name)
dst = os.path.join("temp", "working")
# set default version
version = "mf2005"
lgrpth = None
# determine if compare directory exists in directory or if mflgr control
# file is in directory
listdir = os.listdir(src)
for value in listdir:
fpth = os.path.join(src, value)
if os.path.isfile(fpth):
ext = os.path.splitext(fpth)[1]
if ".lgr" in ext.lower():
version = "mflgr"
lgrpth = fpth
elif os.path.isdir(fpth):
if "compare" in value.lower() or "cmp" in value.lower():
compare = True
cpth = value
msg = "Copying {} files to working directory".format(version)
# copy lgr files to working directory
if lgrpth is not None:
print(msg)
npth = lgrpth
pymake.setup(lgrpth, dst)
# copy modflow 2005, NWT, or USG files to working directory
else:
print(msg)
npths = pymake.get_namefiles(src)
if len(npths) < 1:
msg = "No name files in {}".format(src)
print(msg)
assert False
npth = npths[0]
pymake.setup(npth, dst)
# read ftype from name file to set modflow version
if version != "mflgr":
lines = [line.rstrip("\n") for line in open(npth)]
for line in lines:
if len(line) < 1:
continue
t = line.split()
ftype = t[0].upper()
if ftype == "NWT" or ftype == "UPW":
version = "mfnwt"
break
elif ftype == "SMS" or ftype == "DISU":
version = "mfusg"
break
# run converter
exe = os.path.abspath(target_dict["mf5to6"])
msg = sfmt.format("using executable", exe)
print(msg)
nmsg = "Program terminated normally"
try:
nam = os.path.basename(npth)
success, buff = flopy.run_model(
exe,
nam,
model_ws=dst,
silent=False,
report=True,
normal_msg=nmsg,
cargs="mf6",
)
msg = sfmt.format("MODFLOW 5 to 6 run", nam)
if success:
print(msg)
else:
print("ERROR: " + msg)
except:
msg = sfmt.format("MODFLOW 5 to 6 run", nam)
print("ERROR: " + msg)
success = False
assert success, msg
# standard setup
src = dst
dst = os.path.join("temp", sim.name)
sim.setup(src, dst)
# clean up temp/working directory (src)
if os.path.exists(src):
msg = "Removing {} directory".format(src)
print(msg)
shutil.rmtree(src)
time.sleep(0.5)
# standard comparison run
sim.run()
sim.compare()
sim.teardown()
def test_model():
# determine if test directory exists
dirtest = dir_avail()
if not dirtest:
return
# get a list of test models to run
dirs = get_mf5to6_models()
# run the test models
for dir in dirs:
yield run_mf5to6, Simulation(dir, mf6_regression=True)
return
def dir_avail():
avail = False
if exdir is not None:
avail = os.path.isdir(exdir)
if not avail:
print('"{}" does not exist'.format(exdir))
print("no need to run {}".format(os.path.basename(__file__)))
return avail
def main():
# write message
tnam = os.path.splitext(os.path.basename(__file__))[0]
msg = "Running {} test".format(tnam)
print(msg)
# get name of current file
module_name = sys.modules[__name__].__file__
# determine if test directory exists
dirtest = dir_avail()
if not dirtest:
return
# get a list of test models to run
dirs = get_mf5to6_models()
# run the test models
for dir in dirs:
sim = Simulation(dir, mf6_regression=True)
run_mf5to6(sim)
return
if __name__ == "__main__":
print("standalone run of {}".format(os.path.basename(__file__)))
delFiles = True
for idx, arg in enumerate(sys.argv):
if arg.lower() == "--keep":
if len(sys.argv) > idx + 1:
delFiles = False
break
# run main routine
main()
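# --- Illustrative invocation sketch (not part of the original script) ---
# The script understands the --sim, --pak and --keep arguments parsed above;
# the model and package names below are hypothetical placeholders:
#
#   python <this_script>.py --sim test007 --pak SFR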
|
the-stack_0_7005 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 11:34:57 2021
@author: SethHarden
"""
import math
import heapq
def maxCandies(arr, k):
bags = []
minutes = k
    # push each bag count (negated) so the min-heap behaves like a max-heap
for i in arr:
heapq.heappush(bags, -i)
    # running total of candies eaten
answer = 0
#while we have time and there are bags
while minutes > 0 and bags:
        # pop the fullest bag (stored negated, so flip the sign back)
max_candy = abs(heapq.heappop(bags))
answer += max_candy
heapq.heappush(bags, - (max_candy // 2))
minutes -= 1
return answer
def printInteger(n):
print('[', n, ']', sep='', end='')
test_case_number = 1
def check(expected, output):
global test_case_number
result = False
if expected == output:
result = True
rightTick = '\u2713'
wrongTick = '\u2717'
if result:
print(rightTick, 'Test #', test_case_number, sep='')
else:
print(wrongTick, 'Test #', test_case_number, ': Expected ', sep='', end='')
printInteger(expected)
print(' Your output: ', end='')
printInteger(output)
print()
test_case_number += 1
if __name__ == "__main__":
n_1, k_1 = 5, 3
arr_1 = [2, 1, 7, 4, 2]
expected_1 = 14
output_1 = maxCandies(arr_1, k_1)
check(expected_1, output_1)
n_2, k_2 = 9, 3
arr_2 = [19, 78, 76, 72, 48, 8, 24, 74, 29]
expected_2 = 228
output_2 = maxCandies(arr_2, k_2)
check(expected_2, output_2)
# Add your own test cases here
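    # Hypothetical extra case (not from the original problem set): a single bag
    # of 4 candies eaten over 5 minutes yields 4 + 2 + 1 + 0 + 0 = 7.
    n_3, k_3 = 1, 5
    arr_3 = [4]
    expected_3 = 7
    output_3 = maxCandies(arr_3, k_3)
    check(expected_3, output_3)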
|
the-stack_0_7008 | """
A module implementing EOPatch merging utility
Credits:
Copyright (c) 2018-2020 William Ouellette
Copyright (c) 2017-2020 Matej Aleksandrov, Matej Batič, Grega Milčinski, Matic Lubej, Devis Peresutti (Sinergise)
Copyright (c) 2017-2020 Nejc Vesel, Jovan Višnjić, Anže Zupanc (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import functools
import warnings
from collections.abc import Callable
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame
from .constants import FeatureType
from .utilities import FeatureParser
def merge_eopatches(*eopatches, features=..., time_dependent_op=None, timeless_op=None):
""" Merge features of given EOPatches into a new EOPatch
:param eopatches: Any number of EOPatches to be merged together
:type eopatches: EOPatch
:param features: A collection of features to be merged together. By default all features will be merged.
:type features: object
:param time_dependent_op: An operation to be used to join data for any time-dependent raster feature. Before
joining time slices of all arrays will be sorted. Supported options are:
- None (default): If time slices with matching timestamps have the same values, take one. Raise an error
otherwise.
- 'concatenate': Keep all time slices, even the ones with matching timestamps
- 'min': Join time slices with matching timestamps by taking minimum values. Ignore NaN values.
- 'max': Join time slices with matching timestamps by taking maximum values. Ignore NaN values.
- 'mean': Join time slices with matching timestamps by taking mean values. Ignore NaN values.
- 'median': Join time slices with matching timestamps by taking median values. Ignore NaN values.
:type time_dependent_op: str or Callable or None
:param timeless_op: An operation to be used to join data for any timeless raster feature. Supported options
are:
- None (default): If arrays are the same, take one. Raise an error otherwise.
- 'concatenate': Join arrays over the last (i.e. bands) dimension
- 'min': Join arrays by taking minimum values. Ignore NaN values.
- 'max': Join arrays by taking maximum values. Ignore NaN values.
- 'mean': Join arrays by taking mean values. Ignore NaN values.
- 'median': Join arrays by taking median values. Ignore NaN values.
:type timeless_op: str or Callable or None
:return: A dictionary with EOPatch features and values
:rtype: Dict[(FeatureType, str), object]
"""
reduce_timestamps = time_dependent_op != 'concatenate'
time_dependent_op = _parse_operation(time_dependent_op, is_timeless=False)
timeless_op = _parse_operation(timeless_op, is_timeless=True)
all_features = {feature for eopatch in eopatches for feature in FeatureParser(features)(eopatch)}
eopatch_content = {}
timestamps, sort_mask, split_mask = _merge_timestamps(eopatches, reduce_timestamps)
eopatch_content[FeatureType.TIMESTAMP] = timestamps
for feature in all_features:
feature_type, feature_name = feature
if feature_type.is_raster():
if feature_type.is_time_dependent():
eopatch_content[feature] = _merge_time_dependent_raster_feature(
eopatches, feature, time_dependent_op, sort_mask, split_mask
)
else:
eopatch_content[feature] = _merge_timeless_raster_feature(eopatches, feature,
timeless_op)
if feature_type.is_vector():
eopatch_content[feature] = _merge_vector_feature(eopatches, feature)
if feature_type is FeatureType.META_INFO:
eopatch_content[feature] = _select_meta_info_feature(eopatches, feature_name)
if feature_type is FeatureType.BBOX:
eopatch_content[feature] = _get_common_bbox(eopatches)
return eopatch_content
def _parse_operation(operation_input, is_timeless):
""" Transforms operation's instruction (i.e. an input string) into a function that can be applied to a list of
arrays. If the input already is a function it returns it.
"""
if isinstance(operation_input, Callable):
return operation_input
try:
return {
None: _return_if_equal_operation,
'concatenate': functools.partial(np.concatenate, axis=-1 if is_timeless else 0),
'mean': functools.partial(np.nanmean, axis=0),
'median': functools.partial(np.nanmedian, axis=0),
'min': functools.partial(np.nanmin, axis=0),
'max': functools.partial(np.nanmax, axis=0)
}[operation_input]
except KeyError as exception:
raise ValueError(f'Merge operation {operation_input} is not supported') from exception
def _return_if_equal_operation(arrays):
""" Checks if arrays are all equal and returns first one of them. If they are not equal it raises an error.
"""
if _all_equal(arrays):
return arrays[0]
raise ValueError('Cannot merge given arrays because their values are not the same')
def _merge_timestamps(eopatches, reduce_timestamps):
""" Merges together timestamps from EOPatches. It also prepares masks on how to sort and join data in any
time-dependent raster feature.
"""
all_timestamps = [timestamp for eopatch in eopatches for timestamp in eopatch.timestamp
if eopatch.timestamp is not None]
if not all_timestamps:
return [], None, None
sort_mask = np.argsort(all_timestamps)
all_timestamps = sorted(all_timestamps)
if not reduce_timestamps:
return all_timestamps, sort_mask, None
split_mask = [
index + 1 for index, (timestamp, next_timestamp) in enumerate(zip(all_timestamps[:-1], all_timestamps[1:]))
if timestamp != next_timestamp
]
reduced_timestamps = [timestamp for index, timestamp in enumerate(all_timestamps)
if index == 0 or timestamp != all_timestamps[index - 1]]
return reduced_timestamps, sort_mask, split_mask
def _merge_time_dependent_raster_feature(eopatches, feature, operation, sort_mask, split_mask):
""" Merges numpy arrays of a time-dependent raster feature with a given operation and masks on how to sort and join
time raster's time slices.
"""
arrays = _extract_feature_values(eopatches, feature)
merged_array = np.concatenate(arrays, axis=0)
del arrays
if sort_mask is None:
return merged_array
merged_array = merged_array[sort_mask]
if split_mask is None or len(split_mask) == merged_array.shape[0] - 1:
return merged_array
split_arrays = np.split(merged_array, split_mask)
del merged_array
try:
split_arrays = [operation(array_chunk) for array_chunk in split_arrays]
except ValueError as exception:
raise ValueError(f'Failed to merge {feature} with {operation}, try setting a different value for merging '
f'parameter time_dependent_op') from exception
return np.array(split_arrays)
def _merge_timeless_raster_feature(eopatches, feature, operation):
""" Merges numpy arrays of a timeless raster feature with a given operation.
"""
arrays = _extract_feature_values(eopatches, feature)
if len(arrays) == 1:
return arrays[0]
try:
return operation(arrays)
except ValueError as exception:
raise ValueError(f'Failed to merge {feature} with {operation}, try setting a different value for merging '
f'parameter timeless_op') from exception
def _merge_vector_feature(eopatches, feature):
""" Merges GeoDataFrames of a vector feature.
"""
dataframes = _extract_feature_values(eopatches, feature)
if len(dataframes) == 1:
return dataframes[0]
crs_list = [dataframe.crs for dataframe in dataframes if dataframe.crs is not None]
if not crs_list:
crs_list = [None]
if not _all_equal(crs_list):
raise ValueError(f'Cannot merge feature {feature} because dataframes are defined for '
f'different CRS')
merged_dataframe = GeoDataFrame(pd.concat(dataframes, ignore_index=True), crs=crs_list[0])
merged_dataframe = merged_dataframe.drop_duplicates(ignore_index=True)
# In future a support for vector operations could be added here
return merged_dataframe
def _select_meta_info_feature(eopatches, feature_name):
""" Selects a value for a meta info feature of a merged EOPatch. By default the value is the first one.
"""
values = _extract_feature_values(eopatches, (FeatureType.META_INFO, feature_name))
if not _all_equal(values):
message = f'EOPatches have different values of meta info feature {feature_name}. The first value will be ' \
f'used in a merged EOPatch'
warnings.warn(message, category=UserWarning)
return values[0]
def _get_common_bbox(eopatches):
""" Makes sure that all EOPatches, which define a bounding box and CRS, define the same ones.
"""
bboxes = [eopatch.bbox for eopatch in eopatches if eopatch.bbox is not None]
if not bboxes:
return None
if _all_equal(bboxes):
return bboxes[0]
raise ValueError('Cannot merge EOPatches because they are defined for different bounding boxes')
def _extract_feature_values(eopatches, feature):
""" A helper function that extracts a feature values from those EOPatches where a feature exists.
"""
feature_type, feature_name = feature
return [eopatch[feature] for eopatch in eopatches if feature_name in eopatch[feature_type]]
def _all_equal(values):
""" A helper function that checks if all values in a given list are equal to each other.
"""
first_value = values[0]
if isinstance(first_value, np.ndarray):
is_numeric_dtype = np.issubdtype(first_value.dtype, np.number)
return all(np.array_equal(first_value, array, equal_nan=is_numeric_dtype) for array in values[1:])
return all(first_value == value for value in values[1:])
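# --- Illustrative usage sketch (not part of the original module) ---
# Assumes two EOPatch instances `patch_a` and `patch_b` loaded elsewhere (for
# example with EOPatch.load); by default all shared features are merged and a
# dict of merged feature values keyed by (FeatureType, name) is returned:
#
#   merged_content = merge_eopatches(
#       patch_a, patch_b,
#       time_dependent_op='mean',
#       timeless_op='max',
#   )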
|
the-stack_0_7011 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2012-2020 iSolver Software Solutions (C) 2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import division, absolute_import, print_function
from builtins import next
from past.builtins import basestring
from builtins import object
import numbers # numbers.Integral is like (int, long) but supports Py3
from tables import *
import os
from collections import namedtuple
import json
from ..errors import print2err
from pkg_resources import parse_version
import tables
if parse_version(tables.__version__) < parse_version('3'):
from tables import openFile as open_file
walk_groups = "walkGroups"
list_nodes = "listNodes"
get_node = "getNode"
read_where = "readWhere"
else:
from tables import open_file
walk_groups = "walk_groups"
list_nodes = "list_nodes"
get_node = "get_node"
read_where = "read_where"
_hubFiles = []
def openHubFile(filepath, filename, mode):
"""
Open an HDF5 DataStore file and register it so that it is closed even on interpreter crash.
"""
global _hubFiles
hubFile = open_file(os.path.join(filepath, filename), mode)
_hubFiles.append(hubFile)
return hubFile
def displayDataFileSelectionDialog(starting_dir=None):
"""Shows a FileDialog and lets you select a .hdf5 file to open for
processing."""
from psychopy.gui.qtgui import fileOpenDlg
filePath = fileOpenDlg(tryFilePath=starting_dir,
prompt = "Select a ioHub HDF5 File",
allowed='HDF5 Files (*.hdf5)')
if filePath is None:
return None
return filePath
def displayEventTableSelectionDialog(
title,
list_label,
list_values,
default=u'Select'):
from psychopy import gui
if default not in list_values:
list_values.insert(0, default)
else:
        list_values.remove(default)
list_values.insert(0, default)
selection_dict = dict(list_label=list_values)
dlg_info = dict(selection_dict)
infoDlg = gui.DlgFromDict(dictionary=dlg_info, title=title)
if not infoDlg.OK:
return None
while list(dlg_info.values())[0] == default and infoDlg.OK:
dlg_info=dict(selection_dict)
infoDlg = gui.DlgFromDict(dictionary=dlg_info, title=title)
if not infoDlg.OK:
return None
return list(dlg_info.values())[0]
########### Experiment / Experiment Session Based Data Access #################
class ExperimentDataAccessUtility(object):
"""The ExperimentDataAccessUtility provides a simple, high level, way to
access data saved in an ioHub DataStore HDF5 file. Data access is done by
providing information at an experiment and session level, as well as
specifying the ioHub Event types you want to retieve data for.
An instance of the ExperimentDataAccessUtility class is created by providing
the location and name of the file to read, as well as any session code
filtering you want applied to the retieved datasets.
Args:
hdfFilePath (str): The path of the directory the DataStore HDF5 file is in.
hdfFileName (str): The name of the DataStore HDF5 file.
experimentCode (str): If multi-experiment support is enabled for the DataStore file, this arguement can be used to specify what experiment data to load based on the experiment_code given. NOTE: Multi-experiment data file support is not well tested and should not be used at this point.
sessionCodes (str or list): The experiment session code to filter data by. If a list of codes is given, then all codes in the list will be used.
Returns:
object: the created instance of the ExperimentDataAccessUtility, ready to get your data!
"""
def __init__(
self,
hdfFilePath,
hdfFileName,
experimentCode=None,
sessionCodes=[],
mode='r'):
"""An instance of the ExperimentDataAccessUtility class is created by
providing the location and name of the file to read, as well as any
        session code filtering you want applied to the retrieved datasets.
Args:
hdfFilePath (str): The path of the directory the DataStore HDF5 file is in.
hdfFileName (str): The name of the DataStore HDF5 file.
            experimentCode (str): If multi-experiment support is enabled for the DataStore file, this argument can be used to specify what experiment data to load based on the experiment_code given. NOTE: Multi-experiment data file support is not well tested and should not be used at this point.
sessionCodes (str or list): The experiment session code to filter data by. If a list of codes is given, then all codes in the list will be used.
Returns:
object: the created instance of the ExperimentDataAccessUtility, ready to get your data!
"""
self.hdfFilePath = hdfFilePath
self.hdfFileName = hdfFileName
self.mode = mode
self.hdfFile = None
self._experimentCode = experimentCode
self._sessionCodes = sessionCodes
self._lastWhereClause = None
try:
self.hdfFile = openHubFile(hdfFilePath, hdfFileName, mode)
except Exception as e:
print(e)
raise ExperimentDataAccessException(e)
self.getExperimentMetaData()
def printTableStructure(self,tableName):
"""Print to stdout the current structure and content statistics of the
specified DataStore table. To print out the complete structure of the
DataStore file, including the name of all available tables, see the
printHubFileStructure method.
Args:
tableName (str): The DataStore table name to print metadata information out for.
"""
if self.hdfFile:
hubFile = self.hdfFile
for group in getattr(hubFile, walk_groups)("/"):
for table in getattr(hubFile, list_nodes)(group, classname='Table'):
if table.name == tableName:
print('------------------')
print('Path:', table)
print('Table name:', table.name)
print('Number of rows in table:', table.nrows)
print('Number of cols in table:', len(table.colnames))
print('Attribute name := type, shape:')
for name in table.colnames:
print('\t', name, ':= %s, %s' % (table.coldtypes[name], table.coldtypes[name].shape))
print('------------------')
return
def printHubFileStructure(self):
"""Print to stdout the current global structure of the loaded DataStore
File."""
if self.hdfFile:
print(self.hdfFile)
def getExperimentMetaData(self):
"""Returns the the metadata for the experiment the datStore file is
for.
**Docstr TBC.**
"""
if self.hdfFile:
expcols = self.hdfFile.root.data_collection.experiment_meta_data.colnames
if 'sessions' not in expcols:
expcols.append('sessions')
ExperimentMetaDataInstance = namedtuple(
'ExperimentMetaDataInstance', expcols)
experiments=[]
for e in self.hdfFile.root.data_collection.experiment_meta_data:
self._experimentID = e['experiment_id']
a_exp = list(e[:])
a_exp.append(self.getSessionMetaData())
experiments.append(ExperimentMetaDataInstance(*a_exp))
return experiments
def getSessionMetaData(self, sessions=None):
"""
        Returns the metadata associated with the experiment session codes in use.
**Docstr TBC.**
"""
if self.hdfFile:
if sessions is None:
sessions = []
sessionCodes = self._sessionCodes
sesscols = self.hdfFile.root.data_collection.session_meta_data.colnames
SessionMetaDataInstance = namedtuple('SessionMetaDataInstance', sesscols)
for r in self.hdfFile.root.data_collection.session_meta_data:
if (len(sessionCodes) == 0 or r['code'] in sessionCodes) and r[
'experiment_id'] == self._experimentID:
rcpy=list(r[:])
rcpy[-1]=json.loads(rcpy[-1])
sessions.append(SessionMetaDataInstance(*rcpy))
return sessions
def getTableForPath(self, path):
"""
        Given a valid table path within the DataStore file, return the associated table.
        """
        return getattr(self.hdfFile, get_node)(path)
def getEventTable(self, event_type):
"""
Returns the DataStore table that contains events of the specified type.
**Docstr TBC.**
"""
if self.hdfFile:
klassTables = self.hdfFile.root.class_table_mapping
event_column = None
event_value = None
if isinstance(event_type, basestring):
if event_type.find('Event') >= 0:
event_column = 'class_name'
event_value = event_type
                else:
                    event_column = 'class_name'
                    event_value = ''
                    tokens = event_type.split('_')
                    for t in tokens:
                        event_value += t[0].upper()+t[1:].lower()
                    event_value = event_value+'Event'
elif isinstance(event_type, numbers.Integral):
event_column = 'class_id'
event_value = event_type
else:
print2err(
                    'getEventTable error: event_type argument must be a string or an int')
return None
result = []
where_cls = '(%s == b"%s") & (class_type_id == 1)'%(event_column, event_value)
for row in klassTables.where(where_cls):
result.append(row.fetch_all_fields())
if len(result) == 0:
return None
if len(result)!= 1:
print2err(
'event_type_id passed to getEventAttribute can only return one row from CLASS_MAPPINGS: ',
len(result))
return None
tablePathString = result[0][3]
if isinstance(tablePathString, bytes):
tablePathString = tablePathString.decode('utf-8')
return getattr(self.hdfFile, get_node)(tablePathString)
return None
def getEventMappingInformation(self):
"""Returns details on how ioHub Event Types are mapped to tables within
the given DataStore file."""
if self.hdfFile:
eventMappings=dict()
class_2_table=self.hdfFile.root.class_table_mapping
EventTableMapping = namedtuple(
'EventTableMapping',
self.hdfFile.root.class_table_mapping.colnames)
for row in class_2_table[:]:
eventMappings[row['class_id']] = EventTableMapping(*row)
return eventMappings
return None
def getEventsByType(self, condition_str = None):
"""Returns a dict of all event tables within the DataStore file that
have at least one event instance saved.
Keys are Event Type constants, as specified by
iohub.EventConstants. Each value is a row iterator for events of
that type.
"""
eventTableMappings = self.getEventMappingInformation()
if eventTableMappings:
events_by_type = dict()
getNode = getattr(self.hdfFile, get_node)
for event_type_id, event_mapping_info in eventTableMappings.items():
try:
cond = '(type == %d)' % (event_type_id)
if condition_str:
cond += ' & ' + condition_str
et_path = event_mapping_info.table_path
if isinstance(et_path, bytes):
et_path = et_path.decode('utf-8')
events_by_type[event_type_id] = next(getNode(et_path).where(cond))
except StopIteration:
pass
return events_by_type
return None
def getConditionVariablesTable(self):
"""
**Docstr TBC.**
"""
cv_group = self.hdfFile.root.data_collection.condition_variables
ecv = 'EXP_CV_%d' % (self._experimentID,)
if ecv in cv_group._v_leaves:
return cv_group._v_leaves[ecv]
return None
def getConditionVariableNames(self):
"""
**Docstr TBC.**
"""
cv_group = self.hdfFile.root.data_collection.condition_variables
ecv = "EXP_CV_%d" % (self._experimentID,)
if ecv in cv_group._v_leaves:
ecvTable = cv_group._v_leaves[ecv]
return ecvTable.colnames
return None
def getConditionVariables(self, filter=None):
"""
**Docstr TBC.**
"""
if filter is None:
session_ids = []
for s in self.getExperimentMetaData()[0].sessions:
session_ids.append(s.session_id)
filter = dict(session_id=(' in ', session_ids))
ConditionSetInstance = None
for conditionVarName, conditionVarComparitor in filter.items():
avComparison, value = conditionVarComparitor
cv_group = self.hdfFile.root.data_collection.condition_variables
cvrows = []
ecv = "EXP_CV_%d" % (self._experimentID,)
if ecv in cv_group._v_leaves:
ecvTable = cv_group._v_leaves[ecv]
if ConditionSetInstance is None:
colnam = ecvTable.colnames
ConditionSetInstance = namedtuple('ConditionSetInstance', colnam)
                cvrows.extend(
                    [ConditionSetInstance(*r[:]) for r in ecvTable
                     if all([eval('{0} {1} {2}'.format(r[conditionVarName],
                                                       conditionVarComparitor[0],
                                                       conditionVarComparitor[1]))
                             for conditionVarName, conditionVarComparitor in filter.items()])])
return cvrows
def getValuesForVariables(self, cv, value, cvNames):
"""
**Docstr TBC.**
"""
if isinstance(value, (list, tuple)):
resolvedValues = []
for v in value:
                if isinstance(v, basestring) and v.startswith(
                        '@') and v.endswith('@'):
                    v = v[1:-1]
                    if v in cvNames:
                        resolvedValues.append(getattr(cv, v))
                    else:
                        raise ExperimentDataAccessException(
                            'getEventAttributeValues: {0} is not a valid attribute name in {1}'.format(
                                v, cvNames))
                elif isinstance(v, basestring):
                    resolvedValues.append(v)
return resolvedValues
elif isinstance(value, basestring) and value.startswith('@') and value.endswith('@'):
value = value[1:-1]
if value in cvNames:
return getattr(cv, value)
else:
raise ExperimentDataAccessException(
'getEventAttributeValues: {0} is not a valid attribute name in {1}'.format(
value, cvNames))
else:
raise ExperimentDataAccessException(
'Unhandled value type !: {0} is not a valid type for value {1}'.format(
type(value), value))
def getEventAttributeValues(
self,
event_type_id,
event_attribute_names,
filter_id=None,
conditionVariablesFilter=None,
startConditions=None,
endConditions=None):
"""
**Docstr TBC.**
Args:
event_type_id
event_attribute_names
conditionVariablesFilter
startConditions
endConditions
Returns:
            Values for the specified event type and event attribute columns which match the provided experiment condition variable filter, starting condition filter, and ending condition filter criteria.
"""
if self.hdfFile:
klassTables = self.hdfFile.root.class_table_mapping
deviceEventTable = None
result = [
row.fetch_all_fields() for row in klassTables.where(
'(class_id == %d) & (class_type_id == 1)' %
(event_type_id))]
if len(result) != 1:
raise ExperimentDataAccessException("event_type_id passed to getEventAttribute should only return one row from CLASS_MAPPINGS.")
tablePathString = result[0][3]
deviceEventTable = getattr(self.hdfFile, get_node)(tablePathString)
for ename in event_attribute_names:
if ename not in deviceEventTable.colnames:
raise ExperimentDataAccessException(
'getEventAttribute: %s does not have a column named %s' %
(deviceEventTable.title, event_attribute_names))
resultSetList = []
csier = list(event_attribute_names)
csier.append('query_string')
csier.append('condition_set')
EventAttributeResults = namedtuple('EventAttributeResults', csier)
if deviceEventTable is not None:
if not isinstance(event_attribute_names, (list, tuple)):
event_attribute_names = [event_attribute_names, ]
filteredConditionVariableList = None
if conditionVariablesFilter is None:
filteredConditionVariableList= self.getConditionVariables()
else:
filteredConditionVariableList = self.getConditionVariables(
conditionVariablesFilter)
cvNames = self.getConditionVariableNames()
            # no further where clause building needed; get results and
# return
if startConditions is None and endConditions is None:
for cv in filteredConditionVariableList:
wclause = '( experiment_id == {0} ) & ( session_id == {1} )'.format(
self._experimentID, cv.session_id)
wclause += ' & ( type == {0} ) '.format(event_type_id)
if filter_id is not None:
wclause += '& ( filter_id == {0} ) '.format(
filter_id)
resultSetList.append([])
for ename in event_attribute_names:
resultSetList[-1].append(getattr(deviceEventTable, read_where)(wclause, field=ename))
resultSetList[-1].append(wclause)
resultSetList[-1].append(cv)
eventAttributeResults = EventAttributeResults(
*resultSetList[-1])
resultSetList[-1]=eventAttributeResults
return resultSetList
#start or end conditions exist....
for cv in filteredConditionVariableList:
resultSetList.append([])
wclause = '( experiment_id == {0} ) & ( session_id == {1} )'.format(
self._experimentID, cv.session_id)
wclause += ' & ( type == {0} ) '.format(event_type_id)
if filter_id is not None:
wclause += '& ( filter_id == {0} ) '.format(filter_id)
# start Conditions need to be added to where clause
if startConditions is not None:
wclause += '& ('
for conditionAttributeName, conditionAttributeComparitor in startConditions.items():
avComparison,value=conditionAttributeComparitor
value = self.getValuesForVariables(
cv, value, cvNames)
wclause += ' ( {0} {1} {2} ) & '.format(
conditionAttributeName, avComparison, value)
wclause=wclause[:-3]
wclause += ' ) '
# end Conditions need to be added to where clause
if endConditions is not None:
wclause += ' & ('
for conditionAttributeName, conditionAttributeComparitor in endConditions.items():
avComparison,value=conditionAttributeComparitor
value = self.getValuesForVariables(
cv, value, cvNames)
wclause += ' ( {0} {1} {2} ) & '.format(
conditionAttributeName, avComparison, value)
wclause=wclause[:-3]
wclause += ' ) '
for ename in event_attribute_names:
resultSetList[-1].append(getattr(deviceEventTable, read_where)(wclause, field=ename))
resultSetList[-1].append(wclause)
resultSetList[-1].append(cv)
eventAttributeResults = EventAttributeResults(
*resultSetList[-1])
resultSetList[-1]=eventAttributeResults
return resultSetList
return None
def getEventIterator(self, event_type):
"""
**Docstr TBC.**
Args:
event_type
Returns:
(iterator): An iterator providing access to each matching event as a numpy recarray.
"""
return self.getEventTable(event_type).iterrows()
def close(self):
"""Close the ExperimentDataAccessUtility and associated DataStore
File."""
global _hubFiles
if self.hdfFile in _hubFiles:
_hubFiles.remove(self.hdfFile)
self.hdfFile.close()
self.experimentCodes = None
self.hdfFilePath = None
self.hdfFileName = None
self.mode = None
self.hdfFile = None
def __del__(self):
try:
self.close()
except Exception:
pass
class ExperimentDataAccessException(Exception):
pass
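# --- Illustrative usage sketch (not part of the original module) ---
# The paths and the event class name below are hypothetical placeholders;
# attribute names on the returned namedtuples depend on the DataStore schema.
#
#   dataAccessUtil = ExperimentDataAccessUtility('./results', 'events.hdf5')
#   dataAccessUtil.printHubFileStructure()
#   for experiment in dataAccessUtil.getExperimentMetaData():
#       print(experiment)
#   kb_table = dataAccessUtil.getEventTable('KeyboardPressEvent')
#   dataAccessUtil.close()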
|
the-stack_0_7013 | import pandas as pd
from bokeh.plotting import figure, show, curdoc
from bokeh.layouts import widgetbox, layout, row, column
from bokeh.models import ColumnDataSource, Button, Slider, Dropdown, PreText, DataTable, TableColumn, MultiSelect, NumberFormatter, Spacer
from collections import OrderedDict, Counter
import numpy as np
from functools import partial
import swing_table
import os
doc = curdoc()
file_path = os.path.dirname(os.path.abspath(__file__))
class MCDMModel:
def __init__(self):
self.rubric = pd.read_excel(os.path.join(file_path, "data/Rubric.xlsx"), "Rubric v3")
self.cost_model = pd.read_excel(os.path.join(file_path, "data/Rubric.xlsx"), "Cost_Model")
try:
self.rubric.drop(["Category", "Definition", "Grading Scale"], inplace=True, axis=1)
except KeyError:
pass
self.criteria = self.rubric["Criteria"].drop_duplicates().tolist()
self.swing_table = swing_table.create_swing_table()
self.chosen_criteria = []
self.criteria_selection = MultiSelect(title="Choose Criteria:", size=10)
self.choose_criteria()
self.rubric_values = self.rubric.replace("Excellent", 1.0)
self.rubric_values.replace("Good", 0.5, inplace=True)
self.rubric_values.replace("Poor", 0, inplace=True)
self.rubric_values = self.rubric_values.melt(id_vars=["Criteria"], var_name=["Tool"], value_name="Score")
self.weight_sliders = OrderedDict()
self.ranking = OrderedDict()
self.b = Button(label="Update Model", button_type="primary")
self.b.on_click(self.submit_callback)
self.criteria_b = Button(label="Submit Criteria", button_type="primary")
self.criteria_b.on_click(self.choose_criteria_callback)
self.clear_button = Button(label="Reset", button_type="warning")
self.clear_button.on_click(self.clear_model)
self.rank_submit = Button(label="Calculate Ranks", button_type="primary")
self.rank_submit.on_click(self.submit_ranks)
self.source = ColumnDataSource()
self.data_table = DataTable
self.app_layout = layout()
def clear_model(self):
self.swing_table = swing_table.create_swing_table()
self.app_layout.children.pop(1)
self.app_layout.children.append(layout([[self.swing_table]]))
def choose_criteria(self):
self.criteria_selection.options = self.rubric["Criteria"].drop_duplicates().tolist()
def choose_criteria_callback(self):
self.chosen_criteria = []
self.chosen_criteria = self.criteria_selection.value
if len(self.chosen_criteria) > 0:
self.ranking = OrderedDict()
self.rank_criteria()
self.swing_table = swing_table.create_swing_table(self.chosen_criteria)
try:
self.app_layout.children.pop(1)
except IndexError:
pass
self.app_layout.children.append(layout([[Spacer(width=300), self.swing_table],
*[self.ranking[k] for k in self.ranking.keys()],
[self.rank_submit],
[self.clear_button]]))
def rank_criteria(self):
for c in sorted(self.chosen_criteria):
self.ranking.update({c: [PreText(text="Scenario {}".format(sorted(self.criteria).index(c) + 1)),
Dropdown(menu=[(str(i), str(i)) for i in range(1, len(self.chosen_criteria) + 1)],
button_type="primary", label="Rank")]})
for k in self.ranking.keys():
self.ranking[k][1].on_change("value", partial(self.ranking_label_callback, k=k))
def weight_calc(self):
for c in self.chosen_criteria:
self.weight_sliders.update({c: Slider(start=0, end=1, step=.01, title=c, id=c,
value=1/len(self.chosen_criteria))})
self.weight_sliders[self.chosen_criteria[0]].disabled = True
self.weight_sliders[self.chosen_criteria[0]].value = 1
for w in self.weight_sliders.keys():
self.weight_sliders[w].on_change("value", partial(self.weight_callback, c=w))
def ranking_label_callback(self, attr, old, new, k):
self.ranking[k][1].label = new
if self.ranking[k][1].button_type == "danger":
print("test")
self.ranking[k][1].button_type = "primary"
try:
self.ranking[k].pop(-1)
self.app_layout.children.pop(1)
self.app_layout.children.append(layout([[Spacer(width=300), self.swing_table],
*[self.ranking[k] for k in self.ranking.keys()],
[self.rank_submit],
[self.clear_button]]))
except IndexError:
pass
def submit_ranks(self):
self.weight_sliders = OrderedDict()
ranks = []
for k in self.chosen_criteria:
if not self.ranking[k][1].value:
self.ranking[k][1].button_type = "danger"
self.ranking[k].append(PreText(text="Please enter a rank for all chosen criteria"))
self.app_layout.children.pop(1)
self.app_layout.children.append(layout([[Spacer(width=300), self.swing_table],
*[self.ranking[k] for k in self.ranking.keys()],
[self.rank_submit],
[self.clear_button]]))
else:
ranks.append(self.ranking[k][1].value)
if len(ranks) == len(self.ranking.keys()):
if len(ranks) != len(list(set(ranks))):
dup_values = []
for crit, count in Counter(ranks).items():
if count > 1:
dup_values.append(crit)
for k in self.ranking.keys():
if self.ranking[k][1].value in dup_values:
self.ranking[k][1].button_type = "danger"
self.ranking[k].append(PreText(text="Please enter unique ranks for each criteria"))
self.app_layout.children.pop(1)
self.app_layout.children.append(layout([[Spacer(width=300), self.swing_table],
*[self.ranking[k] for k in self.ranking.keys()],
[self.rank_submit],
[self.clear_button]]))
else:
for k in self.ranking.keys():
self.ranking[k][1].button_type = "primary"
temp_list = []
for r in np.argsort(ranks):
temp_list.append(self.chosen_criteria[r])
self.chosen_criteria = temp_list
self.add_weight_changes()
def weight_callback(self, attr, old, new, c):
next_index = self.chosen_criteria.index(c) + 1
prev_index = self.chosen_criteria.index(c) - 1
if next_index != len(self.chosen_criteria):
if self.weight_sliders[self.chosen_criteria[next_index]].value > new:
self.weight_sliders[self.chosen_criteria[next_index]].value = new
if prev_index != 0:
if self.weight_sliders[self.chosen_criteria[prev_index]].value < new:
self.weight_sliders[self.chosen_criteria[prev_index]].value = new
def submit_callback(self):
total_weight = sum([self.weight_sliders[s].value for s in self.weight_sliders.keys()])
normed_weights = []
for w in self.weight_sliders.keys():
normed_weights.append((w, self.weight_sliders[w].value/total_weight))
weights_df = pd.DataFrame(normed_weights, columns=["Criteria", "Normed_Weights"])
rubric_calc = self.rubric_values.merge(weights_df, on=["Criteria"])
rubric_calc = rubric_calc.merge(self.cost_model[["Tool", "Normalized Cost"]], on="Tool", how="left")
rubric_calc.loc[rubric_calc.Criteria == "Cost", "Score"] = rubric_calc["Normalized Cost"]
rubric_calc.drop("Normalized Cost", axis=1)
rubric_calc["WeightedScore"] = rubric_calc["Score"] * rubric_calc["Normed_Weights"]
values = rubric_calc[["Tool", "WeightedScore"]].groupby(["Tool"]).sum().reset_index()
values.sort_values(by="WeightedScore", inplace=True, ascending=False)
values["Rank"] = values.rank(method="dense", numeric_only=True, ascending=False)
self.source = ColumnDataSource()
self.source.data.update({"tool": values["Tool"].tolist(), "score": values["WeightedScore"],
"rank": values["Rank"].tolist()})
self.add_rank_table()
def start_model(self):
self.app_layout = layout([[self.criteria_selection, self.criteria_b]])
self.app_layout.children.append(layout(self.swing_table))
return self.app_layout
def add_weight_changes(self):
self.weight_calc()
buttons = zip([self.ranking[k][0] for k in self.chosen_criteria],
[self.ranking[k][1] for k in self.chosen_criteria],
[self.weight_sliders[k] for k in self.weight_sliders.keys()])
b_layout = [[t[0], t[1], t[2]] for t in buttons]
b_layout.append([self.rank_submit, self.b])
b_layout.append(self.clear_button)
b_layout.insert(0, [Spacer(width=300), self.swing_table])
self.app_layout.children.pop(1)
self.app_layout.children.append(layout(b_layout))
def add_rank_table(self):
columns = [TableColumn(field="tool", title="Tool"),
TableColumn(field="score", title="Weighted Score", formatter=NumberFormatter(format="0.00")),
TableColumn(field="rank", title="Rank")]
self.data_table = DataTable(columns=columns, source=self.source, reorderable=True)
buttons = zip([self.ranking[k][0] for k in self.chosen_criteria],
[self.ranking[k][1] for k in self.chosen_criteria],
[self.weight_sliders[k] for k in self.weight_sliders.keys()])
self.app_layout.children.pop(1)
b_layout = [[t[0], t[1], t[2]] for t in buttons]
b_layout.append([self.rank_submit, self.b])
b_layout.append(widgetbox(self.data_table))
b_layout.append([self.clear_button])
b_layout.insert(0, [Spacer(width=300), self.swing_table])
self.app_layout.children.append(layout(b_layout))
mcdm = MCDMModel()
app_layout = mcdm.start_model()
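# Hedged assumption: the layout presumably needs to be attached to the Bokeh
# document created above for `bokeh serve` to render the app.
doc.add_root(app_layout)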
|
the-stack_0_7014 | #!/usr/bin/env python3
import os
import pathlib
import sys
import github
import msgpack
import packaging.version
from jinja2 import Template
from slugify import slugify
from tqdm import tqdm
DISABLE_TQDM = "CI" in os.environ
HEADERS = {"user-agent": "https://github.com/salt-extensions/salt-extensions-index"}
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent
LOCAL_CACHE_PATH = pathlib.Path(
os.environ.get("LOCAL_CACHE_PATH") or REPO_ROOT.joinpath(".cache")
)
if not LOCAL_CACHE_PATH.is_dir():
LOCAL_CACHE_PATH.mkdir(0o755)
PACKAGE_INFO_CACHE = LOCAL_CACHE_PATH / "packages-info"
if not PACKAGE_INFO_CACHE.is_dir():
PACKAGE_INFO_CACHE.mkdir(0o755)
BLACKLISTED_EXTENSIONS = {"salt-extension"}
print(f"Local Cache Path: {LOCAL_CACHE_PATH}", file=sys.stderr, flush=True)
if sys.version_info < (3, 7):
print("This script is meant to only run on Py3.7+", file=sys.stderr, flush=True)
def set_progress_description(progress, message):
progress.set_description(f"{message: <60}")
def get_lastest_major_releases(progress, count=3):
# This logic might have to change because the order of tags seems to be by creation time
set_progress_description(progress, "Searching for latest salt releases...")
gh = github.Github(login_or_token=os.environ.get("GITHUB_TOKEN") or None)
repo = gh.get_repo("saltstack/salt")
releases = []
last_version = None
for tag in repo.get_tags():
if len(releases) == count:
break
version = packaging.version.parse(tag.name)
try:
if version.major < 3000:
# Don't test versions of salt older than 3000
continue
except AttributeError:
progress.write(f"Failed to parse tag {tag}")
continue
if last_version is None:
last_version = version
releases.append(tag.name)
continue
if version.major == last_version.major:
continue
last_version = version
releases.append(tag.name)
progress.write(f"Found the folowing salt releases: {', '.join(releases)}")
return releases
def collect_extensions_info():
packages = {}
for path in sorted(PACKAGE_INFO_CACHE.glob("*.msgpack")):
url = None
if path.stem in BLACKLISTED_EXTENSIONS:
continue
package_data = msgpack.unpackb(path.read_bytes())
package = package_data["info"]["name"]
for urlinfo in package_data["urls"]:
if urlinfo["packagetype"] == "sdist":
url = urlinfo["url"]
break
if url is not None:
packages[package] = url
else:
packages[package] = "no-sdist"
return packages
def main():
workflow = REPO_ROOT / ".github" / "workflows" / "test-extensions.yml"
content = (
REPO_ROOT / ".github" / "workflows" / "templates" / "generate-index-base.yml"
).read_text()
platform_templates = (
REPO_ROOT / ".github" / "workflows" / "templates" / "linux.yml.j2",
REPO_ROOT / ".github" / "workflows" / "templates" / "macos.yml.j2",
REPO_ROOT / ".github" / "workflows" / "templates" / "windows.yml.j2",
)
packages = collect_extensions_info()
progress = tqdm(
total=len(packages),
unit="pkg",
unit_scale=True,
desc=f"{' ' * 60} :",
disable=DISABLE_TQDM,
)
progress.write("Currently known extensions:")
for package in packages:
progress.write(f" * {package}")
try:
salt_versions = get_lastest_major_releases(progress)
except Exception as exc:
progress.write(f"Failed to get latest salt releases: {exc}")
return 1
common_context = {
"salt_versions": salt_versions,
"python_versions": ["3.5", "3.6", "3.7", "3.8", "3.9"],
}
with progress:
needs = []
for package, url in packages.items():
set_progress_description(progress, f"Processing {package}")
context = common_context.copy()
slug = slugify(package)
context["slug"] = slug
context["package"] = package
context["package_url"] = url
for template_path in platform_templates:
content += Template(template_path.read_text()).render(**context)
for platform in ("linux", "macos", "windows"):
needs.append(f"{slug}-{platform}")
progress.update()
generate_extensions_index = (
REPO_ROOT / ".github" / "workflows" / "templates" / "generate-index.yml.j2"
)
set_progress_description(progress, "Writing workflow")
content += Template(generate_extensions_index.read_text()).render(needs=needs)
workflow.write_text(content.rstrip() + "\n")
progress.write("Complete")
return 0
if __name__ == "__main__":
exitcode = 0
try:
main()
except Exception:
exitcode = 1
raise
finally:
sys.exit(exitcode)
|
the-stack_0_7015 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OcrLine(Model):
"""An object describing a single recognized line of text.
:param bounding_box: Bounding box of a recognized line. The four integers
represent the x-coordinate of the left edge, the y-coordinate of the top
edge, width, and height of the bounding box, in the coordinate system of
the input image, after it has been rotated around its center according to
the detected text angle (see textAngle property), with the origin at the
top-left corner, and the y-axis pointing down.
:type bounding_box: str
:param words: An array of objects, where each object represents a
recognized word.
:type words:
list[~azure.cognitiveservices.vision.computervision.models.OcrWord]
"""
_attribute_map = {
'bounding_box': {'key': 'boundingBox', 'type': 'str'},
'words': {'key': 'words', 'type': '[OcrWord]'},
}
def __init__(self, bounding_box=None, words=None):
super(OcrLine, self).__init__()
self.bounding_box = bounding_box
self.words = words
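# --- Illustrative usage sketch (not part of the generated client code) ---
# The bounding box value below is a made-up placeholder; running it only needs
# the msrest dependency already imported above.
if __name__ == '__main__':
    example_line = OcrLine(bounding_box='21,77,435,43', words=[])
    print(example_line.bounding_box)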
|
the-stack_0_7018 | import allure
from tep.client import request
@allure.title("重定向--put")
def test(env_vars):
# 描述
# 数据
# 请求
response = request(
"put",
url=env_vars.domain + "/redirect-to?url=https%3A%2F%2Fwww.baidu.com&status_code=200",
headers={'Host': 'httpbin.org', 'Proxy-Connection': 'keep-alive', 'Content-Length': '47', 'accept': 'text/html',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.109 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded', 'Origin': 'http://httpbin.org',
'Referer': 'http://httpbin.org/', 'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Cookie': 'stale_after=never; fake=fake_value; freeform=3; name=dongfanger'},
)
    # Extract
    # Assert
assert response.status_code == 404
|
the-stack_0_7019 | from __future__ import annotations
import re
import warnings
from typing import TYPE_CHECKING, Any, Dict, List, Optional
import pandas as pd
from dateutil import parser
from cimsparql.query_support import combine_statements, unionize
if TYPE_CHECKING: # pragma: no cover
from cimsparql.model import CimModel
as_type_able = [int, float, str, "Int64", "Int32", "Int16"]
python_type_map = {
"string": str,
"integer": int,
"boolean": lambda x: x.lower() == "true",
"float": float,
"dateTime": parser.parse,
}
uri_snmst = re.compile("^urn:snmst:#_")
sparql_type_map = {"literal": str, "uri": lambda x: uri_snmst.sub("", x)}
class TypeMapperQueries:
@property
def generals(self) -> List[List[str]]:
"""For sparql-types that are not sourced from objects of type rdf:property, sparql & type are
required
Sparql values should be like: http://iec.ch/TC57/2010/CIM-schema-cim15#PerCent this is how
type or DataType usually looks like for each data point in the converted query result from
SPARQLWrapper.
type can be anything as long as it is represented in the python_type_map.
"""
return [
[
"?sparql_type rdf:type rdfs:Datatype",
"?sparql_type owl:equivalentClass ?range",
'BIND(STRBEFORE(str(?range), "#") as ?prefix)',
'BIND(STRAFTER(str(?range), "#") as ?type)',
]
]
@property
def prefix_general(self) -> List[str]:
"""Common query used as a base for all prefix_based queries."""
return [
"?sparql_type rdf:type rdf:Property",
"?sparql_type rdfs:range ?range",
'BIND(STRBEFORE(str(?range), "#") as ?prefix)',
]
@property
def prefix_based(self) -> Dict[str, List[str]]:
"""Each prefix can have different locations of where DataTypes are described.
        Based on an object of type rdf:Property & its rdfs:range, one has to edit the query such that
one ends up with the DataType.
"""
return {
"https://www.w3.org/2001/XMLSchema": ["?range rdfs:label ?type"],
"https://iec.ch/TC57/2010/CIM-schema-cim15": [
"?range owl:equivalentClass ?class",
"?class rdfs:label ?type",
],
}
@property
def query(self) -> str:
select_query = "SELECT ?sparql_type ?type ?prefix"
grouped_generals = [combine_statements(*g, split=" .\n") for g in self.generals]
grouped_prefixes = [
combine_statements(*v, f'FILTER (?prefix = "{k}")', split=" .\n")
for k, v in self.prefix_based.items()
]
grouped_prefix_general = combine_statements(*self.prefix_general, split=" .\n")
unionized_generals = unionize(*grouped_generals)
unionized_prefixes = unionize(*grouped_prefixes)
full_prefixes = combine_statements(grouped_prefix_general, unionized_prefixes, group=True)
full_union = unionize(unionized_generals, full_prefixes, group=False)
return f"{select_query}\nWHERE\n{{\n{full_union}\n}}"
class TypeMapper(TypeMapperQueries):
def __init__(self, client: CimModel, custom_additions: Optional[Dict[str, Any]] = None) -> None:
self.prefixes = client.prefixes
custom_additions = custom_additions if custom_additions is not None else {}
self.map = {**sparql_type_map, **self.get_map(client), **custom_additions}
def have_cim_version(self, cim) -> bool:
return cim in (val.split("#")[0] for val in self.map.keys())
@staticmethod
def type_map(df: pd.DataFrame) -> Dict[str, Any]:
df["type"] = df["type"].str.lower()
d = df.set_index("sparql_type").to_dict("index")
return {k: python_type_map.get(v.get("type", "String")) for k, v in d.items()}
@staticmethod
def prefix_map(df: pd.DataFrame) -> Dict[str, Any]:
df = df.loc[~df["prefix"].isna()].head()
df["comb"] = df["prefix"] + "#" + df["type"]
df = df.drop_duplicates("comb")
d2 = df.set_index("comb").to_dict("index")
return {k: python_type_map.get(v.get("type", "String")) for k, v in d2.items()}
def get_map(self, client: CimModel) -> Dict[str, Any]:
"""Reads all metadata from the sparql backend & creates a sparql-type -> python type map
Args:
client: initialized CimModel
Returns:
sparql-type -> python type map
"""
df = client.get_table(self.query, map_data_types=False)
if df.empty:
return {}
type_map = self.type_map(df)
prefix_map = self.prefix_map(df)
xsd_map = {
f"{self.prefixes['xsd']}#{xsd_type}": xsd_map
for xsd_type, xsd_map in python_type_map.items()
}
return {**type_map, **prefix_map, **xsd_map}
def get_type(
self,
sparql_type: str,
missing_return: str = "identity",
custom_maps: Optional[Dict[str, Any]] = None,
):
"""Gets the python type/function to apply on columns of the sparql_type
Args:
sparql_type:
missing_return: returns the identity-function if python- type/function is not found,
else returns None
custom_maps: dictionary on the form {'sparql_data_type': function/datatype} overwrites
the default types gained from the graphdb. Applies the function/datatype on all
columns in the DataFrame that are of the sparql_data_type
Returns:
python datatype or function to apply on DataFrame columns
"""
type_map = {**self.map, **custom_maps} if custom_maps is not None else self.map
try:
return type_map[sparql_type]
except KeyError:
warnings.warn(f"{sparql_type} not found in the sparql -> python type map")
if missing_return == "identity":
return lambda x: x
return None
def convert_dict(
self, d: Dict, drop_missing: bool = True, custom_maps: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Converts a col_name -> sparql_datatype map to a col_name -> python_type map
Args:
d: dictionary with {'column_name': 'sparql type/DataType'}
drop_missing: drops columns where no corresponding python type could be found
custom_maps: dictionary on the form {'sparql_data_type': function/datatype} overwrites
the default types gained from the graphdb. Applies the function/datatype on all
columns in the DataFrame that are of the sparql_data_type.
Returns:
col_name -> python_type/function map
"""
missing_return = "None" if drop_missing else "identity"
base = {
column: self.get_type(data_type, missing_return, custom_maps)
for column, data_type in d.items()
}
if drop_missing:
return {key: value for key, value in base.items() if value is not None}
return base
@staticmethod
def map_base_types(df: pd.DataFrame, type_map: Dict) -> pd.DataFrame:
"""Maps the datatypes in type_map which can be used with the df.astype function
Args:
df:
type_map: {'column_name': type/function} map of functions/types to apply on the columns
Returns:
mapped DataFrame
"""
as_type_able_columns = {c for c, datatype in type_map.items() if datatype in as_type_able}
if not df.empty:
df = df.astype({column: type_map[column] for column in as_type_able_columns})
return df
@staticmethod
def map_exceptions(df: pd.DataFrame, type_map: Dict) -> pd.DataFrame:
"""Maps the functions/datatypes in type_map which cant be done with the df.astype function
Args:
df:
type_map: {'column_name': type/function} map of functions/types to apply on the columns
Returns:
mapped DataFrame
"""
ex_columns = {c for c, datatype in type_map.items() if datatype not in as_type_able}
for column in ex_columns:
df[column] = df[column].apply(type_map[column])
return df
def map_data_types(
self, df: pd.DataFrame, col_map: Dict, custom_maps: Dict = None, columns: Dict = None
) -> pd.DataFrame:
"""Maps the dtypes of a DataFrame to the python-corresponding types of the sparql-types from the
source data
Args:
df: DataFrame with columns to be converted
            col_map: dictionary on the form {'column_name': 'sparql type/DataType'} describing
                the columns of the DataFrame
custom_maps: dictionary on the form {'sparql_data_type': function/datatype} overwrites
the default types gained from the graphdb. Applies the function/datatype on all
columns in the DataFrame that are of the sparql_data_type.
columns: dictionary on the form {'DataFrame_column_name: function/datatype} overwrites
the default types gained from the graphdb. Applies the function/datatype on the
column.
Returns:
mapped DataFrame
"""
        type_map = {**self.convert_dict(col_map, custom_maps=custom_maps), **(columns or {})}
df = self.map_base_types(df, type_map)
df = self.map_exceptions(df, type_map)
return df
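# A minimal usage sketch, assuming an already initialised CimModel client; the variable
# name `client`, the example column name and the sparql type below are illustrative
# placeholders, not values defined by this module:
#
#   mapper = TypeMapper(client)
#   col_map = {"p": "http://iec.ch/TC57/2010/CIM-schema-cim15#ActivePower"}
#   typed_df = mapper.map_data_types(raw_df, col_map, columns={"name": str})
#
# map_data_types first turns col_map into a column -> python-type map via convert_dict,
# then applies astype-able types in bulk (map_base_types) and the remaining converters
# column by column (map_exceptions).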
|
the-stack_0_7020 | """
Includes the XMattersEvent class which wraps the xMatters Event to make it easier
to use correct formatting
"""
import json
# pylint: disable = import-error
from common_utils.setup_logging import setup_logging
# pylint: enable = import-error
DEFAULT_LOGGER = setup_logging('xmatters_alert_action.log', 'xmatters_event')
class XMattersEvent(object):
"""
Class that wraps an xMatters Event so that it is easier to use correct formatting
"""
def __init__(self, **kwargs):
"""
Constructor, takes no arguments
"""
self.logger = kwargs.get('logger', DEFAULT_LOGGER)
self.properties = {}
self.recipients = []
self.priority = None
self.valid_priorities = [
'HIGH',
'MEDIUM',
'LOW'
]
def add_property(self, key, value):
"""
Adds a property to the event
@param key: <str>, The name of the property
@param value: <str>, The value of the property
"""
self.properties[key] = value
def add_recipient(self, target_name):
"""
Adds a recipient to the recipients list in the xMatters Event
@param target_name: <str>, the target name of the user, group, team, device in xMatters
"""
self.recipients.append({
'targetName': target_name
})
def set_priority(self, priority):
"""
Sets the priority of the xMatters Event
@param priority: <str>, valid values are HIGH, MEDIUM, and LOW (case insensitive)
@raise: ValueError, if the priority is invalid
"""
upper_priority = priority.upper()
if upper_priority in self.valid_priorities:
self.priority = upper_priority
else:
            raise ValueError('error=XM_INVALID_PRIORITY value=%s valid_priorities=%s' % (
                upper_priority,
                ';'.join(self.valid_priorities)
            ))
def get_json_payload(self):
"""
Gets the json payload as a string to send to xMatters
@return <str>
"""
body = {
'properties': self.properties
}
# empty arrays are considered falsey in python
if self.recipients:
body['recipients'] = self.recipients
if self.priority is not None:
body['priority'] = self.priority
return json.dumps(body)
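if __name__ == '__main__':
    # Minimal usage sketch: the recipient and property values below are made-up
    # examples, not real xMatters targets.
    event = XMattersEvent()
    event.add_property('host', 'splunk-search-head-01')
    event.add_recipient('oncall-team')
    event.set_priority('high')
    print(event.get_json_payload())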
|
the-stack_0_7022 | #searches file
import sqlite3
import os
import databaseCreate
#search function
def searchSong(searchBy, searchText):
    databaseCreate.createDb()
    # Column names cannot be bound as SQL parameters, so the column chosen from the
    # fixed menu is interpolated and the search text itself is bound as a parameter.
    query = "SELECT * FROM song WHERE {} = ?".format(searchBy)
    try:
        db = sqlite3.connect("SongStorage.db")
        cur = db.cursor()
        cur.execute(query, (searchText,))
        output = cur.fetchall()
        db.close()
    except Exception:
        print("There was a problem while accessing our systems")
        input("press Enter to continue")
        return
    print("===================================")
    print("SEARCHED RESULTS ARE HERE:")
    print("===================================")
    if not output:
        print("NO RECORDS FOUND")
        print("===================================")
    else:
        # Assumes the song table columns are ordered: title, star, costar, year, genre
        for entry in output:
            print("Title: " + str(entry[0]))
            print("Star: " + str(entry[1]))
            print("Costar: " + str(entry[2]))
            print("Year: " + str(entry[3]))
            print("Genre: " + str(entry[4]))
            print("===================================")
    input("Press enter to continue")
#take user inputs and run the function above to query the database
def searchLookup():
print ("""
===============================
DVD LOOKUP:
===============================
Enter the criteria to look up by:
1 - Song title
2 - Star
3 - Costar
4 - Year released
5 - Genre""")
choice = input("\nType a number and press enter: ")
try:
choice = int(choice)
if choice == 1:
searchBy = "title"
searchText = input("Enter the song title to search for: ")
elif choice == 2:
searchBy = "star"
searchText = input('Enter the song star name to search for: ')
elif choice == 3:
searchBy = "costar"
searchText = input("Enter the song costar name to search for: ")
elif choice == 4:
searchBy = "year"
searchText = input("Enter the song release year to search for: ")
elif choice == 5:
            searchBy = "genre"
print ("""
Enter the genre to search for:
1 - Drama
2 - reggae
3 - Rnb
4 - Romance
""")
entrychoice=input("Your value please!\t")
try:
entrychoice = int(entrychoice)
if entrychoice == 1:
searchText = "Drama"
elif entrychoice == 2:
searchText = "Reggae"
elif entrychoice == 3 :
searchText = "Rnb"
elif entrychoice == 4:
searchText = "Romance"
else:
print("Error in your choice")
input("Press enter to return to the main menu:")
except:
print("Please enter only numbers please!")
except:
print("Choose an integer please!")
searchSong(searchBy , searchText)
|
the-stack_0_7025 | #!/usr/local/bin/python3
import json, os, sys
from diff_adt import DiffConfig, DiffResult
from time import localtime, strftime
from subprocess import call
from diff_lev import *
CURRENT_TIMESTAMP = strftime("%Y-%m-%d-%H%M", localtime())
DEBUG_MODE = False
def main():
print('Getting config and preparing run ...', end=' ')
config = get_config()
print('done!')
print('Initiating grading', *config.labs, '...', end='\n\n')
# Detect initial run or directory structure corruption and run setup
if not (os.path.isdir(config.csv_path)
and os.path.isdir(config.rosters_dir)
and os.path.isdir(config.results_dir)
and os.path.isdir(config.submissions_dir)):
run_init_setup(config)
rosters = build_rosters(config.roster_paths)
write_lab_list_for_MATLAB(config)
if not setup_solution_files(config):
print('\n\nUnable to set up reference solutions. Exiting.')
exit(1)
print(' Running MATLAB script to generate student outputs ...', end='\n\n')
print('\n\nMATLAB run ' + ('finished!' if generate_MATLAB_output() else '\n\nFAILED!'), end='\n\n')
print('Comparing results and writing output ...', end=' ')
result = DiffResult()
for lab in config.labs:
diff_lab_outputs(result, lab[:-2], config)
output_result_to_csv(result, config, rosters)
for lab in config.labs:
output_result_to_csv(result, config, rosters, lab_num=lab[3:-2])
print('ALL DONE!', end='\n\n')
def run_init_setup(config):
print('Looks like this is the first time you are running this script.\n'
'Let me set up some directories ...', end='\n\n')
for p in [config.csv_path, config.rosters_dir, config.results_dir, config.submissions_dir]:
if p is config.submissions_dir:
mkdir(config.submissions_dir)
for lab in config.labs:
mkdir(config.submissions_dir + lab[:-2])
else:
mkdir(p)
print('\nAll set up! Now, copy student submissions into {}labXX/, and'.format(config.submissions_dir),
'\nplace the class rosters (CSV exported from PolyLearn) into {}.'.format(config.rosters_dir),
'\nOnce copying is done, please re-run:', *sys.argv)
exit(0)
def mkdir(directory):
print(' mkdir', directory)
if not os.path.isdir(directory):
os.mkdir(directory)
def write_lab_list_for_MATLAB(config):
# Write list of labs to .dat file for MATLAB to read which items to execute
lab_list_dat = open(config.submissions_dir + 'lab_list.dat', 'w')
for name in config.labs:
lab_list_dat.write(name + '\n')
lab_list_dat.close()
def setup_solution_files(config):
new_solutions_success = False
default_solution_success = False
if check_solution_source(config):
print('Solution source for all labs detected.\n',
' Firing up MATLAB to generate new solutions ...', end='\n\n')
sys.stdout.flush()
new_solutions_success = generate_new_solutions(config)
print('Solution generation', 'successful!' if new_solutions_success else 'failed :(', end='\n\n')
if not new_solutions_success:
print('Could not find solution sources for all labs.\n',
' Copying default solutions over instead ... ', end='\n ')
sys.stdout.flush()
default_solution_success = copy_default_solutions(config)
print('\nCopy complete!' if default_solution_success
else 'Copy failed! Please check permissions.', end='\n\n')
return new_solutions_success or default_solution_success
def check_solution_source(config):
result = True
for lab in config.labs:
result &= os.path.isfile(config.solutions_dir + 'source/' + lab)
return result
def generate_new_solutions(config):
return not call(['matlab', '-nodesktop', '-nosplash', '-nodisplay', '-r',
"try, cd '{}', pwd, run('./generate_solution'), catch exc, getReport(exc), end, exit".format(
os.getcwd())])
def copy_default_solutions(config):
result = True
default_dir = config.solutions_dir + 'default/'
for file_name in os.listdir(default_dir):
if '.txt' in file_name:
result &= not call(['cp', default_dir + file_name, config.solutions_dir])
return result
def generate_MATLAB_output():
script = 'generate_output_vm.m' if len(sys.argv) > 1 and sys.argv[1].lower() == '-vm' else 'generate_output.m'
return not call(['matlab', '-nodesktop', '-nosplash', '-nodisplay', '-r',
"try, cd '{}', pwd, run('./{}'), catch exc, getReport(exc), end, exit".format(
os.getcwd(), script)])
def diff_lab_outputs(result_obj, lab_dir_name, config):
submissions_dir = config.submissions_dir
solutions_dir = config.solutions_dir
results_dir = config.results_dir
files = [f for f in os.listdir(results_dir)
if os.path.isfile(os.path.join(results_dir, f)) and lab_dir_name in f]
solution_file = solutions_dir + lab_dir_name + '.out.txt'
alt_solution_file = solutions_dir + lab_dir_name + '.alt.txt'
for f in files:
lab_index = f.find('_lab')
author_name = join_last_name(f[:lab_index])
if DEBUG_MODE:
print('comparing', solution_file, 'and', submissions_dir + f, 'for ' + author_name, end='')
if os.path.isfile(alt_solution_file):
diff_result = max(cmp(solution_file, results_dir + f),
cmp(alt_solution_file, results_dir + f))
else:
diff_result = cmp(solution_file, results_dir + f)
if DEBUG_MODE:
print(' ... comparison result', diff_result)
result_obj.add_result(author_name, lab_dir_name, round(diff_result * config.score_out_of, 2))
def output_result_to_csv(result_obj, config, rosters, lab_num=''):
if DEBUG_MODE:
print('Final Result Object:\n', result_obj)
rosters.append(("", []))
csv_roster = {}
for id, roster in rosters:
csv = open('{}{}_{}{}{}.csv'.format(
config.csv_path,
CURRENT_TIMESTAMP,
config.csv_name,
('_' if id else '') + id,
('_lab' + lab_num) if lab_num else ''
), 'w')
if lab_num:
write_to_csv(csv, config.csv_header + 'lab' + lab_num)
else:
write_to_csv(csv, config.csv_header + str(config.labs)[1:-1].replace(' ', ''))
csv_roster[id] = (csv, roster)
result = result_obj.result
result_tuple_list = sorted([(k, v) for k, v in result.items()])
for author_name, diff_results in result_tuple_list:
id = find_roster_id_for_author(author_name, rosters)
all_results = per_author_result_to_csv_entry(config.labs, diff_results)
entry_str = '{},{},{}'.format(
author_name.replace('_', ','),
csv_roster[id][1][author_name] if id else '',
all_results if not lab_num else (
str(diff_results['lab' + lab_num]) if 'lab' + lab_num in diff_results else ''
)
)
csv_to_write_to = csv_roster[id][0]
write_to_csv(csv_to_write_to, entry_str)
if csv_to_write_to is not csv_roster[""][0]:
write_to_csv(csv_roster[""][0], entry_str)
for csv, _ in csv_roster.values():
csv.close()
def per_author_result_to_csv_entry(lab_file_names, author_result):
csv_entry_str = ''
for lab_file_name in lab_file_names:
lab = lab_file_name[:-2]
csv_entry_str += str(author_result[lab]) if lab in author_result else ''
csv_entry_str += ','
return csv_entry_str[:-1]
def find_roster_id_for_author(author_name, rosters):
for id, roster in rosters:
if DEBUG_MODE:
print('Author name used to look up in roster: ' + author_name)
print('Roster\n' + str(roster))
if author_name in roster:
return id
return ""
def write_to_csv(csv_file, line_to_write):
if DEBUG_MODE:
print(line_to_write)
csv_file.write(line_to_write + '\n')
def get_config():
with open('diff_config.json') as data_file:
data = json.load(data_file)
# get the list of lab file names
lab_file_names = []
for num in data['labs']:
lab_file_names.append('lab{:02}.m'.format(num) if num else 'final.m')
return DiffConfig(lab_file_names,
data['submissions_dir'], data['solutions_dir'], data['rosters_dir'], data['results_dir'],
data['result_csv_path'], data['result_csv_name'], data['score_out_of'],
data['roster_paths'])
def join_last_name(orig_name_str, wrapper_str='"'):
tokens = orig_name_str.split('_')
if len(tokens) > 2:
return '_'.join([tokens[0], '{}{}{}'.format(wrapper_str, ' '.join(tokens[1:]), wrapper_str)])
else:
return orig_name_str
def build_rosters(roster_paths):
rosters = []
for id, path in roster_paths:
roster_file = open(path, 'r')
lines = roster_file.readlines()[1:]
roster = {}
for l in lines:
tokens = l.split(',')
author_name = '_'.join(tokens[:2])
author_email = tokens[2]
roster[author_name] = author_email
rosters.append((id, roster))
return rosters
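# Expected shape of diff_config.json, mirroring the keys read in get_config(); the
# values shown are illustrative placeholders, not the real course configuration:
#
#   {
#     "labs": [1, 2, 3],
#     "submissions_dir": "./submissions/",
#     "solutions_dir": "./solutions/",
#     "rosters_dir": "./rosters/",
#     "results_dir": "./results/",
#     "result_csv_path": "./csv/",
#     "result_csv_name": "grades",
#     "score_out_of": 10,
#     "roster_paths": [["section01", "./rosters/section01.csv"]]
#   }
#
# A lab number of 0 maps to final.m instead of labXX.m (see get_config).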
if __name__ == '__main__':
main()
|
the-stack_0_7027 | # Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from threading import Lock
from fasteners.process_lock import InterProcessLock
from os.path import exists
from os import chmod
class ComboLock:
""" A combined process and thread lock.
Args:
path (str): path to the lockfile for the lock
"""
def __init__(self, path):
# Create lock file if it doesn't exist and set permissions for
# all users to lock/unlock
if not exists(path):
f = open(path, 'w+')
f.close()
chmod(path, 0x1ff)
self.plock = InterProcessLock(path)
self.tlock = Lock()
def acquire(self, blocking=True):
""" Acquire lock, locks thread and process lock.
Args:
            blocking(bool): Sets the blocking mode of the acquire operation.
                            Default True.
Returns: True if lock succeeded otherwise False
"""
if not blocking:
# Lock thread
tlocked = self.tlock.acquire(blocking=False)
if not tlocked:
return False
# Lock process
plocked = self.plock.acquire(blocking=False)
if not plocked:
# Release thread lock if process couldn't be locked
self.tlock.release()
return False
else: # blocking, just wait and acquire ALL THE LOCKS!!!
self.tlock.acquire()
self.plock.acquire()
return True
def release(self):
""" Release acquired lock. """
self.plock.release()
self.tlock.release()
def __enter__(self):
""" Context handler, acquires lock in blocking mode. """
self.acquire()
return self
def __exit__(self, _type, value, traceback):
""" Releases the lock. """
self.release()
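if __name__ == '__main__':
    # Usage sketch: the lock-file path below is an arbitrary example.
    lock = ComboLock('/tmp/combo_example.lock')
    with lock:
        print('holding both the thread lock and the process lock')
    # Non-blocking acquire returns False instead of waiting.
    if lock.acquire(blocking=False):
        try:
            print('acquired without blocking')
        finally:
            lock.release()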
|
the-stack_0_7028 | from flask import Blueprint, jsonify, request
from multiprocessing.connection import Client
from interface import IRequest, IPageResult, MessageProtocol
import uuid, zlib
from datetime import datetime, timedelta
from tool import log
l = log("Api")
NAME = ("localhost", 25100)
Api = Blueprint('Api', __name__)
@Api.route("/info")
def info():
return jsonify({"error": False, "result": ["hello", "world"] })
@Api.route("/start")
def start():
return jsonify({"error": False, "result": ["hello", "world"] })
@Api.route("/render", methods=["GET", "POST"])
def render():
data = []
if request.form:
url = request.form.get("url")
wait = request.form.get("wait")
jscript = request.form.get("jscript")
ctime =int( ( datetime.now() + timedelta(seconds=60*5) ).timestamp() )
param = IRequest(
id=uuid.uuid4().hex,
url=url, param={},
wait=float(wait) if wait else 0,
expiration_date = ctime,
jscript = jscript if jscript else "",
method = "render"
)
c = Client(NAME, authkey=b"qwerty")
c.send( param.__dict__ )
data.append(param.id)
l.info(f"Request {param}")
c.close()
# data.append( c.recv() )
return jsonify(MessageProtocol(
status_code=200, action='', message='',
payload=data
).to_dict())
# return jsonify({"response": True, "data" : data})
@Api.route("/result/<keyid>", methods=["GET", "POST"])
def get_result(keyid):
data = []
res = IPageResult(id=keyid, method="result")
c = Client(NAME, authkey=b"qwerty")
c.send( res.__dict__ )
response = c.recv()
if response:
l.info(f"Request {res}")
data.append( zlib.decompress( response ).decode("utf8") )
c.close()
return jsonify(MessageProtocol(
status_code=200, action='', message='',
payload=data
).to_dict())
# return jsonify({"response": True, "data" : data})
@Api.route("/a_content", methods=["POST"])
def active_content():
data = []
if request.form:
wait = request.form.get("wait")
jscript = request.form.get("jscript")
param = IRequest(
id="",
url="", param={},
wait=float(wait) if wait else 0,
expiration_date = 0,
jscript = jscript if jscript else "",
method = "active_content"
)
c = Client(NAME, authkey=b"qwerty")
c.send( param.__dict__ )
        '''An error often occurs here'''
response = c.recv()
if response:
data.append( response )
l.info(f"Request {param}")
c.close()
return jsonify(MessageProtocol(
status_code=200, action='', message='',
payload=data
).to_dict())
# return jsonify({"response": True, "data" : data})
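# Client-side sketch: assumes the blueprint is mounted under /api and the Flask app
# listens on localhost:5000 -- both are assumptions about the deployment, adjust as
# needed:
#
#   import requests
#   r = requests.post("http://localhost:5000/api/render",
#                     data={"url": "https://example.com", "wait": "1"})
#   key = r.json()["payload"][0]
#   page = requests.get("http://localhost:5000/api/result/%s" % key).json()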
|
the-stack_0_7032 | """
Utilities of MobileNet training
"""
from models import modules
import os
import sys
import time
import math
import shutil
import tabulate
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torchvision
import torch.optim as optim
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import seaborn as sns
from functools import partial
from models import QConvBN2d
import models
_print_freq = 50
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
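# Usage sketch for AverageMeter and accuracy (the tensor shapes are illustrative):
#
#   top1 = AverageMeter()
#   logits = torch.randn(8, 10)            # batch of 8 samples, 10 classes
#   labels = torch.randint(0, 10, (8,))
#   prec1, prec5 = accuracy(logits, labels, topk=(1, 5))
#   top1.update(prec1.item(), labels.size(0))
#   print(top1.avg)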
def train(trainloader, net, criterion, optimizer, epoch, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
net.train()
train_loss = 0
correct = 0
total = 0
end = time.time()
for batch_idx, (inputs, targets) in enumerate(trainloader):
data_time.update(time.time() - end)
targets = targets.cuda(non_blocking=True)
inputs = inputs.cuda()
outputs = net(inputs)
loss = criterion(outputs, targets)
if args.clp:
reg_alpha = torch.tensor(0.).cuda()
a_lambda = torch.tensor(args.a_lambda).cuda()
alpha = []
for name, param in net.named_parameters():
if 'alpha' in name:
alpha.append(param.item())
reg_alpha += param.item() ** 2
loss += a_lambda * (reg_alpha)
optimizer.zero_grad()
loss.backward()
# for module in net.modules():
# if 'BatchNorm' in str(type(module)):
# if module.weight.grad is not None:
# module.weight.grad.data.fill_(0)
# if module.bias.grad is not None:
# module.bias.grad.data.fill_(0)
optimizer.step()
prec1, prec5 = accuracy(outputs.data, targets, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
batch_time.update(time.time() - end)
end = time.time()
# import pdb;pdb.set_trace()
train_loss += loss.item()
if args.clp:
res = {
'acc':top1.avg,
'loss':losses.avg,
'clp_alpha':np.array(alpha)
}
else:
res = {
'acc':top1.avg,
'loss':losses.avg,
}
return res
def test(testloader, net, criterion, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
net.eval()
test_loss = 0
end = time.time()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
mean_loader = []
inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
outputs = net(inputs)
loss = criterion(outputs, targets)
prec1, prec5 = accuracy(outputs.data, targets, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
test_loss += loss.item()
batch_time.update(time.time() - end)
end = time.time()
# break
return top1.avg, losses.avg
def convert_secs2time(epoch_time):
need_hour = int(epoch_time / 3600)
need_mins = int((epoch_time - 3600*need_hour) / 60)
need_secs = int(epoch_time - 3600*need_hour - 60*need_mins)
return need_hour, need_mins, need_secs
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
def print_table(values, columns, epoch, logger):
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='8.4f')
if epoch == 0:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
logger.info(table)
def adjust_learning_rate_schedule(optimizer, epoch, gammas, schedule, lr, mu):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
if optimizer != "YF":
assert len(gammas) == len(
schedule), "length of gammas and schedule should be equal"
for (gamma, step) in zip(gammas, schedule):
if (epoch >= step):
lr = lr * gamma
else:
break
for param_group in optimizer.param_groups:
param_group['lr'] = lr
elif optimizer == "YF":
lr = optimizer._lr
mu = optimizer._mu
return lr, mu
def save_checkpoint(state, is_best, save_path, filename='checkpoint.pth.tar'):
torch.save(state, save_path+filename)
if is_best:
shutil.copyfile(save_path+filename, save_path+'model_best.pth.tar')
def get_alpha_w(model):
alpha = []
count = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
            if count not in [0] and m.weight.size(2) != 1:
alpha.append(m.alpha_w)
count += 1
return alpha
def log2df(log_file_name):
'''
return a pandas dataframe from a log file
'''
with open(log_file_name, 'r') as f:
lines = f.readlines()
# search backward to find table header
num_lines = len(lines)
for i in range(num_lines):
if lines[num_lines-1-i].startswith('---'):
break
header_line = lines[num_lines-2-i]
num_epochs = i
columns = header_line.split()
df = pd.DataFrame(columns=columns)
for i in range(num_epochs):
df.loc[i] = [float(x) for x in lines[num_lines-num_epochs+i].split()]
return df
"""
PROFIT Util
"""
def categorize_param(model, skip_list=()):
quant = []
skip = []
bnbias = []
weight = []
for name, param, in model.named_parameters():
skip_found = False
for s in skip_list:
if name.find(s) != -1:
skip_found = True
if not param.requires_grad:
continue
elif name.endswith(".a") or name.endswith(".c"):
quant.append(param)
elif skip_found:
skip.append(param)
elif len(param.shape) == 1 or name.endswith(".bias"):
bnbias.append(param)
else:
weight.append(param)
return (quant, skip, weight, bnbias)
def get_optimizer(params, train_quant, train_weight, train_bnbias, args):
(quant, skip, weight, bnbias) = params
optimizer = optim.SGD([
{'params': skip, 'weight_decay': 0, 'lr': 0},
{'params': quant, 'weight_decay': 0., 'lr': args.lr * 1e-2 if train_quant else 0},
{'params': bnbias, 'weight_decay': 0., 'lr': args.lr if train_bnbias else 0},
{'params': weight, 'weight_decay': args.weight_decay, 'lr': args.lr if train_weight else 0},
], momentum=0.9, nesterov=True)
return optimizer
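# PROFIT fine-tuning sketch: categorize_param() splits the parameters once and
# get_optimizer() builds per-group learning rates from that split. The resnet18
# model is only an illustration and `args` stands for whatever namespace carries
# lr / weight_decay:
#
#   model = torchvision.models.resnet18()
#   params = categorize_param(model)
#   optimizer = get_optimizer(params, train_quant=False, train_weight=True,
#                             train_bnbias=True, args=args)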
def reset_weight_copy(model):
for name, module in model.module.named_modules():
if hasattr(module, "WQ"):
if hasattr(module.WQ, "weight_old"):
del module.WQ.weight_old
module.WQ.weight_old = None
def lasso_thre(var, thre=1.0):
thre = torch.tensor(thre).cuda()
a = var.pow(2).pow(1/2)
p = torch.max(a, thre) # penalize or not
return p
def train_profit(train_loader, net, net_t, criterion, optimizer, epoch, metric_map={}, logger=None, lasso=True):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
net.train()
# reset weight copy
reset_weight_copy(net)
if net_t is not None:
net_t.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
data_time.update(time.time() - end)
# deploy the data
input = input.cuda()
target = target.cuda(non_blocking=True)
if net_t is not None:
output_t = net_t(input)
# create and attach hook for layer-wise aiwq measure
hooks = []
metric_itr_map = {}
if len(metric_map) > 0:
def forward_hook(self, input, output):
if self.WQ.weight_old is not None and input[0].get_device() == 0:
with torch.no_grad():
out_old = torch.nn.functional.conv2d(input[0], self.WQ.weight_old, self.bias,
self.stride, self.padding, self.dilation, self.groups)
out_t = torch.transpose(output, 0, 1).contiguous().view(self.out_channels, -1)
out_mean = torch.mean(out_t, 1)
out_std = torch.std(out_t, 1) # + 1e-8
out_old_t = torch.transpose(out_old, 0, 1).contiguous().view(self.out_channels, -1)
out_old_mean = torch.mean(out_old_t, 1)
out_old_std = torch.std(out_old_t, 1) # + 1e-8
out_cond = out_std != 0
out_old_cond = out_old_std != 0
cond = out_cond & out_old_cond
out_mean = out_mean[cond]
out_std = out_std[cond]
out_old_mean = out_old_mean[cond]
out_old_std = out_old_std[cond]
KL = torch.log(out_old_std / out_std) + \
(out_std ** 2 + (out_mean - out_old_mean) ** 2) / (2 * out_old_std ** 2) - 0.5
metric_itr_map[self.name] = KL.mean().data.cpu().numpy()
for name, module in net.module.named_modules():
if hasattr(module, "WQ") and isinstance(module, torch.nn.Conv2d):
module.name = name
hooks.append(module.register_forward_hook(forward_hook))
# feed forward
output = net(input)
for hook in hooks:
hook.remove()
loss_s = criterion(output, target) # student model loss
if net_t is not None:
loss_kd = -1 * torch.mean(
torch.sum(torch.nn.functional.softmax(output_t, dim=1)
* torch.nn.functional.log_softmax(output, dim=1), dim=1))
loss = loss_s + loss_kd
else:
loss = loss_s
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss_s.data.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
if ((i+1) % _print_freq) == 0:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch+1, i+1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
for key, value in metric_itr_map.items():
if value > 1:
continue
metric_map[key] = 0.999 * metric_map[key] + 0.001 * value
return top1.avg, losses.avg, metric_map
def init_precision(model, loader, abit, wbit, set_a=False, set_w=False, eps=0.05):
def init_hook(module, input, output):
if isinstance(module, models.modules.QConv2d) or isinstance(module, models.modules.QLinear):
if not isinstance(input, torch.Tensor):
input = input[0]
input = input.detach().cpu()
input = input.reshape(-1)
input = input[input > 0]
input, _ = torch.sort(input)
if len(input) == 0:
small, large = 0, 1e-3
else:
small, large = input[int(len(input)*eps)], input[int(len(input)*(1-eps))]
if set_a:
module.AQ._update_param(abit, small, large-small)
# import pdb;pdb.set_trace()
if set_w:
max_val = module.weight.data.abs().max().item()
module.WQ._update_param(wbit, max_val)
hooks = []
for name, module in model.named_modules():
hook = module.register_forward_hook(init_hook)
hooks.append(hook)
model.train()
model.cpu()
for i, (input, target) in enumerate(loader):
with torch.no_grad():
if isinstance(model, nn.DataParallel):
output = model.module(input)
else:
output = model(input)
break
model.cuda()
for hook in hooks:
hook.remove()
def bn_merge(model):
r"""
Fuse the batchnorm to the weight given a pretrained model
"""
for module_name in model._modules:
block = model._modules[module_name]
if not isinstance(block, nn.Sequential):
# import pdb;pdb.set_trace()
model._modules[module_name] = block
continue
else:
stack = []
for m in block.children():
sub_module = []
for n in m.children():
if isinstance(n, nn.BatchNorm2d):
if isinstance(sub_module[-1], QConvBN2d):
bn_st_dict = n.state_dict()
conv_st_dict = sub_module[-1].state_dict()
# batchnorm parameters
eps = n.eps
mu = bn_st_dict['running_mean']
var = bn_st_dict['running_var']
gamma = bn_st_dict['weight']
nb_tr = bn_st_dict['num_batches_tracked']
if 'bias' in bn_st_dict:
beta = bn_st_dict['bias']
else:
beta = torch.zeros(gamma.size(0)).float().to(gamma.device)
sub_module[-1].gamma.data = gamma
sub_module[-1].beta.data = beta
sub_module[-1].running_mean.data = mu
sub_module[-1].running_var.data = var
sub_module[-1].num_batches_tracked.data = nb_tr
sub_module[-1].eps = eps
# import pdb;pdb.set_trace()
else:
sub_module.append(n)
seq_module = nn.Sequential(*sub_module)
stack.append(seq_module)
seq_stack = nn.Sequential(*stack)
model._modules[module_name] = seq_stack
# import pdb;pdb.set_trace()
return model
def set_precision(model, abit=32, wbit=32, set_a=False, set_w=False):
for name, module in model.named_modules():
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
if set_a:
module.AQ.abit = abit
else:
module.AQ.abit = 32
if set_w:
module.WQ.wbit = wbit
else:
module.WQ.wbit = 32
if __name__ == "__main__":
log = log2df('./save/resnet20_quant_grp8/resnet20_quant_w4_a4_modemean_k2_lambda0.0010_ratio0.7_wd0.0005_lr0.01_swpFalse_groupch8_pushFalse_iter4000_g01/resnet20_quant_w4_a4_modemean_k2_lambda0.0010_ratio0.7_wd0.0005_lr0.01_swpFalse_groupch8_pushFalse_iter4000_tmp_g03.log')
epoch = log['ep']
grp_spar = log['grp_spar']
ovall_spar = log['ovall_spar']
spar_groups = log['spar_groups']
penalty_groups = log['penalty_groups']
table = {
'epoch': epoch,
'grp_spar': grp_spar,
'ovall_spar': ovall_spar,
'spar_groups':spar_groups,
'penalty_groups':penalty_groups,
}
variable = pd.DataFrame(table, columns=['epoch','grp_spar','ovall_spar', 'spar_groups', 'penalty_groups'])
variable.to_csv('resnet20_quant_w4_a4_modemean_k2_lambda0.0010_ratio0.7_wd0.0005_lr0.01_swpFalse_groupch8_pushFalse_iter4000_tmp_g03.csv', index=False)
|
the-stack_0_7036 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import unhexlify
from decimal import Decimal, ROUND_DOWN
from subprocess import CalledProcessError
import inspect
import json
import logging
import os
import random
import re
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
from io import BytesIO
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
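# Example (hypothetical node and RPC values, shown for illustration only):
#
#   assert_raises_rpc_error(-8, "Block height out of range",
#                           node.getblockhash, 1000000)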
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
def assert_scale(number, expected_scale=8):
"""Assert number has expected scale, e.g. fractional digits; number of
digits after the decimal. The default of 8 corresponds to a Bitcoin amount."""
number = str(number)
mantissa = number.split('.')[-1].upper()
if mantissa[:3] == '0E-':
assert_equal(mantissa, '0E-{}'.format(expected_scale)) # exponent notation
elif mantissa == number:
assert_equal(0, expected_scale) # no mantissa, ergo, expected scale must be 0
else:
assert_equal(len(mantissa), expected_scale)
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def EncodeDecimal(o):
if isinstance(o, Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
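# Example: wait until a node reports at least one peer (`node` is assumed to be a
# TestNode from this framework; purely illustrative):
#
#   wait_until(lambda: len(node.getpeerinfo()) > 0, timeout=30)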
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 12
# Don't assign rpc or p2p ports lower than this
PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000))
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
coveragedir (str): Directory
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert n <= MAX_NODES
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, chain, rpchost):
rpc_u, rpc_p = get_auth_cookie(datadir, chain)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n, chain):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
# Translate chain name to config name
if chain == 'testnet3':
chain_name_conf_arg = 'testnet'
chain_name_conf_section = 'test'
else:
chain_name_conf_arg = chain
chain_name_conf_section = chain
with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
f.write("{}=1\n".format(chain_name_conf_arg))
f.write("[{}]\n".format(chain_name_conf_section))
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("fallbackfee=0.0002\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("dnsseed=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
f.write("shrinkdebugfile=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir, chain):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
try:
with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
except OSError:
pass
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, chain, ".cookie"))
def softfork_active(node, key):
"""Return whether a softfork is active."""
return node.getblockchaininfo()['softforks'][key]['active']
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert confirmations_required >= 0
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], 0)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert len(utxos) >= count
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = []
from .messages import CTxOut
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
for k in range(128):
txouts.append(txout)
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
from .messages import CTransaction
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(rawtx)))
for txout in txouts:
tx.vout.append(txout)
newtx = tx.serialize().hex()
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], 0)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
|
the-stack_0_7037 | # MIT license
#
# Copyright (C) 2018 by XESS Corporation / Hildo Guillardi Junior
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Inserted by Pasteurize tool.
from __future__ import print_function, unicode_literals, division, absolute_import
from builtins import zip, range, int, str
from future import standard_library
standard_library.install_aliases()
import future
import re, difflib
from bs4 import BeautifulSoup
import http.client # For web scraping exceptions.
from ...global_vars import PartHtmlError
from ...global_vars import logger, DEBUG_OVERVIEW, DEBUG_DETAILED, DEBUG_OBSESSIVE, DEBUG_HTTP_RESPONSES
from .. import fake_browser
from .. import distributor
from ..global_vars import distributor_dict
from urllib.parse import quote_plus as urlquote
class dist_newark(distributor.distributor):
def __init__(self, name, scrape_retries, throttle_delay):
super(dist_newark, self).__init__(name, distributor_dict[name]['site']['url'],
scrape_retries, throttle_delay)
self.browser.start_new_session()
@staticmethod
def dist_init_distributor_dict():
distributor_dict.update(
{
'newark': {
'module': 'newark', # The directory name containing this file.
'scrape': 'web', # Allowable values: 'web' or 'local'.
'label': 'Newark', # Distributor label used in spreadsheet columns.
'order_cols': ['part_num', 'purch', 'refs'], # Sort-order for online orders.
'order_delimiter': ',', # Delimiter for online orders.
# Formatting for distributor header in worksheet.
'wrk_hdr_format': {
'font_size': 14,
'font_color': 'white',
'bold': True,
'align': 'center',
'valign': 'vcenter',
'bg_color': '#A2AE06' # Newark/E14 olive green.
},
                    # Web site definitions.
'site': {
'url': 'https://www.newark.com/',
'currency': 'USD',
'locale': 'US'
},
}
})
def dist_get_price_tiers(self, html_tree):
'''@brief Get the pricing tiers from the parsed tree of the Newark product page.
@param html_tree `str()` html of the distributor part page.
@return `dict()` price breaks, the keys are the quantities breaks.
'''
price_tiers = {}
try:
qty_strs = []
for qty in html_tree.find(
'table',
class_=('tableProductDetailPrice', 'pricing')).find_all(
'td',
class_='qty'):
qty_strs.append(qty.text)
price_strs = []
for price in html_tree.find(
'table',
class_=('tableProductDetailPrice', 'pricing')).find_all(
'td',
class_='threeColTd'):
price_strs.append(price.text)
qtys_prices = list(zip(qty_strs, price_strs))
for qty_str, price_str in qtys_prices:
try:
                    qty = re.search(r'(\s*)([0-9,]+)', qty_str).group(2)
                    qty = int(re.sub(r'[^0-9]', '', qty))
                    price_tiers[qty] = float(re.sub(r'[^0-9.]', '', price_str))
except (TypeError, AttributeError, ValueError):
continue
except AttributeError:
# This happens when no pricing info is found in the tree.
self.logger.log(DEBUG_OBSESSIVE, 'No Newark pricing information found!')
return price_tiers # Return empty price tiers.
return price_tiers
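    # For reference, a successful scrape yields a mapping such as
    # {1: 1.23, 10: 1.05, 100: 0.87} (hypothetical values; quantity -> unit price).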
def dist_get_part_num(self, html_tree):
'''@brief Get the part number from the Newark product page.
@param html_tree `str()` html of the distributor part page.
        @return `str()` the Newark catalog number ('' if not found).
'''
try:
# Newark catalog number is stored in a description list, so get
# all the list terms and descriptions, strip all the spaces from those,
# and pair them up.
div = html_tree.find('div', class_='productDescription').find('dl')
            dt = [re.sub(r'\s', '', d.text) for d in div.find_all('dt')]
            dd = [re.sub(r'\s', '', d.text) for d in div.find_all('dd')]
dtdd = {k:v for k,v in zip(dt,dd)} # Pair terms with descriptions.
return dtdd.get('NewarkPartNo.:', '')
except KeyError:
self.logger.log(DEBUG_OBSESSIVE, 'No Newark catalog number found!')
return '' # No catalog number found in page.
except AttributeError:
self.logger.log(DEBUG_OBSESSIVE, 'No Newark product description found!')
return '' # No ProductDescription found in page.
def dist_get_qty_avail(self, html_tree):
'''@brief Get the available quantity of the part from the Newark product page.
@param html_tree `str()` html of the distributor part page.
        @return `int` available quantity.
'''
try:
qty_str = html_tree.find('p', class_='availabilityHeading').text
except (AttributeError, ValueError):
# No quantity found (not even 0) so this is probably a non-stocked part.
# Return None so the part won't show in the spreadsheet for this dist.
return None
try:
            qty = re.sub(r'[^0-9]', '', qty_str)  # Strip all non-number chars.
            return int(qty)  # Return integer for quantity.
except ValueError:
# No quantity found (not even 0) so this is probably a non-stocked part.
# Return None so the part won't show in the spreadsheet for this dist.
self.logger.log(DEBUG_OBSESSIVE, 'No Newark part quantity found!')
return None
def dist_get_part_html_tree(self, pn, extra_search_terms='', url=None, descend=2):
'''@brief Find the Newark HTML page for a part number and return the URL and parse tree.
@param pn Part number `str()`.
@param extra_search_terms
@param url
@param descend
        @return (BeautifulSoup tree of the page, url)
'''
# Use the part number to lookup the part using the site search function, unless a starting url was given.
if url is None:
url = 'http://www.newark.com/webapp/wcs/stores/servlet/Search?catalogId=15003&langId=-1&storeId=10194&gs=true&st=' \
+ urlquote(pn, safe='')
if extra_search_terms:
url = url + urlquote(' ' + extra_search_terms, safe='')
elif url[0] == '/':
url = 'http://www.newark.com' + url
elif url.startswith('..'):
url = 'http://www.newark.com/Search/' + url
# Open the URL, read the HTML from it, and parse it into a tree structure.
try:
html = self.browser.scrape_URL(url)
except:
self.logger.log(DEBUG_OBSESSIVE,'No HTML page for {} from {}'.format(pn, self.name))
raise PartHtmlError
try:
tree = BeautifulSoup(html, 'lxml')
except Exception:
self.logger.log(DEBUG_OBSESSIVE,'No HTML tree for {} from {}'.format(pn, self.name))
raise PartHtmlError
# Abort if the part number isn't in the HTML somewhere.
# (Only use the numbers and letters to compare PN to HTML.)
        if re.sub(r'[\W_]', '', str.lower(pn)) not in re.sub(r'[\W_]', '', str.lower(str(html))):
self.logger.log(DEBUG_OBSESSIVE,'No part number {} in HTML page from {}'.format(pn, self.name))
raise PartHtmlError
# If the tree contains the tag for a product page, then just return it.
if tree.find('div', class_='productDisplay', id='page') is not None:
return tree, url
# If the tree is for a list of products, then examine the links to try to find the part number.
if tree.find('table', class_='productLister', id='sProdList') is not None:
self.logger.log(DEBUG_OBSESSIVE,'Found product table for {} from {}'.format(pn, self.name))
if descend <= 0:
self.logger.log(DEBUG_OBSESSIVE,'Passed descent limit for {} from {}'.format(pn, self.name))
raise PartHtmlError
else:
# Look for the table of products.
products = tree.find('table',
class_='productLister',
id='sProdList').find('tbody').find_all('tr')
# Extract the product links for the part numbers from the table.
product_links = []
for p in products:
try:
product_links.append(
p.find('td', class_='mftrPart').find('a'))
except AttributeError:
continue
# Extract all the part numbers from the text portion of the links.
part_numbers = [l.text for l in product_links]
# Look for the part number in the list that most closely matches the requested part number.
try:
match = difflib.get_close_matches(pn, part_numbers, 1, 0.0)[0]
except IndexError:
raise PartHtmlError
# Now look for the link that goes with the closest matching part number.
for l in product_links:
if l.text == match:
# Get the tree for the linked-to page and return that.
self.logger.log(DEBUG_OBSESSIVE,'Selecting {} from product table for {} from {}'.format(l.text.strip(), pn, self.name))
return self.dist_get_part_html_tree(pn, extra_search_terms,
url=l.get('href', ''),
descend=descend-1)
# I don't know what happened here, so give up.
self.logger.log(DEBUG_OBSESSIVE,'Unknown error for {} from {}'.format(pn, self.name))
self.logger.log(DEBUG_HTTP_RESPONSES,'Response was %s' % html)
raise PartHtmlError
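# Illustrative only: a rough sketch of driving this scraper stand-alone. The part
# number is an arbitrary example, live network access is required, and KiCost
# normally orchestrates all of this, so treat it as documentation rather than a test.
if __name__ == '__main__':
    dist_newark.dist_init_distributor_dict()
    d = dist_newark('newark', scrape_retries=4, throttle_delay=5.0)
    tree, url = d.dist_get_part_html_tree('ATMEGA328P-PU')
    print('Product page:', url)
    print('Catalog number:', d.dist_get_part_num(tree))
    print('Price tiers:', d.dist_get_price_tiers(tree))
    print('Quantity available:', d.dist_get_qty_avail(tree))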
|
the-stack_0_7038 | """Common datatypes and pytd utilities."""
from typing import Any, List, Tuple
import dataclasses
from pytype import utils
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd.codegen import pytdgen
from pytype.pytd.parse import node as pytd_node
from typed_ast import ast3
_STRING_TYPES = ("str", "bytes", "unicode")
class ParseError(Exception):
"""Exceptions raised by the parser."""
def __init__(self, msg, line=None, filename=None, column=None, text=None):
super().__init__(msg)
self._line = line
self._filename = filename
self._column = column
self._text = text
@classmethod
def from_exc(cls, exc) -> "ParseError":
if isinstance(exc, cls):
return exc
elif exc.args:
return cls(exc.args[0])
else:
return cls(repr(exc))
def at(self, node, filename=None, src_code=None):
"""Add position information from `node` if it doesn't already exist."""
# NOTE: ast3.Module has no position info, and will be the `node` when
# build_type_decl_unit() is called, so we cannot call `node.lineno`
if not self._line:
self._line = getattr(node, "lineno", None)
self._column = getattr(node, "col_offset", None)
if not self._filename:
self._filename = filename
if self._line and src_code:
try:
self._text = src_code.splitlines()[self._line-1]
except IndexError:
pass
return self
def clear_position(self):
self._line = None
@property
def line(self):
return self._line
def __str__(self):
lines = []
if self._filename or self._line is not None:
lines.append(f' File: "{self._filename}", line {self._line}')
if self._column and self._text:
indent = 4
stripped = self._text.lstrip()
lines.append("%*s%s" % (indent, "", stripped))
# Output a pointer below the error column, adjusting for stripped spaces.
pos = indent + (self._column - 1) - (len(self._text) - len(stripped))
lines.append("%*s^" % (pos, ""))
lines.append("%s: %s" % (type(self).__name__, utils.message(self)))
return "\n".join(lines)
# Type aliases
Parameters = Tuple[pytd_node.Node, ...]
class Ellipsis: # pylint: disable=redefined-builtin
pass
@dataclasses.dataclass
class Raise:
exception: pytd.NamedType
@dataclasses.dataclass
class SlotDecl:
slots: Tuple[str, ...]
@dataclasses.dataclass
class Constant:
"""Literal constants in pyi files."""
type: str
value: Any
@classmethod
def from_num(cls, node: ast3.Num):
if isinstance(node.n, int):
return cls("int", node.n)
else:
return cls("float", node.n)
@classmethod
def from_str(cls, node: ast3.Str):
if node.kind == "b":
return cls("bytes", node.s)
elif node.kind == "u":
return cls("unicode", node.s)
else:
return cls("str", node.s)
@classmethod
def from_const(cls, node: ast3.NameConstant):
if node.value is None:
return pytd.NamedType("None")
return cls(type(node.value).__name__, node.value)
def to_pytd(self):
return pytd.NamedType(self.type)
def repr_str(self):
"""String representation with prefixes."""
if self.type == "str":
val = f"'{self.value}'"
elif self.type == "unicode":
val = f"u'{self.value}'"
elif self.type == "bytes":
val = str(self.value)
else:
# For non-strings
val = repr(self.value)
return val
def to_pytd_literal(self):
"""Make a pytd node from Literal[self.value]."""
if self.value is None:
return pytd.NamedType("None")
if self.type in _STRING_TYPES:
val = self.repr_str()
elif self.type == "float":
raise ParseError(f"Invalid type `float` in Literal[{self.value}].")
else:
val = self.value
return pytd.Literal(val)
def negated(self):
"""Return a new constant with value -self.value."""
if self.type in ("int", "float"):
return Constant(self.type, -self.value)
raise ParseError("Unary `-` can only apply to numeric literals.")
@classmethod
def is_str(cls, value):
return isinstance(value, cls) and value.type in _STRING_TYPES
def __repr__(self):
return f"LITERAL({self.repr_str()})"
def string_value(val, context=None) -> str:
"""Convert a Constant(str) to a string if needed."""
if isinstance(val, str):
return val
elif Constant.is_str(val):
return str(val.value)
else:
if context:
msg = f"Type mismatch in {context}"
else:
msg = "Type mismatch"
raise ParseError(f"{msg}: Expected str, got {val}")
def is_any(val) -> bool:
if isinstance(val, Ellipsis):
return True
return pytdgen.is_any(val)
def pytd_literal(parameters: List[Any]) -> pytd_node.Node:
"""Create a pytd.Literal."""
literal_parameters = []
for p in parameters:
if pytdgen.is_none(p):
literal_parameters.append(p)
elif isinstance(p, pytd.NamedType):
# TODO(b/173742489): support enums.
literal_parameters.append(pytd.AnythingType())
elif isinstance(p, Constant):
literal_parameters.append(p.to_pytd_literal())
elif isinstance(p, pytd.Literal):
literal_parameters.append(p)
elif isinstance(p, pytd.UnionType):
for t in p.type_list:
if isinstance(t, pytd.Literal):
literal_parameters.append(t)
else:
raise ParseError(f"Literal[{t}] not supported")
else:
raise ParseError(f"Literal[{p}] not supported")
return pytd_utils.JoinTypes(literal_parameters)
def pytd_annotated(parameters: List[Any]) -> pytd_node.Node:
"""Create a pytd.Annotated."""
if len(parameters) < 2:
raise ParseError(
"typing.Annotated takes at least two parameters: "
"Annotated[type, 'annotation', ...].")
typ, *annotations = parameters
if not all(isinstance(x, Constant) for x in annotations):
raise ParseError(
"Annotations needs to be string literals: "
"Annotated[type, 'annotation', ...].")
annotations = tuple(x.repr_str() for x in annotations)
return pytd.Annotated(typ, annotations)
def builtin_keyword_constants():
# We cannot define these in a pytd file because assigning to a keyword breaks
# the python parser.
defs = [
("True", "bool"),
("False", "bool"),
("None", "NoneType"),
("__debug__", "bool")
]
return [pytd.Constant(name, pytd.NamedType(typ)) for name, typ in defs]
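# Illustrative only: a small demonstration of how Constant and pytd_literal fit
# together (run with pytype's own dependencies installed; values are arbitrary
# examples and the printed reprs depend on the pytd node classes).
if __name__ == "__main__":
  c = Constant("str", "abc")
  print(c.repr_str())         # the quoted form used inside Literal[...]
  print(c.to_pytd_literal())  # a pytd.Literal node wrapping the quoted string
  print(pytd_literal([Constant("int", 1), Constant("int", 2)]))  # union of two Literals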
|
the-stack_0_7040 | #!/usr/bin/env python3
import os
import argparse
import torch
import torch.distributed as dist
import torchvision
import torchvision.transforms as transforms
from torchvision.models import AlexNet
from torchvision.models import vgg19
import deepspeed
from deepspeed.pipe import PipelineModule
from deepspeed.utils import RepeatingLoader
def cifar_trainset(local_rank, dl_path='/tmp/cifar10-data'):
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Ensure only one rank downloads.
# Note: if the download path is not on a shared filesytem, remove the semaphore
# and switch to args.local_rank
dist.barrier()
if local_rank != 0:
dist.barrier()
trainset = torchvision.datasets.CIFAR10(root=dl_path,
train=True,
download=True,
transform=transform)
if local_rank == 0:
dist.barrier()
return trainset
def get_args():
parser = argparse.ArgumentParser(description='CIFAR')
parser.add_argument('--local_rank',
type=int,
default=-1,
help='local rank passed from distributed launcher')
parser.add_argument('-s',
'--steps',
type=int,
default=100,
help='quit after this many steps')
parser.add_argument('-p',
'--pipeline-parallel-size',
type=int,
default=2,
help='pipeline parallelism')
parser.add_argument('--backend',
type=str,
default='nccl',
help='distributed backend')
parser.add_argument('--seed', type=int, default=1138, help='PRNG seed')
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()
return args
def train_base(args):
torch.manual_seed(args.seed)
# VGG also works :-)
#net = vgg19(num_classes=10)
net = AlexNet(num_classes=10)
trainset = cifar_trainset(args.local_rank)
engine, _, dataloader, __ = deepspeed.initialize(
args=args,
model=net,
model_parameters=[p for p in net.parameters() if p.requires_grad],
training_data=trainset)
dataloader = RepeatingLoader(dataloader)
data_iter = iter(dataloader)
rank = dist.get_rank()
gas = engine.gradient_accumulation_steps()
criterion = torch.nn.CrossEntropyLoss()
total_steps = args.steps * engine.gradient_accumulation_steps()
step = 0
for micro_step in range(total_steps):
batch = next(data_iter)
inputs = batch[0].to(engine.device)
labels = batch[1].to(engine.device)
outputs = engine(inputs)
loss = criterion(outputs, labels)
engine.backward(loss)
engine.step()
if micro_step % engine.gradient_accumulation_steps() == 0:
step += 1
if rank == 0 and (step % 10 == 0):
print(f'step: {step:3d} / {args.steps:3d} loss: {loss}')
def join_layers(vision_model):
layers = [
*vision_model.features,
vision_model.avgpool,
lambda x: torch.flatten(x, 1),
*vision_model.classifier,
]
return layers
def train_pipe(args, part='parameters'):
torch.manual_seed(args.seed)
deepspeed.runtime.utils.set_random_seed(args.seed)
#
# Build the model
#
# VGG also works :-)
#net = vgg19(num_classes=10)
net = AlexNet(num_classes=10)
net = PipelineModule(layers=join_layers(net),
loss_fn=torch.nn.CrossEntropyLoss(),
num_stages=args.pipeline_parallel_size,
partition_method=part,
activation_checkpoint_interval=0)
trainset = cifar_trainset(args.local_rank)
engine, _, _, _ = deepspeed.initialize(
args=args,
model=net,
model_parameters=[p for p in net.parameters() if p.requires_grad],
training_data=trainset)
for step in range(args.steps):
loss = engine.train_batch()
if __name__ == '__main__':
args = get_args()
deepspeed.init_distributed(dist_backend=args.backend)
args.local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(args.local_rank)
if args.pipeline_parallel_size == 0:
train_base(args)
else:
train_pipe(args)
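# Illustrative only: one possible way to launch this script (the file name, GPU
# count and config values below are assumptions, not requirements). DeepSpeed
# reads its runtime settings from the JSON file passed via --deepspeed_config.
#
#   deepspeed --num_gpus=2 train.py -p 2 --steps=200 --deepspeed_config ds_config.json
#
# A minimal ds_config.json sketch compatible with the pipeline engine:
#
#   {
#     "train_batch_size": 256,
#     "train_micro_batch_size_per_gpu": 8,
#     "optimizer": {"type": "Adam", "params": {"lr": 0.001}},
#     "steps_per_print": 10
#   }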
|
the-stack_0_7041 | import argparse
import os
import pickle
parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--out_root", type=str,
default="/scratch/shared/slow/xuji/iid_private")
given_config = parser.parse_args()
given_config.out_dir = os.path.join(given_config.out_root,
str(given_config.model_ind))
reloaded_config_path = os.path.join(given_config.out_dir, "config.pickle")
print("Loading restarting config from: %s" % reloaded_config_path)
with open(reloaded_config_path, "rb") as config_f:
config = pickle.load(config_f)
if not hasattr(config, "batchnorm_track"):
print("adding batchnorm track")
config.batchnorm_track = True
if not hasattr(config, "num_sub_heads"):
print("adding num sub heads")
config.num_sub_heads = config.num_heads
if not hasattr(config, "select_sub_head_on_loss"):
print("adding select_sub_head_on_loss")
config.select_sub_head_on_loss = False
if not hasattr(config, "use_doersch_datasets"): # only needed for seg configs
print("adding use doersch datasets")
config.use_doersch_datasets = False
with open(os.path.join(config.out_dir, "config.pickle"), 'wb') as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "config.txt"), "w") as text_file:
text_file.write("%s" % config)
# these are for backup
with open(os.path.join(config.out_dir, "best_config.pickle"), 'wb') as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "best_config.txt"), "w") as text_file:
text_file.write("%s" % config)
|
the-stack_0_7044 | # encoding: utf-8
"""
Paragraph-related proxy types.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from ..enum.text import WD_LINE_SPACING
from ..shared import ElementProxy, Emu, lazyproperty, Length, Pt, Twips
from .tabstops import TabStops
class ParagraphFormat(ElementProxy):
"""
Provides access to paragraph formatting such as justification,
indentation, line spacing, space before and after, and widow/orphan
control.
"""
__slots__ = ('_tab_stops',)
@property
def alignment(self):
"""
A member of the :ref:`WdParagraphAlignment` enumeration specifying
the justification setting for this paragraph. A value of |None|
indicates paragraph alignment is inherited from the style hierarchy.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.jc_val
@alignment.setter
def alignment(self, value):
pPr = self._element.get_or_add_pPr()
pPr.jc_val = value
@property
def first_line_indent(self):
"""
|Length| value specifying the relative difference in indentation for
the first line of the paragraph. A positive value causes the first
line to be indented. A negative value produces a hanging indent.
|None| indicates first line indentation is inherited from the style
hierarchy.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.first_line_indent
@first_line_indent.setter
def first_line_indent(self, value):
pPr = self._element.get_or_add_pPr()
pPr.first_line_indent = value
@property
def keep_together(self):
"""
|True| if the paragraph should be kept "in one piece" and not broken
across a page boundary when the document is rendered. |None|
indicates its effective value is inherited from the style hierarchy.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.keepLines_val
@keep_together.setter
def keep_together(self, value):
self._element.get_or_add_pPr().keepLines_val = value
@property
def keep_with_next(self):
"""
|True| if the paragraph should be kept on the same page as the
subsequent paragraph when the document is rendered. For example, this
property could be used to keep a section heading on the same page as
its first paragraph. |None| indicates its effective value is
inherited from the style hierarchy.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.keepNext_val
@keep_with_next.setter
def keep_with_next(self, value):
self._element.get_or_add_pPr().keepNext_val = value
@property
def left_indent(self):
"""
|Length| value specifying the space between the left margin and the
left side of the paragraph. |None| indicates the left indent value is
inherited from the style hierarchy. Use an |Inches| value object as
a convenient way to apply indentation in units of inches.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.ind_left
@left_indent.setter
def left_indent(self, value):
pPr = self._element.get_or_add_pPr()
pPr.ind_left = value
@property
def line_spacing(self):
"""
|float| or |Length| value specifying the space between baselines in
successive lines of the paragraph. A value of |None| indicates line
spacing is inherited from the style hierarchy. A float value, e.g.
``2.0`` or ``1.75``, indicates spacing is applied in multiples of
line heights. A |Length| value such as ``Pt(12)`` indicates spacing
is a fixed height. The |Pt| value class is a convenient way to apply
line spacing in units of points. Assigning |None| resets line spacing
to inherit from the style hierarchy.
"""
pPr = self._element.pPr
if pPr is None:
return None
return self._line_spacing(pPr.spacing_line, pPr.spacing_lineRule)
@line_spacing.setter
def line_spacing(self, value):
pPr = self._element.get_or_add_pPr()
if value is None:
pPr.spacing_line = None
pPr.spacing_lineRule = None
elif isinstance(value, Length):
pPr.spacing_line = value
if pPr.spacing_lineRule != WD_LINE_SPACING.AT_LEAST:
pPr.spacing_lineRule = WD_LINE_SPACING.EXACTLY
else:
pPr.spacing_line = Emu(value * Twips(240))
pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE
@property
def line_spacing_rule(self):
"""
A member of the :ref:`WdLineSpacing` enumeration indicating how the
value of :attr:`line_spacing` should be interpreted. Assigning any of
the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or
:attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing`
to be updated to produce the corresponding line spacing.
"""
pPr = self._element.pPr
if pPr is None:
return None
return self._line_spacing_rule(
pPr.spacing_line, pPr.spacing_lineRule
)
@line_spacing_rule.setter
def line_spacing_rule(self, value):
pPr = self._element.get_or_add_pPr()
if value == WD_LINE_SPACING.SINGLE:
pPr.spacing_line = Twips(240)
pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE
elif value == WD_LINE_SPACING.ONE_POINT_FIVE:
pPr.spacing_line = Twips(360)
pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE
elif value == WD_LINE_SPACING.DOUBLE:
pPr.spacing_line = Twips(480)
pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE
else:
pPr.spacing_lineRule = value
@property
def page_break_before(self):
"""
|True| if the paragraph should appear at the top of the page
following the prior paragraph. |None| indicates its effective value
is inherited from the style hierarchy.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.pageBreakBefore_val
@page_break_before.setter
def page_break_before(self, value):
self._element.get_or_add_pPr().pageBreakBefore_val = value
@property
def right_indent(self):
"""
|Length| value specifying the space between the right margin and the
right side of the paragraph. |None| indicates the right indent value
is inherited from the style hierarchy. Use a |Cm| value object as
a convenient way to apply indentation in units of centimeters.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.ind_right
@right_indent.setter
def right_indent(self, value):
pPr = self._element.get_or_add_pPr()
pPr.ind_right = value
@property
def shading_fill(self):
"""
A member of :ref:`WdColorIndex` indicating the color of highlighting
applied, or `None` if no highlighting is applied.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.shading_fill
@shading_fill.setter
def shading_fill(self, value):
pPr = self._element.get_or_add_pPr()
pPr.shading_fill = value
@property
def space_after(self):
"""
|Length| value specifying the spacing to appear between this
paragraph and the subsequent paragraph. |None| indicates this value
is inherited from the style hierarchy. |Length| objects provide
convenience properties, such as :attr:`~.Length.pt` and
:attr:`~.Length.inches`, that allow easy conversion to various length
units.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.spacing_after
@space_after.setter
def space_after(self, value):
self._element.get_or_add_pPr().spacing_after = value
@property
def space_before(self):
"""
|Length| value specifying the spacing to appear between this
paragraph and the prior paragraph. |None| indicates this value is
inherited from the style hierarchy. |Length| objects provide
convenience properties, such as :attr:`~.Length.pt` and
:attr:`~.Length.cm`, that allow easy conversion to various length
units.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.spacing_before
@space_before.setter
def space_before(self, value):
self._element.get_or_add_pPr().spacing_before = value
@lazyproperty
def tab_stops(self):
"""
|TabStops| object providing access to the tab stops defined for this
paragraph format.
"""
pPr = self._element.get_or_add_pPr()
return TabStops(pPr)
@property
def widow_control(self):
"""
|True| if the first and last lines in the paragraph remain on the
same page as the rest of the paragraph when Word repaginates the
document. |None| indicates its effective value is inherited from the
style hierarchy.
"""
pPr = self._element.pPr
if pPr is None:
return None
return pPr.widowControl_val
@widow_control.setter
def widow_control(self, value):
self._element.get_or_add_pPr().widowControl_val = value
@staticmethod
def _line_spacing(spacing_line, spacing_lineRule):
"""
Return the line spacing value calculated from the combination of
*spacing_line* and *spacing_lineRule*. Returns a |float| number of
lines when *spacing_lineRule* is ``WD_LINE_SPACING.MULTIPLE``,
otherwise a |Length| object of absolute line height is returned.
Returns |None| when *spacing_line* is |None|.
"""
if spacing_line is None:
return None
if spacing_lineRule == WD_LINE_SPACING.MULTIPLE:
return spacing_line / Pt(12)
return spacing_line
@staticmethod
def _line_spacing_rule(line, lineRule):
"""
Return the line spacing rule value calculated from the combination of
*line* and *lineRule*. Returns special members of the
:ref:`WdLineSpacing` enumeration when line spacing is single, double,
or 1.5 lines.
"""
if lineRule == WD_LINE_SPACING.MULTIPLE:
if line == Twips(240):
return WD_LINE_SPACING.SINGLE
if line == Twips(360):
return WD_LINE_SPACING.ONE_POINT_FIVE
if line == Twips(480):
return WD_LINE_SPACING.DOUBLE
return lineRule
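# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the library proper). It
# assumes python-docx is installed as the ``docx`` package and simply exercises
# the line-spacing rules implemented above, e.g. via ``python -m docx.text.parfmt``.
if __name__ == '__main__':  # pragma: no cover
    from docx import Document
    doc = Document()
    fmt = doc.add_paragraph('spacing demo').paragraph_format
    fmt.line_spacing = 1.5                 # a float is stored as a multiple (360 twips)
    assert fmt.line_spacing_rule == WD_LINE_SPACING.ONE_POINT_FIVE
    fmt.line_spacing = Pt(18)              # a Length is stored as an exact height
    assert fmt.line_spacing_rule == WD_LINE_SPACING.EXACTLY
    fmt.space_after = Pt(6)
    doc.save('spacing_demo.docx')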
|
the-stack_0_7045 | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
import os
import subprocess
import sys
from openvino.tools.mo.utils.versions_checker import check_python_version # pylint: disable=no-name-in-module
def log_ie_not_found():
log.error("Could not find the Inference Engine or nGraph Python API.\n"
"Consider building the Inference Engine and nGraph Python APIs"
" from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\""
              .format("bat" if sys.platform == "win32" else "sh"))
def setup_env():
ret_code = check_python_version()
if ret_code:
sys.exit(ret_code)
from openvino.tools.mo.utils.find_ie_version import find_ie_version
ie_found = True
try:
ie_found = find_ie_version(silent=True)
except Exception:
ie_found = False
if not ie_found:
log_ie_not_found()
sys.exit(1)
mo_root_path = os.path.join(os.path.dirname(__file__), os.pardir)
python_path_key = 'PYTHONPATH'
if python_path_key not in os.environ:
os.environ[python_path_key] = mo_root_path
else:
os.environ[python_path_key] = os.pathsep.join([os.environ[python_path_key], mo_root_path])
return True
def subprocess_main(framework=None):
"""
Please keep this file compatible with python2 in order to check user python version.
    This function checks that the Inference Engine Python API is available and working as
    expected, and then executes the appropriate main_<fw>.py file in a sub-process. Due to
    OS-specific behaviour we can't simply add the paths to the Python modules and libraries to
    the current environment, so to make the Inference Engine Python API available inside MO we
    need to run a subprocess with a new environment.
"""
setup_env()
path_to_main = os.path.join(os.path.realpath(os.path.dirname(__file__)),
'main_{}.py'.format(framework) if framework else 'main.py')
# python2 compatible code. Do not remove.
args = [sys.executable, path_to_main]
for arg in sys.argv[1:]:
args.append(arg)
status = subprocess.run(args, env=os.environ)
sys.exit(status.returncode)
|
the-stack_0_7047 | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
    def test_wait_for_db_ready(self):
        """Test waiting for the db when the db is available immediately."""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError]*5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
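# A minimal sketch of the management command exercised above. In a real project it
# would live in core/management/commands/wait_for_db.py; that path, the one-second
# sleep and the messages are assumptions -- only the retry-on-OperationalError
# behaviour is implied by the tests. (Real projects often also force a connection
# with connections['default'].ensure_connection() or .cursor().)
import time
from django.db import connections
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Pause execution until the default database connection is available."""
    def handle(self, *args, **options):
        self.stdout.write('Waiting for database...')
        db_conn = None
        while not db_conn:
            try:
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database available!'))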
|
the-stack_0_7048 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tempfile
from typing import TYPE_CHECKING
from flask import flash, g, redirect
from flask_appbuilder import expose, SimpleFormView
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access
from flask_babel import lazy_gettext as _
from werkzeug.wrappers import Response
from wtforms.fields import StringField
from wtforms.validators import ValidationError
import superset.models.core as models
from superset import app, db, is_feature_enabled
from superset.connectors.sqla.models import SqlaTable
from superset.constants import RouteMethod
from superset.exceptions import CertificateException
from superset.sql_parse import Table
from superset.typing import FlaskResponse
from superset.utils import core as utils
from superset.views.base import DeleteMixin, SupersetModelView, YamlExportMixin
from .forms import CsvToDatabaseForm, ExcelToDatabaseForm
from .mixins import DatabaseMixin
from .validators import schema_allows_csv_upload, sqlalchemy_uri_validator
if TYPE_CHECKING:
from werkzeug.datastructures import FileStorage # pylint: disable=unused-import
config = app.config
stats_logger = config["STATS_LOGGER"]
def sqlalchemy_uri_form_validator(_: _, field: StringField) -> None:
"""
Check if user has submitted a valid SQLAlchemy URI
"""
sqlalchemy_uri_validator(field.data, exception=ValidationError)
def certificate_form_validator(_: _, field: StringField) -> None:
"""
Check if user has submitted a valid SSL certificate
"""
if field.data:
try:
utils.parse_ssl_cert(field.data)
except CertificateException as ex:
raise ValidationError(ex.message)
def upload_stream_write(form_file_field: "FileStorage", path: str) -> None:
chunk_size = app.config["UPLOAD_CHUNK_SIZE"]
with open(path, "bw") as file_description:
while True:
chunk = form_file_field.stream.read(chunk_size)
if not chunk:
break
file_description.write(chunk)
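# Illustrative only: upload_stream_write() streams a werkzeug FileStorage to disk
# in UPLOAD_CHUNK_SIZE pieces so a large upload is never held in memory at once.
# A rough stand-alone equivalent (file names below are hypothetical):
#
#   from werkzeug.datastructures import FileStorage
#   with open('big_upload.csv', 'rb') as src:
#       upload_stream_write(FileStorage(stream=src, filename='big_upload.csv'),
#                           '/tmp/big_upload.csv')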
class DatabaseView(
DatabaseMixin, SupersetModelView, DeleteMixin, YamlExportMixin
): # pylint: disable=too-many-ancestors
datamodel = SQLAInterface(models.Database)
include_route_methods = RouteMethod.CRUD_SET
add_template = "superset/models/database/add.html"
edit_template = "superset/models/database/edit.html"
validators_columns = {
"sqlalchemy_uri": [sqlalchemy_uri_form_validator],
"server_cert": [certificate_form_validator],
}
yaml_dict_key = "databases"
def _delete(self, pk: int) -> None:
DeleteMixin._delete(self, pk)
@expose("/list/")
@has_access
def list(self) -> FlaskResponse:
if not is_feature_enabled("ENABLE_REACT_CRUD_VIEWS"):
return super().list()
return super().render_app_template()
class CsvToDatabaseView(SimpleFormView):
form = CsvToDatabaseForm
form_template = "superset/form_view/csv_to_database_view/edit.html"
form_title = _("CSV to Database configuration")
add_columns = ["database", "schema", "table_name"]
def form_get(self, form: CsvToDatabaseForm) -> None:
form.sep.data = ","
form.header.data = 0
form.mangle_dupe_cols.data = True
form.skipinitialspace.data = False
form.skip_blank_lines.data = True
form.infer_datetime_format.data = True
form.decimal.data = "."
form.if_exists.data = "fail"
def form_post(self, form: CsvToDatabaseForm) -> Response:
database = form.con.data
csv_table = Table(table=form.name.data, schema=form.schema.data)
if not schema_allows_csv_upload(database, csv_table.schema):
message = _(
'Database "%(database_name)s" schema "%(schema_name)s" '
"is not allowed for csv uploads. Please contact your Superset Admin.",
database_name=database.database_name,
schema_name=csv_table.schema,
)
flash(message, "danger")
return redirect("/csvtodatabaseview/form")
if "." in csv_table.table and csv_table.schema:
            message = _(
                "You cannot specify a namespace both in the name of the table: "
                '"%(table)s" and in the schema field: '
                '"%(schema)s". Please remove one',
table=csv_table.table,
schema=csv_table.schema,
)
flash(message, "danger")
return redirect("/csvtodatabaseview/form")
uploaded_tmp_file_path = tempfile.NamedTemporaryFile(
dir=app.config["UPLOAD_FOLDER"],
suffix=os.path.splitext(form.csv_file.data.filename)[1].lower(),
delete=False,
).name
try:
utils.ensure_path_exists(config["UPLOAD_FOLDER"])
upload_stream_write(form.csv_file.data, uploaded_tmp_file_path)
con = form.data.get("con")
database = (
db.session.query(models.Database).filter_by(id=con.data.get("id")).one()
)
# More can be found here:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
csv_to_df_kwargs = {
"sep": form.sep.data,
"header": form.header.data if form.header.data else 0,
"index_col": form.index_col.data,
"mangle_dupe_cols": form.mangle_dupe_cols.data,
"skipinitialspace": form.skipinitialspace.data,
"skiprows": form.skiprows.data,
"nrows": form.nrows.data,
"skip_blank_lines": form.skip_blank_lines.data,
"parse_dates": form.parse_dates.data,
"infer_datetime_format": form.infer_datetime_format.data,
"chunksize": 1000,
}
if form.null_values.data:
csv_to_df_kwargs["na_values"] = form.null_values.data
csv_to_df_kwargs["keep_default_na"] = False
# More can be found here:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html
df_to_sql_kwargs = {
"name": csv_table.table,
"if_exists": form.if_exists.data,
"index": form.index.data,
"index_label": form.index_label.data,
"chunksize": 1000,
}
database.db_engine_spec.create_table_from_csv(
uploaded_tmp_file_path,
csv_table,
database,
csv_to_df_kwargs,
df_to_sql_kwargs,
)
# Connect table to the database that should be used for exploration.
# E.g. if hive was used to upload a csv, presto will be a better option
# to explore the table.
expore_database = database
explore_database_id = database.explore_database_id
if explore_database_id:
expore_database = (
db.session.query(models.Database)
.filter_by(id=explore_database_id)
.one_or_none()
or database
)
sqla_table = (
db.session.query(SqlaTable)
.filter_by(
table_name=csv_table.table,
schema=csv_table.schema,
database_id=expore_database.id,
)
.one_or_none()
)
if sqla_table:
sqla_table.fetch_metadata()
if not sqla_table:
sqla_table = SqlaTable(table_name=csv_table.table)
sqla_table.database = expore_database
sqla_table.database_id = database.id
sqla_table.user_id = g.user.id
sqla_table.schema = csv_table.schema
sqla_table.fetch_metadata()
db.session.add(sqla_table)
db.session.commit()
except Exception as ex: # pylint: disable=broad-except
db.session.rollback()
try:
os.remove(uploaded_tmp_file_path)
except OSError:
pass
message = _(
'Unable to upload CSV file "%(filename)s" to table '
'"%(table_name)s" in database "%(db_name)s". '
"Error message: %(error_msg)s",
filename=form.csv_file.data.filename,
table_name=form.name.data,
db_name=database.database_name,
error_msg=str(ex),
)
flash(message, "danger")
stats_logger.incr("failed_csv_upload")
return redirect("/csvtodatabaseview/form")
os.remove(uploaded_tmp_file_path)
# Go back to welcome page / splash screen
message = _(
'CSV file "%(csv_filename)s" uploaded to table "%(table_name)s" in '
'database "%(db_name)s"',
csv_filename=form.csv_file.data.filename,
table_name=str(csv_table),
db_name=sqla_table.database.database_name,
)
flash(message, "info")
stats_logger.incr("successful_csv_upload")
return redirect("/tablemodelview/list/")
class ExcelToDatabaseView(SimpleFormView):
form = ExcelToDatabaseForm
form_template = "superset/form_view/excel_to_database_view/edit.html"
form_title = _("Excel to Database configuration")
add_columns = ["database", "schema", "table_name"]
def form_get(self, form: ExcelToDatabaseForm) -> None:
form.header.data = 0
form.mangle_dupe_cols.data = True
form.decimal.data = "."
form.if_exists.data = "fail"
form.sheet_name.data = ""
def form_post(self, form: ExcelToDatabaseForm) -> Response:
database = form.con.data
excel_table = Table(table=form.name.data, schema=form.schema.data)
if not schema_allows_csv_upload(database, excel_table.schema):
message = _(
'Database "%(database_name)s" schema "%(schema_name)s" '
"is not allowed for excel uploads. Please contact your Superset Admin.",
database_name=database.database_name,
schema_name=excel_table.schema,
)
flash(message, "danger")
return redirect("/exceltodatabaseview/form")
if "." in excel_table.table and excel_table.schema:
            message = _(
                "You cannot specify a namespace both in the name of the table: "
                '"%(table)s" and in the schema field: '
                '"%(schema)s". Please remove one',
table=excel_table.table,
schema=excel_table.schema,
)
flash(message, "danger")
return redirect("/exceltodatabaseview/form")
uploaded_tmp_file_path = tempfile.NamedTemporaryFile(
dir=app.config["UPLOAD_FOLDER"],
suffix=os.path.splitext(form.excel_file.data.filename)[1].lower(),
delete=False,
).name
try:
utils.ensure_path_exists(config["UPLOAD_FOLDER"])
upload_stream_write(form.excel_file.data, uploaded_tmp_file_path)
con = form.data.get("con")
database = (
db.session.query(models.Database).filter_by(id=con.data.get("id")).one()
)
# some params are not supported by pandas.read_excel (e.g. chunksize).
# More can be found here:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_excel.html
excel_to_df_kwargs = {
"header": form.header.data if form.header.data else 0,
"index_col": form.index_col.data,
"mangle_dupe_cols": form.mangle_dupe_cols.data,
"skiprows": form.skiprows.data,
"nrows": form.nrows.data,
"sheet_name": form.sheet_name.data if form.sheet_name.data else 0,
"parse_dates": form.parse_dates.data,
}
if form.null_values.data:
excel_to_df_kwargs["na_values"] = form.null_values.data
excel_to_df_kwargs["keep_default_na"] = False
df_to_sql_kwargs = {
"name": excel_table.table,
"if_exists": form.if_exists.data,
"index": form.index.data,
"index_label": form.index_label.data,
"chunksize": 1000,
}
database.db_engine_spec.create_table_from_excel(
uploaded_tmp_file_path,
excel_table,
database,
excel_to_df_kwargs,
df_to_sql_kwargs,
)
# Connect table to the database that should be used for exploration.
# E.g. if hive was used to upload a excel, presto will be a better option
# to explore the table.
expore_database = database
explore_database_id = database.explore_database_id
if explore_database_id:
expore_database = (
db.session.query(models.Database)
.filter_by(id=explore_database_id)
.one_or_none()
or database
)
sqla_table = (
db.session.query(SqlaTable)
.filter_by(
table_name=excel_table.table,
schema=excel_table.schema,
database_id=expore_database.id,
)
.one_or_none()
)
if sqla_table:
sqla_table.fetch_metadata()
if not sqla_table:
sqla_table = SqlaTable(table_name=excel_table.table)
sqla_table.database = expore_database
sqla_table.database_id = database.id
sqla_table.user_id = g.user.id
sqla_table.schema = excel_table.schema
sqla_table.fetch_metadata()
db.session.add(sqla_table)
db.session.commit()
except Exception as ex: # pylint: disable=broad-except
db.session.rollback()
try:
os.remove(uploaded_tmp_file_path)
except OSError:
pass
message = _(
'Unable to upload Excel file "%(filename)s" to table '
'"%(table_name)s" in database "%(db_name)s". '
"Error message: %(error_msg)s",
filename=form.excel_file.data.filename,
table_name=form.name.data,
db_name=database.database_name,
error_msg=str(ex),
)
flash(message, "danger")
stats_logger.incr("failed_excel_upload")
return redirect("/exceltodatabaseview/form")
os.remove(uploaded_tmp_file_path)
# Go back to welcome page / splash screen
message = _(
'Excel file "%(excel_filename)s" uploaded to table "%(table_name)s" in '
'database "%(db_name)s"',
excel_filename=form.excel_file.data.filename,
table_name=str(excel_table),
db_name=sqla_table.database.database_name,
)
flash(message, "info")
stats_logger.incr("successful_excel_upload")
return redirect("/tablemodelview/list/")
|
the-stack_0_7050 | # Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2020, Emanuele Bugliarello (@e-bug).
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import json
import yaml
import random
import logging
import argparse
from io import open
from tqdm import tqdm
import _pickle as cPickle
from easydict import EasyDict as edict
import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.utils.data import DataLoader
from pytorch_transformers.tokenization_bert import BertTokenizer
from volta.config import BertConfig
from volta.encoders import BertForVLPreTraining
from volta.datasets import FlickrVis4LangDataset
from volta.datasets._all_image_features_reader import ImageFeaturesH5Reader
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser()
# Model
parser.add_argument("--from_pretrained", default="bert-base-uncased", type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--bert_model", default="bert-base-uncased", type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--config_file", default="config/bert_config.json", type=str,
help="The config file which specified the model details.")
# Output
parser.add_argument("--output_dir", default="results", type=str,
help="The output directory where the model checkpoints will be written.")
parser.add_argument("--dump_results", default=False, action="store_true",
help="Whether to save predictions onto disk")
# Task
parser.add_argument("--tasks_config_file", default="config_tasks/vilbert_trainval_tasks.yml", type=str,
help="The config file which specified the tasks details.")
parser.add_argument("--task", default="", type=str,
help="training task number")
parser.add_argument("--masking", default=None, type=str, choices=["all", "object", "none"],
help="Image regions to mask")
parser.add_argument("--overlap_threshold", default=0.5, type=float,
help="Threshold for image regions to mask")
# Text
parser.add_argument("--do_lower_case", default=True, type=bool,
help="Whether to lower case the input text. True for uncased models, False for cased models.")
# Evaluation
parser.add_argument("--split", default="", type=str,
help="which split to use.")
parser.add_argument("--batch_size", default=30, type=int,
help="batch size.")
parser.add_argument("--drop_last", action="store_true",
help="whether to drop last incomplete batch")
# Seed
parser.add_argument("--seed", type=int, default=42,
help="random seed for initialization")
# Distributed
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument("--num_workers", type=int, default=0,
help="Number of workers in the dataloader.")
parser.add_argument("--in_memory", default=False, type=bool,
help="whether use chunck for parallel training.")
parser.add_argument("--use_chunk", default=0, type=float,
help="whether use chunck for parallel training.")
return parser.parse_args()
def main():
args = parse_args()
# Devices
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend="nccl")
default_gpu = False
if dist.is_available() and args.local_rank != -1:
rank = dist.get_rank()
if rank == 0:
default_gpu = True
else:
default_gpu = True
logger.info(f"device: {device} n_gpu: {n_gpu}, distributed training: {bool(args.local_rank != -1)}")
# Load config
config = BertConfig.from_json_file(args.config_file)
# Load task config
with open(args.tasks_config_file, "r") as f:
task_cfg = edict(yaml.safe_load(f))
task_id = args.task.strip()
task = "TASK" + task_id
task_name = task_cfg[task]["name"]
if task_cfg[task].get("fusion_method", None):
# VL-BERT pooling for VQA
config.fusion_method = task_cfg[task]["fusion_method"]
# Output dirs
savePath = args.output_dir
if default_gpu and not os.path.exists(savePath):
os.makedirs(savePath)
# Seed
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# Dataset
feats_h5path = task_cfg[task]["features_h5path1"]
features_reader = ImageFeaturesH5Reader(feats_h5path, config, args.in_memory)
batch_size = task_cfg[task]["batch_size"]
num_workers = args.num_workers
if args.local_rank != -1:
batch_size = int(batch_size / dist.get_world_size())
num_workers = int(num_workers / dist.get_world_size())
logger.info("Loading %s Dataset with batch size %d" % (task_name, batch_size))
eval_split = args.split or task_cfg[task]["val_split"]
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
dset = FlickrVis4LangDataset(
task, task_cfg[task]["dataroot"], args.masking, eval_split, features_reader, None,
tokenizer, args.bert_model, max_seq_length=task_cfg[task]["max_seq_length"],
max_region_num=task_cfg[task]["max_region_num"], num_locs=config.num_locs,
threshold=args.overlap_threshold, add_global_imgfeat=config.add_global_imgfeat
)
dl = DataLoader(dset, shuffle=False, batch_size=batch_size, num_workers=num_workers, pin_memory=True)
# Model
config.visual_target_weights = {}
model = BertForVLPreTraining.from_pretrained(args.from_pretrained, config=config)
# Move to GPU(s)
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
)
model = DDP(model, delay_allreduce=True)
elif n_gpu > 1:
model = nn.DataParallel(model)
# Print summary
if default_gpu:
print("***** Running evaluation *****")
print(" Num Iters: ", len(dl))
print(" Batch size: ", batch_size)
# Evaluate
model.eval()
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
phrase_ids, image_ids, pred_tokens, true_tokens, pred_scores, lm_losses = [], [], [], [], [], []
for batch in tqdm(dl, total=len(dl)):
image_id = batch[-1]
batch = batch[:-1]
if device.type != 'cpu':
batch = tuple(t.cuda(device=device, non_blocking=True) for t in batch)
phrase_id, caption, input_mask, segment_ids, lm_label_ids, features, spatials, image_cls, \
obj_labels, obj_confs, attr_labels, attr_confs, image_attrs, image_mask, image_labels = batch
with torch.no_grad():
predictions_t, _, _, _, _ = model(
caption, features, spatials,
token_type_ids=segment_ids, attention_mask=input_mask, image_attention_mask=image_mask,
masked_lm_labels=None, image_label=None, image_cls=image_cls,
obj_labels=obj_labels, obj_confs=obj_confs, attr_labels=attr_labels,
attr_confs=attr_confs, image_attrs=image_attrs
)
# loss = masked_loss_t + masked_loss_v + pair_match_loss
target_ixs = [[] for _ in range(predictions_t.size(0))]
xs, ys = torch.where(lm_label_ids != -1)
for x, y in zip(xs, ys):
target_ixs[x].append(y.item())
for bix in range(predictions_t.size(0)):
pred_bix_tokens, true_bix_tokens, bix_predictions = [], [], []
for masked_ix in target_ixs[bix]:
predicted_index = torch.argmax(predictions_t[bix, masked_ix]).item()
predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
label_token = tokenizer.convert_ids_to_tokens([lm_label_ids[bix, masked_ix].item()])[0]
pred_bix_tokens.append(predicted_token)
true_bix_tokens.append(label_token)
                bix_predictions.append(predictions_t[bix, masked_ix].cpu().numpy())
masked_lm_loss = loss_fct(predictions_t[bix].view(-1, config.vocab_size), lm_label_ids[bix].view(-1),).unsqueeze(0).item()
            lm_losses.append(masked_lm_loss)
            # When dumping full predictions, the following could also be collected:
            # if args.dump_results:
            #     pred_tokens.append(pred_bix_tokens)
            #     true_tokens.append(true_bix_tokens)
            #     pred_scores.append(bix_predictions)
            #     image_ids.append(image_id[bix].item())
            #     phrase_ids.append(phrase_id[bix].item())
if default_gpu:
print("MLM:", np.mean(np.array(lm_losses)))
if args.dump_results:
eval_path = os.path.join(savePath, eval_split)
masking_str = args.masking if args.masking != "ref" else args.masking+str(args.overlap_threshold)
# cPickle.dump(pred_tokens, open(eval_path + "_%s_preds.pkl" % masking_str, "wb"))
# cPickle.dump(true_tokens, open(eval_path + "_%s_truth.pkl" % masking_str, "wb"))
# cPickle.dump(pred_scores, open(eval_path + "_%s_score.pkl" % masking_str, "wb"))
# cPickle.dump(image_ids, open(eval_path + "_%s_imgids.pkl" % masking_str, "wb"))
# cPickle.dump(phrase_ids, open(eval_path + "_%s_phrids.pkl" % masking_str, "wb"))
cPickle.dump(lm_losses, open(eval_path + "_%s_mlm.pkl" % masking_str, "wb"))
if __name__ == "__main__":
main()
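# Illustrative only: a typical single-GPU invocation of this script. The script
# name, config paths and task id are placeholders chosen by analogy with the
# project's config_tasks layout, not verified values.
#
#   python eval_mlm.py --config_file config/vilbert_base.json \
#       --from_pretrained checkpoints/pytorch_model.bin \
#       --tasks_config_file config_tasks/vilbert_trainval_tasks.yml \
#       --task 8 --masking object --split test --dump_results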
|
the-stack_0_7051 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Databricks hook.
This hook enables the submitting and running of jobs to the Databricks platform. Internally the
operators talk to the ``api/2.0/jobs/runs/submit``
`endpoint <https://docs.databricks.com/api/latest/jobs.html#runs-submit>`_.
"""
import copy
import sys
import time
from typing import Any, Dict, Optional, Tuple
from urllib.parse import urlparse
import requests
from requests import PreparedRequest, exceptions as requests_exceptions
from requests.auth import AuthBase, HTTPBasicAuth
from requests.exceptions import JSONDecodeError
from tenacity import RetryError, Retrying, retry_if_exception, stop_after_attempt, wait_exponential
from airflow import __version__
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection
if sys.version_info >= (3, 8):
from functools import cached_property
else:
from cached_property import cached_property
USER_AGENT_HEADER = {'user-agent': f'airflow-{__version__}'}
# https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token#--get-an-azure-active-directory-access-token
# https://docs.microsoft.com/en-us/graph/deployments#app-registration-and-token-service-root-endpoints
AZURE_DEFAULT_AD_ENDPOINT = "https://login.microsoftonline.com"
AZURE_TOKEN_SERVICE_URL = "{}/{}/oauth2/token"
# https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token
AZURE_METADATA_SERVICE_TOKEN_URL = "http://169.254.169.254/metadata/identity/oauth2/token"
AZURE_METADATA_SERVICE_INSTANCE_URL = "http://169.254.169.254/metadata/instance"
TOKEN_REFRESH_LEAD_TIME = 120
AZURE_MANAGEMENT_ENDPOINT = "https://management.core.windows.net/"
DEFAULT_DATABRICKS_SCOPE = "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d"
class BaseDatabricksHook(BaseHook):
"""
Base for interaction with Databricks.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
:param timeout_seconds: The amount of time in seconds the requests library
will wait before timing-out.
:param retry_limit: The number of times to retry the connection in case of
service outages.
:param retry_delay: The number of seconds to wait between retries (it
might be a floating point number).
:param retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
"""
conn_name_attr = 'databricks_conn_id'
default_conn_name = 'databricks_default'
conn_type = 'databricks'
extra_parameters = [
'token',
'host',
'use_azure_managed_identity',
'azure_ad_endpoint',
'azure_resource_id',
'azure_tenant_id',
]
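    # Illustrative only: one way a matching connection could be created (values are
    # placeholders). With personal-access-token auth the token is carried in `extra`:
    #
    #   airflow connections add databricks_default \
    #       --conn-type databricks \
    #       --conn-host https://xx.cloud.databricks.com \
    #       --conn-extra '{"token": "<personal-access-token>"}'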
def __init__(
self,
databricks_conn_id: str = default_conn_name,
timeout_seconds: int = 180,
retry_limit: int = 3,
retry_delay: float = 1.0,
retry_args: Optional[Dict[Any, Any]] = None,
) -> None:
super().__init__()
self.databricks_conn_id = databricks_conn_id
self.timeout_seconds = timeout_seconds
if retry_limit < 1:
raise ValueError('Retry limit must be greater than or equal to 1')
self.retry_limit = retry_limit
self.retry_delay = retry_delay
self.aad_tokens: Dict[str, dict] = {}
self.aad_timeout_seconds = 10
def my_after_func(retry_state):
self._log_request_error(retry_state.attempt_number, retry_state.outcome)
if retry_args:
self.retry_args = copy.copy(retry_args)
self.retry_args['retry'] = retry_if_exception(self._retryable_error)
self.retry_args['after'] = my_after_func
else:
self.retry_args = dict(
stop=stop_after_attempt(self.retry_limit),
wait=wait_exponential(min=self.retry_delay, max=(2**retry_limit)),
retry=retry_if_exception(self._retryable_error),
after=my_after_func,
)
@cached_property
def databricks_conn(self) -> Connection:
return self.get_connection(self.databricks_conn_id)
def get_conn(self) -> Connection:
return self.databricks_conn
@cached_property
def host(self) -> str:
if 'host' in self.databricks_conn.extra_dejson:
host = self._parse_host(self.databricks_conn.extra_dejson['host'])
else:
host = self._parse_host(self.databricks_conn.host)
return host
@staticmethod
def _parse_host(host: str) -> str:
"""
The purpose of this function is to be robust to improper connections
settings provided by users, specifically in the host field.
For example -- when users supply ``https://xx.cloud.databricks.com`` as the
host, we must strip out the protocol to get the host.::
h = DatabricksHook()
assert h._parse_host('https://xx.cloud.databricks.com') == \
'xx.cloud.databricks.com'
In the case where users supply the correct ``xx.cloud.databricks.com`` as the
host, this function is a no-op.::
assert h._parse_host('xx.cloud.databricks.com') == 'xx.cloud.databricks.com'
"""
urlparse_host = urlparse(host).hostname
if urlparse_host:
# In this case, host = https://xx.cloud.databricks.com
return urlparse_host
else:
# In this case, host = xx.cloud.databricks.com
return host
def _get_retry_object(self) -> Retrying:
"""
Instantiates a retry object
:return: instance of Retrying class
"""
return Retrying(**self.retry_args)
def _get_aad_token(self, resource: str) -> str:
"""
        Gets an AAD token for the given resource. Supports managed identity or service principal (SPN) auth.
        :param resource: resource to issue the token for
        :return: AAD token, or raises an exception if the token cannot be obtained
"""
aad_token = self.aad_tokens.get(resource)
if aad_token and self._is_aad_token_valid(aad_token):
return aad_token['token']
self.log.info('Existing AAD token is expired, or going to expire soon. Refreshing...')
try:
for attempt in self._get_retry_object():
with attempt:
if self.databricks_conn.extra_dejson.get('use_azure_managed_identity', False):
params = {
"api-version": "2018-02-01",
"resource": resource,
}
resp = requests.get(
AZURE_METADATA_SERVICE_TOKEN_URL,
params=params,
headers={**USER_AGENT_HEADER, "Metadata": "true"},
timeout=self.aad_timeout_seconds,
)
else:
tenant_id = self.databricks_conn.extra_dejson['azure_tenant_id']
data = {
"grant_type": "client_credentials",
"client_id": self.databricks_conn.login,
"resource": resource,
"client_secret": self.databricks_conn.password,
}
azure_ad_endpoint = self.databricks_conn.extra_dejson.get(
"azure_ad_endpoint", AZURE_DEFAULT_AD_ENDPOINT
)
resp = requests.post(
AZURE_TOKEN_SERVICE_URL.format(azure_ad_endpoint, tenant_id),
data=data,
headers={
**USER_AGENT_HEADER,
'Content-Type': 'application/x-www-form-urlencoded',
},
timeout=self.aad_timeout_seconds,
)
resp.raise_for_status()
jsn = resp.json()
if (
'access_token' not in jsn
or jsn.get('token_type') != 'Bearer'
or 'expires_on' not in jsn
):
raise AirflowException(f"Can't get necessary data from AAD token: {jsn}")
token = jsn['access_token']
self.aad_tokens[resource] = {'token': token, 'expires_on': int(jsn["expires_on"])}
break
except RetryError:
raise AirflowException(f'API requests to Azure failed {self.retry_limit} times. Giving up.')
except requests_exceptions.HTTPError as e:
raise AirflowException(f'Response: {e.response.content}, Status Code: {e.response.status_code}')
return token
def _get_aad_headers(self) -> dict:
"""
Fills AAD headers if necessary (SPN is outside of the workspace)
:return: dictionary with filled AAD headers
"""
headers = {}
if 'azure_resource_id' in self.databricks_conn.extra_dejson:
mgmt_token = self._get_aad_token(AZURE_MANAGEMENT_ENDPOINT)
headers['X-Databricks-Azure-Workspace-Resource-Id'] = self.databricks_conn.extra_dejson[
'azure_resource_id'
]
headers['X-Databricks-Azure-SP-Management-Token'] = mgmt_token
return headers
@staticmethod
def _is_aad_token_valid(aad_token: dict) -> bool:
"""
        Utility function to check that the AAD token hasn't expired yet
        :param aad_token: dict with properties of the AAD token
        :return: True if the token is still valid, False otherwise
:rtype: bool
"""
now = int(time.time())
if aad_token['expires_on'] > (now + TOKEN_REFRESH_LEAD_TIME):
return True
return False
@staticmethod
def _check_azure_metadata_service() -> None:
"""
Check for Azure Metadata Service
https://docs.microsoft.com/en-us/azure/virtual-machines/linux/instance-metadata-service
"""
try:
jsn = requests.get(
AZURE_METADATA_SERVICE_INSTANCE_URL,
params={"api-version": "2021-02-01"},
headers={"Metadata": "true"},
timeout=2,
).json()
if 'compute' not in jsn or 'azEnvironment' not in jsn['compute']:
raise AirflowException(
f"Was able to fetch some metadata, but it doesn't look like Azure Metadata: {jsn}"
)
except (requests_exceptions.RequestException, ValueError) as e:
raise AirflowException(f"Can't reach Azure Metadata Service: {e}")
def _get_token(self, raise_error: bool = False) -> Optional[str]:
if 'token' in self.databricks_conn.extra_dejson:
self.log.info(
'Using token auth. For security reasons, please set token in Password field instead of extra'
)
return self.databricks_conn.extra_dejson['token']
elif not self.databricks_conn.login and self.databricks_conn.password:
self.log.info('Using token auth.')
return self.databricks_conn.password
elif 'azure_tenant_id' in self.databricks_conn.extra_dejson:
if self.databricks_conn.login == "" or self.databricks_conn.password == "":
raise AirflowException("Azure SPN credentials aren't provided")
self.log.info('Using AAD Token for SPN.')
return self._get_aad_token(DEFAULT_DATABRICKS_SCOPE)
elif self.databricks_conn.extra_dejson.get('use_azure_managed_identity', False):
self.log.info('Using AAD Token for managed identity.')
self._check_azure_metadata_service()
return self._get_aad_token(DEFAULT_DATABRICKS_SCOPE)
elif raise_error:
raise AirflowException('Token authentication isn\'t configured')
return None
def _log_request_error(self, attempt_num: int, error: str) -> None:
self.log.error('Attempt %s API Request to Databricks failed with reason: %s', attempt_num, error)
def _do_api_call(self, endpoint_info: Tuple[str, str], json: Optional[Dict[str, Any]] = None):
"""
Utility function to perform an API call with retries
:param endpoint_info: Tuple of method and endpoint
:param json: Parameters for this API call.
:return: If the api call returns a OK status code,
this function returns the response in JSON. Otherwise,
we throw an AirflowException.
:rtype: dict
"""
method, endpoint = endpoint_info
# TODO: get rid of explicit 'api/' in the endpoint specification
url = f'https://{self.host}/{endpoint}'
aad_headers = self._get_aad_headers()
        headers = {**USER_AGENT_HEADER, **aad_headers}
auth: AuthBase
token = self._get_token()
if token:
auth = _TokenAuth(token)
else:
self.log.info('Using basic auth.')
auth = HTTPBasicAuth(self.databricks_conn.login, self.databricks_conn.password)
request_func: Any
if method == 'GET':
request_func = requests.get
elif method == 'POST':
request_func = requests.post
elif method == 'PATCH':
request_func = requests.patch
elif method == 'DELETE':
request_func = requests.delete
else:
raise AirflowException('Unexpected HTTP Method: ' + method)
try:
for attempt in self._get_retry_object():
with attempt:
response = request_func(
url,
json=json if method in ('POST', 'PATCH') else None,
params=json if method == 'GET' else None,
auth=auth,
headers=headers,
timeout=self.timeout_seconds,
)
response.raise_for_status()
return response.json()
except RetryError:
raise AirflowException(f'API requests to Databricks failed {self.retry_limit} times. Giving up.')
except requests_exceptions.HTTPError as e:
raise AirflowException(f'Response: {e.response.content}, Status Code: {e.response.status_code}')
@staticmethod
def _get_error_code(exception: BaseException) -> str:
if isinstance(exception, requests_exceptions.HTTPError):
try:
jsn = exception.response.json()
return jsn.get('error_code', '')
except JSONDecodeError:
pass
return ""
@staticmethod
def _retryable_error(exception: BaseException) -> bool:
if not isinstance(exception, requests_exceptions.RequestException):
return False
return isinstance(exception, (requests_exceptions.ConnectionError, requests_exceptions.Timeout)) or (
exception.response is not None
and (
exception.response.status_code >= 500
or exception.response.status_code == 429
or (
exception.response.status_code == 400
and BaseDatabricksHook._get_error_code(exception) == 'COULD_NOT_ACQUIRE_LOCK'
)
)
)
class _TokenAuth(AuthBase):
"""
Helper class for requests Auth field. AuthBase requires you to implement the __call__
magic function.
"""
def __init__(self, token: str) -> None:
self.token = token
def __call__(self, r: PreparedRequest) -> PreparedRequest:
r.headers['Authorization'] = 'Bearer ' + self.token
return r
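# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how a concrete hook could build on
# BaseDatabricksHook. The class name, the connection id and the
# 'api/2.0/clusters/list' endpoint are assumptions for illustration only;
# the (method, endpoint) contract of _do_api_call comes from the code above.
class _ExampleDatabricksHook(BaseDatabricksHook):
    def list_clusters(self) -> dict:
        # _do_api_call resolves auth (token, basic or AAD) and retries internally.
        return self._do_api_call(('GET', 'api/2.0/clusters/list'))
if __name__ == '__main__':
    # Requires a configured 'databricks_default' Airflow connection.
    hook = _ExampleDatabricksHook(databricks_conn_id='databricks_default')
    print(hook.list_clusters())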
|
the-stack_0_7052 | '''
python-lambda-local: Test Direct Invocations
(command-line and direct).
Meant for use with py.test.
Copyright 2015-2020 HENNGE K.K. (formerly known as HDE, Inc.)
Licensed under MIT
'''
import json
import argparse
from multiprocessing import Process
import os
from lambda_local.main import run as lambda_run
from lambda_local.main import call as lambda_call
from lambda_local.main import ERR_TYPE_EXCEPTION
from lambda_local.context import Context
def my_lambda_function(event, context):
print("Hello World from My Lambda Function!")
return 42
def my_failing_lambda_function(event, context):
raise Exception('Oh no')
def test_function_call_for_pytest():
(result, error_type) = lambda_call(
my_lambda_function, {}, Context(1))
assert error_type is None
assert result == 42
def test_handle_exceptions_gracefully():
(result, error_type) = lambda_call(
my_failing_lambda_function, {}, Context(1))
assert error_type is ERR_TYPE_EXCEPTION
def test_check_command_line():
request = json.dumps({})
request_file = 'check_command_line_event.json'
with open(request_file, "w") as f:
f.write(request)
args = argparse.Namespace(event=request_file,
file='tests/test_direct_invocations.py',
function='my_lambda_function',
timeout=1,
environment_variables='',
library=None,
version_name='',
arn_string=''
)
p = Process(target=lambda_run, args=(args,))
p.start()
p.join()
os.remove(request_file)
assert p.exitcode == 0
def test_check_command_line_error():
request = json.dumps({})
request_file = 'check_command_line_event.json'
with open(request_file, "w") as f:
f.write(request)
args = argparse.Namespace(event=request_file,
file='tests/test_direct_invocations.py',
function='my_failing_lambda_function',
timeout=1,
environment_variables='',
library=None,
version_name='',
arn_string=''
)
p = Process(target=lambda_run, args=(args,))
p.start()
p.join()
os.remove(request_file)
assert p.exitcode == 1
|
the-stack_0_7053 | import turtle as tt
from random import randint, sample
def draw():
size = randint(40, 300)
angles = (144, 150, 157.5, 160, 165)
angle = sample(angles, 1)[0]
colors = [
('#922B21', '#E6B0AA'), ('#76448A', '#D2B4DE'), ('#1F618D', '#AED6F1'), ('#515A5A', '#EAEDED'),
('#148F77', '#D1F2EB'), ('#B7950B', '#F7DC6F'), ('#F39C12', '#FDEBD0'), ('#BA4A00', '#F6DDCC')]
color = sample(colors, 1)[0]
tt.color(color[0], color[1])
x_pos = randint(-200, 200)
y_pos = randint(-200, 200)
tt.pu()
tt.setpos(x_pos, y_pos)
start_position = tt.pos()
tt.pd()
tt.begin_fill()
while True:
tt.forward(size)
tt.left(angle)
if abs(tt.pos() - start_position) < 1:
break
tt.end_fill()
tt.circle(100)
for i in range(3):
tt.pensize(i % 3)
draw()
tt.done()
|
the-stack_0_7059 | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Tuple
import abc
from flax import linen as nn
from jax import numpy as jnp
from netket.utils.types import PyTree, PRNGKeyT
from netket.utils import struct
@struct.dataclass
class MetropolisRule(abc.ABC):
"""
Base class for transition rules of Metropolis, such as Local, Exchange, Hamiltonian
and several others.
"""
def init_state(
self,
sampler: "MetropolisSampler", # noqa: F821
machine: nn.Module,
params: PyTree,
key: PRNGKeyT,
) -> Optional[Any]:
"""
Initialises the optional internal state of the Metropolis sampler transition
rule.
        The provided key is unique and does not need to be split.
It should return an immutable data structure.
Arguments:
sampler: The Metropolis sampler.
machine: A Flax module with the forward pass of the log-pdf.
params: The PyTree of parameters of the model.
key: A Jax PRNGKey.
Returns:
An optional state.
"""
return None
def reset(
self,
sampler: "MetropolisSampler", # noqa: F821
machine: nn.Module,
params: PyTree,
sampler_state: "SamplerState", # noqa: F821
) -> Optional[Any]:
"""
Resets the internal state of the Metropolis Sampler Transition Rule.
The default implementation returns the current rule_state without modifying it.
Arguments:
sampler: The Metropolis sampler.
machine: A Flax module with the forward pass of the log-pdf.
params: The PyTree of parameters of the model.
sampler_state: The current state of the sampler. Should not modify it.
Returns:
            A reset state of the rule. This returns the same type as
:py:meth:`~nk.sampler.rule.MetropolisRule.rule_state` and might be `None`.
"""
return sampler_state.rule_state
@abc.abstractmethod
def transition(
self,
sampler: "MetropolisSampler", # noqa: F821
machine: nn.Module,
params: PyTree,
sampler_state: "SamplerState", # noqa: F821
key: PRNGKeyT,
σ: jnp.ndarray,
) -> Tuple[jnp.ndarray, Optional[jnp.ndarray]]:
r"""
        Proposes a new set of configurations :math:`\sigma'` starting from the current
        chain configurations :math:`\sigma`.
        The new configurations :math:`\sigma'` should be a matrix with the same dimension as
        :math:`\sigma`.
        This function should return a tuple, where the first element is the new configurations
        :math:`\sigma'` and the second element is either `None` or an array of length `σ.shape[0]`
        containing an optional log-correction factor. The correction factor should be non-zero
        when the transition rule is non-symmetrical.
Arguments:
sampler: The Metropolis sampler.
machine: A Flax module with the forward pass of the log-pdf.
params: The PyTree of parameters of the model.
sampler_state: The current state of the sampler. Should not modify it.
key: A Jax PRNGKey to use to generate new random configurations.
σ: The current configurations stored in a 2D matrix.
Returns:
A tuple containing the new configurations :math:`\sigma'` and the optional vector of
log corrections to the transition probability.
"""
pass
def random_state(
self,
sampler: "MetropolisSampler", # noqa: F821
machine: nn.Module,
params: PyTree,
sampler_state: "SamplerState", # noqa: F821
key: PRNGKeyT,
):
"""
Generates a random state compatible with this rule.
By default this calls :func:`netket.hilbert.random.random_state`.
Arguments:
sampler: The Metropolis sampler.
machine: A Flax module with the forward pass of the log-pdf.
params: The PyTree of parameters of the model.
sampler_state: The current state of the sampler. Should not modify it.
key: The PRNGKey to use to generate the random state.
"""
return sampler.hilbert.random_state(
key, size=sampler.n_batches, dtype=sampler.dtype
)
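# --- Illustrative sketch (not part of the original module) ---
# A hedged example of a concrete rule implementing the `transition` contract
# above: flip one randomly chosen site of each chain. It assumes a Hilbert
# space with local states {-1, +1}; the class and its name are hypothetical
# and are not part of NetKet's public API as shown here.
import jax
@struct.dataclass
class _ExampleSpinFlipRule(MetropolisRule):
    def transition(self, sampler, machine, params, sampler_state, key, σ):
        n_chains, n_sites = σ.shape
        # Pick one site per chain and flip its value. The proposal is
        # symmetric, so no log-correction factor is needed (second element is None).
        indices = jax.random.randint(key, (n_chains,), 0, n_sites)
        σp = σ.at[jnp.arange(n_chains), indices].multiply(-1)
        return σp, None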
|
the-stack_0_7064 | """
Internal subroutines for e.g. aborting execution with an error message,
or performing indenting on multiline output.
"""
import os
import six
import sys
import struct
import textwrap
from traceback import format_exc
def _encode(msg, stream):
if six.PY2 and isinstance(msg, six.text_type) \
and hasattr(stream, 'encoding') and stream.encoding is not None:
return msg.encode(stream.encoding)
else:
return str(msg)
def isatty(stream):
"""Check if a stream is a tty.
Not all file-like objects implement the `isatty` method.
"""
fn = getattr(stream, 'isatty', None)
if fn is None:
return False
return fn()
def abort(msg):
"""
    Abort execution, print ``msg`` to stderr and exit with error status (1).
This function currently makes use of `SystemExit`_ in a manner that is
similar to `sys.exit`_ (but which skips the automatic printing to stderr,
allowing us to more tightly control it via settings).
Therefore, it's possible to detect and recover from inner calls to `abort`
by using ``except SystemExit`` or similar.
.. _sys.exit: http://docs.python.org/library/sys.html#sys.exit
.. _SystemExit: http://docs.python.org/library/exceptions.html#exceptions.SystemExit
"""
from fabric.state import output, env
if not env.colorize_errors:
red = lambda x: x # noqa: E731
else:
from fabric.colors import red
if output.aborts:
sys.stderr.write(red("\nFatal error: %s\n" % _encode(msg, sys.stderr)))
sys.stderr.write(red("\nAborting.\n"))
if env.abort_exception:
raise env.abort_exception(msg)
else:
# See issue #1318 for details on the below; it lets us construct a
# valid, useful SystemExit while sidestepping the automatic stderr
# print (which would otherwise duplicate with the above in a
# non-controllable fashion).
e = SystemExit(1)
e.message = msg
raise e
def warn(msg):
"""
Print warning message, but do not abort execution.
This function honors Fabric's :doc:`output controls
<../../usage/output_controls>` and will print the given ``msg`` to stderr,
provided that the ``warnings`` output level (which is active by default) is
turned on.
"""
from fabric.state import output, env
if not env.colorize_errors:
magenta = lambda x: x # noqa: E731
else:
from fabric.colors import magenta
if output.warnings:
msg = _encode(msg, sys.stderr)
sys.stderr.write(magenta("\nWarning: %s\n\n" % msg))
def indent(text, spaces=4, strip=False):
"""
Return ``text`` indented by the given number of spaces.
If text is not a string, it is assumed to be a list of lines and will be
joined by ``\\n`` prior to indenting.
When ``strip`` is ``True``, a minimum amount of whitespace is removed from
the left-hand side of the given string (so that relative indents are
preserved, but otherwise things are left-stripped). This allows you to
effectively "normalize" any previous indentation for some inputs.
"""
# Normalize list of strings into a string for dedenting. "list" here means
# "not a string" meaning "doesn't have splitlines". Meh.
if not hasattr(text, 'splitlines'):
text = '\n'.join(text)
# Dedent if requested
if strip:
text = textwrap.dedent(text)
prefix = ' ' * spaces
output = '\n'.join(prefix + line for line in text.splitlines())
# Strip out empty lines before/aft
output = output.strip()
# Reintroduce first indent (which just got stripped out)
output = prefix + output
return output
def puts(text, show_prefix=None, end="\n", flush=False):
"""
An alias for ``print`` whose output is managed by Fabric's output controls.
In other words, this function simply prints to ``sys.stdout``, but will
hide its output if the ``user`` :doc:`output level
</usage/output_controls>` is set to ``False``.
If ``show_prefix=False``, `puts` will omit the leading ``[hostname]``
which it tacks on by default. (It will also omit this prefix if
``env.host_string`` is empty.)
Newlines may be disabled by setting ``end`` to the empty string (``''``).
(This intentionally mirrors Python 3's ``print`` syntax.)
You may force output flushing (e.g. to bypass output buffering) by setting
``flush=True``.
.. seealso:: `~fabric.utils.fastprint`
"""
from fabric.state import output, env
if show_prefix is None:
show_prefix = env.output_prefix
if output.user:
prefix = ""
if env.host_string and show_prefix:
prefix = "[%s] " % env.host_string
sys.stdout.write(prefix + _encode(text, sys.stdout) + end)
if flush:
sys.stdout.flush()
def fastprint(text, show_prefix=False, end="", flush=True):
"""
Print ``text`` immediately, without any prefix or line ending.
This function is simply an alias of `~fabric.utils.puts` with different
default argument values, such that the ``text`` is printed without any
embellishment and immediately flushed.
It is useful for any situation where you wish to print text which might
otherwise get buffered by Python's output buffering (such as within a
processor intensive ``for`` loop). Since such use cases typically also
require a lack of line endings (such as printing a series of dots to
signify progress) it also omits the traditional newline by default.
.. note::
Since `~fabric.utils.fastprint` calls `~fabric.utils.puts`, it is
likewise subject to the ``user`` :doc:`output level
</usage/output_controls>`.
.. seealso:: `~fabric.utils.puts`
"""
return puts(text=text, show_prefix=show_prefix, end=end, flush=flush)
def handle_prompt_abort(prompt_for):
import fabric.state
reason = "Needed to prompt for %s (host: %s), but %%s" % (
prompt_for, fabric.state.env.host_string
)
# Explicit "don't prompt me bro"
if fabric.state.env.abort_on_prompts:
abort(reason % "abort-on-prompts was set to True")
# Implicit "parallel == stdin/prompts have ambiguous target"
if fabric.state.env.parallel:
abort(reason % "input would be ambiguous in parallel mode")
class _AttributeDict(dict):
"""
Dictionary subclass enabling attribute lookup/assignment of keys/values.
For example::
>>> m = _AttributeDict({'foo': 'bar'})
>>> m.foo
'bar'
>>> m.foo = 'not bar'
>>> m['foo']
'not bar'
``_AttributeDict`` objects also provide ``.first()`` which acts like
``.get()`` but accepts multiple keys as arguments, and returns the value of
the first hit, e.g.::
>>> m = _AttributeDict({'foo': 'bar', 'biz': 'baz'})
>>> m.first('wrong', 'incorrect', 'foo', 'biz')
'bar'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError:
# to conform with __getattr__ spec
raise AttributeError(key)
def __setattr__(self, key, value):
self[key] = value
def first(self, *names):
for name in names:
value = self.get(name)
if value:
return value
class _AliasDict(_AttributeDict):
"""
`_AttributeDict` subclass that allows for "aliasing" of keys to other keys.
Upon creation, takes an ``aliases`` mapping, which should map alias names
to lists of key names. Aliases do not store their own value, but instead
set (override) all mapped keys' values. For example, in the following
`_AliasDict`, calling ``mydict['foo'] = True`` will set the values of
``mydict['bar']``, ``mydict['biz']`` and ``mydict['baz']`` all to True::
mydict = _AliasDict(
{'biz': True, 'baz': False},
aliases={'foo': ['bar', 'biz', 'baz']}
)
    Because it is possible for the aliased values to be in a heterogeneous
state, reading aliases is not supported -- only writing to them is allowed.
This also means they will not show up in e.g. ``dict.keys()``.
.. note::
Aliases are recursive, so you may refer to an alias within the key list
of another alias. Naturally, this means that you can end up with
infinite loops if you're not careful.
`_AliasDict` provides a special function, `expand_aliases`, which will take
a list of keys as an argument and will return that list of keys with any
aliases expanded. This function will **not** dedupe, so any aliases which
overlap will result in duplicate keys in the resulting list.
"""
def __init__(self, arg=None, aliases=None):
init = super(_AliasDict, self).__init__
if arg is not None:
init(arg)
else:
init()
# Can't use super() here because of _AttributeDict's setattr override
dict.__setattr__(self, 'aliases', aliases)
def __setitem__(self, key, value):
# Attr test required to not blow up when deepcopy'd
if hasattr(self, 'aliases') and key in self.aliases:
for aliased in self.aliases[key]:
self[aliased] = value
else:
return super(_AliasDict, self).__setitem__(key, value)
def expand_aliases(self, keys):
ret = []
for key in keys:
if key in self.aliases:
ret.extend(self.expand_aliases(self.aliases[key]))
else:
ret.append(key)
return ret
def _pty_size():
"""
Obtain (rows, cols) tuple for sizing a pty on the remote end.
Defaults to 80x24 (which is also the 'ssh' lib's default) but will detect
local (stdout-based) terminal window size on non-Windows platforms.
"""
win32 = (sys.platform == 'win32')
default_rows, default_cols = 24, 80
rows, cols = default_rows, default_cols
if not win32 and isatty(sys.stdout):
import fcntl
import termios
# We want two short unsigned integers (rows, cols)
fmt = 'HH'
# Create an empty (zeroed) buffer for ioctl to map onto. Yay for C!
buffer = struct.pack(fmt, 0, 0)
# Call TIOCGWINSZ to get window size of stdout, returns our filled
# buffer
try:
result = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ,
buffer)
# Unpack buffer back into Python data types
rows, cols = struct.unpack(fmt, result)
# Fall back to defaults if TIOCGWINSZ returns unreasonable values
if rows == 0:
rows = default_rows
if cols == 0:
cols = default_cols
# Deal with e.g. sys.stdout being monkeypatched, such as in testing.
# Or termios not having a TIOCGWINSZ.
except AttributeError:
pass
return rows, cols
def error(message, func=None, exception=None, stdout=None, stderr=None):
"""
Call ``func`` with given error ``message``.
If ``func`` is None (the default), the value of ``env.warn_only``
determines whether to call ``abort`` or ``warn``.
If ``exception`` is given, it is inspected to get a string message, which
is printed alongside the user-generated ``message``.
If ``stdout`` and/or ``stderr`` are given, they are assumed to be strings
to be printed.
"""
import fabric.state
if func is None:
func = fabric.state.env.warn_only and warn or abort
# If exception printing is on, append a traceback to the message
if fabric.state.output.exceptions or fabric.state.output.debug:
exception_message = format_exc()
if exception_message:
message += "\n\n" + exception_message
# Otherwise, if we were given an exception, append its contents.
elif exception is not None:
# Figure out how to get a string out of the exception; EnvironmentError
# subclasses, for example, "are" integers and .strerror is the string.
# Others "are" strings themselves. May have to expand this further for
# other error types.
if hasattr(exception, 'strerror') and exception.strerror is not None:
underlying = exception.strerror
else:
underlying = exception
message += "\n\nUnderlying exception:\n" + indent(str(underlying))
if func is abort:
if stdout and not fabric.state.output.stdout:
message += _format_error_output("Standard output", stdout)
if stderr and not fabric.state.output.stderr:
message += _format_error_output("Standard error", stderr)
return func(message)
def _format_error_output(header, body):
term_width = _pty_size()[1]
header_side_length = int((term_width - (len(header) + 2)) / 2)
mark = "="
side = mark * header_side_length
return "\n\n%s %s %s\n\n%s\n\n%s" % (
side, header, side, body, mark * term_width
)
def apply_lcwd(path, env):
# Apply CWD if a relative path
if not os.path.isabs(path) and env.lcwd:
path = os.path.join(env.lcwd, path)
return path
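# --- Illustrative sketch (not part of the original module) ---
# A small, hedged demonstration of the helpers above; the keys and values
# used here are arbitrary examples, not Fabric defaults.
if __name__ == '__main__':
    print(indent("line one\nline two", spaces=2))
    flags = _AliasDict(
        {'stdout': True, 'stderr': True},
        aliases={'everything': ['stdout', 'stderr']},
    )
    flags['everything'] = False  # writing through the alias updates both keys
    print(flags['stdout'], flags['stderr'])  # -> False False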
|
the-stack_0_7065 | from programs.schema.attributes.abstractattribute import AbstractAttribute
from constants import CC
class HHRaceAttr(AbstractAttribute):
@staticmethod
def getName():
return CC.ATTR_HHRACE
@staticmethod
def getLevels():
return {
'white' : [0],
'black' : [1],
'aian' : [2],
'asian' : [3],
'nhopi' : [4],
'sor' : [5],
'two or more': [6]
}
@staticmethod
def recodeWhiteAlone():
name = CC.HHRACE_WHITEALONE
groupings = {
"White alone": [0]
}
return name, groupings
|
the-stack_0_7068 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urlparse
from oslo.config import cfg
import routes as routes_mapper
import webob
import webob.dec
import webob.exc
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron import manager
from neutron.openstack.common import log as logging
from neutron import wsgi
LOG = logging.getLogger(__name__)
RESOURCES = {'network': 'networks',
'subnet': 'subnets',
'port': 'ports'}
SUB_RESOURCES = {}
COLLECTION_ACTIONS = ['index', 'create']
MEMBER_ACTIONS = ['show', 'update', 'delete']
REQUIREMENTS = {'id': attributes.UUID_PATTERN, 'format': 'xml|json'}
class Index(wsgi.Application):
def __init__(self, resources):
self.resources = resources
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
metadata = {'application/xml': {'attributes': {
'resource': ['name', 'collection'],
'link': ['href', 'rel']}}}
layout = []
for name, collection in self.resources.iteritems():
href = urlparse.urljoin(req.path_url, collection)
resource = {'name': name,
'collection': collection,
'links': [{'rel': 'self',
'href': href}]}
layout.append(resource)
response = dict(resources=layout)
content_type = req.best_match_content_type()
body = wsgi.Serializer(metadata=metadata).serialize(response,
content_type)
return webob.Response(body=body, content_type=content_type)
class APIRouter(wsgi.Router):
@classmethod
def factory(cls, global_config, **local_config):
return cls(**local_config)
def __init__(self, **local_config):
mapper = routes_mapper.Mapper()
plugin = manager.NeutronManager.get_plugin()
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP)
col_kwargs = dict(collection_actions=COLLECTION_ACTIONS,
member_actions=MEMBER_ACTIONS)
def _map_resource(collection, resource, params, parent=None):
allow_bulk = cfg.CONF.allow_bulk
allow_pagination = cfg.CONF.allow_pagination
allow_sorting = cfg.CONF.allow_sorting
controller = base.create_resource(
collection, resource, plugin, params, allow_bulk=allow_bulk,
parent=parent, allow_pagination=allow_pagination,
allow_sorting=allow_sorting)
path_prefix = None
if parent:
path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
parent['member_name'],
collection)
mapper_kwargs = dict(controller=controller,
requirements=REQUIREMENTS,
path_prefix=path_prefix,
**col_kwargs)
return mapper.collection(collection, resource,
**mapper_kwargs)
mapper.connect('index', '/', controller=Index(RESOURCES))
for resource in RESOURCES:
_map_resource(RESOURCES[resource], resource,
attributes.RESOURCE_ATTRIBUTE_MAP.get(
RESOURCES[resource], dict()))
for resource in SUB_RESOURCES:
_map_resource(SUB_RESOURCES[resource]['collection_name'], resource,
attributes.RESOURCE_ATTRIBUTE_MAP.get(
SUB_RESOURCES[resource]['collection_name'],
dict()),
SUB_RESOURCES[resource]['parent'])
super(APIRouter, self).__init__(mapper)
|
the-stack_0_7070 | __all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
'stack', 'vstack']
import functools
import itertools
import operator
import warnings
from . import numeric as _nx
from . import overrides
from .multiarray import array, asanyarray, normalize_axis_index
from . import fromnumeric as _from_nx
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _atleast_1d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst
higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : array_like
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with ``a.ndim >= 1``.
Copies are made only if necessary.
See Also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> np.atleast_1d(x) is x
True
>>> np.atleast_1d(1, [3, 4])
[array([1]), array([3, 4])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1)
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_2d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
"""
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
Returns
-------
res, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
See Also
--------
atleast_1d, atleast_3d
Examples
--------
>>> np.atleast_2d(3.0)
array([[3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
array([[0., 1., 2.]])
>>> np.atleast_2d(x).base is x
True
>>> np.atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_3d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
"""
View inputs as arrays with at least three dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted to
arrays. Arrays that already have three or more dimensions are
preserved.
Returns
-------
res1, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
avoided where possible, and views with three or more dimensions are
returned. For example, a 1-D array of shape ``(N,)`` becomes a view
of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
view of shape ``(M, N, 1)``.
See Also
--------
atleast_1d, atleast_2d
Examples
--------
>>> np.atleast_3d(3.0)
array([[[3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
(1, 3, 1)
>>> x = np.arange(12.0).reshape(4,3)
>>> np.atleast_3d(x).shape
(4, 3, 1)
>>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself
True
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
... print(arr, arr.shape) # doctest: +SKIP
...
[[[1]
[2]]] (1, 2, 1)
[[[1]
[2]]] (1, 2, 1)
[[[1 2]]] (1, 1, 2)
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :, _nx.newaxis]
elif ary.ndim == 2:
result = ary[:, :, _nx.newaxis]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
warnings.warn('arrays to stack must be passed as a "sequence" type '
'such as list or tuple. Support for non-sequence '
'iterables such as generators is deprecated as of '
'NumPy 1.16 and will raise an error in the future.',
FutureWarning, stacklevel=stacklevel)
return ()
return arrays
def _vhstack_dispatcher(tup):
return _arrays_for_stack_dispatcher(tup)
@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
block : Assemble an nd-array from nested lists of blocks.
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third axis).
column_stack : Stack 1-D arrays as columns into a 2-D array.
vsplit : Split an array into multiple sub-arrays vertically (row-wise).
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.vstack((a,b))
array([[1, 2, 3],
[4, 5, 6]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[4], [5], [6]])
>>> np.vstack((a,b))
array([[1],
[2],
[3],
[4],
[5],
[6]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_2d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
return _nx.concatenate(arrs, 0)
@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis, except for 1-D
arrays where it concatenates along the first axis. Rebuilds arrays divided
by `hsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the second axis,
except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
block : Assemble an nd-array from nested lists of blocks.
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
column_stack : Stack 1-D arrays as columns into a 2-D array.
hsplit : Split an array into multiple sub-arrays horizontally (column-wise).
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((4,5,6))
>>> np.hstack((a,b))
array([1, 2, 3, 4, 5, 6])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[4],[5],[6]])
>>> np.hstack((a,b))
array([[1, 4],
[2, 5],
[3, 6]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_1d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs and arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
def _stack_dispatcher(arrays, axis=None, out=None):
arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
The ``axis`` parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the first
dimension and if ``axis=-1`` it will be the last dimension.
.. versionadded:: 1.10.0
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what stack would have returned if no
out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
block : Assemble an nd-array from nested lists of blocks.
split : Split array into a list of multiple sub-arrays of equal size.
Examples
--------
>>> arrays = [np.random.randn(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.stack((a, b))
array([[1, 2, 3],
[4, 5, 6]])
>>> np.stack((a, b), axis=-1)
array([[1, 4],
[2, 5],
[3, 6]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(arrays, stacklevel=2)
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
shapes = {arr.shape for arr in arrays}
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
result_ndim = arrays[0].ndim + 1
axis = normalize_axis_index(axis, result_ndim)
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
return _nx.concatenate(expanded_arrays, axis=axis, out=out)
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
def _block_format_index(index):
"""
Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
"""
idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
return 'arrays' + idx_str
def _block_check_depths_match(arrays, parent_index=[]):
"""
Recursive function checking that the depths of nested lists in `arrays`
all match. Mismatch raises a ValueError as described in the block
docstring below.
The entire index (rather than just the depth) needs to be calculated
for each innermost list, in case an error needs to be raised, so that
the index of the offending list can be printed as part of the error.
Parameters
----------
arrays : nested list of arrays
The arrays to check
parent_index : list of int
The full index of `arrays` within the nested lists passed to
`_block_check_depths_match` at the top of the recursion.
Returns
-------
first_index : list of int
The full index of an element from the bottom of the nesting in
`arrays`. If any element at the bottom is an empty list, this will
refer to it, and the last index along the empty axis will be None.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
final_size: int
        The number of elements in the final array. This is used to motivate
        the choice of algorithm, based on benchmarking wisdom.
"""
if type(arrays) is tuple:
# not strictly necessary, but saves us from:
# - more than one way to do things - no point treating tuples like
# lists
# - horribly confusing behaviour that results when tuples are
# treated like ndarray
raise TypeError(
'{} is a tuple. '
'Only lists can be used to arrange blocks, and np.block does '
'not allow implicit conversion from tuple to ndarray.'.format(
_block_format_index(parent_index)
)
)
elif type(arrays) is list and len(arrays) > 0:
idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
for i, arr in enumerate(arrays))
first_index, max_arr_ndim, final_size = next(idxs_ndims)
for index, ndim, size in idxs_ndims:
final_size += size
if ndim > max_arr_ndim:
max_arr_ndim = ndim
if len(index) != len(first_index):
raise ValueError(
"List depths are mismatched. First element was at depth "
"{}, but there is an element at depth {} ({})".format(
len(first_index),
len(index),
_block_format_index(index)
)
)
# propagate our flag that indicates an empty list at the bottom
if index[-1] is None:
first_index = index
return first_index, max_arr_ndim, final_size
elif type(arrays) is list and len(arrays) == 0:
# We've 'bottomed out' on an empty list
return parent_index + [None], 0, 0
else:
# We've 'bottomed out' - arrays is either a scalar or an array
size = _size(arrays)
return parent_index, _ndim(arrays), size
def _atleast_nd(a, ndim):
# Ensures `a` has at least `ndim` dimensions by prepending
# ones to `a.shape` as necessary
return array(a, ndmin=ndim, copy=False, subok=True)
def _accumulate(values):
return list(itertools.accumulate(values))
def _concatenate_shapes(shapes, axis):
"""Given array shapes, return the resulting shape and slices prefixes.
These help in nested concatenation.
Returns
-------
shape: tuple of int
This tuple satisfies::
shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis)
shape == concatenate(arrs, axis).shape
slice_prefixes: tuple of (slice(start, end), )
For a list of arrays being concatenated, this returns the slice
in the larger array at axis that needs to be sliced into.
For example, the following holds::
ret = concatenate([a, b, c], axis)
_, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)
ret[(slice(None),) * axis + sl_a] == a
ret[(slice(None),) * axis + sl_b] == b
ret[(slice(None),) * axis + sl_c] == c
These are called slice prefixes since they are used in the recursive
blocking algorithm to compute the left-most slices during the
recursion. Therefore, they must be prepended to rest of the slice
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
to existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
shape_at_axis = [shape[axis] for shape in shapes]
# Take a shape, any shape
first_shape = shapes[0]
first_shape_pre = first_shape[:axis]
first_shape_post = first_shape[axis+1:]
if any(shape[:axis] != first_shape_pre or
shape[axis+1:] != first_shape_post for shape in shapes):
raise ValueError(
'Mismatched array shapes in block along axis {}.'.format(axis))
shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
offsets_at_axis = _accumulate(shape_at_axis)
slice_prefixes = [(slice(start, end),)
for start, end in zip([0] + offsets_at_axis,
offsets_at_axis)]
return shape, slice_prefixes
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
"""
Returns the shape of the final array, along with a list
of slices and a list of arrays that can be used for assignment inside the
new array
Parameters
----------
arrays : nested list of arrays
The arrays to check
max_depth : list of int
The number of nested lists
result_ndim : int
        The number of dimensions in the final array.
Returns
-------
shape : tuple of int
The shape that the final array will take on.
slices: list of tuple of slices
The slices into the full array required for assignment. These are
required to be prepended with ``(Ellipsis, )`` to obtain to correct
final index.
arrays: list of ndarray
The data to assign to each slice of the full array
"""
if depth < max_depth:
shapes, slices, arrays = zip(
*[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
for arr in arrays])
axis = result_ndim - max_depth + depth
shape, slice_prefixes = _concatenate_shapes(shapes, axis)
# Prepend the slice prefix and flatten the slices
slices = [slice_prefix + the_slice
for slice_prefix, inner_slices in zip(slice_prefixes, slices)
for the_slice in inner_slices]
# Flatten the array list
arrays = functools.reduce(operator.add, arrays)
return shape, slices, arrays
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
# Return the slice and the array inside a list to be consistent with
# the recursive case.
arr = _atleast_nd(arrays, result_ndim)
return arr.shape, [()], [arr]
def _block(arrays, max_depth, result_ndim, depth=0):
"""
Internal implementation of block based on repeated concatenation.
`arrays` is the argument passed to
block. `max_depth` is the depth of nested lists within `arrays` and
`result_ndim` is the greatest of the dimensions of the arrays in
`arrays` and the depth of the lists in `arrays` (see block docstring
for details).
"""
if depth < max_depth:
arrs = [_block(arr, max_depth, result_ndim, depth+1)
for arr in arrays]
return _concatenate(arrs, axis=-(max_depth-depth))
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
return _atleast_nd(arrays, result_ndim)
def _block_dispatcher(arrays):
# Use type(...) is list to match the behavior of np.block(), which special
# cases list specifically rather than allowing for generic iterables or
# tuple. Also, we know that list.__array_function__ will never exist.
if type(arrays) is list:
for subarrays in arrays:
yield from _block_dispatcher(subarrays)
else:
yield arrays
@array_function_dispatch(_block_dispatcher)
def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
Blocks in the innermost lists are concatenated (see `concatenate`) along
the last dimension (-1), then these are concatenated along the
second-last dimension (-2), and so on until the outermost list is reached.
Blocks can be of any dimension, but will not be broadcasted using the normal
rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
the same for all blocks. This is primarily useful for working with scalars,
and means that code like ``np.block([v, 1])`` is valid, where
``v.ndim == 1``.
When the nested list is two levels deep, this allows block matrices to be
constructed from their components.
.. versionadded:: 1.13.0
Parameters
----------
arrays : nested list of array_like or scalars (but not tuples)
If passed a single ndarray or scalar (a nested list of depth 0), this
is returned unmodified (and not copied).
Elements shapes must match along the appropriate axes (without
broadcasting), but leading 1s will be prepended to the shape as
necessary to make the dimensions match.
Returns
-------
block_array : ndarray
The array assembled from the given blocks.
The dimensionality of the output is equal to the greatest of:
* the dimensionality of all the inputs
* the depth to which the input list is nested
Raises
------
ValueError
* If list depths are mismatched - for instance, ``[[a, b], c]`` is
illegal, and should be spelt ``[[a, b], [c]]``
* If lists are empty - for instance, ``[[a, b], []]``
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
vstack : Stack arrays in sequence vertically (row wise).
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third axis).
column_stack : Stack 1-D arrays as columns into a 2-D array.
vsplit : Split an array into multiple sub-arrays vertically (row-wise).
Notes
-----
When called with only scalars, ``np.block`` is equivalent to an ndarray
call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
``np.array([[1, 2], [3, 4]])``.
This function does not enforce that the blocks lie on a fixed grid.
``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::
AAAbb
AAAbb
cccDD
But is also allowed to produce, for some ``a, b, c, d``::
AAAbb
AAAbb
cDDDD
Since concatenation happens along the last axis first, `block` is _not_
capable of producing the following directly::
AAAbb
cccbb
cccDD
Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.
Examples
--------
The most common use of this function is to build a block matrix
>>> A = np.eye(2) * 2
>>> B = np.eye(3) * 3
>>> np.block([
... [A, np.zeros((2, 3))],
... [np.ones((3, 2)), B ]
... ])
array([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[1., 1., 3., 0., 0.],
[1., 1., 0., 3., 0.],
[1., 1., 0., 0., 3.]])
With a list of depth 1, `block` can be used as `hstack`
>>> np.block([1, 2, 3]) # hstack([1, 2, 3])
array([1, 2, 3])
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.block([a, b, 10]) # hstack([a, b, 10])
array([ 1, 2, 3, 4, 5, 6, 10])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([A, B]) # hstack([A, B])
array([[1, 1, 2, 2],
[1, 1, 2, 2]])
With a list of depth 2, `block` can be used in place of `vstack`:
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.block([[a], [b]]) # vstack([a, b])
array([[1, 2, 3],
[4, 5, 6]])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([[A], [B]]) # vstack([A, B])
array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
It can also be used in places of `atleast_1d` and `atleast_2d`
>>> a = np.array(0)
>>> b = np.array([1])
>>> np.block([a]) # atleast_1d(a)
array([0])
>>> np.block([b]) # atleast_1d(b)
array([1])
>>> np.block([[a]]) # atleast_2d(a)
array([[0]])
>>> np.block([[b]]) # atleast_2d(b)
array([[1]])
"""
arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
# It was found through benchmarking that making an array of final size
# around 256x256 was faster by straight concatenation on a
# i7-7700HQ processor and dual channel ram 2400MHz.
# It didn't seem to matter heavily on the dtype used.
#
# A 2D array using repeated concatenation requires 2 copies of the array.
#
# The fastest algorithm will depend on the ratio of CPU power to memory
# speed.
# One can monitor the results of the benchmark
# https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
# to tune this parameter until a C version of the `_block_info_recursion`
# algorithm is implemented which would likely be faster than the python
# version.
if list_ndim * final_size > (2 * 512 * 512):
return _block_slicing(arrays, list_ndim, result_ndim)
else:
return _block_concatenate(arrays, list_ndim, result_ndim)
# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
"""
Returns
(`arrays`, list_ndim, result_ndim, final_size)
"""
bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
list_ndim = len(bottom_index)
if bottom_index and bottom_index[-1] is None:
raise ValueError(
'List at {} cannot be empty'.format(
_block_format_index(bottom_index)
)
)
result_ndim = max(arr_ndim, list_ndim)
return arrays, list_ndim, result_ndim, final_size
def _block_slicing(arrays, list_ndim, result_ndim):
shape, slices, arrays = _block_info_recursion(
arrays, list_ndim, result_ndim)
dtype = _nx.result_type(*[arr.dtype for arr in arrays])
# Test preferring F only in the case that all input arrays are F
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = _nx.empty(shape=shape, dtype=dtype, order=order)
# Note: In a c implementation, the function
# PyArray_CreateMultiSortedStridePerm could be used for more advanced
# guessing of the desired order.
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
def _block_concatenate(arrays, list_ndim, result_ndim):
result = _block(arrays, list_ndim, result_ndim)
if list_ndim == 0:
# Catch an edge case where _block returns a view because
# `arrays` is a single numpy array and not a list of numpy arrays.
# This might copy scalars or lists twice, but this isn't a likely
# usecase for those interested in performance
result = result.copy()
return result
|
the-stack_0_7071 | # coding: utf-8
"""
ThingsBoard REST API
ThingsBoard open-source IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3-SNAPSHOT
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from tb_rest_client.models.models_ce import EventFilter
class LifeCycleEventFilter(EventFilter):
"""
Do not edit the class manually.
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'event_type': 'str',
'server': 'str',
'event': 'str',
'status': 'str',
'error_str': 'str'
}
if hasattr(EventFilter, "swagger_types"):
swagger_types.update(EventFilter.swagger_types)
attribute_map = {
'event_type': 'eventType',
'server': 'server',
'event': 'event',
'status': 'status',
'error_str': 'errorStr'
}
if hasattr(EventFilter, "attribute_map"):
attribute_map.update(EventFilter.attribute_map)
def __init__(self, event_type=None, server=None, event=None, status=None, error_str=None, *args, **kwargs): # noqa: E501
"""LifeCycleEventFilter - a model defined in Swagger""" # noqa: E501
self._event_type = None
self._server = None
self._event = None
self._status = None
self._error_str = None
self.discriminator = None
self.event_type = event_type
if server is not None:
self.server = server
if event is not None:
self.event = event
if status is not None:
self.status = status
if error_str is not None:
self.error_str = error_str
EventFilter.__init__(self, *args, **kwargs)
@property
def event_type(self):
"""Gets the event_type of this LifeCycleEventFilter. # noqa: E501
String value representing the event type # noqa: E501
:return: The event_type of this LifeCycleEventFilter. # noqa: E501
:rtype: str
"""
return self._event_type
@event_type.setter
def event_type(self, event_type):
"""Sets the event_type of this LifeCycleEventFilter.
String value representing the event type # noqa: E501
:param event_type: The event_type of this LifeCycleEventFilter. # noqa: E501
:type: str
"""
if event_type is None:
raise ValueError("Invalid value for `event_type`, must not be `None`") # noqa: E501
allowed_values = ["DEBUG_RULE_CHAIN", "DEBUG_RULE_NODE", "ERROR", "LC_EVENT", "STATS"] # noqa: E501
if event_type not in allowed_values:
raise ValueError(
"Invalid value for `event_type` ({0}), must be one of {1}" # noqa: E501
.format(event_type, allowed_values)
)
self._event_type = event_type
@property
def server(self):
"""Gets the server of this LifeCycleEventFilter. # noqa: E501
String value representing the server name, identifier or ip address where the platform is running # noqa: E501
:return: The server of this LifeCycleEventFilter. # noqa: E501
:rtype: str
"""
return self._server
@server.setter
def server(self, server):
"""Sets the server of this LifeCycleEventFilter.
String value representing the server name, identifier or ip address where the platform is running # noqa: E501
:param server: The server of this LifeCycleEventFilter. # noqa: E501
:type: str
"""
self._server = server
@property
def event(self):
"""Gets the event of this LifeCycleEventFilter. # noqa: E501
String value representing the lifecycle event type # noqa: E501
:return: The event of this LifeCycleEventFilter. # noqa: E501
:rtype: str
"""
return self._event
@event.setter
def event(self, event):
"""Sets the event of this LifeCycleEventFilter.
String value representing the lifecycle event type # noqa: E501
:param event: The event of this LifeCycleEventFilter. # noqa: E501
:type: str
"""
self._event = event
@property
def status(self):
"""Gets the status of this LifeCycleEventFilter. # noqa: E501
String value representing status of the lifecycle event # noqa: E501
:return: The status of this LifeCycleEventFilter. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this LifeCycleEventFilter.
String value representing status of the lifecycle event # noqa: E501
:param status: The status of this LifeCycleEventFilter. # noqa: E501
:type: str
"""
allowed_values = ["Failure", "Success"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def error_str(self):
"""Gets the error_str of this LifeCycleEventFilter. # noqa: E501
The case insensitive 'contains' filter based on error message # noqa: E501
:return: The error_str of this LifeCycleEventFilter. # noqa: E501
:rtype: str
"""
return self._error_str
@error_str.setter
def error_str(self, error_str):
"""Sets the error_str of this LifeCycleEventFilter.
The case insensitive 'contains' filter based on error message # noqa: E501
:param error_str: The error_str of this LifeCycleEventFilter. # noqa: E501
:type: str
"""
self._error_str = error_str
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(LifeCycleEventFilter, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LifeCycleEventFilter):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
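# --- Added illustrative usage sketch (not part of the generated client) ---
# A minimal example of constructing a filter and serialising it, assuming the
# tb_rest_client package is installed and exposes this class as generated; the
# "STARTED" event name is an assumption, since `event` is a free-form string:
#
#   flt = LifeCycleEventFilter(event_type="LC_EVENT", event="STARTED",
#                              status="Success")
#   payload = flt.to_dict()   # plain dict, ready to serialise as a JSON body
#   print(flt)                # pretty-printed via to_str()/__repr__()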
|
the-stack_0_7073 | """runpy.py - locating and running Python code using the module namespace
Provides support for locating and running Python scripts using the Python
module namespace instead of the native filesystem.
This allows Python code to play nicely with non-filesystem based PEP 302
importers when locating support scripts as well as when importing modules.
"""
# Written by Nick Coghlan <ncoghlan at gmail.com>
# to implement PEP 338 (Executing Modules as Scripts)
import sys
import importlib.machinery # importlib first so we can test #15386 via -m
import importlib.util
import io
import types
from pkgutil import read_code, get_importer
__all__ = [
"run_module", "run_path",
]
class _TempModule(object):
"""Temporarily replace a module in sys.modules with an empty namespace"""
def __init__(self, mod_name):
self.mod_name = mod_name
self.module = types.ModuleType(mod_name)
self._saved_module = []
def __enter__(self):
mod_name = self.mod_name
try:
self._saved_module.append(sys.modules[mod_name])
except KeyError:
pass
sys.modules[mod_name] = self.module
return self
def __exit__(self, *args):
if self._saved_module:
sys.modules[self.mod_name] = self._saved_module[0]
else:
del sys.modules[self.mod_name]
self._saved_module = []
class _ModifiedArgv0(object):
def __init__(self, value):
self.value = value
self._saved_value = self._sentinel = object()
def __enter__(self):
if self._saved_value is not self._sentinel:
raise RuntimeError("Already preserving saved value")
self._saved_value = sys.argv[0]
sys.argv[0] = self.value
def __exit__(self, *args):
self.value = self._sentinel
sys.argv[0] = self._saved_value
# TODO: Replace these helpers with importlib._bootstrap_external functions.
def _run_code(code, run_globals, init_globals=None,
mod_name=None, mod_spec=None,
pkg_name=None, script_name=None):
"""Helper to run code in nominated namespace"""
if init_globals is not None:
run_globals.update(init_globals)
if mod_spec is None:
loader = None
fname = script_name
cached = None
else:
loader = mod_spec.loader
fname = mod_spec.origin
cached = mod_spec.cached
if pkg_name is None:
pkg_name = mod_spec.parent
run_globals.update(__name__ = mod_name,
__file__ = fname,
__cached__ = cached,
__doc__ = None,
__loader__ = loader,
__package__ = pkg_name,
__spec__ = mod_spec)
exec(code, run_globals)
return run_globals
def _run_module_code(code, init_globals=None,
mod_name=None, mod_spec=None,
pkg_name=None, script_name=None):
"""Helper to run code in new namespace with sys modified"""
fname = script_name if mod_spec is None else mod_spec.origin
with _TempModule(mod_name) as temp_module, _ModifiedArgv0(fname):
mod_globals = temp_module.module.__dict__
_run_code(code, mod_globals, init_globals,
mod_name, mod_spec, pkg_name, script_name)
# Copy the globals of the temporary module, as they
# may be cleared when the temporary module goes away
return mod_globals.copy()
# Helper to get the full name, spec and code for a module
def _get_module_details(mod_name, error=ImportError):
if mod_name.startswith("."):
raise error("Relative module names not supported")
pkg_name, _, _ = mod_name.rpartition(".")
if pkg_name:
# Try importing the parent to avoid catching initialization errors
try:
__import__(pkg_name)
except ImportError as e:
# If the parent or higher ancestor package is missing, let the
# error be raised by find_spec() below and then be caught. But do
# not allow other errors to be caught.
if e.name is None or (e.name != pkg_name and
not pkg_name.startswith(e.name + ".")):
raise
# Warn if the module has already been imported under its normal name
existing = sys.modules.get(mod_name)
if existing is not None and not hasattr(existing, "__path__"):
from warnings import warn
msg = "{mod_name!r} found in sys.modules after import of " \
"package {pkg_name!r}, but prior to execution of " \
"{mod_name!r}; this may result in unpredictable " \
"behaviour".format(mod_name=mod_name, pkg_name=pkg_name)
warn(RuntimeWarning(msg))
try:
spec = importlib.util.find_spec(mod_name)
except (ImportError, AttributeError, TypeError, ValueError) as ex:
# This hack fixes an impedance mismatch between pkgutil and
# importlib, where the latter raises other errors for cases where
# pkgutil previously raised ImportError
msg = "Error while finding module specification for {!r} ({}: {})"
raise error(msg.format(mod_name, type(ex).__name__, ex)) from ex
if spec is None:
raise error("No module named %s" % mod_name)
if spec.submodule_search_locations is not None:
if mod_name == "__main__" or mod_name.endswith(".__main__"):
raise error("Cannot use package as __main__ module")
try:
pkg_main_name = mod_name + ".__main__"
return _get_module_details(pkg_main_name, error)
except error as e:
if mod_name not in sys.modules:
raise # No module loaded; being a package is irrelevant
raise error(("%s; %r is a package and cannot " +
"be directly executed") %(e, mod_name))
loader = spec.loader
if loader is None:
raise error("%r is a namespace package and cannot be executed"
% mod_name)
try:
code = loader.get_code(mod_name)
except ImportError as e:
raise error(format(e)) from e
if code is None:
raise error("No code object available for %s" % mod_name)
return mod_name, spec, code
class _Error(Exception):
"""Error that _run_module_as_main() should report without a traceback"""
# XXX ncoghlan: Should this be documented and made public?
# (Current thoughts: don't repeat the mistake that lead to its
# creation when run_module() no longer met the needs of
# mainmodule.c, but couldn't be changed because it was public)
def _run_module_as_main(mod_name, alter_argv=True):
"""Runs the designated module in the __main__ namespace
Note that the executed module will have full access to the
__main__ namespace. If this is not desirable, the run_module()
function should be used to run the module code in a fresh namespace.
At the very least, these variables in __main__ will be overwritten:
__name__
__file__
__cached__
__loader__
__package__
"""
try:
if alter_argv or mod_name != "__main__": # i.e. -m switch
mod_name, mod_spec, code = _get_module_details(mod_name, _Error)
else: # i.e. directory or zipfile execution
mod_name, mod_spec, code = _get_main_module_details(_Error)
except _Error as exc:
msg = "%s: %s" % (sys.executable, exc)
sys.exit(msg)
main_globals = sys.modules["__main__"].__dict__
if alter_argv:
sys.argv[0] = mod_spec.origin
return _run_code(code, main_globals, None,
"__main__", mod_spec)
def run_module(mod_name, init_globals=None,
run_name=None, alter_sys=False):
"""Execute a module's code without importing it
Returns the resulting top level namespace dictionary
"""
mod_name, mod_spec, code = _get_module_details(mod_name)
if run_name is None:
run_name = mod_name
if alter_sys:
return _run_module_code(code, init_globals, run_name, mod_spec)
else:
# Leave the sys module alone
return _run_code(code, {}, init_globals, run_name, mod_spec)
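# --- Added illustrative sketch (not part of the stdlib source) ---
# Executing a module by name and inspecting its resulting namespace:
#
#   import runpy
#   ns = runpy.run_module("platform")      # runs platform's module-level code
#   print(sorted(ns)[:5])                  # top-level names it defined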
def _get_main_module_details(error=ImportError):
# Helper that gives a nicer error message when attempting to
# execute a zipfile or directory by invoking __main__.py
# Also moves the standard __main__ out of the way so that the
# preexisting __loader__ entry doesn't cause issues
main_name = "__main__"
saved_main = sys.modules[main_name]
del sys.modules[main_name]
try:
return _get_module_details(main_name)
except ImportError as exc:
if main_name in str(exc):
raise error("can't find %r module in %r" %
(main_name, sys.path[0])) from exc
raise
finally:
sys.modules[main_name] = saved_main
def _get_code_from_file(run_name, fname):
# Check for a compiled file first
with io.open_code(fname) as f:
code = read_code(f)
if code is None:
# That didn't work, so try it as normal source code
with io.open_code(fname) as f:
code = compile(f.read(), fname, 'exec')
return code, fname
def run_path(path_name, init_globals=None, run_name=None):
"""Execute code located at the specified filesystem location
Returns the resulting top level namespace dictionary
The file path may refer directly to a Python script (i.e.
one that could be directly executed with execfile) or else
it may refer to a zipfile or directory containing a top
level __main__.py script.
"""
if run_name is None:
run_name = "<run_path>"
pkg_name = run_name.rpartition(".")[0]
importer = get_importer(path_name)
# Trying to avoid importing imp so as to not consume the deprecation warning.
is_NullImporter = False
if type(importer).__module__ == 'imp':
if type(importer).__name__ == 'NullImporter':
is_NullImporter = True
if isinstance(importer, type(None)) or is_NullImporter:
# Not a valid sys.path entry, so run the code directly
# execfile() doesn't help as we want to allow compiled files
code, fname = _get_code_from_file(run_name, path_name)
return _run_module_code(code, init_globals, run_name,
pkg_name=pkg_name, script_name=fname)
else:
# Finder is defined for path, so add it to
# the start of sys.path
sys.path.insert(0, path_name)
try:
# Here's where things are a little different from the run_module
# case. There, we only had to replace the module in sys while the
# code was running and doing so was somewhat optional. Here, we
# have no choice and we have to remove it even while we read the
# code. If we don't do this, a __loader__ attribute in the
# existing __main__ module may prevent location of the new module.
mod_name, mod_spec, code = _get_main_module_details()
with _TempModule(run_name) as temp_module, \
_ModifiedArgv0(path_name):
mod_globals = temp_module.module.__dict__
return _run_code(code, mod_globals, init_globals,
run_name, mod_spec, pkg_name).copy()
finally:
try:
sys.path.remove(path_name)
except ValueError:
pass
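# --- Added illustrative sketch (not part of the stdlib source) ---
# Executing a standalone script file and reading values it defined:
#
#   import runpy, tempfile
#   with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
#       f.write("x = 40 + 2\n")
#   ns = runpy.run_path(f.name)
#   assert ns["x"] == 42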
if __name__ == "__main__":
# Run the module specified as the next command line argument
if len(sys.argv) < 2:
print("No module specified for execution", file=sys.stderr)
else:
del sys.argv[0] # Make the requested module sys.argv[0]
_run_module_as_main(sys.argv[0])
|
the-stack_0_7074 | import os
import io
import sys
import csv
import random
import hashlib
import pandas as pd
import numpy as np
import tensorflow as tf
from PIL import Image
import xml.etree.ElementTree as ET
from matplotlib import pyplot as plt
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
sys.path.append("/home/tensorflow/models/research/object_detection/")
from object_detection.utils import ops as utils_ops
from object_detection.utils import dataset_util
def save_img_as_jpg(input_record, path_to_test_img_folder):
"""
Used to make sure that generating record files from images/annotations worked
"""
record_iterator = tf.python_io.tf_record_iterator(input_record)
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
fname = example.features.feature["image/filename"].bytes_list.value[0].decode("utf-8")
image = example.features.feature["image/encoded"].bytes_list.value[0]
decoded_png = tf.image.decode_image(image, channels=3).numpy()
Image.fromarray(decoded_png).save(path_to_test_img_folder + fname)
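# --- Added usage note ---
# The folder argument is concatenated directly with each file name, so it must
# end with a path separator; the record path below is an assumption:
#   save_img_as_jpg("records/train_seeds.record", "/tmp/record_check/")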
# High Level Functions
def xml_path_to_filelist(xml_path):
filename_list = tf.io.match_filenames_once(xml_path)
init = (tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
sess = tf.compat.v1.Session()
sess.run(init)
files_list = sess.run(filename_list)
files_list = sorted(files_list)
return files_list
def split_train_val_test_praefixes_rdm(unique_praefix, SEED, TRAIN_VAL_RATIO ,TEST_RATIO):
unique_train_val_praefix, unique_test_praefix = split_praefix(unique_praefix, SEED, TEST_RATIO)
unique_train_praefix, unique_val_praefix = split_praefix(unique_train_val_praefix, SEED, TRAIN_VAL_RATIO)
return unique_train_praefix, unique_val_praefix, unique_test_praefix
def filelists_from_praefixes(unique_train_praefix,unique_val_praefix, unique_test_praefix,files_list):
train_list, _ = fileList_from_praefix(unique_train_praefix, files_list)
eval_list, _ = fileList_from_praefix(unique_val_praefix, files_list)
test_list, _ = fileList_from_praefix(unique_test_praefix, files_list)
return train_list, eval_list, test_list
def write_records_from_filelists(train_list, eval_list, test_list, REC_NAME, img_path, SEED, unique_test_praefix, output_path):
print(f"Writing {len(train_list)} Images to train_{REC_NAME}.record")
train_arr = write_list_to_tf(train_list, "train_" + REC_NAME, img_path, SEED, output_path)
print(f"Writing {len(eval_list)} Images to val_{REC_NAME}.record")
eval_arr = write_list_to_tf(eval_list, "val_" + REC_NAME, img_path, SEED, output_path)
print(f"Writing {len(test_list)} Images to test_{REC_NAME}.record")
test_arr = write_list_to_tf(test_list, "test_" + REC_NAME, img_path, SEED, output_path)
for test_pd in unique_test_praefix:
test_pd_list,_ = fileList_from_praefix([test_pd], test_list)
print(f"Writing {len(test_pd_list)} Images to PD_{test_pd}.record")
#write_PD_to_tf(test_pd_list, "PD_" + test_pd, img_path, SEED, output_path)
write_list_to_tf(test_pd_list, "bPD_" + test_pd, img_path, SEED, output_path + "PD/", bPD=True)
return train_arr, eval_arr, test_arr
def write_summary(unique_train_praefix, unique_val_praefix, unique_test_praefix, train_list, eval_list, test_list, train_arr, eval_arr, test_arr, output_path, REC_NAME):
print("Writing Summary")
un_train, count_im_train, count_el_train, ratio_im_train, ratio_el_train = get_ratios(train_arr)
un_val, count_im_eval, count_el_eval, ratio_im_eval, ratio_el_eval = get_ratios(eval_arr)
un_test, count_im_test, count_el_test, ratio_im_test, ratio_el_test = get_ratios(test_arr)
with open(output_path + 'summary_{}.txt'.format(REC_NAME), mode='w') as csv_file:
csv_reader = csv.writer(csv_file, delimiter=',')
csv_reader.writerow(["Summary for the generated record files"])
csv_reader.writerow(["", "# Petri dishes", "cls_name", "# GT"])
csv_reader.writerow(["TRAIN", len(train_list), un_train, count_im_train, count_el_train])
csv_reader.writerow(["VAL", len(eval_list), un_val, count_im_eval, count_el_eval])
csv_reader.writerow(["TEST", len(test_list), un_test, count_im_test, count_el_test])
csv_reader.writerow(["TRAIN", ratio_im_train, ratio_el_train])
csv_reader.writerow(["VAL", ratio_im_eval, ratio_el_eval])
csv_reader.writerow(["TEST", ratio_im_test, ratio_el_test])
csv_reader.writerow(["TRAIN_PREFIX", unique_train_praefix])
csv_reader.writerow(["VAL_PREFIX", unique_val_praefix])
csv_reader.writerow(["TEST_PREFIX", unique_test_praefix])
# Low Level Functions
def split_praefix(unique_train_val_praefix, SEED, RATIO):
random.seed(SEED)
random.shuffle(unique_train_val_praefix)
b = int(len(unique_train_val_praefix) * RATIO)
unique_train_praefix = unique_train_val_praefix[:b]
unique_val_praefix = unique_train_val_praefix[b:len(unique_train_val_praefix)]
return unique_train_praefix, unique_val_praefix
def fileList_from_praefix(unique_train_praefix, files_list):
train_list = list()
for _, val in enumerate(unique_train_praefix):
# ADDED + "_" because "zm2_1" in str(s) will also mean "zm2_11" and "zm2_12"
matching = [s for s in files_list if val + "_" in str(s)]
train_list.append(matching)
train_list_flat = list()
for sublist in train_list:
for item in sublist:
train_list_flat.append(item)
return train_list_flat, train_list
def get_praefix_from_fileList(files_list):
praefix_files = list()
for _, val in enumerate(files_list):
praefix_files.append(str(val).split("/")[-1].split("_")
[0] + "_" + str(val).split("/")[-1].split("_")[1])
unique_praefix = np.unique(praefix_files)
return unique_praefix
def create_example(xml_file, img_path):
# process the xml file
tree = ET.parse(xml_file)
root = tree.getroot()
image_name = root.find('filename').text
file_name = image_name.encode('utf8')
size = root.find('size')
width = int(size[0].text)
height = int(size[1].text)
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
classes_text_str = []
for member in root.findall('object'):
classes_text.append(member[0].text.encode('utf8'))
classes_text_str.append(member[0].text)
for bnd in member.findall("bndbox"):
xmin.append(float(bnd[0].text) / width)
ymin.append(float(bnd[1].text) / height)
xmax.append(float(bnd[2].text) / width)
ymax.append(float(bnd[3].text) / height)
classes.append(class_text_to_int(member[0].text))
# read corresponding image
full_path = os.path.join(img_path, '{}'.format(image_name)) # provide the path of images directory
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
# create TFRecord Example
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(file_name),
'image/source_id': dataset_util.bytes_feature(file_name),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes)
}))
return classes_text_str, example
def write_list_to_tf(train_list_flat, filename, img_path, seed, output_path, bPD=False):
train_arr = []
writer_train = tf.io.TFRecordWriter('{}{}.record'.format(output_path, filename))
if bPD==False:
random.seed(seed)
random.shuffle(train_list_flat) # randomizes the list --> random order on how images are saved in .record
for _, train_file in enumerate(train_list_flat):
train_classes, example = create_example(train_file, img_path)
writer_train.write(example.SerializeToString())
train_arr.append(train_classes)
writer_train.close()
return train_arr
def write_PD_to_tf(train_file, filename, img_path, seed,output_path): #TODO: CHECK IF LOOP NECESSARY
train_arr = []
writer_train = tf.io.TFRecordWriter('{}{}.record'.format(output_path, filename))
train_classes, example = create_example(train_file, img_path)
writer_train.write(example.SerializeToString())
train_arr.append(train_classes)
writer_train.close()
return train_arr
def class_text_to_int(row_label):
if "_im" in row_label:
return 1
if "_el" in row_label:
return 2
def get_ratios(train_arr):
flat_list = []
for sublist in train_arr:
for item in sublist:
flat_list.append(item)
    # if there is no zm_el in the dataset, then train_counts[1] doesn't exist
unique, train_counts = np.unique(flat_list, return_counts=True)
if train_counts.shape[0] == 1:
train_sum = train_counts[0]
train_zmim_ratio = 1
train_zmel_ratio = 0
return unique, train_counts[0], 0, train_zmim_ratio, train_zmel_ratio
train_sum = train_counts[0] + train_counts[1]
train_zmim_ratio = round(float(train_counts[0] / train_sum), 3)
train_zmel_ratio = round(float(train_counts[1] / train_sum), 3)
return unique, train_counts[0], train_counts[1], train_zmim_ratio, train_zmel_ratio
## Functions for "predict_image"
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.compat.v1.Session() as sess:
# Get handles to input and output tensors
ops = tf.compat.v1.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
def detect_seeds_in_image(image_path, category_index, detection_graph, PATH_TO_TEST_IMAGES_OUTDIR):
image = Image.open(image_path)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
#image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
output_dict = run_inference_for_single_image(image_np, detection_graph)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=2)
plt.imsave(PATH_TO_TEST_IMAGES_OUTDIR + image_path.split("/")[-1].split(".")[0] + "_detection.jpg", image_np) |
the-stack_0_7075 | from crfnet.utils.transform import random_transform_generator
from crfnet.utils.anchor_parameters import AnchorParameters
from crfnet.data_processing.generator.splits.nuscenes_splits import Scenes
from crfnet.utils.anchor_calc import anchor_targets_bbox
from crfnet.utils.anchor import guess_shapes
def create_generators(cfg, backbone):
""" Create generators for training and validation and test data.
:param cfg: <Configuration> Config class with config parameters.
:param backbone: <Backbone> Backbone class e.g. VGGBackbone
:return train_generator: <Generator> The generator for creating training data.
:return validation_generator: <Generator> The generator for creating validation data.
TODO: @Max make the create generators consistently return train, val and test
"""
if cfg.anchor_params:
if 'small' in cfg.anchor_params:
anchor_params = AnchorParameters.small
else:
anchor_params = None
else:
anchor_params = None
common_args = {
'batch_size': cfg.batchsize,
'config': None,
'image_min_side': cfg.image_size[0],
'image_max_side': cfg.image_size[1],
'filter_annotations_enabled': False,
'preprocess_image': backbone.preprocess_image,
'normalize_radar': cfg.normalize_radar,
'camera_dropout': cfg.dropout_image,
'radar_dropout': cfg.dropout_radar,
'channels': cfg.channels,
'distance': cfg.distance_detection,
'sample_selection': cfg.sample_selection,
'only_radar_annotated': cfg.only_radar_annotated,
'n_sweeps': cfg.n_sweeps,
'noise_filter': cfg.noise_filter_cfg,
'noise_filter_threshold': cfg.noise_filter_threshold,
'noisy_image_method': cfg.noisy_image_method,
'noise_factor': cfg.noise_factor,
'perfect_noise_filter': cfg.noise_filter_perfect,
'radar_projection_height': cfg.radar_projection_height,
'noise_category_selection': None if cfg.class_weights is None else cfg.class_weights.keys(),
'inference': cfg.inference,
'anchor_params': anchor_params,
}
# create random transform generator for augmenting training data
if cfg.random_transform:
transform_generator = random_transform_generator(
min_rotation=-0.1,
max_rotation=0.1,
min_translation=(-0.1, -0.1),
max_translation=(0.1, 0.1),
min_shear=-0.1,
max_shear=0.1,
min_scaling=(0.9, 0.9),
max_scaling=(1.1, 1.1),
flip_x_chance=0.5,
flip_y_chance=0.0,
)
else:
transform_generator = random_transform_generator(flip_x_chance=0.5)
category_mapping = cfg.category_mapping
if 'nuscenes' in cfg.data_set:
# import here to prevent unnecessary dependency on nuscenes
from crfnet.data_processing.generator.nuscenes_generator import NuscenesGenerator
from nuscenes.nuscenes import NuScenes
if 'mini' in cfg.data_set:
nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)
else:
try:
nusc = NuScenes(version='v1.0-trainval', dataroot=cfg.data_path, verbose=True)
except ValueError:
nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)
if 'debug' in cfg.scene_selection or 'mini' in cfg.data_set:
scenes = Scenes.debug
else:
scenes = Scenes.default
train_generator = NuscenesGenerator(
nusc,
scene_indices=scenes.train,
transform_generator=transform_generator,
category_mapping=category_mapping,
compute_anchor_targets=anchor_targets_bbox,
compute_shapes=guess_shapes,
shuffle_groups=True,
group_method='random',
**common_args
)
# no dropouts in validation
common_args['camera_dropout'] = 0
common_args['radar_dropout'] = 0
validation_generator = NuscenesGenerator(
nusc,
scene_indices=scenes.val,
category_mapping=category_mapping,
compute_anchor_targets=anchor_targets_bbox,
compute_shapes=guess_shapes,
**common_args
)
test_generator = NuscenesGenerator(
nusc,
scene_indices=scenes.test,
category_mapping=category_mapping,
compute_anchor_targets=anchor_targets_bbox,
compute_shapes=guess_shapes,
**common_args
)
test_night_generator = NuscenesGenerator(
nusc,
scene_indices=scenes.test_night,
category_mapping=category_mapping,
compute_anchor_targets=anchor_targets_bbox,
compute_shapes=guess_shapes,
**common_args
)
test_rain_generator = NuscenesGenerator(
nusc,
scene_indices=scenes.test_rain,
category_mapping=category_mapping,
compute_anchor_targets=anchor_targets_bbox,
compute_shapes=guess_shapes,
**common_args
)
return train_generator, validation_generator, test_generator, test_night_generator, test_rain_generator
else:
raise ValueError('Invalid data type received: {}'.format(cfg.data_set))
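# --- Added note ---
# For the 'nuscenes' branch above, the call returns a 5-tuple of generators
# (train, val, test, test_night, test_rain). A hedged usage sketch, assuming
# `cfg` is an already-parsed Configuration and `backbone` provides the
# `preprocess_image` method expected by the common_args dict:
#
#   train_gen, val_gen, test_gen, night_gen, rain_gen = create_generators(cfg, backbone)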
|
the-stack_0_7076 | # DENG: dynamic engine - powerful 3D game engine
# licence: Apache, see LICENCE file
# file: BackendChooser.py - Embeddable Python script to select a correct backend to use for required program
# author: Karl-Mihkel Ott
import tkinter as tk
import tkinter.messagebox as msgbox
from enum import IntEnum
class ApiType(IntEnum):
Vulkan = 1
OpenGL = 2
DirectX = 3
Unknown = 4
# Default value
api = ApiType.Unknown
win = tk.Tk()
# Button handler class
class ApiButtonHandlers:
pixel_virtual: tk.PhotoImage
content: tk.Frame
opengl: tk.Button
vulkan: tk.Button
directx: tk.Button
@staticmethod
def SelectVulkanBackend():
global api, win
api = ApiType.Vulkan
win.destroy()
@staticmethod
def SelectOpenGLBackend():
global api, win
api = ApiType.OpenGL
win.destroy()
@staticmethod
def SelectDirectXBackend():
msgbox.showerror("Error", "DirectX backend is not supported")
def __init__(self, win: tk.Tk):
self.pixel_virtual = tk.PhotoImage(width=1, height=1)
# OpenGL button
self.opengl = tk.Button(
win,
text="OpenGL",
image=self.pixel_virtual,
width=50,
height=20,
command=ApiButtonHandlers.SelectOpenGLBackend,
compound='c'
)
self.opengl.grid(column=0, row=1)
# Vulkan button
self.vulkan = tk.Button(
win,
text="Vulkan",
image=self.pixel_virtual,
width=50,
height=20,
command=ApiButtonHandlers.SelectVulkanBackend,
compound='c'
)
self.vulkan.grid(column=0, row=1)
# DirectX button
self.directx = tk.Button(
win,
text="DirectX",
image=self.pixel_virtual,
width=50,
height=20,
command=ApiButtonHandlers.SelectDirectXBackend,
compound='c'
)
self.directx.grid(column=0, row=1)
def Prompt():
global api, win
win.title("Select renderer API")
win.geometry('350x100')
win.resizable(False, False)
label = tk.Label(win, text="Select renderer API to use for DENG application")
btn_handler = ApiButtonHandlers(win)
# POSITIONS
label.grid(row=0, column=0, columnspan=3, padx=20, pady=10)
btn_handler.opengl.grid(row=2)
btn_handler.vulkan.grid(row=2, column=1)
btn_handler.directx.grid(row=2, column=2)
win.mainloop()
return api
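# --- Added illustrative usage sketch ---
#   api = Prompt()                    # blocks until a backend button is pressed
#   if api == ApiType.Vulkan:
#       print("Launching DENG application with the Vulkan renderer")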
|
the-stack_0_7080 | """Script to download the entire Box directory structure.
Skips anything that has been downloaded before.
Syncs to LOCAL_BOX_DIR
To obtain a developer token, navigate to
https://salesforcecorp.app.box.com/developers/console/app/1366340/configuration
and select "Generate Developer Token", then copy-paste it below.
Example Usage:
python download_box_data.py
"""
import box_auth
from box_auth import BoxNavigator
DEVELOPER_TOKEN_60MINS="uu4OyqV78GydCvVLAvzZvXh1kpkHeGnL"
LOCAL_BOX_DIR="/export/medical_ai/ucsf/box_data"
if __name__ == "__main__":
bn = BoxNavigator(token=DEVELOPER_TOKEN_60MINS)
bn.locally_recreate_filesystem_directory_structure(root_path=LOCAL_BOX_DIR)
bn.maybe_download_filesystem(root_path=LOCAL_BOX_DIR)
|
the-stack_0_7081 | import os
import sys
import argparse
import yaml
import time
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torchlight
from torchlight import str2bool
from torchlight import DictAction
from torchlight import import_class
from .processor import Processor
from .data_tools import *
from copy import deepcopy
from torch.distributions.uniform import Uniform
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv1d') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.fill_(0)
elif classname.find('Conv2d') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class REC_Processor(Processor):
def load_model(self):
self.model = self.io.load_model(self.arg.model, **(self.arg.model_args))
self.model.apply(weights_init)
V, W, U = 26, 10, 5
off_diag_joint, off_diag_part, off_diag_body = np.ones([V, V])-np.eye(V, V), np.ones([W, W])-np.eye(W, W), np.ones([U, U])-np.eye(U, U)
self.relrec_joint = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_joint)[1]), dtype=np.float32)).to(self.dev)
self.relsend_joint = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_joint)[0]), dtype=np.float32)).to(self.dev)
self.relrec_part = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_part)[1]), dtype=np.float32)).to(self.dev)
self.relsend_part = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_part)[0]), dtype=np.float32)).to(self.dev)
self.relrec_body = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_body)[1]), dtype=np.float32)).to(self.dev)
self.relsend_body = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_body)[0]), dtype=np.float32)).to(self.dev)
self.lower_body_joints = [1,2,3]# [1,2,3,4,5]# [1,2,3]#[0, 1, 2, 3, 4, 5, 6, 7]
self.dismodel_args = deepcopy(self.arg.model_args)
d_mode =3
if d_mode == 2:
self.dismodel_args.pop('n_in_dec', None)
self.dismodel_args.pop('n_hid_dec', None)
self.dismodel_args.pop('n_hid_enc', None)
self.dismodel_args['edge_weighting'] =True
self.dismodel_args['fusion_layer'] = 0
self.discriminator = self.io.load_model('net.model.Discriminatorv2', **(self.dismodel_args))
else:
self.dismodel_args.pop('n_in_enc', None)
self.dismodel_args.pop('n_hid_enc', None)
self.dismodel_args.pop('fusion_layer', None)
self.dismodel_args.pop('cross_w', None)
self.dismodel_args.pop('graph_args_p', None)
self.dismodel_args.pop('graph_args_b', None)
self.discriminator = self.io.load_model('net.model.Discriminatorv3', **(self.dismodel_args))
# self.dismodel_args['edge_weighting'] =True
# self.dismodel_args['fusion_layer'] = 0
self.discriminator.apply(weights_init)
self.discriminator.cuda()
self.criterion = nn.BCEWithLogitsLoss()# nn.BCELoss()
self.visual_sigmoid = nn.Sigmoid()
def load_optimizer(self):
if self.arg.optimizer == 'SGD':
self.optimizer = optim.SGD(params=self.model.parameters(),
lr=self.arg.base_lr,
momentum=0.9,
nesterov=self.arg.nesterov,
weight_decay=self.arg.weight_decay)
elif self.arg.optimizer == 'Adam':
self.optimizer = optim.Adam(params=self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay)
self.netD_optimizer =optim.Adam(params=self.discriminator.parameters(),
lr=0.000004,
weight_decay=self.arg.weight_decay)
def adjust_lr(self):
if self.arg.optimizer == 'SGD' and self.arg.step:
lr = self.arg.base_lr * (0.5**np.sum(self.meta_info['iter']>= np.array(self.arg.step)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
self.lr = lr
elif self.arg.optimizer == 'Adam' and self.arg.step:
lr = self.arg.base_lr * (0.98**np.sum(self.meta_info['iter']>= np.array(self.arg.step)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
self.lr = lr
for param_group in self.netD_optimizer.param_groups:
param_group['lr'] = self.lr
else:
raise ValueError('No such Optimizer')
def loss_l2(self, pred, target, mask=None):
dist = torch.square(pred-target).mean(-1).mean(1).mean(0)
if mask is not None:
dist = dist * mask
loss = torch.mean(dist)
return loss
def vae_loss_function(self, pred, target, mean_val, log_var):
assert pred.shape == target.shape
reconstruction_loss = self.loss_l2(pred, target)
mean_val = mean_val.mean(-1).mean(1).mean(0)
log_var = log_var.mean(-1).mean(1).mean(0)
KLD = - 0.5 * torch.sum(1+ log_var - mean_val.pow(2) - log_var.exp())
return reconstruction_loss + 0.1*KLD
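    # Added note: the KLD term above is the closed-form divergence
    # KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2),
    # weighted by 0.1 against the L2 reconstruction term.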
'''
def build_masking_matrix_add_noise(self, unmasked_matrix, joint_indices):
r"""
Build masking matrix with same shape as `unmasked_matrix`
"""
M = np.zeros_like(unmasked_matrix)
M = M.reshape(M.shape[0], M.shape[1], -1, 3) # batch size, T, J, 3
for i in range(M.shape[0]):
for j in range(M.shape[1]):
for k in range(M.shape[2]):
if k in joint_indices:
M[i, j, k, :] = np.random.normal(0,0.5,1)
#M[:, :, joint_indices, :] = np.random.normal(0,0.5,3)
M = M.reshape(unmasked_matrix.shape)
return M
'''
def build_masking_matrix(self, unmasked_matrix, joint_indices):
r"""
Build masking matrix with same shape as `unmasked_matrix`
"""
M = np.ones_like(unmasked_matrix)
M = M.reshape(M.shape[0], M.shape[1], -1, 3) # batch size, T, J, 3
M[:, :, joint_indices, :] = np.zeros((3,))
M = M.reshape(unmasked_matrix.shape)
return M
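    # Added illustrative sketch: for a (batch, frames, 26*3) pose tensor,
    # masking joints [1, 2, 3] zeroes out feature columns 3..11 of every frame:
    #   dummy = np.ones((2, 4, 78), dtype=np.float32)
    #   M = self.build_masking_matrix(dummy, [1, 2, 3])
    #   # M.shape == (2, 4, 78); M[..., 3:12] == 0, all other entries == 1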
def build_noise_matrix(self, pose_matrix, masking_matrix):
r"""
Build noise matrix with same shape as `pose_matrix`. We replace
each masked joint angle by an IID Gaussian noise signal following
distribution N(0, 0.5)
:param pose_matrix: matrix of poses
:param masking_matrix: binary masking matrix for `pose_matrix`
Return:
Noise matrix with same shape as `pose_matrix`
"""
M = np.random.normal(loc=0, scale=0.5, size=pose_matrix.shape)
inverted_mask_matrix = (~masking_matrix.astype(np.bool)).astype(np.float32)
M = np.multiply(M, inverted_mask_matrix)
return M
def build_lower_body_masking_matrices(self, lower_body_joints, encoder_inputs, decoder_inputs):
# build encoder input mask
M_enc_in = self.build_masking_matrix(encoder_inputs, lower_body_joints)
# build decoder input mask
M_dec_in = self.build_masking_matrix(decoder_inputs, lower_body_joints)
# build decoder output / target mask
#M_dec_out = self.build_masking_matrix(targets, lower_body_joints)
return M_enc_in, M_dec_in
def build_random_masking_matrices(self, encoder_inputs, decoder_inputs, seed=None, p=0.8):
# set seed
if seed is not None:
np.random.seed(seed)
# build encoder input mask
M_enc_in = np.random.binomial(n=1, p=p, size=encoder_inputs.shape).astype(np.float32)
# build decoder input mask
M_dec_in = np.random.binomial(n=1, p=p, size=decoder_inputs.shape).astype(np.float32)
return M_enc_in, M_dec_in
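    # Added note: with the default p=0.8, each mask entry is kept with
    # probability 0.8, so roughly 20% of the encoder/decoder input values are
    # zeroed out (and later replaced by noise in train_generator).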
def train(self, masking_type="lower-body"):
if self.meta_info['iter'] % 2 == 0:
with torch.no_grad():
mean, var, gan_decoder_inputs, \
gan_targets, gan_decoder_inputs_previous, \
gan_decoder_inputs_previous2, \
gan_disc_encoder_inputs = self.train_generator(
mode='discriminator', masking_type=masking_type)
self.train_decoderv3(
mean,
var,
gan_decoder_inputs,
gan_targets,
gan_decoder_inputs_previous,
gan_decoder_inputs_previous2,
gan_disc_encoder_inputs)
else:
self.train_generator(mode='generator', masking_type=masking_type)
def train_decoder(self, mean, var, gan_decoder_inputs, gan_targets, gan_decoder_inputs_previous, gan_decoder_inputs_previous2):
with torch.no_grad():
dec_mean = mean.clone()
dec_var = var.clone()
dec_var = torch.exp(0.5 * dec_var) # TBD
epsilon = torch.randn_like(dec_var)
z = dec_mean + dec_var * epsilon
dis_pred = self.model.generate_from_decoder(z, gan_decoder_inputs, gan_decoder_inputs_previous, \
gan_decoder_inputs_previous2,self.arg.target_seq_len) #[32, 26, 10, 3]
dis_pred = dis_pred.detach()
dis_pred = dis_pred.requires_grad_()
dis_pred = dis_pred.permute(0, 2, 1, 3).contiguous().view(32, 10, -1)
dis_o = self.discriminator(dis_pred, self.relrec_joint,
self.relsend_joint,
self.relrec_part,
self.relsend_part,
self.relrec_body,
self.relsend_body,
self.arg.lamda)# .view(-1)
# dis_o = dis_o.detach()
# dis_o =dis_o.requires_grad_()
self.netD_optimizer.zero_grad()
N = dis_o.size()[0]
# label = torch.full((N,), 0.0, dtype=torch.float, device='cuda:0')
# label = Uniform(0.0, 0.1).sample((N,1)).cuda()
fake_labels = torch.FloatTensor(1).fill_(0.0)
fake_labels = fake_labels.requires_grad_(False)
fake_labels = fake_labels.expand_as(dis_o).cuda()
# print(fake_labels.size())
# print(dis_o.size())
errD_fake= self.criterion(dis_o, fake_labels)
# Calculate gradients for D in backward pass
# errD_fake.backward()
D_x_fake = dis_o.mean().item() # to display
# for the real
targets = gan_targets#.permute(0, 2, 1, 3).contiguous().view(32, 10, -1)
dis_oreal = self.discriminator(targets, self.relrec_joint,
self.relsend_joint,
self.relrec_part,
self.relsend_part,
self.relrec_body,
self.relsend_body,
self.arg.lamda)# .view(-1)
# real_labels = torch.full((N,), 1.0, dtype=torch.float, device='cuda:0')
# real_labels = Uniform(0.9, 1.0).sample((N,1)).cuda()
real_labels = torch.FloatTensor(1).fill_(1.0)
real_labels = real_labels.requires_grad_(False)
real_labels = real_labels.expand_as(dis_oreal).cuda()
# print(real_labels.requires_grad)
errD_real= self.criterion(dis_oreal, real_labels)
# errD_real.backward()
errD = 0.5*(errD_real + errD_fake)
errD.backward()
self.netD_optimizer.step()
D_x_real = dis_oreal.mean().item()
self.iter_info['discriminator loss'] = errD
self.iter_info['discriminator real out'] = D_x_real
self.iter_info['discriminator fake out'] = D_x_fake
self.iter_info['discriminator real loss'] = errD_real
self.iter_info['discriminator fake loss'] = errD_fake
self.show_iter_info()
self.meta_info['iter'] += 1
# writer.add_scalar("Loss/train", loss, epoch)
def train_decoderv3(self, mean, var, gan_decoder_inputs, gan_targets, gan_decoder_inputs_previous, gan_decoder_inputs_previous2, gan_disc_encoder_inputs):
with torch.no_grad():
dec_mean = mean.clone()
dec_var = var.clone()
dec_var = torch.exp(0.5 * dec_var) # TBD
epsilon = torch.randn_like(dec_var)
z = dec_mean + dec_var * epsilon
dis_pred = self.model.generate_from_decoder(z, gan_decoder_inputs, gan_decoder_inputs_previous, \
gan_decoder_inputs_previous2, self.arg.target_seq_len) #[32, 26, 10, 3]
dis_pred = dis_pred.detach()
dis_pred = dis_pred.requires_grad_()
dis_pred = dis_pred.permute(0, 2, 1, 3).contiguous().view(32, 10, -1)
disc_in = torch.cat([gan_disc_encoder_inputs.clone(), dis_pred], dim=1)
dis_o = self.discriminator(disc_in)# .view(-1)
# dis_o = dis_o.detach()
# dis_o =dis_o.requires_grad_()
self.netD_optimizer.zero_grad()
N = dis_o.size()[0]
# label = torch.full((N,), 0.0, dtype=torch.float, device='cuda:0')
# label = Uniform(0.0, 0.1).sample((N,1)).cuda()
fake_labels = torch.FloatTensor(1).fill_(0.0)
fake_labels = fake_labels.requires_grad_(False)
fake_labels = fake_labels.expand_as(dis_o).cuda()
# print(fake_labels.size())
# print(dis_o.size())
errD_fake= self.criterion(dis_o, fake_labels)
# Calculate gradients for D in backward pass
# errD_fake.backward()
D_x_fake = dis_o.mean().item() # to display
# for the real
targets = gan_targets#.permute(0, 2, 1, 3).contiguous().view(32, 10, -1)
disc_targets_in = torch.cat([gan_disc_encoder_inputs.clone(), targets], dim=1)
dis_oreal = self.discriminator(disc_targets_in)# .view(-1)
# real_labels = torch.full((N,), 1.0, dtype=torch.float, device='cuda:0')
# real_labels = Uniform(0.9, 1.0).sample((N,1)).cuda()
real_labels = torch.FloatTensor(1).fill_(1.0)
real_labels = real_labels.requires_grad_(False)
real_labels = real_labels.expand_as(dis_oreal).cuda()
# print(real_labels.requires_grad)
errD_real= self.criterion(dis_oreal, real_labels)
# errD_real.backward()
errD = 0.5*(errD_real + errD_fake)
errD.backward()
self.netD_optimizer.step()
for p in self.discriminator.parameters():
p.data.clamp_(-0.25, 0.25)
# nn.utils.clip_grad_norm_(self.discriminator.parameters(), 0.1)
D_x_real = dis_oreal.mean().item()
self.iter_info['discriminator_loss'] = errD
self.iter_info['discriminator real out'] = D_x_real
self.iter_info['discriminator fake out'] = D_x_fake
self.iter_info['discriminator real loss'] = errD_real
self.iter_info['discriminator fake loss'] = errD_fake
self.show_iter_info()
self.meta_info['iter'] += 1
def train_generator(self, mode='generator', masking_type="lower-body"):
self.model.train()
self.adjust_lr()
loss_value = []
normed_train_dict = normalize_data(self.train_dict, self.data_mean, self.data_std, self.dim_use)
encoder_inputs, decoder_inputs, targets = train_sample(normed_train_dict,
self.arg.batch_size,
self.arg.source_seq_len,
self.arg.target_seq_len,
len(self.dim_use))
# unmasked
gan_disc_encoder_inputs = torch.Tensor(encoder_inputs).float().to(self.dev) #encoder_inputs #.clone().detach().requires_grad_(True)
gan_disc_en_in = torch.Tensor(encoder_inputs).float().to(self.dev) # encoder_inputs_p.clone().detach().requires_grad_(True)
#build masking matrices
if masking_type == "lower-body":
self.M_enc_in, self.M_dec_in = self.build_lower_body_masking_matrices(
self.lower_body_joints,
encoder_inputs,
decoder_inputs
)
elif masking_type == "random":
self.M_enc_in, self.M_dec_in = self.build_random_masking_matrices(
encoder_inputs,
decoder_inputs,
p=0.8
)
else:
raise NotImplementedError
# mask encoder inputs and decoder inputs
encoder_inputs = np.multiply(self.M_enc_in, encoder_inputs)
decoder_inputs = np.multiply(self.M_dec_in, decoder_inputs)
# add noise to masked encoder/decoder inputs
encoder_noise = self.build_noise_matrix(encoder_inputs, self.M_enc_in)
decoder_noise = self.build_noise_matrix(decoder_inputs, self.M_dec_in)
encoder_inputs = np.add(encoder_inputs, encoder_noise)
decoder_inputs = np.add(decoder_inputs, decoder_noise)
encoder_inputs_v = np.zeros_like(encoder_inputs)
encoder_inputs_v[:, 1:, :] = encoder_inputs[:, 1:, :]-encoder_inputs[:, :-1, :]
encoder_inputs_a = np.zeros_like(encoder_inputs)
encoder_inputs_a[:, :-1, :] = encoder_inputs_v[:, 1:, :]-encoder_inputs_v[:, :-1, :]
encoder_inputs_p = torch.Tensor(encoder_inputs).float().to(self.dev)
encoder_inputs_v = torch.Tensor(encoder_inputs_v).float().to(self.dev)
encoder_inputs_a = torch.Tensor(encoder_inputs_a).float().to(self.dev)
decoder_inputs = torch.Tensor(decoder_inputs).float().to(self.dev)
decoder_inputs_previous = torch.Tensor(encoder_inputs[:, -1, :]).unsqueeze(1).to(self.dev)
decoder_inputs_previous2 = torch.Tensor(encoder_inputs[:, -2, :]).unsqueeze(1).to(self.dev)
targets = torch.Tensor(targets).float().to(self.dev)
gan_targets = targets.clone().detach().requires_grad_(True)
N, T, D = targets.size() # N = 64(batchsize), T=10, D=63
targets = targets.contiguous().view(N, T, -1, 3).permute(0, 2, 1, 3) # [64, 21, 10, 3]
gan_decoder_inputs = decoder_inputs.clone().detach().requires_grad_(True)
gan_decoder_inputs_previous = decoder_inputs_previous.clone().detach().requires_grad_(True)
gan_decoder_inputs_previous2 = decoder_inputs_previous2.clone().detach().requires_grad_(True)
# v3
# gan_disc_encoder_inputs = encoder_inputs_p.clone().detach().requires_grad_(True)
# gan_disc_en_in = encoder_inputs_p.clone().detach().requires_grad_(True)
outputs, mean, log_var = self.model(encoder_inputs_p,
encoder_inputs_v,
encoder_inputs_a,
decoder_inputs,
decoder_inputs_previous,
decoder_inputs_previous2,
self.arg.target_seq_len,
self.relrec_joint,
self.relsend_joint,
self.relrec_part,
self.relsend_part,
self.relrec_body,
self.relsend_body,
self.arg.lamda)
# convert spatio-temporal masking matrix to a tensor
#st_mask = torch.from_numpy(self.M_dec_out).to(self.dev)
#loss = self.vae_loss_function(outputs, targets, mean, log_var, st_mask = st_mask)
if mode =='generator':
loss = self.vae_loss_function(outputs, targets, mean, log_var)
outputs = outputs.permute(0, 2, 1, 3).contiguous().view(32, 10, -1)
if True:
disc_in = torch.cat([gan_disc_en_in, outputs], dim=1)
gen_disco = self.discriminator(disc_in)
                # adversarial loss
real_labels = torch.FloatTensor(1).fill_(1.0)
real_labels = real_labels.requires_grad_(False)
real_labels = real_labels.expand_as(gen_disco).cuda()
# print(real_labels.requires_grad)
gan_loss = self.criterion(gen_disco, real_labels)
loss = 0.93* loss + 0.07*gan_loss
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
self.optimizer.step()
self.iter_info['generator_loss'] = loss.data.item()
if False:
self.iter_info['gan_loss'] = gan_loss.data.item()
self.show_iter_info()
self.meta_info['iter'] += 1
self.epoch_info['mean_loss'] = np.mean(loss_value)
return mean, log_var, gan_decoder_inputs, gan_targets, gan_decoder_inputs_previous, gan_decoder_inputs_previous2, gan_disc_encoder_inputs
def test(
self,
evaluation=True,
iter_time=0,
save_motion=False,
phase=False,
masking_type="lower-body",
fix_rand_masking_seed=False):
self.model.eval()
loss_value = []
normed_test_dict = normalize_data(self.test_dict, self.data_mean, self.data_std, self.dim_use)
self.actions = ["basketball", "basketball_signal", "directing_traffic",
"jumping", "running", "soccer", "walking", "washwindow"]
self.io.print_log(' ')
print_str = "{0: <16} |".format("milliseconds")
for ms in [40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 560, 1000]:
print_str = print_str + " {0:5d} |".format(ms)
self.io.print_log(print_str)
for action_num, action in enumerate(self.actions):
encoder_inputs, decoder_inputs, targets = srnn_sample(normed_test_dict, action,
self.arg.source_seq_len,
self.arg.target_seq_len,
len(self.dim_use))
#build masking matrices
if masking_type == "lower-body":
self.M_enc_in, self.M_dec_in = self.build_lower_body_masking_matrices(
self.lower_body_joints,
encoder_inputs,
decoder_inputs
)
elif masking_type == "random":
rand_masking_seed = None
if fix_rand_masking_seed:
rand_masking_seed = 0
self.M_enc_in, self.M_dec_in = self.build_random_masking_matrices(
encoder_inputs,
decoder_inputs,
seed=rand_masking_seed,
p=0.8
)
else:
raise NotImplementedError
# mask encoder inputs and decoder inputs
encoder_inputs = np.multiply(self.M_enc_in, encoder_inputs)
decoder_inputs = np.multiply(self.M_dec_in, decoder_inputs)
# add noise to masked encoder/decoder inputs
encoder_noise = self.build_noise_matrix(encoder_inputs, self.M_enc_in)
decoder_noise = self.build_noise_matrix(decoder_inputs, self.M_dec_in)
encoder_inputs = np.add(encoder_inputs, encoder_noise)
decoder_inputs = np.add(decoder_inputs, decoder_noise)
encoder_inputs_v = np.zeros_like(encoder_inputs)
encoder_inputs_v[:, 1:, :] = encoder_inputs[:, 1:, :]-encoder_inputs[:, :-1, :]
encoder_inputs_a = np.zeros_like(encoder_inputs)
encoder_inputs_a[:, :-1, :] = encoder_inputs_v[:, 1:, :]-encoder_inputs_v[:, :-1, :]
encoder_inputs_p = torch.Tensor(encoder_inputs).float().to(self.dev)
encoder_inputs_v = torch.Tensor(encoder_inputs_v).float().to(self.dev)
encoder_inputs_a = torch.Tensor(encoder_inputs_a).float().to(self.dev)
# for saving motion
N, T, D = encoder_inputs_p.shape
encoder_inputs_p_4d = encoder_inputs_p.view(N, T, -1, 3).permute(0, 2, 1, 3) # Eric: [N, V, T, 3] same with targets for saving motion
decoder_inputs = torch.Tensor(decoder_inputs).float().to(self.dev)
decoder_inputs_previous = torch.Tensor(encoder_inputs[:, -1, :]).unsqueeze(1).to(self.dev)
decoder_inputs_previous2 = torch.Tensor(encoder_inputs[:, -2, :]).unsqueeze(1).to(self.dev)
targets = torch.Tensor(targets).float().to(self.dev)
N, T, D = targets.size()
targets = targets.contiguous().view(N, T, -1, 3).permute(0, 2, 1, 3) # [64, 21, 25, 3] same with outputs for validation loss
start_time = time.time()
with torch.no_grad():
outputs, mean, var = self.model(encoder_inputs_p,
encoder_inputs_v,
encoder_inputs_a,
decoder_inputs,
decoder_inputs_previous,
decoder_inputs_previous2,
self.arg.target_seq_len,
self.relrec_joint,
self.relsend_joint,
self.relrec_part,
self.relsend_part,
self.relrec_body,
self.relsend_body,
self.arg.lamda)
'''
p = self.model.cal_posterior(encoder_inputs_p,
encoder_inputs_v,
encoder_inputs_a,
decoder_inputs,
decoder_inputs_previous,
decoder_inputs_previous2,
self.arg.target_seq_len,
self.relrec_joint,
self.relsend_joint,
self.relrec_part,
self.relsend_part,
self.relrec_body,
self.relsend_body,
self.arg.lamda)
print("posterior {}".format(p))
'''
if evaluation:
num_samples_per_action = encoder_inputs_p_4d.shape[0]
mean_errors = np.zeros(
(num_samples_per_action, self.arg.target_seq_len), dtype=np.float32)
# Eric: create data structs to save unnormalized inputs, outputs and targets
inputs_denorm = np.zeros(
[num_samples_per_action,
encoder_inputs_p_4d.shape[2],
int(self.data_mean.shape[0]/3),
3]) # num_samples_per_action, t_in, 39, 3
outputs_denorm = np.zeros(
[num_samples_per_action,
outputs.shape[2],
int(self.data_mean.shape[0]/3),
3]) # [num_samples_per_action, t_out, 39, 3]
targets_denorm = np.zeros(
[num_samples_per_action,
targets.shape[2],
int(self.data_mean.shape[0]/3),
3]) # [num_samples_per_action, t_out, V, 3]
for i in np.arange(num_samples_per_action):
input = encoder_inputs_p_4d[i] # V, t_in, d
V, t, d = input.shape
input = input.permute(1,0,2).contiguous().view(t, V*d)
input_denorm = unnormalize_data(
input.cpu().numpy(), self.data_mean, self.data_std, self.dim_ignore, self.dim_use, self.dim_zero)
inputs_denorm[i] = input_denorm.reshape((t, -1, 3))
output = outputs[i] # output: [V, t, d] = [21, 25, 3]
V, t, d = output.shape
output = output.permute(1,0,2).contiguous().view(t, V*d)
output_denorm = unnormalize_data(
output.cpu().numpy(), self.data_mean, self.data_std, self.dim_ignore, self.dim_use, self.dim_zero)
outputs_denorm[i] = output_denorm.reshape((t, -1, 3))
t, D = output_denorm.shape
output_euler = np.zeros((t,D) , dtype=np.float32) # [21, 99]
for j in np.arange(t):
for k in np.arange(0,115,3):
output_euler[j,k:k+3] = rotmat2euler(expmap2rotmat(output_denorm[j,k:k+3]))
target = targets[i]
target = target.permute(1,0,2).contiguous().view(t, V*d)
target_denorm = unnormalize_data(
target.cpu().numpy(), self.data_mean, self.data_std, self.dim_ignore, self.dim_use, self.dim_zero)
targets_denorm[i] = target_denorm.reshape((t, -1, 3))
target_euler = np.zeros((t,D) , dtype=np.float32)
for j in np.arange(t):
for k in np.arange(0,115,3):
target_euler[j,k:k+3] = rotmat2euler(expmap2rotmat(target_denorm[j,k:k+3]))
target_euler[:,0:6] = 0
idx_to_use1 = np.where(np.std(target_euler,0)>1e-4)[0]
idx_to_use2 = self.dim_nonzero
idx_to_use = idx_to_use1[np.in1d(idx_to_use1,idx_to_use2)]
euc_error = np.power(target_euler[:,idx_to_use]-output_euler[:,idx_to_use], 2)
euc_error = np.sqrt(np.sum(euc_error, 1)) # [25]
mean_errors[i,:euc_error.shape[0]] = euc_error
mean_mean_errors = np.mean(np.array(mean_errors), 0)
if save_motion==True:
save_dir = os.path.join(self.save_dir,'motions_exp'+str(iter_time*self.arg.savemotion_interval))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# save unnormalized inputs
np.save(save_dir+f"/motions_{action}_inputs.npy", inputs_denorm)
# save unnormalized outputs
np.save(save_dir+f"/motions_{action}_outputs.npy", outputs_denorm)
# save unnormalized targets
np.save(save_dir+f"/motions_{action}_targets.npy", targets_denorm)
print_str = "{0: <16} |".format(action)
for ms_idx, ms in enumerate([0,1,2,3,4,5,6,7,8,9,13,24]):
if self.arg.target_seq_len >= ms+1:
print_str = print_str + " {0:.3f} |".format(mean_mean_errors[ms])
if phase is not True:
self.MAE_tensor[iter_time, action_num, ms_idx] = mean_mean_errors[ms]
else:
print_str = print_str + " n/a |"
if phase is not True:
self.MAE_tensor[iter_time, action_num, ms_idx] = 0
print_str = print_str + 'T: {0:.3f} ms |'.format((time.time()-start_time)*1000/8)
self.io.print_log(print_str)
self.io.print_log(' ')
@staticmethod
def get_parser(add_help=False):
parent_parser = Processor.get_parser(add_help=False)
parser = argparse.ArgumentParser(add_help=add_help, parents=[parent_parser], description='Spatial Temporal Graph Convolution Network')
parser.add_argument('--base_lr', type=float, default=0.01, help='initial learning rate')
parser.add_argument('--step', type=int, default=[], nargs='+', help='the epoch where optimizer reduce the learning rate')
parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
parser.add_argument('--nesterov', type=str2bool, default=True, help='use nesterov or not')
parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay for optimizer')
parser.add_argument('--lamda', type=float, default=1.0, help='adjust part feature')
parser.add_argument('--fusion_layer_dir', type=str, default='fusion_1', help='lamda a dir')
parser.add_argument('--learning_rate_dir', type=str, default='adam_1e-4', help='lamda a dir')
parser.add_argument('--lamda_dir', type=str, default='nothing', help='adjust part feature')
parser.add_argument('--crossw_dir', type=str, default='nothing', help='adjust part feature')
        parser.add_argument('--note', type=str, default='nothing', help='whether separate')
        parser.add_argument('--debug', type=str2bool, default=False, help='whether separate')  # str2bool: plain bool() would treat any non-empty string as True
return parser |
the-stack_0_7082 | import numpy as np
import pandas as pd
from scipy.stats import rankdata
def rolling_mean(data, period):
    # pd.rolling_mean() was removed from modern pandas; Series.rolling() is the
    # equivalent and keeps the same NaN padding at the start of the window
    rm = pd.Series(data).rolling(period).mean().values
rm = rm[~np.isnan(rm)]
return rm
def mean(value):
value = np.mean(value)
if np.isnan(value):
return 0.
return value
class DCA:
def __init__(self, period=30, cash=300.):
self.period = period
self.cash = cash
class Investor:
def __init__(self, ticket, dist, dca=DCA()):
self.ticket = ticket
self.cash = 0.
self.invested = 0.
self.history = []
self.invested_history = []
self.ror_history = []
self.shares = []
self.dist = dist
self.dca = dca
self.rms_list = []
self.means = []
self.rank = 0.
self.m = 0.
self.std = 0.
def compute_means(self):
for i in range(1, 11):
rms = rolling_mean(np.array(self.ror_history), i * 365)
m = mean(rms)
if m > 0:
self.rms_list.append(rms)
self.means.append(m.round(2))
else:
self.rms_list.append([0.])
self.means = np.array(self.means)
self.m = np.mean(self.means).round(2)
if np.isnan(self.m):
self.m = 0.
self.std = np.std(self.means).round(4)
if np.isnan(self.std):
self.std = 0.
def compute_rank(self):
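        # combine average rolling return (m) with its stability (1 - std):
        # a higher mean and a lower spread across horizons both raise the rank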
self.rank = (self.m + (1. - self.std)) / 2.
class BuyAndHoldInvestmentStrategy:
def __init__(self, investor, tr_cost):
self.investor = investor
self.tr_cost = tr_cost
def invest(self, data, etf):
if len(data.keys()) == 0:
return
self.investor.shares = np.zeros(len(data.keys()))
day = 0
last_index = -1
for i in data.index:
prices = data.loc[i].values
etf_index = -1
# 30 = 0, 60=1, 90 = 2, 120 = 3, 150 = 4
if day % 30 == 0:
last_index += 1
etf_index = last_index % len(etf)
if etf_index > -1:
price = data[etf[etf_index]].loc[i]
if (etf_index > -1 and price == 0.) or (prices == 0).all():
day += 1
continue
portfolio = self.investor.cash + np.dot(prices, self.investor.shares)
if np.isnan(portfolio):
portfolio = 0.
self.investor.history.append(portfolio)
self.investor.invested_history.append(self.investor.invested)
if self.investor.invested == 0:
ror = 0
else:
ror = (portfolio - self.investor.invested) / self.investor.invested
self.investor.ror_history.append(ror)
if etf_index > -1:
self.investor.cash += self.investor.dca.cash
self.investor.invested += self.investor.dca.cash
s = np.floor((self.investor.cash - self.tr_cost) / price)
self.investor.shares[etf_index] += s
                self.investor.cash -= s * price + self.tr_cost  # pay for the shares plus the transaction fee
day += 1
|
the-stack_0_7083 | def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Gender, obj[4]: Age, obj[5]: Children, obj[6]: Education, obj[7]: Occupation, obj[8]: Income, obj[9]: Bar, obj[10]: Coffeehouse, obj[11]: Restaurant20to50, obj[12]: Direction_same, obj[13]: Distance
# {"feature": "Age", "instances": 34, "metric_value": 0.99, "depth": 1}
if obj[4]>0:
# {"feature": "Distance", "instances": 27, "metric_value": 0.9911, "depth": 2}
if obj[13]<=2:
# {"feature": "Income", "instances": 23, "metric_value": 0.9986, "depth": 3}
if obj[8]>1:
# {"feature": "Restaurant20to50", "instances": 20, "metric_value": 0.971, "depth": 4}
if obj[11]<=1.0:
# {"feature": "Occupation", "instances": 16, "metric_value": 1.0, "depth": 5}
if obj[7]<=20:
# {"feature": "Education", "instances": 14, "metric_value": 0.9852, "depth": 6}
if obj[6]>0:
# {"feature": "Coupon", "instances": 8, "metric_value": 0.9544, "depth": 7}
if obj[2]<=2:
# {"feature": "Coffeehouse", "instances": 5, "metric_value": 0.971, "depth": 8}
if obj[10]>1.0:
# {"feature": "Passanger", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[0]<=1:
return 'True'
elif obj[0]>1:
return 'False'
else: return 'False'
elif obj[10]<=1.0:
return 'False'
else: return 'False'
elif obj[2]>2:
return 'True'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Passanger", "instances": 6, "metric_value": 0.65, "depth": 7}
if obj[0]<=1:
return 'False'
elif obj[0]>1:
return 'True'
else: return 'True'
else: return 'False'
elif obj[7]>20:
return 'True'
else: return 'True'
elif obj[11]>1.0:
return 'True'
else: return 'True'
elif obj[8]<=1:
return 'False'
else: return 'False'
elif obj[13]>2:
return 'False'
else: return 'False'
elif obj[4]<=0:
return 'True'
else: return 'True'
|
the-stack_0_7084 | #!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
from apricot import TestWithServers
from general_utils import bytes_to_human, human_to_bytes
from server_utils import ServerFailed
class PoolTestBase(TestWithServers):
"""Base pool test class.
:avocado: recursive
"""
def setUp(self):
"""Set up each test case."""
# Create test-case-specific DAOS log files
self.update_log_file_names()
super().setUp()
self.dmg = self.get_dmg_command()
def get_max_pool_sizes(self, scm_ratio=0.9, nvme_ratio=0.9):
"""Get the maximum pool sizes for the current server configuration.
Args:
scm_ratio (float, optional): percentage of the maximum SCM
capacity to use for the pool sizes. Defaults to 0.9 (90%).
nvme_ratio (float, optional): percentage of the maximum NVMe
capacity to use for the pool sizes. Defaults to 0.9 (90%).
Returns:
list: a list of bytes representing the maximum pool creation
SCM size and NVMe size
"""
try:
sizes = self.server_managers[0].get_available_storage()
except ServerFailed as error:
self.fail(error)
ratios = (scm_ratio, nvme_ratio)
for index, size in enumerate(sizes):
if size and ratios[index] < 1:
# Reduce the size by the specified percentage
sizes[index] *= ratios[index]
self.log.info(
"Adjusted %s size by %.2f%%: %s (%s)",
"SCM" if index == 0 else "NVMe", 100 * ratios[index],
str(sizes[index]), bytes_to_human(sizes[index]))
return sizes
def get_pool_list(self, quantity, scm_ratio, nvme_ratio, svcn=None):
"""Get a list of TestPool objects.
Set each TestPool's scm_size and nvme_size attributes using the
specified ratios and the largest SCM or NVMe size common to all the
configured servers.
Args:
quantity (int): number of TestPool objects to create
scm_ratio (float): percentage of the maximum SCM capacity to use
for the pool sizes, e.g. 0.9 for 90%
nvme_ratio (float): percentage of the maximum NVMe capacity to use
for the pool sizes, e.g. 0.9 for 90%. Specifying None will
setup each pool without NVMe.
svcn (int): Number of pool service replicas. The default value
of None will use the default set on the server.
Returns:
list: a list of TestPool objects equal in length to the quantity
specified, each configured with the same SCM and NVMe sizes.
"""
sizes = self.get_max_pool_sizes(
scm_ratio, 1 if nvme_ratio is None else nvme_ratio)
pool_list = [
self.get_pool(create=False, connect=False) for _ in range(quantity)]
for pool in pool_list:
pool.svcn.update(svcn)
pool.scm_size.update(bytes_to_human(sizes[0]), "scm_size")
if nvme_ratio is not None:
if sizes[1] is None:
self.fail(
"Unable to assign a max pool NVMe size; NVMe not "
"configured!")
# The I/O Engine allocates NVMe storage on targets in multiples
# of 1GiB per target. A server with 8 targets will have a
# minimum NVMe size of 8 GiB. Specify the largest NVMe size in
# GiB that can be used with the configured number of targets and
# specified capacity in GiB.
targets = self.server_managers[0].get_config_value("targets")
increment = human_to_bytes("{}GiB".format(targets))
nvme_multiple = increment
while nvme_multiple + increment <= sizes[1]:
nvme_multiple += increment
self.log.info(
"Largest NVMe multiple based on %s targets in %s: %s (%s)",
targets, str(sizes[1]), str(nvme_multiple),
bytes_to_human(nvme_multiple))
pool.nvme_size.update(
bytes_to_human(nvme_multiple), "nvme_size")
return pool_list
def check_pool_creation(self, max_duration):
"""Check the duration of each pool creation meets the requirement.
Args:
max_duration (int): max pool creation duration allowed in seconds
"""
durations = []
for index, pool in enumerate(self.pool):
start = float(time.time())
pool.create()
durations.append(float(time.time()) - start)
self.log.info(
"Pool %s creation: %s seconds", index + 1, durations[-1])
exceeding_duration = 0
for index, duration in enumerate(durations):
if duration > max_duration:
exceeding_duration += 1
self.assertEqual(
exceeding_duration, 0,
"Pool creation took longer than {} seconds on {} pool(s)".format(
max_duration, exceeding_duration))
|
the-stack_0_7085 | from __future__ import annotations
from typing import Tuple, NoReturn
from ...base import BaseEstimator
import numpy as np
from itertools import product
from ...metrics import misclassification_error
class DecisionStump(BaseEstimator):
"""
A decision stump classifier for {-1,1} labels according to the CART algorithm
Attributes
----------
self.threshold_ : float
The threshold by which the data is split
self.j_ : int
The index of the feature by which to split the data
self.sign_: int
The label to predict for samples where the value of the j'th feature is about the threshold
"""
def __init__(self) -> DecisionStump:
"""
Instantiate a Decision stump classifier
"""
super().__init__()
self.threshold_, self.j_, self.sign_ = None, None, None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
fits a decision stump to the given data
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
th = 0
mis = np.Inf
feature_index = 0
sign = 1
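        # exhaustive search: for every feature try both output polarities and
        # keep the (feature, threshold, sign) triple with the lowest weighted error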
for j in range(X.shape[1]):
new_th, new_mis = self._find_threshold(X[:, j], y, 1)
if new_mis < mis:
mis = new_mis
th = new_th
feature_index = j
sign = 1
new_th, new_mis = self._find_threshold(X[:, j], y, -1)
if new_mis < mis:
mis = new_mis
th = new_th
feature_index = j
sign = -1
self.threshold_ = th
self.j_ = feature_index
self.sign_ = sign
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
Notes
-----
Feature values strictly below threshold are predicted as `-sign` whereas values which equal
to or above the threshold are predicted as `sign`
"""
# y_pred = np.zeros(X.shape[0])
values = X[:, self.j_]
y_pred = np.where(values < self.threshold_, -self.sign_, self.sign_)
return y_pred
def _find_threshold(self, values: np.ndarray, labels: np.ndarray, sign: int) -> Tuple[float, float]:
"""
Given a feature vector and labels, find a threshold by which to perform a split
The threshold is found according to the value minimizing the misclassification
error along this feature
Parameters
----------
values: ndarray of shape (n_samples,)
A feature vector to find a splitting threshold for
labels: ndarray of shape (n_samples,)
The labels to compare against
sign: int
Predicted label assigned to values equal to or above threshold
Returns
-------
thr: float
Threshold by which to perform split
thr_err: float between 0 and 1
Misclassificaiton error of returned threshold
Notes
-----
For every tested threshold, values strictly below threshold are predicted as `-sign` whereas values
which equal to or above the threshold are predicted as `sign`
"""
th = values[0]
mis = np.inf
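        # O(n^2) scan over all sample values as candidate thresholds; each sample
        # is weighted by |label|, so boosting weights can be folded into the labels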
for i in range(values.shape[0]):
y_pred = np.where(values < values[i], -sign, sign)
# new_mis = misclassification_error(labels, y_pred)
new_mis = np.sum(np.where(np.sign(labels) != np.sign(y_pred), abs(labels), 0))
if new_mis < mis:
mis = new_mis
th = values[i]
return th, mis
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under missclassification loss function
"""
y_pred = self._predict(X)
# return misclassification_error(y, y_pred)
loss = np.sum(np.where(np.sign(y) != np.sign(y_pred), abs(y), 0))
# loss = np.sum(np.sign(y) != np.sign(y_pred))
# if normalize:
return loss
# return loss
|
the-stack_0_7086 | # -*- coding: utf-8 -*-
import json
import logging
import re
from concurrent import futures
from urllib.parse import quote, unquote, urlparse
from bs4 import BeautifulSoup
from bs4.element import Tag
from ..utils.crawler import Crawler
logger = logging.getLogger('BABELNOVEL')
search_url = 'https://babelnovel.com/api/books?page=0&pageSize=8&fields=id,name,canonicalName,lastChapter&ignoreStatus=false&query=%s'
novel_page_url = 'https://babelnovel.com/api/books/%s'
chapter_list_url = 'https://babelnovel.com/api/books/%s/chapters?bookId=%s&page=%d&pageSize=100&fields=id,name,canonicalName,hasContent,isBought,isFree,isLimitFree'
chapter_json_url = 'https://babelnovel.com/api/books/%s/chapters/%s/content'
# https://babelnovel.com/api/books/f337b876-f246-40c9-9bcf-d7f31db00296/chapters/ac1ebce2-e62e-4176-a2e7-6012c606ded4/content
chapter_page_url = 'https://babelnovel.com/books/%s/chapters/%s'
class BabelNovelCrawler(Crawler):
base_url = 'https://babelnovel.com/'
def search_novel(self, query):
# to get cookies
self.get_response(self.home_url)
url = search_url % quote(query.lower())
logger.debug('Visiting: %s', url)
data = self.get_json(url)
results = []
for item in data['data']:
if not item['canonicalName']:
continue
# end if
info = None
if item['lastChapter']:
info = 'Latest: %s' % item['lastChapter']['name']
# end if
results.append({
'title': item['name'],
'url': novel_page_url % item['canonicalName'],
'info': info,
})
# end for
return results
# end def
def read_novel_info(self):
# to get cookies and session info
self.parse_content_css(self.home_url)
# Determine cannonical novel name
path_fragments = urlparse(self.novel_url).path.split('/')
if path_fragments[1] == 'books':
self.novel_hash = path_fragments[2]
else:
self.novel_hash = path_fragments[-1]
# end if
self.novel_url = novel_page_url % self.novel_hash
logger.info('Canonical name: %s', self.novel_hash)
logger.debug('Visiting %s', self.novel_url)
data = self.get_json(self.novel_url)
self.novel_id = data['data']['id']
logger.info('Novel ID: %s', self.novel_id)
self.novel_title = data['data']['name']
logger.info('Novel title: %s', self.novel_title)
self.novel_cover = data['data']['cover']
logger.info('Novel cover: %s', self.novel_cover)
chapter_count = int(data['data']['chapterCount'])
self.get_list_of_chapters(chapter_count)
# end def
def get_list_of_chapters(self, chapter_count):
futures_to_check = dict()
temp_chapters = dict()
for page in range(1 + chapter_count // 100):
list_url = chapter_list_url % (self.novel_id, self.novel_id, page)
future = self.executor.submit(self.parse_chapter_item, list_url)
futures_to_check[future] = str(page)
# end for
for future in futures.as_completed(futures_to_check):
page = int(futures_to_check[future])
temp_chapters[page] = future.result()
# end for
for page in sorted(temp_chapters.keys()):
self.volumes.append({'id': page + 1})
for chap in temp_chapters[page]:
chap['volume'] = page + 1
chap['id'] = 1 + len(self.chapters)
self.chapters.append(chap)
# end for
# end for
# end def
def parse_chapter_item(self, list_url):
logger.debug('Visiting %s', list_url)
data = self.get_json(list_url)
chapters = list()
for item in data['data']:
if not (item['isFree']): # or item['isLimitFree'] or item['isBought']):
continue
# end if
chapters.append({
'title': item['name'],
'url': chapter_page_url % (self.novel_hash, item['canonicalName']),
'json_url': chapter_json_url % (self.novel_hash, item['id']),
})
# end for
return chapters
# end def
def parse_content_css(self, url):
try:
soup = self.get_soup(url)
content = re.findall('window.__STATE__ = "([^"]+)"', str(soup), re.MULTILINE)
data = json.loads(unquote(content[0]))
cssUrl = self.absolute_url(data['chapterDetailStore']['cssUrl'])
logger.info('Getting %s', cssUrl)
css = self.get_response(cssUrl).text
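            # the selector of the last rule in the obfuscation stylesheet appears
            # to target the junk elements injected into chapter bodies; keep it so
            # those tags can be stripped in download_chapter_body()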
baddies = css.split('\n')[-1].split('{')[0].strip()
self.bad_selectors = baddies
logger.info('Bad selectors: %s', self.bad_selectors)
except:
self.bad_selectors = []
logger.exception('Fail to get bad selectors')
# end for
# end def
def download_chapter_body(self, chapter):
logger.info('Visiting %s', chapter['json_url'])
data = self.get_json(chapter['json_url'])
soup = BeautifulSoup(data['data']['content'], 'lxml')
if self.bad_selectors:
for tag in soup.select(self.bad_selectors):
tag.extract()
# end for
# end if
body = soup.find('body')
self.clean_contents(body)
for tag in body.contents:
if not str(tag).strip():
tag.extract()
elif isinstance(tag, Tag):
tag.name = 'p'
# end if
# end for
# body = data['data']['content']
result = str(body)
result = re.sub(r'\n\n', '<br><br>', result)
return result
# end def
# end class
|
the-stack_0_7087 | import tkinter as tk
class AutoScrollbar(tk.Scrollbar):
"""Create a scrollbar that hides iteself if it's not needed. Only
works if you use the pack geometry manager from tkinter.
https://stackoverflow.com/questions/57030781/auto-hiding-scrollbar-not-showing-as-expected-with-tkinter-pack-method
"""
def set(self, low, high):
if float(low) <= 0.0 and float(high) >= 1.0:
self.pack_forget()
else:
if self.cget("orient") == tk.HORIZONTAL:
self.pack(fill=tk.X, side=tk.BOTTOM)
else:
self.pack(fill=tk.Y, side=tk.RIGHT)
tk.Scrollbar.set(self, low, high)
def grid(self, **kw):
raise tk.TclError("cannot use grid with this widget")
def place(self, **kw):
raise tk.TclError("cannot use place with this widget")
|
the-stack_0_7089 | # Ke Yan, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory,
# National Institutes of Health Clinical Center, July 2019
"""Utilities for DeepLesion"""
import numpy as np
#from openpyxl import load_workbook
import json
from collections import Counter
#from maskrcnn.utils.miscellaneous import unique
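# NOTE: load_tag_dict_from_xlsfile() and load_lesion_tags() below rely on the
# load_workbook and unique imports commented out above; re-enable those imports
# before calling them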
from fcos_core.config import cfg
def gen_mask_polygon_from_recist(recist):
"""Generate ellipse from RECIST for weakly-supervised segmentation"""
x11, y11, x12, y12, x21, y21, x22, y22 = recist
axis1 = np.linalg.solve(np.array([[x11, y11], [x12, y12]]), np.array([1, 1]))
axis2 = np.linalg.solve(np.array([[x21, y21], [x22, y22]]), np.array([1, 1]))
center = np.linalg.solve(np.array([[axis1[0], axis1[1]], [axis2[0], axis2[1]]]), np.array([1, 1]))
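    # each RECIST diameter (two endpoints) defines a line a*x + b*y = 1; the two
    # solves above recover those line coefficients, and solving them jointly
    # gives the intersection of the diameters, used as the lesion center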
centered_recist = recist - np.tile(center, (4,))
centered_recist = np.reshape(centered_recist, (4, 2))
pt_angles = np.arctan2(centered_recist[:, 1], centered_recist[:, 0])
pt_lens = np.sqrt(np.sum(centered_recist ** 2, axis=1))
ord = [0, 2, 1, 3, 0]
grid = .1
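    # trace a quarter "ellipse" between consecutive RECIST endpoints (in the
    # order given by ord) by sweeping the angle in 0.1 rad steps, scaling x/y by
    # the two endpoint radii and rotating into the first endpoint's direction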
rotated_pts = []
for p in range(4):
# pt1 = centered_recist[ord[p]]
# pt2 = centered_recist[ord[p+1]]
if (pt_angles[ord[p]] < pt_angles[ord[p + 1]] and pt_angles[ord[p + 1]] - pt_angles[ord[p]] < np.pi) \
or (pt_angles[ord[p]] - pt_angles[ord[p + 1]] > np.pi): # counter-clockwise
angles = np.arange(0, np.pi / 2, grid)
else:
angles = np.arange(0, -np.pi / 2, -grid)
xs = np.cos(angles) * pt_lens[ord[p]]
ys = np.sin(angles) * pt_lens[ord[p + 1]]
r = pt_angles[ord[p]]
rotated_pts1 = np.matmul(np.array([[np.cos(r), -np.sin(r)], [np.sin(r), np.cos(r)]]),
np.vstack((xs, ys)))
rotated_pts.append(rotated_pts1)
rotated_pts = np.hstack(rotated_pts)
decentered_pts = rotated_pts + center.reshape((2, 1))
polygon = decentered_pts.transpose().ravel()
# for p in polygon:
# print('%.4f'%p, ',',)
# print('\n',recist)
return polygon.tolist()
def load_tag_dict_from_xlsfile(fn):
"""Load ontology"""
cellname = lambda row, col: '%s%d' % (chr(ord('A') + col - 1), row)
wb = load_workbook(fn)
sheet = wb.get_active_sheet()
tag_dicts = []
for p in range(2, sheet.max_row + 1):
ex = sheet[cellname(p, 6)].value
ex = [] if ex is None else ex.split(' | ')
parents = sheet[cellname(p, 7)].value
parents = [] if parents is None else parents.split(' | ')
children = sheet[cellname(p, 8)].value
children = [] if children is None else children.split(' | ')
tag_dict = {'id': sheet[cellname(p, 1)].value, # useless
'class': sheet[cellname(p, 2)].value,
'tag': sheet[cellname(p, 3)].value,
'synonyms': sheet[cellname(p, 4)].value.split(' | '),
'num_detected': sheet[cellname(p, 5)].value,
'exclusive': ex,
'parents': parents,
'children': children
}
tag_dicts.append(tag_dict)
return tag_dicts
def load_lesion_tags(split_file, tag_dict):
"""Load training labels for tags"""
with open(split_file, 'r') as f:
data = json.load(f)
print('loaded', split_file)
term_list = data['term_list']
num_labels = len(term_list)
prefix = 'train'
smp_idxs, labels, uncertain_labels = \
data['%s_lesion_idxs' % prefix], data['%s_relevant_labels' % prefix], \
data['%s_uncertain_labels' % prefix]
tag_dict_filtered = {idx: unique(r+u) for idx,r,u in zip(smp_idxs, labels, uncertain_labels)}
tag_list_dict = []
class_map = {t['tag']: t['class'] for t in tag_dict}
for i in range(num_labels):
tag_dict = {'ID': i, 'tag': term_list[i], 'class': class_map[term_list[i]]}
tag_list_dict.append(tag_dict)
return tag_list_dict, tag_dict_filtered
def gen_parent_list(tag_dicts, tag_list):
"""Hierarchical label relations"""
parents_map = {t['tag']: t['parents'] for t in tag_dicts}
parent_list = []
for t in tag_list:
ps = parents_map[t]
parent_list.append([tag_list.index(p) for p in ps if p in tag_list])
return parent_list
def gen_children_list(parent_list, tag_list):
"""Hierarchical label relations"""
all_children_list = [[] for _ in tag_list]
for i, parent in enumerate(parent_list):
for p1 in parent:
all_children_list[p1].append(i)
direct_children_list = [[] for _ in tag_list]
for i, children in enumerate(all_children_list):
direct_children_list[i] = [c for c in children if not any([p in children for p in parent_list[c]])]
return all_children_list, direct_children_list
def gen_tree_depth(tag_list, parent_list):
"""Hierarchical label relations"""
tag_depth = np.ones((len(tag_list),), dtype=int)
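    # fixed-point iteration: a tag's depth is one more than the maximum depth of
    # its parents, so repeated passes push depths down until nothing changes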
while True:
last_depth = tag_depth.copy()
for p in range(len(parent_list)):
if len(parent_list[p]) > 0:
tag_depth[p] = np.max([tag_depth[idx] for idx in parent_list[p]])+1
if np.all(last_depth == tag_depth):
break
return tag_depth
def gen_exclusive_list(tag_dicts, tag_list, parent_list, all_children_list):
"""Infer exclusive label relations according to hierarchical relations"""
exclusive_list = []
all_d_tags = [t['tag'] for t in tag_dicts]
for p in range(len(tag_list)):
idx = all_d_tags.index(tag_list[p])
exclusive_list.append([tag_list.index(ex) for ex in
tag_dicts[idx]['exclusive'] if ex in tag_list])
while True:
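        # propagate exclusivity to a fixed point: exclusions extend to the
        # children of excluded tags and are inherited from parent tags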
flag = False
for p in range(len(tag_list)):
cur_ex = exclusive_list[p]
next_ex = cur_ex[:]
for ex in cur_ex:
next_ex += all_children_list[ex]
for parent in parent_list[p]:
next_ex += exclusive_list[parent]
next_ex = unique(next_ex)
flag = flag or (set(next_ex) != set(cur_ex))
exclusive_list[p] = next_ex
if not flag:
break
return exclusive_list
|
the-stack_0_7092 | import requests
import datetime
import random
import time
import json
def get_time():
return datetime.datetime.now().strftime("%H:%M:%S %Y-%m-%d")
def get_token():
return open('token.txt', 'r', encoding='UTF-8').read()
def change_status_text(token, text):
url = 'https://discord.com/api/v9/users/@me/settings'
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": token
}
payload = {"custom_status": {"text": text}}
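    # PATCH the authed user's settings endpoint with the new custom status text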
r = requests.patch(url, headers=headers, data=json.dumps(payload))
token = get_token()
while True:
change_status_text(token, str(get_time()))
time.sleep(0.5)
|
the-stack_0_7094 | import asyncio
import functools
import random
import time
from testing import Client
from testing import default_test_setup
from testing import gen_data
from testing import gen_points
from testing import gen_series
from testing import InsertError
from testing import PoolError
from testing import QueryError
from testing import run_test
from testing import Series
from testing import Server
from testing import ServerError
from testing import SiriDB
from testing import TestBase
from testing import UserAuthError
from testing import parse_args
class TestServer(TestBase):
title = 'Test server object'
Server.SERVER_ADDRESS = 'localhost'
Server.IP_SUPPORT = 'IPV4ONLY'
@default_test_setup(4)
async def run(self):
await self.client0.connect()
await self.db.add_pool(self.server1)
await self.assertIsRunning(self.db, self.client0, timeout=20)
await asyncio.sleep(5)
await self.client1.connect()
for port in (9010, 9011):
result = await self.client0.query(
'alter server "localhost:{}" set log_level error'.format(port))
self.assertEqual(
result.pop('success_msg'),
"Successfully set log level to 'error' on 'localhost:{}'."
.format(port))
result = await self.client1.query('list servers log_level')
self.assertEqual(result.pop('servers'), [['error'], ['error']])
result = await self.client1.query('list servers uuid')
for uuid in result.pop('servers'):
result = await self.client0.query(
'alter server {} set log_level debug'.format(uuid[0]))
result = await self.client1.query('list servers log_level')
self.assertEqual(result.pop('servers'), [['debug'], ['debug']])
result = await self.client0.query('alter servers set log_level info')
self.assertEqual(
result.pop('success_msg'),
"Successfully set log level to 'info' on 2 servers.")
result = await self.client1.query('list servers log_level')
self.assertEqual(result.pop('servers'), [['info'], ['info']])
result = await self.client1.query(
'list servers active_tasks where active_tasks == 1 and '
'idle_time >= 0 and idle_percentage <= 100')
self.assertEqual(result.pop('servers'), [[1], [1]])
result = await self.client0.query(
'alter servers where active_handles > 1 set log_level debug')
result = await self.client1.query('list servers log_level')
self.assertEqual(result.pop('servers'), [['debug'], ['debug']])
with self.assertRaisesRegex(
QueryError,
"Query error at position 42. Expecting "
"debug, info, warning, error or critical"):
await self.client0.query(
'alter server "localhost:{}" set log_level unknown')
self.client1.close()
result = await self.server1.stop()
self.assertTrue(result)
self.server1.listen_backend_port = 9111
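        # restart server1 on a different backend port to check that the new
        # address is picked up by the database ('show server' below)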
self.server1.create()
await self.server1.start(sleep=20)
await asyncio.sleep(35)
result = await self.client0.query('list servers status')
self.assertEqual(result.pop('servers'), [['running'], ['running']])
await self.client1.connect()
result = await self.client1.query('show server')
self.assertEqual(result.pop('data'), [
{'name': 'server', 'value': 'localhost:9111'}])
await self.db.add_replica(self.server2, 1)
await self.assertIsRunning(self.db, self.client0, timeout=35)
with self.assertRaisesRegex(
QueryError,
"Cannot remove server 'localhost:9010' "
"because this is the only server for pool 0"):
await self.client1.query('drop server "localhost:9010"')
with self.assertRaisesRegex(
QueryError,
"Cannot remove server 'localhost:9012' "
"because the server is still online.*"):
await self.client1.query('drop server "localhost:9012"')
result = await self.server1.stop()
self.assertTrue(result)
result = await self.server2.stop()
self.assertTrue(result)
await self.server1.start(sleep=30)
result = await self.client1.query('show status')
self.assertEqual(result.pop('data'), [
{'name': 'status', 'value': 'running | synchronizing'}])
result = await self.client0.query('drop server "localhost:9012"')
self.assertEqual(
result.pop('success_msg'),
"Successfully dropped server 'localhost:9012'.")
self.db.servers.remove(self.server2)
time.sleep(1)
for client in (self.client0, self.client1):
result = await client.query('list servers status')
self.assertEqual(result.pop('servers'), [['running'], ['running']])
await self.db.add_replica(self.server3, 1)
await self.assertIsRunning(self.db, self.client0, timeout=35)
self.client0.close()
self.client1.close()
# return False
if __name__ == '__main__':
parse_args()
run_test(TestServer())
|
the-stack_0_7095 | """ The MIT License (MIT)
Copyright (c) 2014 Kyle Hollins Wray, University of Massachusetts
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import os.path
def print_target(f, name, sdir, odir):
""" Prints a target rule to the specified file.
Parameters:
f -- The file where the output will be printed to.
name -- The name of the target rule.
sdir -- The source directory to compile this rule.
odir -- The object directory to store the .o files.
"""
f.write(name + ': ' + sdir + '/*.cpp \n')
print_commands(f, sdir, odir)
def print_commands(f, sdir, odir):
""" Prints a set of bash comands to create an object directory (if it
doesn't exists), compile all source files in a source directory and
move the object files to the object directory.
Parameters:
f -- The file where the output will be printed to.
sdir -- The directory where the source files are stored.
odir -- The directory to where the .o files will be stored.
"""
f.write('\tmkdir -p ' + odir + ' \n' +
'\t$(CC) $(CFLAGS) -c ' + sdir + '/*.cpp \n' +
'\tmv *.o ' + odir + '\n\n')
srcdir = 'librbr/src'
objdir = 'librbr/obj'
testdir = 'librbr_tests'
coresubdir = ['states', 'actions', 'observations',
'state_transitions', 'observation_transitions',
'policy', 'rewards','agents']
f = open('Makefile', 'w')
# Test if the 'librbr/obj', 'librbr_tests/obj', and 'tmp'
# directories exist and make them if they do not.
directories = ['librbr/obj', 'librbr_tests/obj', 'librbr_tests/tmp']
for d in directories:
if not os.path.exists(d):
os.makedirs(d)
# Printing flags and directory wildcards.
f.write('CC = g++\n' +
'CFLAGS = -std=c++11 -g\n' +
'COINFLAGS = `pkg-config --cflags --libs Coin` ' +
'`pkg-config --cflags --libs clp` ' +
'`pkg-config --cflags --libs osi` ' +
'`pkg-config --libs coinutils` ' +
'`pkg-config --cflags --libs osi-clp`\n\n')
# Printing target rule for tests.
f.write('tests: all.o ' +
testdir + '/src/core/*.cpp ' +
testdir + '/src/mdp/*.cpp ' +
#testdir + '/src/ssp/*.cpp ' +
testdir + '/src/pomdp/*.cpp ' +
#testdir + '/src/dec_pomdp/*.cpp' +
testdir + '/src/management/*.cpp ' +
testdir + '/src/utilities/*.cpp\n')
f.write('\tmkdir -p ' + testdir + '/obj\n')
f.write('\t$(CC) $(CFLAGS) -c -I.. ' +
testdir + '/src/core/*.cpp ' +
testdir + '/src/mdp/*.cpp ' +
#testdir + '/src/ssp/*.cpp ' +
testdir + '/src/pomdp/*.cpp ' +
#testdir + '/src/dec_pomdp/*.cpp' +
testdir + '/src/management/*.cpp ' +
testdir + '/src/utilities/*.cpp ' +
testdir + '/src/*.cpp\n')
f.write('\t$(CC) $(CFLAGS) $(COINFLAGS) -o perform_tests ' +
objdir + '/*.o *.o\n')
f.write('\tmv *.o ' + testdir + '/obj\n\n')
# Printing target rules for all object files.
for sd in coresubdir:
print_target(f, sd + '.o', srcdir + '/core/' + sd, objdir)
print_target(f, 'core.o', srcdir + '/core', objdir)
print_target(f, 'utilities.o', srcdir + '/utilities', objdir)
print_target(f, 'management.o', srcdir + '/management', objdir)
print_target(f, 'mdp.o', srcdir + '/mdp', objdir)
print_target(f, 'ssp.o', srcdir + '/ssp', objdir)
print_target(f, 'pomdp.o', srcdir + '/pomdp', objdir)
print_target(f, 'dec_pomdp.o', srcdir + '/dec_pomdp', objdir)
f.write('make all.o: ')
for sd in coresubdir:
f.write(sd + '.o ')
f.write('core.o utilities.o management.o mdp.o ssp.o pomdp.o dec_pomdp.o\n\n')
f.close()
|
the-stack_0_7096 | import glob
import cv2
import numpy as np
import pickle
def _initialize_object_points(n_horizontal, n_vertical):
objp = np.zeros((n_horizontal * n_vertical, 3), np.float32)
objp[:, :2] = np.mgrid[0:n_horizontal, 0:n_vertical].T.reshape(-1, 2)
return objp
def get_distortion_matrix(input_path, image_dims, grid_shape=(9, 6)):
objp = _initialize_object_points(grid_shape[0], grid_shape[1])
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob(input_path)
for index, file_name in enumerate(images):
img = cv2.imread(file_name)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (grid_shape[0], grid_shape[1]), None)
# If found, add object points, image points
if ret:
objpoints.append(objp)
imgpoints.append(corners)
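    # calibrateCamera returns the 3x3 camera matrix and distortion coefficients;
    # the per-view rotation and translation vectors are discarded here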
_, mtx, dist, _, _ = cv2.calibrateCamera(objpoints, imgpoints, image_dims, None, None)
return mtx, dist
def setup_undistort(calibration_matrix_path):
distortion_matrix = pickle.load(open(calibration_matrix_path, "rb"))
mtx = distortion_matrix["mtx"]
dist = distortion_matrix["dist"]
return lambda img: cv2.undistort(img, mtx, dist, None, mtx)
|
the-stack_0_7097 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
import ggrc
import ggrc.builder
import ggrc.services
import json
import random
import time
from datetime import datetime
from ggrc import db
from ggrc.models.mixins import Base
from ggrc.services.common import Resource
from integration.ggrc import TestCase
from urlparse import urlparse
from wsgiref.handlers import format_date_time
from nose.plugins.skip import SkipTest
class ServicesTestMockModel(Base, ggrc.db.Model):
__tablename__ = 'test_model'
foo = db.Column(db.String)
code = db.Column(db.String, unique=True)
# REST properties
_publish_attrs = ['modified_by_id', 'foo', 'code']
_update_attrs = ['foo', 'code']
URL_MOCK_COLLECTION = '/api/mock_resources'
URL_MOCK_RESOURCE = '/api/mock_resources/{0}'
Resource.add_to(
ggrc.app.app, URL_MOCK_COLLECTION, model_class=ServicesTestMockModel)
COLLECTION_ALLOWED = ['HEAD', 'GET', 'POST', 'OPTIONS']
RESOURCE_ALLOWED = ['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS']
class TestResource(TestCase):
def setUp(self):
super(TestResource, self).setUp()
# Explicitly create test tables
if not ServicesTestMockModel.__table__.exists(db.engine):
ServicesTestMockModel.__table__.create(db.engine)
with self.client.session_transaction() as session:
session['permissions'] = {
"__GGRC_ADMIN__": {"__GGRC_ALL__": {"contexts": [0]}}
}
def tearDown(self):
super(TestResource, self).tearDown()
# Explicitly destroy test tables
# Note: This must be after the 'super()', because the session is
# closed there. (And otherwise it might stall due to locks).
if ServicesTestMockModel.__table__.exists(db.engine):
ServicesTestMockModel.__table__.drop(db.engine)
def mock_url(self, resource=None):
if resource is not None:
return URL_MOCK_RESOURCE.format(resource)
return URL_MOCK_COLLECTION
def mock_json(self, model):
format = '%Y-%m-%dT%H:%M:%S'
updated_at = unicode(model.updated_at.strftime(format))
created_at = unicode(model.created_at.strftime(format))
return {
u'id': int(model.id),
u'selfLink': unicode(URL_MOCK_RESOURCE.format(model.id)),
u'type': unicode(model.__class__.__name__),
u'modified_by': {
u'href': u'/api/people/1',
u'id': model.modified_by_id,
u'type': 'Person',
u'context_id': None
} if model.modified_by_id is not None else None,
u'modified_by_id': int(model.modified_by_id),
u'updated_at': updated_at,
u'created_at': created_at,
u'context':
{u'id': model.context_id}
if model.context_id is not None else None,
u'foo': (unicode(model.foo) if model.foo else None),
}
def mock_model(self, id=None, modified_by_id=1, **kwarg):
if 'id' not in kwarg:
kwarg['id'] = random.randint(0, 999999999)
if 'modified_by_id' not in kwarg:
kwarg['modified_by_id'] = 1
mock = ServicesTestMockModel(**kwarg)
ggrc.db.session.add(mock)
ggrc.db.session.commit()
return mock
def http_timestamp(self, timestamp):
return format_date_time(time.mktime(timestamp.utctimetuple()))
def get_location(self, response):
"""Ignore the `http://localhost` prefix of the Location"""
return response.headers['Location'][16:]
def assertRequiredHeaders(self, response,
headers={'Content-Type': 'application/json'}):
self.assertIn('Etag', response.headers)
self.assertIn('Last-Modified', response.headers)
self.assertIn('Content-Type', response.headers)
for k, v in headers.items():
self.assertEqual(v, response.headers.get(k))
def assertAllow(self, response, allowed=None):
self.assert405(response)
self.assertIn('Allow', response.headers)
if allowed:
self.assertItemsEqual(allowed, response.headers['Allow'].split(', '))
def assertOptions(self, response, allowed):
self.assertIn('Allow', response.headers)
self.assertItemsEqual(allowed, response.headers['Allow'].split(', '))
def headers(self, *args, **kwargs):
ret = list(args)
ret.append(('X-Requested-By', 'Unit Tests'))
ret.extend(kwargs.items())
return ret
def test_X_Requested_By_required(self):
response = self.client.post(self.mock_url())
self.assert400(response)
response = self.client.put(self.mock_url() + '/1', data='blah')
self.assert400(response)
response = self.client.delete(self.mock_url() + '/1')
self.assert400(response)
def test_empty_collection_get(self):
response = self.client.get(self.mock_url(), headers=self.headers())
self.assert200(response)
def test_missing_resource_get(self):
response = self.client.get(self.mock_url('foo'), headers=self.headers())
self.assert404(response)
@SkipTest
def test_collection_get(self):
date1 = datetime(2013, 4, 17, 0, 0, 0, 0)
date2 = datetime(2013, 4, 20, 0, 0, 0, 0)
mock1 = self.mock_model(
modified_by_id=42, created_at=date1, updated_at=date1)
mock2 = self.mock_model(
modified_by_id=43, created_at=date2, updated_at=date2)
response = self.client.get(self.mock_url(), headers=self.headers())
self.assert200(response)
self.assertRequiredHeaders(
response,
{
'Last-Modified': self.http_timestamp(date2),
'Content-Type': 'application/json',
})
self.assertIn('test_model_collection', response.json)
self.assertEqual(2, len(response.json['test_model_collection']))
self.assertIn('selfLink', response.json['test_model_collection'])
self.assertIn('test_model', response.json['test_model_collection'])
collection = response.json['test_model_collection']['test_model']
self.assertEqual(2, len(collection))
self.assertDictEqual(self.mock_json(mock2), collection[0])
self.assertDictEqual(self.mock_json(mock1), collection[1])
@SkipTest
def test_resource_get(self):
date1 = datetime(2013, 4, 17, 0, 0, 0, 0)
mock1 = self.mock_model(
modified_by_id=42, created_at=date1, updated_at=date1)
response = self.client.get(self.mock_url(mock1.id), headers=self.headers())
self.assert200(response)
self.assertRequiredHeaders(
response,
{
'Last-Modified': self.http_timestamp(date1),
'Content-Type': 'application/json',
})
self.assertIn('services_test_mock_model', response.json)
self.assertDictEqual(self.mock_json(mock1),
response.json['services_test_mock_model'])
def test_collection_put(self):
self.assertAllow(
self.client.put(URL_MOCK_COLLECTION, headers=self.headers()),
COLLECTION_ALLOWED)
def test_collection_delete(self):
self.assertAllow(
self.client.delete(URL_MOCK_COLLECTION, headers=self.headers()),
COLLECTION_ALLOWED)
def test_collection_post_successful(self):
data = json.dumps(
{'services_test_mock_model': {'foo': 'bar', 'context': None}})
response = self.client.post(
URL_MOCK_COLLECTION,
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assertStatus(response, 201)
self.assertIn('Location', response.headers)
response = self.client.get(
self.get_location(response), headers=self.headers())
self.assert200(response)
self.assertIn('Content-Type', response.headers)
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertIn('services_test_mock_model', response.json)
self.assertIn('foo', response.json['services_test_mock_model'])
self.assertEqual('bar', response.json['services_test_mock_model']['foo'])
# check the collection, too
response = self.client.get(URL_MOCK_COLLECTION, headers=self.headers())
self.assert200(response)
self.assertEqual(
1, len(response.json['test_model_collection']['test_model']))
self.assertEqual(
'bar', response.json['test_model_collection']['test_model'][0]['foo'])
def test_collection_post_successful_single_array(self):
data = json.dumps(
[{'services_test_mock_model': {'foo': 'bar', 'context': None}}])
response = self.client.post(
URL_MOCK_COLLECTION,
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assert200(response)
self.assertEqual(type(response.json), list)
self.assertEqual(len(response.json), 1)
response = self.client.get(URL_MOCK_COLLECTION, headers=self.headers())
self.assert200(response)
self.assertEqual(
1, len(response.json['test_model_collection']['test_model']))
self.assertEqual(
'bar', response.json['test_model_collection']['test_model'][0]['foo'])
def test_collection_post_successful_multiple(self):
data = json.dumps([
{'services_test_mock_model': {'foo': 'bar1', 'context': None}},
{'services_test_mock_model': {'foo': 'bar2', 'context': None}},
])
response = self.client.post(
URL_MOCK_COLLECTION,
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assert200(response)
self.assertEqual(type(response.json), list)
self.assertEqual(len(response.json), 2)
self.assertEqual(
'bar1', response.json[0][1]['services_test_mock_model']['foo'])
self.assertEqual(
'bar2', response.json[1][1]['services_test_mock_model']['foo'])
response = self.client.get(URL_MOCK_COLLECTION, headers=self.headers())
self.assert200(response)
self.assertEqual(
2, len(response.json['test_model_collection']['test_model']))
def test_collection_post_successful_multiple_with_errors(self):
data = json.dumps([
{'services_test_mock_model':
{'foo': 'bar1', 'code': 'f1', 'context': None}},
{'services_test_mock_model':
{'foo': 'bar1', 'code': 'f1', 'context': None}},
{'services_test_mock_model':
{'foo': 'bar2', 'code': 'f2', 'context': None}},
{'services_test_mock_model':
{'foo': 'bar2', 'code': 'f2', 'context': None}},
])
response = self.client.post(
URL_MOCK_COLLECTION,
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assertEqual(403, response.status_code)
self.assertEqual([201, 403, 201, 403], [i[0] for i in response.json])
self.assertEqual(
'bar1', response.json[0][1]['services_test_mock_model']['foo'])
self.assertEqual(
'bar2', response.json[2][1]['services_test_mock_model']['foo'])
response = self.client.get(URL_MOCK_COLLECTION, headers=self.headers())
self.assert200(response)
self.assertEqual(
2, len(response.json['test_model_collection']['test_model']))
def test_collection_post_bad_request(self):
response = self.client.post(
URL_MOCK_COLLECTION,
content_type='application/json',
data='This is most definitely not valid content.',
headers=self.headers(),
)
self.assert400(response)
def test_collection_post_bad_content_type(self):
response = self.client.post(
URL_MOCK_COLLECTION,
content_type='text/plain',
data="Doesn't matter, now does it?",
headers=self.headers(),
)
self.assertStatus(response, 415)
def test_put_successful(self):
mock = self.mock_model(foo='buzz')
response = self.client.get(self.mock_url(mock.id), headers=self.headers())
self.assert200(response)
self.assertRequiredHeaders(response)
obj = response.json
self.assertEqual('buzz', obj['services_test_mock_model']['foo'])
obj['services_test_mock_model']['foo'] = 'baz'
url = urlparse(obj['services_test_mock_model']['selfLink']).path
original_headers = dict(response.headers)
# wait a moment so that we can be sure to get differing Last-Modified
# after the put - the lack of latency means it's easy to end up with
# the same HTTP timestamp thanks to the standard's lack of precision.
time.sleep(1.1)
response = self.client.put(
url,
data=json.dumps(obj),
headers=self.headers(
('If-Unmodified-Since', original_headers['Last-Modified']),
('If-Match', original_headers['Etag']),
),
content_type='application/json',
)
self.assert200(response)
response = self.client.get(url, headers=self.headers())
self.assert200(response)
self.assertNotEqual(
original_headers['Last-Modified'], response.headers['Last-Modified'])
self.assertNotEqual(
original_headers['Etag'], response.headers['Etag'])
self.assertEqual('baz', response.json['services_test_mock_model']['foo'])
def test_put_bad_request(self):
mock = self.mock_model(foo='tough')
response = self.client.get(self.mock_url(mock.id), headers=self.headers())
self.assert200(response)
self.assertRequiredHeaders(response)
url = urlparse(response.json['services_test_mock_model']['selfLink']).path
response = self.client.put(
url,
content_type='application/json',
data='This is most definitely not valid content.',
headers=self.headers(
('If-Unmodified-Since', response.headers['Last-Modified']),
('If-Match', response.headers['Etag']))
)
self.assert400(response)
@SkipTest
def test_put_and_delete_conflict(self):
mock = self.mock_model(foo='mudder')
response = self.client.get(self.mock_url(mock.id), headers=self.headers())
self.assert200(response)
self.assertRequiredHeaders(response)
obj = response.json
obj['services_test_mock_model']['foo'] = 'rocks'
mock = ggrc.db.session.query(ServicesTestMockModel).filter(
ServicesTestMockModel.id == mock.id).one()
mock.foo = 'dirt'
ggrc.db.session.add(mock)
ggrc.db.session.commit()
url = urlparse(obj['services_test_mock_model']['selfLink']).path
original_headers = dict(response.headers)
response = self.client.put(
url,
data=json.dumps(obj),
headers=self.headers(
('If-Unmodified-Since', original_headers['Last-Modified']),
('If-Match', original_headers['Etag'])
),
content_type='application/json',
)
self.assertStatus(response, 409)
response = self.client.delete(
url,
headers=self.headers(
('If-Unmodified-Since', original_headers['Last-Modified']),
('If-Match', original_headers['Etag'])
),
content_type='application/json',
)
self.assertStatus(response, 409)
@SkipTest
def test_put_and_delete_missing_precondition(self):
mock = self.mock_model(foo='tricky')
response = self.client.get(self.mock_url(mock.id), headers=self.headers())
self.assert200(response)
obj = response.json
obj['services_test_mock_model']['foo'] = 'strings'
url = urlparse(obj['services_test_mock_model']['selfLink']).path
response = self.client.put(
url,
data=json.dumps(obj),
content_type='application/json',
headers=self.headers(),
)
self.assertStatus(response, 428)
response = self.client.delete(url, headers=self.headers())
self.assertStatus(response, 428)
@SkipTest
def test_delete_successful(self):
mock = self.mock_model(foo='delete me')
response = self.client.get(self.mock_url(mock.id), headers=self.headers())
self.assert200(response)
url = urlparse(response.json['services_test_mock_model']['selfLink']).path
response = self.client.delete(
url,
headers=self.headers(
('If-Unmodified-Since', response.headers['Last-Modified']),
('If-Match', response.headers['Etag']),
),
)
self.assert200(response)
response = self.client.get(url, headers=self.headers())
# 410 would be nice! But, requires a tombstone.
self.assert404(response)
def test_options(self):
mock = self.mock_model()
response = self.client.open(
self.mock_url(mock.id), method='OPTIONS', headers=self.headers())
self.assertOptions(response, RESOURCE_ALLOWED)
def test_collection_options(self):
response = self.client.open(
self.mock_url(), method='OPTIONS', headers=self.headers())
self.assertOptions(response, COLLECTION_ALLOWED)
def test_get_bad_accept(self):
mock1 = self.mock_model(foo='baz')
response = self.client.get(
self.mock_url(mock1.id),
headers=self.headers(('Accept', 'text/plain')))
self.assertStatus(response, 406)
self.assertEqual('text/plain', response.headers.get('Content-Type'))
self.assertEqual('application/json', response.data)
def test_collection_get_bad_accept(self):
response = self.client.get(
URL_MOCK_COLLECTION,
headers=self.headers(('Accept', 'text/plain')))
self.assertStatus(response, 406)
self.assertEqual('text/plain', response.headers.get('Content-Type'))
self.assertEqual('application/json', response.data)
def test_get_if_none_match(self):
mock1 = self.mock_model(foo='baz')
response = self.client.get(
self.mock_url(mock1.id),
headers=self.headers(('Accept', 'application/json')))
self.assert200(response)
previous_headers = dict(response.headers)
response = self.client.get(
self.mock_url(mock1.id),
headers=self.headers(
('Accept', 'application/json'),
('If-None-Match', previous_headers['Etag']),
),
)
self.assertStatus(response, 304)
self.assertIn('Etag', response.headers)
@SkipTest
def test_collection_get_if_non_match(self):
self.mock_model(foo='baz')
response = self.client.get(
URL_MOCK_COLLECTION,
headers=self.headers(('Accept', 'application/json')))
self.assert200(response)
previous_headers = dict(response.headers)
response = self.client.get(
URL_MOCK_COLLECTION,
headers=self.headers(
('Accept', 'application/json'),
('If-None-Match', previous_headers['Etag']),
),
)
self.assertStatus(response, 304)
self.assertIn('Etag', response.headers)
|
the-stack_0_7099 | from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import gridspec
class Plotter:
def __init__(self, target, glide_angle_deg, bounds_radius_km, target_spawn_area_radius_km,
target_radius_km, aircraft_initial_position, runway_angle=90):
self.target_position = target
self.bounds_radius_km = bounds_radius_km
self.target_spawn_area_radius_km = target_spawn_area_radius_km
self.target_radius_km = target_radius_km
self.runway_angle_deg = runway_angle
self.aircraft_initial_position = aircraft_initial_position
self.glide_angle_deg = glide_angle_deg
print("#### Plotter ####")
print("target_position", self.target_position)
print("bounds_radius_km", self.bounds_radius_km)
print("target_spawn_area_radius_km", self.target_spawn_area_radius_km)
print("target_radius_km", self.target_radius_km)
print("runway_angle_deg", self.runway_angle_deg)
print("aircraft_initial_position", self.aircraft_initial_position)
print("glide_angle_deg", self.glide_angle_deg)
def render_rgb_array_simple(self, infos) -> np.array:
xs = []
ys = []
in_area = []
in_area_colors = []
for info in infos:
xs.append(info["aircraft_y"])
ys.append(info["aircraft_x"])
in_area.append(info["in_area"])
if info["in_area"] == True:
in_area_colors.append([255, 0, 0])
else:
in_area_colors.append([0, 0, 255])
figure = plt.figure(figsize=[10,9])
canvas = FigureCanvas(figure)
ax = plt.subplot()
ax.set_xlabel('x')
ax.set_ylabel('y')
lim_scale = 2
ax.set_xlim([-lim_scale*self.bounds_radius_km + self.aircraft_initial_position.x,
lim_scale*self.bounds_radius_km + self.aircraft_initial_position.x])
        ax.set_ylim([-lim_scale*self.bounds_radius_km + self.aircraft_initial_position.y,
                     lim_scale*self.bounds_radius_km + self.aircraft_initial_position.y])
bounds = plt.Circle((self.aircraft_initial_position.x, self.aircraft_initial_position.y),
self.bounds_radius_km, fill=False, color='red')
target = plt.Circle((self.target_position.x + self.aircraft_initial_position.x,
self.target_position.y + self.aircraft_initial_position.y),
self.target_radius_km, fill=False, color='green')
target_spawn_area = plt.Circle((self.aircraft_initial_position.x, self.aircraft_initial_position.y),
self.target_spawn_area_radius_km, fill=False, color='grey')
ax.set_aspect(1)
ax.add_artist(bounds)
ax.add_artist(target)
ax.add_artist(target_spawn_area)
ax.scatter(xs, ys, c=np.array(in_area)/255.0, s=0.1)
canvas.draw()
rendered = np.array(canvas.renderer.buffer_rgba())
plt.close('all')
return rendered
def render_rgb_array(self, infos) -> np.array:
xs = []
ys = []
track_angles = []
rewards = []
time_steps = []
runway_angles = []
runway_angle_errors = []
runway_angle_thresholds = []
aircraft_true_headings = []
track_errors = []
vertical_track_errors = []
cross_track_errors = []
pitches = []
gammas = []
alphas = []
altitude_rates_fps = []
altitudes = []
altitude_errors = []
aircraft_zs = []
in_area = []
winds_north_fps = []
winds_east_fps = []
drifts = []
in_area_colors = []
for info in infos:
xs.append(info["aircraft_y"])
ys.append(info["aircraft_x"])
track_angles.append(info["aircraft_track_angle_deg"])
drifts.append(info["drift_deg"])
aircraft_true_headings.append(info["aircraft_heading_true_deg"])
rewards.append(info["reward"])
winds_north_fps.append(info["total_wind_north_fps"])
winds_east_fps.append(info["total_wind_east_fps"])
altitude_rates_fps.append(info["altitude_rate_fps"])
runway_angle_errors.append(info["runway_angle_error"])
runway_angle_thresholds.append(info["runway_angle_threshold_deg"])
time_steps.append(info["simulation_time_step"])
runway_angles.append(info["runway_angle"])
altitudes.append(info["altitude"])
pitches.append(np.degrees(info["pitch_rad"]))
gammas.append(info["gamma_deg"])
alphas.append(np.degrees(info["alpha_rad"]))
aircraft_zs.append(info["aircraft_z"])
altitude_errors.append(info["altitude_error"])
track_errors.append(info["track_error"])
vertical_track_errors.append(info["vertical_track_error"])
cross_track_errors.append(info["cross_track_error"])
in_area.append(info["in_area"])
if info["in_area"] == True:
in_area_colors.append([255, 0, 0])
else:
in_area_colors.append([0, 0, 255])
# current_time_step = len(rewards)
#
figure = plt.figure(figsize=[10,9])
gs = gridspec.GridSpec(4, 2, width_ratios=[2, 2])
canvas = FigureCanvas(figure)
ax1 = plt.subplot(gs[0])
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax2 = plt.subplot(gs[2])
ax2.set_xlabel('reward')
ax3 = plt.subplot(gs[3])
ax3.set_xlabel('track error')
ax4 = plt.subplot(gs[1])
ax4.set_axis_off()
ax5 = plt.subplot(gs[4])
ax5.set_xlabel('altitude (ft)')
ax6 = plt.subplot(gs[5])
ax6.set_xlabel('wind east & north (fps)')
ax1.set_xlim([-self.bounds_radius_km + self.aircraft_initial_position.x,
self.bounds_radius_km + self.aircraft_initial_position.x])
ax1.set_ylim([-self.bounds_radius_km + self.aircraft_initial_position.y,
self.bounds_radius_km + self.aircraft_initial_position.y])
bounds = plt.Circle((self.aircraft_initial_position.x, self.aircraft_initial_position.y),
self.bounds_radius_km, fill=False, color='red')
target = plt.Circle((self.target_position.x + self.aircraft_initial_position.x,
self.target_position.y + self.aircraft_initial_position.y),
self.target_radius_km, fill=False, color='green')
target_spawn_area = plt.Circle((self.aircraft_initial_position.x, self.aircraft_initial_position.y),
self.target_spawn_area_radius_km, fill=False, color='grey')
text = plt.Text(x=0, y=0, text=f'angle error: {np.round(runway_angle_errors[-1], 2)},'
f'runway_angle: {np.round(self.runway_angle_deg, 2)},'
f'altitude error: {np.round(altitude_errors[-1], 2)} \n'
f'wind north: {np.round(winds_north_fps[-1], 2)}, '
f'wind east: {np.round(winds_east_fps[-1], 2)} \n'
f'track angle: {np.round(track_errors[-1], 2)} \n'
f'drift angle: {np.round(drifts[-1], 2)} \n'
f'rewards {np.round(np.sum(rewards), 2)}')
ax1.set_aspect(1)
ax1.add_artist(bounds)
ax1.add_artist(target)
ax1.add_artist(target_spawn_area)
ax4.add_artist(text)
# See https://stackoverflow.com/questions/33287156/specify-color-of-each-point-in-scatter-plot-matplotlib
ax1.scatter(xs, ys, c=np.array(in_area)/255.0, s=0.1)
ax2.plot(time_steps, rewards, c='red')
ax3.plot(time_steps, track_errors)
ax3.plot(time_steps, cross_track_errors)
ax3.plot(time_steps, vertical_track_errors)
ax3.legend(["track", "cross", "vertical"])
ax5.plot(time_steps, altitudes)
ax6.plot(time_steps, winds_east_fps)
ax6.plot(time_steps, winds_north_fps)
ax6.legend(["wind east", "wind north"])
canvas.draw()
rendered = np.array(canvas.renderer.buffer_rgba())
plt.close('all')
return rendered
def plot_html(self, infos, path="./htmls/test.html"):
xs = []
ys = []
track_angles = []
rewards = []
time_steps = []
runway_angles = []
runway_angle_errors = []
runway_angle_thresholds = []
aircraft_true_headings = []
track_errors = []
altitude_rates_fps = []
altitudes = []
altitude_errors = []
aircraft_zs = []
in_area = []
in_area_colors = []
for info in infos:
xs.append(info["aircraft_y"])
ys.append(info["aircraft_x"])
track_angles.append(info["aircraft_track_angle_deg"])
aircraft_true_headings.append(info["aircraft_heading_true_deg"])
rewards.append(info["reward"])
altitude_rates_fps.append(info["altitude_rate_fps"])
runway_angle_errors.append(info["runway_angle_error"])
runway_angle_thresholds.append(info["runway_angle_threshold_deg"])
time_steps.append(info["simulation_time_step"])
runway_angles.append(info["runway_angle"])
altitudes.append(info["altitude"])
aircraft_zs.append(info["aircraft_z"])
altitude_errors.append(info["altitude_error"])
track_errors.append(info["track_error"])
in_area.append(info["in_area"])
if info["in_area"] == True:
in_area_colors.append([255, 0, 0])
else:
in_area_colors.append([0, 0, 255])
fig = make_subplots(
rows=3,
cols=2,
column_widths=[0.6, 0.4],
row_heights=[0.6, 0.4, 0.6],
specs=[[{"type": "scatter3d", "rowspan": 2}, {"type": "scatter"}],
[None, {"type": "scatter"}],
[{"type": "scatter"}, {"type": "scatter"}]]
)
fig.add_trace(
go.Scatter3d(
x=xs,
y=ys,
z=aircraft_zs,
customdata=time_steps,
hovertemplate='x: %{x}' + '<br>y: %{y}<br>' + 'altitude: %{z}<br>' + 'time: %{customdata} s<br>',
mode='markers',
marker=dict(
size=2,
color=aircraft_zs, # set color to an array/list of desired values
colorscale='Viridis', # choose a colorscale
opacity=1
)
),
            row=1, col=1  # the 3-D trace occupies the single grid cell at (1, 1); row/col must be ints
)
fig.write_html(path)
|
the-stack_0_7100 | import sys
import os
import crawler
import parser
import json
import datetime
if('COURT' in os.environ):
court = os.environ['COURT']
else:
sys.stderr.write("Invalid arguments, missing parameter: 'COURT'.\n")
os._exit(1)
if('YEAR' in os.environ):
year = os.environ['YEAR']
else:
sys.stderr.write("Invalid arguments, missing parameter: 'YEAR'.\n")
os._exit(1)
if('MONTH' in os.environ):
month = os.environ['MONTH']
month = month.zfill(2)
else:
sys.stderr.write("Invalid arguments, missing parameter: 'MONTH'.\n")
os._exit(1)
if('DRIVER_PATH' in os.environ):
driver_path = os.environ['DRIVER_PATH']
else:
sys.stderr.write("Invalid arguments, missing parameter: 'DRIVER_PATH'.\n")
os._exit(1)
if('OUTPUT_FOLDER' in os.environ):
output_path = os.environ['OUTPUT_FOLDER']
else:
output_path = "/output"
if('GIT_COMMIT' in os.environ):
crawler_version = os.environ['GIT_COMMIT']
else:
sys.stderr.write("crawler_version cannot be empty")
os._exit(1)
now = datetime.datetime.now()
current_year = now.year
current_month = now.month
# Main execution
def main():
file_names = crawler.crawl(court, year, month, driver_path, output_path)
employees = parser.parse(file_names)
cr = {
'aid': court.lower(),
'month': month,
'year': year,
'files': file_names,
'crawler': {
'id': court.lower(),
'version': crawler_version,
},
'employees': employees,
# https://hackernoon.com/today-i-learned-dealing-with-json-datetime-when-unmarshal-in-golang-4b281444fb67
'timestamp': now.astimezone().replace(microsecond=0).isoformat(),
}
print(json.dumps({'cr': cr}, ensure_ascii=False))
if __name__ == '__main__':
main()
|
the-stack_0_7105 | # read_numbers.py
#
# Sample program to read numbers from a file, count them and sum them.
# Assumes each line in the file contains a valid number.
# CSC 110
# Winter 2012
# open the file 'numbers.txt' for reading
infile = open('numbers.txt', 'r')
total = 0 # initialization
count = 0 # initialization
line = infile.readline() # read in first line (initialization)
# as long as 'line' isn't an empty string,
# we haven't reached the end of the file
while line != '':
value = float(line) # convert from string to number
print(value)
total += value
count += 1
line = infile.readline() # this is the update -- read another line
infile.close() # close the connection to the file
print('There were ' + str(count) + ' numbers, totaling ' + str(total))
|
the-stack_0_7106 | """."""
import networkx as nx
from regraph import Rule
from regraph import NXHierarchy, NXGraph
# from regraph import print_graph
# from regraph import (HierarchyError)
import regraph.primitives as prim
class TestRelations(object):
def __init__(self):
hierarchy = NXHierarchy()
base = NXGraph()
prim.add_nodes_from(base, [
("circle", {"a": {1, 2, 3}}),
("square", {"b": {1, 2, 3}})
])
prim.add_edges_from(base, [
("circle", "circle"),
("square", "square"),
("circle", "square", {"c": {5, 6, 7}}),
("square", "circle")
])
hierarchy.add_graph("base", base)
a1 = NXGraph()
prim.add_nodes_from(a1, [
("black_circle", {"a": {1}}),
("white_circle", {"a": {2}}),
("black_square", {"b": {1}}),
("white_square", {"b": {1}})
])
prim.add_edges_from(a1, [
("white_circle", "white_circle"),
("white_circle", "white_square", {"c": {5}}),
("black_circle", "black_square"),
("black_square", "white_square"),
("black_circle", "white_square", {"c": {6}})
])
hierarchy.add_graph("a1", a1)
hierarchy.add_typing(
"a1", "base",
{
"black_circle": "circle",
"white_circle": "circle",
"white_square": "square",
"black_square": "square"
}
)
a2 = NXGraph()
prim.add_nodes_from(a2, [
("right_circle", {"a": {1, 2}}),
("middle_square", {"b": {1}}),
("left_circle", {"a": 1})
])
prim.add_edges_from(a2, [
("right_circle", "middle_square", {"c": {5, 6, 7}}),
("left_circle", "middle_square", {"c": {6, 7}})
])
hierarchy.add_graph("a2", a2)
hierarchy.add_typing(
"a2", "base",
{
"right_circle": "circle",
"middle_square": "square",
"left_circle": "circle"
}
)
self.hierarchy = hierarchy
def test_add_relation(self):
self.hierarchy.add_relation(
"a2", "a1",
{
"right_circle": {"white_circle", "black_circle"},
"middle_square": "white_square",
"left_circle": "black_circle"
},
{"name": "Some relation"})
g, l, r = self.hierarchy.relation_to_span(
"a1", "a2", edges=True, attrs=True)
# print_graph(g)
# print(l)
# print(r)
# print(self.hierarchy)
# self.hierarchy.remove_graph("a1")
# print(self.hierarchy.relation)
lhs = NXGraph()
lhs.add_nodes_from(["s", "c"])
rule = Rule.from_transform(lhs)
rule.inject_clone_node("s")
# instances = self.hierarchy.find_matching(
# "base",
# rule.lhs
# )
self.hierarchy.rewrite(
"base", rule, {"s": "square", "c": "circle"})
# g, l, r = new_hierarchy.relation_to_span("a1", "a2")
# print_graph(g)
# print(l)
# print(r)
|
the-stack_0_7107 | # Run Validation test. Use functions to test run and get output
import util
import time
def create_service(nspc, image):
port = "-p 80/http"
fullName = util.rioRun(nspc, port, image)
return fullName
def stage_service(image, fullName, version):
util.rioStage(image, fullName, version)
return
def get_app_info(fullName, field):
time.sleep(10)
inspect = util.rioInspect(fullName, field)
return inspect
def get_version_endpoint(fullName, version):
fullNameVersion = (f"{fullName}:{version}")
time.sleep(10)
endpoint = "status.endpoints[0]"
print(f"{fullNameVersion}")
inspect = util.rioInspect(fullNameVersion, endpoint)
return inspect
def test_rio_app_endpoint(nspc):
image = "ibuildthecloud/demo:v1"
image2 = "ibuildthecloud/demo:v3"
fullName = create_service(nspc, image)
stage_service(image2, fullName, "v3")
appEndpoint = get_app_info(fullName, "status.endpoints[0]")
results = util.run(f"curl {appEndpoint}")
print(f"{results}")
assert results == 'Hello World'
def test_rio_svc_endpoint1(nspc):
image = "ibuildthecloud/demo:v1"
image2 = "ibuildthecloud/demo:v3"
fullName = create_service(nspc, image)
stage_service(image2, fullName, "v3")
svcEndpoint = get_version_endpoint(fullName, "v0")
svcEndpoint2 = get_version_endpoint(fullName, "v3")
results1 = util.run(f"curl {svcEndpoint}")
results2 = util.run(f'curl {svcEndpoint2}')
print(f"{results1}")
assert results1 == 'Hello World'
assert results2 == 'Hello World v3'
|
the-stack_0_7110 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'AdDestinationTypeEnum',
},
)
class AdDestinationTypeEnum(proto.Message):
r"""Container for enumeration of Google Ads destination types.
"""
class AdDestinationType(proto.Enum):
r"""Enumerates Google Ads destination types"""
UNSPECIFIED = 0
UNKNOWN = 1
NOT_APPLICABLE = 2
WEBSITE = 3
APP_DEEP_LINK = 4
APP_STORE = 5
PHONE_CALL = 6
MAP_DIRECTIONS = 7
LOCATION_LISTING = 8
MESSAGE = 9
LEAD_FORM = 10
YOUTUBE = 11
UNMODELED_FOR_CONVERSIONS = 12
__all__ = tuple(sorted(__protobuf__.manifest))
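# Hedged usage sketch (not part of the generated module): proto-plus enums subclass
# Python's IntEnum, so members can be read by name and by numeric value as shown below.
def _example_enum_usage():
    # WESITE-style members map to the numeric values defined above (WEBSITE = 3)
    dest = AdDestinationTypeEnum.AdDestinationType.WEBSITE
    print(dest.name, dest.value)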
|
the-stack_0_7111 | import numpy
import theano
import theano.tensor as tensor
from nmt import RNNsearch
from binmt import BiRNNsearch
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import tools
from layer import LayerFactory
from config import *
from optimizer import adadelta, SGD, adam, adam_slowstart
from data import DataCollection, getbatch
from mrt_utils import getMRTBatch
import cPickle
import json
import argparse
import signal
import time
import datetime
import logging
import types
parser = argparse.ArgumentParser("the script for training the NMT model")
parser.add_argument('-c', '--config', help = 'path to configuration file', required = True)
parser.add_argument('--debug', action = 'store_true', help = 'set verbose level for debugging')
parser.add_argument('--map', help = 'path to the mapping file')
parser.add_argument('--save-all', action = 'store_true', help = 'save all intermediate models')
args = parser.parse_args()
if args.debug:
logging.basicConfig(level = logging.DEBUG,
format = '[%(asctime)s %(levelname)s] %(message)s',
datefmt = '%d %b %H:%M:%S')
logging.debug('training with debug info')
else:
logging.basicConfig(level = logging.INFO,
format = '[%(asctime)s %(levelname)s] %(message)s',
datefmt = '%d %b %H:%M:%S')
if __name__ == '__main__':
# initialize config
config = config()
if args.config:
config = update_config(config, load_config(open(args.config, 'r').read()))
print_config(config)
if config['MRT']:
config['batchsize'] = 1 # the mini-batch size must be 1 for MRT
mapping = None
if args.map:
mapping = cPickle.load(open(args.map, 'r'))
logging.info('STEP 2: Training')
# prepare data
logging.info('STEP 2.1: Loading training data')
data = DataCollection(config)
logging.info('Done!\n')
# build model
logging.info('STEP 2.2: Building model')
model = eval(config['model'])(config)
model.build()
logging.info('Done!\n')
logging.info('STEP 2.3: Building optimizer')
trainer = eval(config['optimizer'])(config, model.creater.params)
update_grads, update_params = trainer.build(model.cost, model.inputs)
logging.info('Done!\n')
# load checkpoint
logging.info('STEP 2.4: Loading checkpoint')
data.load_status(config['checkpoint_status'])
model.load(config['checkpoint_model'])
logging.info('Done!\n')
# train
logging.info('STEP 2.5: Online training')
while data.num_iter < config['max_iter']:
try:
st = time.time()
data.num_iter += 1
trainx, trainy = data.next()
x, xmask, y, ymask = getbatch(trainx, trainy, config)
if 'MRT' in config and config['MRT'] is True:
x, xmask, y, ymask, MRTLoss = getMRTBatch(x, xmask, y, ymask, config, model, data)
if config['semi_learning']:
xm, ym = data.next_mono()
xm, xmask, ym, ymask = getbatch(xm, ym, config)
x, xmask, y, ymask, valid = model.get_inputs_batch(x, y, xm, ym)
# saving checkpoint
if data.num_iter % config['checkpoint_freq'] == 0:
model.save(config['checkpoint_model'], data = data, mapping = mapping)
data.save_status(config['checkpoint_status'])
# saving and validating intermediate models
if config['save']:
if data.num_iter % config['save_freq'] == 0:
if args.save_all:
logging.info('Saving an intermediate model')
model.save(config['save_path'] + '/model_iter' + str(data.num_iter) + '.npz', data = data, mapping = mapping)
logging.info('Validating the model at iteration ' + str(data.num_iter))
output_path = config['valid_dir'] + '/iter_' + str(data.num_iter) + '.trans'
valid_input = open(config['valid_src'], 'r')
valid_output = open(output_path, 'w')
line = valid_input.readline()
valid_num = 0
# translating
while line != '':
line = line.strip()
result = model.translate(data.toindex_source(line.split(' ')))
print >> valid_output, data.print_target(numpy.asarray(result))
valid_num += 1
if valid_num % 100 == 0:
logging.info('%d sentences translated' % valid_num)
line = valid_input.readline()
valid_output.close()
valid_refs = tools.get_ref_files(config['valid_ref'])
# logging
data.valid_result[data.num_iter] = 100 * tools.bleu_file(output_path, valid_refs)
data.valid_time[data.num_iter] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
f = open('log', 'w')
f.write(data.print_log())
f.close()
data.print_valid()
logging.info('Done!\n')
# update the best model
if data.last_improved(last = True) == 0:
model.save(config['save_path'] + '/model_best.npz', data = data, mapping = mapping)
if data.last_improved() >= config['try_iter']:
logging.info('No improvement for %d iterations. Stop training.\n' % data.last_improved())
break
# updating gradients
upst = time.time()
if 'MRT' in config and config['MRT'] is True:
cost, grad_norm = update_grads(x, xmask, y, ymask, MRTLoss)
elif config['semi_learning']:
cost, grad_norm = update_grads(x, xmask, y, ymask, y, ymask, x, xmask, valid)
else:
cost, grad_norm = update_grads(x, xmask, y, ymask)
# NaN processing
if numpy.isinf(cost.mean()) or numpy.isnan(cost.mean()):
logging.warning('There is an NaN!')
update_params()
ed = time.time()
data.time += ed - st
data.updatetime += ed - upst
data.train_cost.append(cost.mean())
logging.debug('iteration %d: cost = %.4f, grad_norm = %.3e,' % (data.num_iter, cost.mean(), grad_norm)+
' iter_time = %.3f, total_time: %s' % (ed - st, tools.print_time(data.time)))
except KeyboardInterrupt:
logging.info('\nStop training by keyboard interruption.')
break
# save checkpoint
s = signal.signal(signal.SIGINT, signal.SIG_IGN)
logging.info('Saving model and status\n')
model.save(config['checkpoint_model'], data = data, mapping = mapping)
data.save_status(config['checkpoint_status'])
logging.info('The training is completed.\n')
signal.signal(signal.SIGINT, s)
|
the-stack_0_7112 | # Copyright (c) 2020 Portworx
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import base64
import os
from kubernetes import client, config
from openstorage import api_pb2
from openstorage import api_pb2_grpc
# Environment Variables
ENDPOINT_ENV_KEY = 'OPENSTORAGE_SDK_ENDPOINT'
SECURE_ENV_KEY = 'OPENSTORAGE_SDK_SECURE'
TOKEN_ENV_KEY = 'OPENSTORAGE_SDK_TOKEN'
CAFILE_ENV_KEY = 'OPENSTORAGE_SDK_CAFILE'
SECRET_NAME_ENV_KEY = 'OPENSTORAGE_SDK_SECRET_NAME'
SECRET_NAMESPACE_ENV_KEY = 'OPENSTORAGE_SDK_SECRET_NAMESPACE'
class Connector(object):
"""
Connects to OpenStorage SDK server.
Manages connection and setup of the gRPC when using tokens and TLS.
The token may be passed in or fetched from a Kubernetes secret.
"""
def __init__(self, endpoint='', secure=False, token='', cafile='',
token_secret_namespace='', token_secret_name=''):
"""
:param endpoint: gRPC endpoint to OpenStorage SDK server
:type endpoint: str
:param secure: use TLS for the connection
:type secure: bool
:param token: OpenStorage Auth token
:type token: str
:param cafile: Path to CA file if required for TLS. If not provided
and 'secure' is enabled, then the CA must be part of the host.
:type cafile: str
:param token_secret_name: Name of the Kubernetes secret containing
the OpenStorage Auth token
:type token_secret_name: str
:param token_secret_namespace: Name of the namespace in Kubernetes
containing the secret object with the OpenStorage Auth token
:type token_secret_namespace: str
"""
self.endpoint = endpoint
self.secure = secure
self.token = token
self.cafile = cafile
self.token_secret_name = token_secret_name
self.token_secret_namespace = token_secret_namespace
        # Override settings using environment variables
self._from_environment()
if self.endpoint == '':
raise Exception('Endpoint not provided')
# Check if secret must be fetched from Kubernetes
if self._use_k8s_secret():
self.token = self._get_kubernetes_secret()
def connect(self, opts=None):
"""
Connect to server
:param opts:gRPC channel options if any
:return: A gRPC channel
"""
if self._is_secure():
return self._get_secure_channel(opts)
elif self._is_authenticated():
return self._get_auth_insecure_channel(opts)
return grpc.insecure_channel(self.endpoint, opts)
def _from_environment(self):
e = os.getenv(ENDPOINT_ENV_KEY)
if e:
self.endpoint = e
e = os.getenv(SECURE_ENV_KEY)
if e:
self.secure = e.lower() in ['true', '1', 't', 'y', 'yes']
e = os.getenv(TOKEN_ENV_KEY)
if e:
self.token = e
e = os.getenv(CAFILE_ENV_KEY)
if e:
self.cafile = e
e = os.getenv(SECRET_NAME_ENV_KEY)
if e:
self.token_secret_name = e
e = os.getenv(SECRET_NAMESPACE_ENV_KEY)
if e:
self.token_secret_namespace = e
def _use_k8s_secret(self):
return self.token_secret_name != '' and self.token_secret_namespace != ''
def _is_secure(self):
return self.secure or self.cafile != ''
def _is_authenticated(self):
return self.token != ''
def _get_kubernetes_secret(self):
config.load_kube_config()
v1 = client.CoreV1Api()
ret = v1.read_namespaced_secret(self.token_secret_name, self.token_secret_namespace)
return str(base64.b64decode(ret.data['auth-token']), "utf-8")
def _get_secure_channel(self, opts=None):
# Setup CA if any
with open(self.cafile, 'rb') as f:
capem = f.read()
creds = grpc.ssl_channel_credentials(root_certificates=capem)
# Setup authentication if any
if self._is_authenticated():
auth = grpc.access_token_call_credentials(self.token)
return grpc.secure_channel(self.endpoint, grpc.composite_channel_credentials(creds, auth), opts)
return grpc.secure_channel(self.endpoint, creds, opts)
def _get_auth_insecure_channel(self, opts=None):
channel = grpc.insecure_channel(self.endpoint, opts)
return grpc.intercept_channel(channel, TokenAuthentication(self.token))
class TokenAuthentication(grpc.UnaryUnaryClientInterceptor):
"""
gRPC interceptor which allows authentication to a non-TLS server
"""
def __init__(self, token):
self.token = token
def intercept_unary_unary(self, continuation, client_call_details, request):
try:
client_call_details.metadata.append(("authorization", "bearer "+self.token))
except AttributeError:
md = []
md.append(("authorization", "bearer "+self.token))
client_call_details = client_call_details._replace(metadata=md)
return continuation(client_call_details, request)
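# Hedged usage sketch (illustrative only): the endpoint and token below are assumed
# placeholder values, and the commented stub name is an assumption about the generated
# api_pb2_grpc module; any generated gRPC stub can wrap the channel returned by connect().
def _example_connect():
    connector = Connector(endpoint='localhost:9100', token='my-token')
    channel = connector.connect()
    # e.g. cluster_client = api_pb2_grpc.OpenStorageClusterStub(channel)
    return channel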
|
the-stack_0_7113 | """ nftfw - Geoip2 support
Requires the python3-geoip2 and geoipupdate packages
and a license from MaxMind
https://dev.maxmind.com/geoip/geoip2/geolite2/
"""
import os.path
class GeoIPCountry:
"""Lookup ip addresses in geoip2 """
# Set up reader
countryreader = None
# Country database
country = '/var/lib/GeoIP/GeoLite2-Country.mmdb'
# Errors
# pylint: disable=invalid-name
AddressNotFoundError = None
InvalidDatabaseError = None
def __init__(self):
"""Check geoip2 availability
See if the country database file can be found
"""
# geoip2 may not be installed
# but pylint will complain on bullseye with import-outside-toplevel
# if the disable code is installed, pylint will complain on buster
# about the disable code below (now deactivated)
# pylint argument disable=import-outside-toplevel
# All this is to allow the system to run when geoip2 is not installed
# so we don't insist on it
try:
from geoip2.database import Reader
from geoip2.errors import AddressNotFoundError
from maxminddb import InvalidDatabaseError
self.AddressNotFoundError = AddressNotFoundError
self.InvalidDatabaseError = InvalidDatabaseError
if os.path.exists(self.country):
self.countryreader = Reader(self.country)
except ImportError:
return
def isinstalled(self):
"""Return True if we have a reader """
return self.countryreader is not None
def lookup(self, ip):
"""Lookup an ip in the geoip2 database
Parameters
----------
ip : str
Ip to lookup
Returns
-------
tuple (name, iso)
name : str
Country name
None if no reader
or no result
iso : str
Two character ISO code for the country
"""
# pylint: disable=no-member
if self.countryreader is None:
return(None, None)
# remove any mask from ip
if ip[-3] == '/':
ip = ip[0:-3]
elif ip[-2] == '/':
ip = ip[0:-2]
try:
cn = self.countryreader.country(ip)
iso = None
cname = None
if cn.country.iso_code:
iso = cn.country.iso_code
if cn.country.name:
cname = cn.country.name
return(cname, iso)
except (ValueError, AttributeError, self.AddressNotFoundError, self.InvalidDatabaseError):
return(None, None)
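# Hedged usage sketch (illustrative): the IP address below is an arbitrary example and
# the lookup only returns data when the GeoLite2 country database is installed.
def _example_country_lookup():
    geoip = GeoIPCountry()
    if geoip.isinstalled():
        name, iso = geoip.lookup('8.8.8.8')
        print(name, iso)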
|
the-stack_0_7114 | """
coast - Plot land and water.
"""
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
args_in_kwargs,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
@fmt_docstring
@use_alias(
R="region",
J="projection",
A="area_thresh",
C="lakes",
B="frame",
D="resolution",
E="dcw",
I="rivers",
L="map_scale",
N="borders",
W="shorelines",
G="land",
S="water",
U="timestamp",
V="verbose",
X="xshift",
Y="yshift",
c="panel",
p="perspective",
t="transparency",
)
@kwargs_to_strings(R="sequence", c="sequence_comma", p="sequence")
def coast(self, **kwargs):
r"""
Plot continents, shorelines, rivers, and borders on maps
Plots grayshaded, colored, or textured land-masses [or water-masses] on
maps and [optionally] draws coastlines, rivers, and political
boundaries. Alternatively, it can (1) issue clip paths that will
contain all land or all water areas, or (2) dump the data to an ASCII
table. The data files come in 5 different resolutions: (**f**)ull,
(**h**)igh, (**i**)ntermediate, (**l**)ow, and (**c**)rude. The full
resolution files amount to more than 55 Mb of data and provide great
detail; for maps of larger geographical extent it is more economical to
use one of the other resolutions. If the user selects to paint the
land-areas and does not specify fill of water-areas then the latter
will be transparent (i.e., earlier graphics drawn in those areas will
not be overwritten). Likewise, if the water-areas are painted and no
land fill is set then the land-areas will be transparent.
A map projection must be supplied.
Full option list at :gmt-docs:`coast.html`
{aliases}
Parameters
----------
{J}
{R}
*Required if this is the first plot command.*
{A}
{B}
lakes : str or list
*fill*\ [**+l**\|\ **+r**].
Set the shade, color, or pattern for lakes and river-lakes. The
default is the fill chosen for wet areas set by the ``water``
parameter. Optionally, specify separate fills by appending
**+l** for lakes or **+r** for river-lakes, and passing multiple
strings in a list.
resolution : str
**f**\|\ **h**\|\ **i**\|\ **l**\|\ **c**.
Selects the resolution of the data set to: (**f**\ )ull,
(**h**\ )igh, (**i**\ )ntermediate, (**l**\ )ow,
and (**c**\ )rude.
land : str
Select filling or clipping of "dry" areas.
rivers : int or str or list
*river*\ [/*pen*].
Draw rivers. Specify the type of rivers and [optionally] append
pen attributes [Default pen is width = default, color = black,
style = solid].
Choose from the list of river types below; pass a list to
``rivers`` to use multiple arguments.
0 = Double-lined rivers (river-lakes)
1 = Permanent major rivers
2 = Additional major rivers
3 = Additional rivers
4 = Minor rivers
5 = Intermittent rivers - major
6 = Intermittent rivers - additional
7 = Intermittent rivers - minor
8 = Major canals
9 = Minor canals
10 = Irrigation canals
You can also choose from several preconfigured river groups:
a = All rivers and canals (0-10)
A = All rivers and canals except river-lakes (1-10)
r = All permanent rivers (0-4)
R = All permanent rivers except river-lakes (1-4)
i = All intermittent rivers (5-7)
c = All canals (8-10)
map_scale : str
[**g**\|\ **j**\|\ **J**\|\ **n**\|\ **x**]\ *refpoint*\
**+w**\ *length*.
Draws a simple map scale centered on the reference point specified.
borders : int or str or list
*border*\ [/*pen*].
Draw political boundaries. Specify the type of boundary and
[optionally] append pen attributes [Default pen is width = default,
color = black, style = solid].
Choose from the list of boundaries below. Pass a list to
``borders`` to use multiple arguments.
1 = National boundaries
2 = State boundaries within the Americas
3 = Marine boundaries
a = All boundaries (1-3)
water : str
Select filling or clipping of "wet" areas.
{U}
shorelines : int or str or list
[*level*\ /]\ *pen*.
Draw shorelines [Default is no shorelines]. Append pen attributes
[Default is width = default, color = black, style = solid] which
apply to all four levels. To set the pen for a single level,
pass a string with *level*\ /*pen*\ , where level is
1-4 and represent coastline, lakeshore, island-in-lake shore, and
lake-in-island-in-lake shore. Pass a list of *level*\ /*pen*
strings to ``shorelines`` to set multiple levels. When specific
level pens are set, those not listed will not be drawn.
dcw : str or list
*code1,code2,…*\ [**+l**\|\ **L**\ ][**+g**\ *fill*\ ]
[**+p**\ *pen*\ ][**+z**].
Select painting or dumping country polygons from the
`Digital Chart of the World
<https://en.wikipedia.org/wiki/Digital_Chart_of_the_World>`__.
Append one or more comma-separated countries using the 2-character
`ISO 3166-1 alpha-2 convention
<https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2>`__.
To select a state of a country (if available), append
.\ *state*, (e.g, US.TX for Texas). To specify a whole continent,
prepend **=** to any of the continent codes (e.g. =EU for Europe).
Append **+p**\ *pen* to draw polygon outlines
(default is no outline) and **+g**\ *fill* to fill them
(default is no fill). Append **+l**\|\ **+L** to =\ *continent* to
only list countries in that continent; repeat if more than one
continent is requested.
{XY}
{c}
{p}
{t}
{V}
"""
kwargs = self._preprocess(**kwargs) # pylint: disable=protected-access
if not args_in_kwargs(args=["C", "G", "S", "I", "N", "E", "Q", "W"], kwargs=kwargs):
raise GMTInvalidInput(
"""At least one of the following parameters must be specified:
lakes, land, water, rivers, borders, dcw, Q, or shorelines"""
)
with Session() as lib:
lib.call_module(module="coast", args=build_arg_string(kwargs))
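# Hedged usage sketch (illustrative, not part of the module): the region, projection
# and fill values below are assumptions chosen only to demonstrate the parameters
# documented above.
def _example_coast_usage():
    import pygmt
    fig = pygmt.Figure()
    # global map with gray land, blue water and thin level-1 shorelines
    fig.coast(
        region="g",
        projection="W15c",
        land="lightgray",
        water="skyblue",
        shorelines="1/0.5p",
        frame=True,
    )
    fig.savefig("coast_example.png")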
|
the-stack_0_7116 | from PIL import Image, ImageOps
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import time
from timeit import default_timer as timer
#########from IPython.display import display
#Create the sorting functions
def triParComptage(Tab):
    # Initialize the variables
bSup=max(Tab)
TabComptage=[]
x = 0
    # Initialize the counting array to 0
for i in range (max(Tab)+1):
TabComptage.append(0)
    # Build the counting array
for i in range (len(Tab)):
TabComptage[Tab[i]]+=1
    # Build the sorted array
for i in range (bSup+1):
for j in range (TabComptage[i]):
Tab[x] = i
x+=1
return Tab
def tri_fusion(m):
if len(m) <= 1:
return m
milieu = len(m) // 2
    #take the middle index
gauche = m[:milieu]
droite = m[milieu:]
    #sort each half recursively
gauche = tri_fusion(gauche)
droite = tri_fusion(droite)
#"créer une liste avec la fonction fusion
return list(fusion(gauche,droite))
def fusion(gauche,droite):
resultat = []
    #initialize the search positions
index_gauche=0
index_droite=0
    #loop while both search positions stay within the bounds of their lists
while index_gauche < len(gauche) and index_droite < len(droite):
        #if left[index_gauche] is less than or equal to right[index_droite]
if gauche[index_gauche] <= droite[index_droite]:
            #then the result list takes the element from the left list
resultat.append(gauche[index_gauche])
#i1=i1+1
index_gauche += 1
else:
            #then the result list takes the element from the right list
resultat.append(droite[index_droite])
#i2=i2+1
index_droite += 1
    #if a list still has leftover elements, append them to the result (e.g. gauche=[1,2,3,4,5,6]
    #gives gauche[2:]=[3,4,5,6]; extend appends each element individually, so we
    #get resultat=[3,4,5,6] and not resultat=[[3,4,5,6]])
if gauche:
resultat.extend(gauche[index_gauche:])
if droite:
resultat.extend(droite[index_droite:])
return resultat
def tri_selection(tab):
for i in range(len(tab)):
        # Find the minimum
min = i
for j in range(i+1, len(tab)):
if tab[min] > tab[j]:
min = j
tmp = tab[i]
tab[i] = tab[min]
tab[min] = tmp
return tab
#Function to compute the histogram stretching (dilation)
def dilatation_histo(Pixel,h):
    #Sort the list
#triParComptage(Pixel)
imin=h[0]
imax=h[-1]
l, g = Pixel.size
for y in range(g):
for x in range(l):
c = Pixel.getpixel((x, y))
dilat = int((c)*(c-imin)/(imax-imin))
if dilat>255:
dilat=255
Pixel.putpixel((x, y), dilat)
return Pixel
#Function to change the image brightness
def lumino(img,lum):
a=input('voulez vous up ou down la lumino \nTape 1 ou Tape 2\nVotre réponse: ')
h,l = img.size
    if a == '1': # the if must come before the two loops that traverse the image
    # otherwise the program would wait for the user's value a after each pass through the loops
for y in range(h-1):
for x in range(l-1):
c = img.getpixel((y, x))
c += lum
if c > 255:
img.putpixel((y,x), 255)
else:
img.putpixel((y, x), c)
elif a == '2':
for y in range(h-1):
for x in range(l-1):
c = img.getpixel((y, x))
c -= lum
if c < 0:
img.putpixel((y,x), 0)
else:
img.putpixel((y, x), c)
return img
#List containing all the pixels of the image
def création(Img):
Pixel=[]
(l,h)=Img.size
for i in range(l):
for j in range(h):
p = Img.getpixel((i, j))
Pixel.append(p)
return Pixel
#Load the image
Img = Image.open("D:\Programe\Python\APP1\APP\APP3\lena_ng.png")
#Menu
print("\n\nBienvenue,\nvous allez lancer le programme de modification d'image.")
n=int(input("veuillez choisir un programme à lancer: \n1 Tri des couleurs de l'image \n2 modifier la luminosité \n3 Augmenter le contraste \n\n Entrer ici le numéro: "))
if n==1:
u=int(input("Quel algorithme de tri voulez vous utiliser ?\n\n1 Tri par comptage \n2 Tri par fusion \n3 Tri par séléction \n Entrer ici le numéro: "))
Pixel = création(Img)
if u==1:
start_time = time.time()
h=triParComptage(Pixel)
print(h)
print("Temps d execution : %s secondes ---" % (time.time() - start_time))
if u==2:
start_time = time.time()
h=tri_fusion(Pixel)
print(h)
print("Temps d execution : %s secondes ---" % (time.time() - start_time))
if u==3:
print(" Cela va prendre un peu de temps...")
start_time = time.time()
h=tri_selection(Pixel)
print(h)
print("Temps d execution : %s secondes ---" % (time.time() - start_time))
plt.hist(h, range = (0, 255), bins = 255, color = 'yellow', edgecolor = 'red')
plt.xlabel('Niveaux de gris')
plt.ylabel('Effectif')
plt.title('Histogramme des différentes valeurs des pixels dans l\'image')
    plt.show()
if n==2:
Img.show()
    # Call the function
value = int(input("entrer un contraste entre 0 et 255: "))
img1 = lumino(Img,value)
    # Display the images
img1.show()
if n==3:
print("\nImage originale :")
Img.show()
Pixel = création(Img)
h=triParComptage(Pixel)
plt.hist(h, range = (0, 255), bins = 255, color = 'green', edgecolor = 'blue')
plt.xlabel('Niveaux de gris')
plt.ylabel('Effectif')
plt.title('Histogramme des différentes valeurs des pixels dans l\'image')
plt.show()
dilatation_histo(Img,h)
m=création(Img)
Img.show()
plt.hist(m, range = (0, 255), bins = 255, color = 'yellow', edgecolor = 'red')
plt.xlabel('Niveaux de gris')
plt.ylabel('Effectif')
plt.title('Histogramme des différentes valeurs des pixels dans l\'image')
plt.show()
print("\nImage contrastée :")
print("\nHistogramme de dilatation : en bleu avant la dilatation, en rouge après la dilatation\n")
#End of menu |
the-stack_0_7118 | """
Note: When using this api many of the commands come with an option to skip the initilization of the comms e.g. ...
def read_word(self, address, initialize_comms=True):
setting initialize_comms=False will skip the comms initialization step and save ~0.2 seconds. However one intialization
needs to be done to get things running. Therefore the fastest way to perform 5 register reads is...
pystlink.read_word(0x08000000, initialize_comms=True)
pystlink.read_word(0x08000000, initialize_comms=False)
pystlink.read_word(0x08000000, initialize_comms=False)
pystlink.read_word(0x08000000, initialize_comms=False)
pystlink.read_word(0x08000000, initialize_comms=False)
"""
import time
from textwrap import wrap
from pystlink import lib
from pystlink.lib import stlinkv2
from pystlink.lib import stlinkusb
from pystlink.lib import stm32
from pystlink.lib import stm32fp
from pystlink.lib import stm32fs
from pystlink.lib import stm32l0
from pystlink.lib import stm32l4
from pystlink.lib import stm32h7
from pystlink.lib import stm32devices
from pystlink.lib import stlinkex
from pystlink.lib import dbg
from pystlink.lib.srec import Srec
class PyStlink():
CPUID_REG = 0xe000ed00
def __init__(self, verbosity=0):
self.stlink = None
self.driver = None
self._dbg = dbg.Dbg(verbosity)
self._serial = None
self._index = 0
self._hard = False
self._connector = stlinkusb.StlinkUsbConnector(dbg=self._dbg, serial=self._serial, index=self._index)
self.comms_initialized = False
try:
self.initialize_comms()
except stlinkex.StlinkException:
pass
def initialize_comms(self):
self.initialize_stlink_comms()
if self.stlink.coreid == 0:
raise stlinkex.StlinkException('STLink could not connect to microcontroller')
self._core = stm32.Stm32(self.stlink, dbg=self._dbg)
self.find_mcus_by_core()
self._dbg.info("CORE: %s" % self._mcus_by_core['core'])
self.find_mcus_by_devid()
self.find_mcus_by_flash_size()
self._dbg.info("MCU: %s" % '/'.join([mcu['type'] for mcu in self._mcus]))
self._dbg.info("FLASH: %dKB" % self._flash_size)
self.load_driver()
self.comms_initialized = True
def initialize_stlink_comms(self):
self.stlink = stlinkv2.Stlink(self._connector, dbg=self._dbg)
self._dbg.info("DEVICE: ST-Link/%s" % self.stlink.ver_str)
self._dbg.info("SUPPLY: %.2fV" % self.stlink.target_voltage)
self._dbg.verbose("COREID: %08x" % self.stlink.coreid)
def get_target_voltage(self):
self.initialize_stlink_comms()
return self.stlink.target_voltage
def find_mcus_by_core(self):
if (self._hard):
self._core.core_hard_reset_halt()
else:
self._core.core_halt()
cpuid = self.stlink.get_debugreg32(PyStlink.CPUID_REG)
if cpuid == 0:
raise stlinkex.StlinkException('Not connected to CPU')
self._dbg.verbose("CPUID: %08x" % cpuid)
partno = 0xfff & (cpuid >> 4)
for mcu_core in stm32devices.DEVICES:
if mcu_core['part_no'] == partno:
self._mcus_by_core = mcu_core
return
raise stlinkex.StlinkException('PART_NO: 0x%03x is not supported' % partno)
def find_mcus_by_devid(self):
# STM32H7 hack: this MCU has ID-CODE on different address than STM32F7
devid = 0x000
idcode_regs = self._mcus_by_core['idcode_reg']
if isinstance(self._mcus_by_core['idcode_reg'], int):
idcode_regs = [idcode_regs]
for idcode_reg in idcode_regs:
idcode = self.stlink.get_debugreg32(idcode_reg)
self._dbg.verbose("IDCODE: %08x" % idcode)
devid = 0xfff & idcode
for mcu_devid in self._mcus_by_core['devices']:
if mcu_devid['dev_id'] == devid:
self._mcus_by_devid = mcu_devid
return
raise stlinkex.StlinkException('DEV_ID: 0x%03x is not supported' % devid)
def find_mcus_by_flash_size(self):
self._flash_size = self.stlink.get_debugreg16(self._mcus_by_devid['flash_size_reg'])
self._mcus = []
for mcu in self._mcus_by_devid['devices']:
if mcu['flash_size'] == self._flash_size:
self._mcus.append(mcu)
if not self._mcus:
raise stlinkex.StlinkException('Connected CPU with DEV_ID: 0x%03x and FLASH size: %dKB is not supported. Check Protection' % (
self._mcus_by_devid['dev_id'], self._flash_size
))
def fix_cpu_type(self, cpu_type):
cpu_type = cpu_type.upper()
# now support only STM32
if cpu_type.startswith('STM32'):
# change character on 10 position to 'x' where is package size code
if len(cpu_type) > 9:
cpu_type = list(cpu_type)
cpu_type[9] = 'x'
cpu_type = ''.join(cpu_type)
return cpu_type
raise stlinkex.StlinkException('"%s" is not STM32 family' % cpu_type)
def filter_detected_cpu(self, expected_cpus):
cpus = []
for detected_cpu in self._mcus:
for expected_cpu in expected_cpus:
expected_cpu = self.fix_cpu_type(expected_cpu)
if detected_cpu['type'].startswith(expected_cpu):
cpus.append(detected_cpu)
break
if not cpus:
raise stlinkex.StlinkException('Connected CPU is not %s but detected is %s %s' % (
','.join(expected_cpus),
'one of' if len(self._mcus) > 1 else '',
','.join([cpu['type'] for cpu in self._mcus]),
))
self._mcus = cpus
def load_driver(self):
flash_driver = self._mcus_by_devid['flash_driver']
if flash_driver == 'STM32FP':
self.driver = stm32fp.Stm32FP(self.stlink, dbg=self._dbg)
elif flash_driver == 'STM32FPXL':
self.driver = stm32fp.Stm32FPXL(self.stlink, dbg=self._dbg)
elif flash_driver == 'STM32FS':
self.driver = stm32fs.Stm32FS(self.stlink, dbg=self._dbg)
elif flash_driver == 'STM32L0':
self.driver = stm32l0.Stm32L0(self.stlink, dbg=self._dbg)
elif flash_driver == 'STM32L4':
self.driver = stm32l4.Stm32L4(self.stlink, dbg=self._dbg)
elif flash_driver == 'STM32H7':
self.driver = stm32h7.Stm32H7(self.stlink, dbg=self._dbg)
else:
self.driver = self._core
def read_word(self, address, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
data = self.driver.get_mem(address, 4)
return f"{data[3]:02x}{data[2]:02x}{data[1]:02x}{data[0]:02x}"
def read_words(self, address, num_words, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
num_bytes = num_words*4
data = self.driver.get_mem(address, num_bytes)
if len(data) != num_bytes:
raise Exception("Error with data length when reading words")
words = [""] * num_words
for i in range(num_words):
words[i] = f"{data[3+(i*4)]:02x}{data[2+(i*4)]:02x}{data[1+(i*4)]:02x}{data[0+(i*4)]:02x}"
return words
def write_word(self, address, value, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
print("Warning: write_word() isn't as simple to use as the -w32 function from ST-LINK_CLI.exe")
print(" The memory location being written to may need to be unlocked\n")
if len(value) != 8:
raise Exception("Error with write_word(): value is invalid")
self.write_words(address, value)
def write_words(self, address, values, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
if type(values) != str:
raise Exception("Error with write_words(): values must be a string")
if len(values) % 8 != 0:
raise Exception("Error with write_words(): values is invalid")
data = []
words = wrap(values, 8)
for word in words:
hex_bytes = wrap(word, 2)
hex_bytes.reverse()
hex_bytes = list(map(lambda x: int(x, 16), hex_bytes))
data.extend(hex_bytes)
self.driver.set_mem(address, data)
def program_otp(self, address, hex_data, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
hex_data = hex_data.lower()
if len(hex_data) == 0:
raise Exception("OTP data can't be zero in length")
if len(hex_data) % 16 != 0:
raise Exception("OTP data is an invalid length")
num_words = int(len(hex_data) / 8)
# Read OTP before attempting to write
words = self.read_words(address, num_words, initialize_comms=False)
hex_data_read = "".join(words)
blank_value = "ffffffff" * num_words
if hex_data_read != hex_data:
if hex_data_read == blank_value:
# Unlock Flash
self.driver.flash.enable_flash_programming()
# Write to OTP
self.write_words(address, hex_data, initialize_comms=False)
# Lock Flash
self.driver.flash.disable_flash_programming()
# Check what was witten to the OTP
words = self.read_words(address, num_words, initialize_comms=False)
hex_data_read = "".join(words)
if hex_data_read != hex_data:
if hex_data_read == blank_value:
print("Unable to write to OTP")
return 1
else:
print("Data not written correctly to OTP")
return 1
else:
print("Unable to write to OTP as OTP isn't blank")
return 1
return 0
def write_word_to_flash(self, address, value, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
data_bytes = wrap(value, 2)
data_bytes.reverse()
data_bytes = list(map(lambda x: int(x, 16), data_bytes))
self.driver.flash_write(address, data_bytes, erase=True, erase_sizes=self._mcus_by_devid['erase_sizes'])
def program_flash(self, firmware, erase=True, verify=True, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
mem = self._read_file(str(firmware))
start_addr = stm32.Stm32.FLASH_START
for addr, data in mem:
if addr is None:
addr = start_addr
a = self._mcus_by_devid['erase_sizes']
self.driver.flash_write(addr, data, erase=erase, erase_sizes=self._mcus_by_devid['erase_sizes'])
self.driver.core_reset_halt()
time.sleep(0.1)
if verify:
self.driver.core_halt()
self.driver.flash_verify(addr, data)
self.driver.core_run()
def flash_erase_all(self):
flash_size = self.stlink.get_debugreg16(self._mcus_by_devid['flash_size_reg'])
self.driver.flash_erase_all(flash_size)
def _read_file(self, filename):
if filename.endswith('.srec'):
srec = Srec()
srec.encode_file(filename)
size = sum([len(i[1]) for i in srec.buffers])
self._dbg.info("Loaded %d Bytes from %s file" % (size, filename))
return srec.buffers
with open(filename, 'rb') as f:
data = list(f.read())
self._dbg.info("Loaded %d Bytes from %s file" % (len(data), filename))
return [(None, data)]
if __name__ == "__main__":
pystlink = PyStlink(verbosity=2)
input("press enter to continue")
print(pystlink.read_word(0x08000000))
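    # Hedged sketch of the fast repeated-read pattern from the module docstring:
    # only the first read pays the ~0.2 s comms-initialization cost.
    for _ in range(4):
        print(pystlink.read_word(0x08000000, initialize_comms=False))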
|
the-stack_0_7119 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2021 Geode-solutions
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, platform
if sys.version_info >= (3,8,0) and platform.system() == "Windows":
for path in [x.strip() for x in os.environ['PATH'].split(';') if x]:
os.add_dll_directory(path)
import opengeode
import opengeode_io_py_mesh as mesh_io
if __name__ == '__main__':
mesh_io.initialize_mesh_io()
test_dir = os.path.dirname(__file__)
data_dir = os.path.abspath(os.path.join(test_dir, "../../../../tests/data"))
surface = opengeode.load_polygonal_surface3D(os.path.join(data_dir, "TopHat.obj"))
if surface.nb_vertices() != 363:
raise ValueError("[Test] Number of vertices in the loaded Surface is not correct" )
if surface.nb_polygons() != 380:
raise ValueError("[Test] Number of polygons in the loaded Surface is not correct" )
opengeode.save_polygonal_surface3D(surface, "TopHat_save.obj")
|
the-stack_0_7121 | """
@author syt123450 / https://github.com/syt123450
"""
import os
import shutil
from tf.pb2json.pb2json_conversion import convert
import subprocess
input_format_config = '--input_format=tf_saved_model'
def preprocess_saved_model(input_path, output_path, output_node_names):
print("Preprocessing tensorflow saved model...")
os.makedirs(output_path + '/tmp', exist_ok=True)
print("Converting saved model to web friendly format...")
subprocess.check_call([
"tensorflowjs_converter",
input_format_config,
"--output_node_names=" + output_node_names,
"--saved_model_tags=serve",
input_path,
output_path + '/tmp'
])
path_now = os.getcwd()
os.chdir(output_path)
absolute_output_path = os.getcwd()
absolute_output_path_temp = absolute_output_path + '/tmp/'
os.chdir(path_now)
print("Converting pb to json...")
convert(
absolute_output_path_temp,
absolute_output_path
)
print("Removing temp pb model...")
shutil.rmtree(absolute_output_path_temp)
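# Hedged usage sketch (illustrative): the paths and node name below are assumed
# placeholders for a TensorFlow SavedModel directory, an output directory and the
# graph output node(s) expected by tensorflowjs_converter.
if __name__ == '__main__':
    preprocess_saved_model(
        input_path='./saved_model',
        output_path='./web_model',
        output_node_names='output_node'
    )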
|
the-stack_0_7122 | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import yaml
from yaml import Loader
import os
from pathlib import Path
from pdb import set_trace as st
## Vocab code
class Vocab:
def __init__(self):
## set architecture vocab data structures
self.architecture_vocab, self.architecture2idx = self._init_architecture_vocab()
## set hyper param vocab data structures
self.hparms_vocab, self.hptype2idx = self._init_layers_hp_vocab()
## Optimizer
self.optimizer_vocab, self.opt2idx = self._init_optimizer_vocab()
## Optimizer hyperparams
self.hparams_opt_vocab, self.hpopt2idx = self._init_layers_hp_optimizer_vocab()
def _init_architecture_vocab(self):
'''
Initializes the architecture vocabulary
'''
architecture_vocab = ['PAD_TOKEN', 'SOS', 'EOS','Conv2d', 'Linear', 'MaxPool2d', 'BatchNorm2d', 'Dropout2d', 'ReLU', 'SELU', 'LeakyReLU', 'Flatten']
architecture2idx = { architecture_vocab[i]:i for i in range(len(architecture_vocab)) } # faster than using python's list.index(element)
return architecture_vocab, architecture2idx
def _init_layers_hp_vocab(self):
'''
Initializes the hyper param layers vocab
'''
hparms_vocab = ['PAD_TOKEN','SOS', 'EOS','in_features', 'out_features', 'kernel_size', 'stride', 'padding', 'dilation', 'ceil_mode', 'eps', 'momentum', 'affine', 'track_running_stats', 'p', 'bias']
hptype2idx = { hparms_vocab[i]:i for i in range(len(hparms_vocab))} # faster than using python's list.index(element)
return hparms_vocab, hptype2idx
def _init_optimizer_vocab(self):
'''
Initializes the hyper param layers vocab
'''
optimizer_vocab = ['PAD_TOKEN', 'SOS', 'EOS','SGD', 'Adam', 'Adadelta', 'Adagrad']
opt2idx = { optimizer_vocab[i]:i for i in range(len(optimizer_vocab))} # faster than using python's list.index(element)
return optimizer_vocab, opt2idx
def _init_layers_hp_optimizer_vocab(self):
'''
Initializes the hyper param layers vocab
'''
hparams_opt_vocab = ['PAD_TOKEN', 'SOS', 'EOS', 'dampening', 'lr', 'momentum', 'nesterov', 'weight_decay', 'rho']
hpopt2idx = { hparams_opt_vocab[i]:i for i in range(len(hparams_opt_vocab))} # faster than using python's list.index(element)
return hparams_opt_vocab, hpopt2idx
def get_type(vocab, layer_str):
'''
Get's the string type of the layer.
:param list vocab: a list of all the token types (probably as strings)
:param str layer_str: a string of a splitted layer e.g. ' Linear(in_features=4, out_features=3, bias=True)\n (1)'
:return str arch_token: string representation of layer type.
'''
for arch_token in vocab:
if arch_token in layer_str:
return arch_token
raise ValueError(f'The string you have {layer_str} doesn\'t match any of the architecture tokens in {vocab}')
def indices2onehot(indices, vocab_size):
'''
Returns the onehot matrix
'''
shape = (len(indices), vocab_size)
matrix = np.zeros(shape)
# for every symbol index i, place a 1 i the one hot vector in the vocab position symbol_idx
for i, symbol_idx in enumerate(indices):
matrix[i,symbol_idx] = 1
return matrix
## DataProcessing code
class DataProcessor:
'''
Class for converting models into vector forms to be used by neural nets.
'''
def __init__(self, vocab):
self.vocab = vocab
def arch_parse_string(self, layer_str):
"""
Parses the architecture layer string and gets all the parameters for each layer in list.
:param str layer_str: the string representation of a layer of a model
:return list param_vector: python list of parameters for the layer of the model
"""
params = self.vocab.hptype2idx # dictionary from hyper param type to its index
curr_word = ''
param_vector = [0]*len(params)
#go through the entire string and try to find keywords
for i in range(len(layer_str)):
#start a new sublayer_str if there is a space
if layer_str[i] == ' ':
curr_word = ''
else:
#add the next character to the substring
curr_word += layer_str[i]
#separate 'padding' from 'p'
if layer_str[i] == 'p':
#continues if the substring is padding
if layer_str[i+1] == 'a':
continue
#Separates function call from keywords
if layer_str[i] == '(' and layer_str[i-1] != '=':
curr_word = ''
#loop through the keys of the dictionary
for param in params.keys():
#check if our substring is a possible parameter
if curr_word in params.keys():
#if there is a match then add to the index corresponding to the parameter
if curr_word == param:
# print(curr_word, params[curr_word])
#if there is a ( then add the next character
if layer_str[i+2] == '(' and layer_str[i+1] == '=':
index = int(params[curr_word])
param_vector[index] = int(layer_str[i+3])
else:
#add a 0 if the word is 'False'
if layer_str[i+2] == 'F':
param_vector[int(params[curr_word])] = 0
#add a 1 if the word is 'True'
elif layer_str[i+2] == 'T':
param_vector[int(params[curr_word])] = 1
else:
val = ''
i += 2
#loop through the string until the entire value is found
while layer_str[i] != ',' and layer_str[i] != ')':
val += layer_str[i]
i += 1
param_vector[int(params[curr_word])] = eval(val)
return param_vector
def mdl_str2feature_vec(self, mdl_str):
"""
Makes a one hot matrix from each layer of the architecture data (note doesn't include meta data)
Note: the names of the layers have to be separated by colons for it to work
:param str mdl_str: model string e.g. nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
:return np.array feature_matrix: model vector with for layer + meta data e.g [conv,filters...etc...]
"""
## arch 2 one-hot
one_hot_arch_matrix = self.mdl_str2onehot(mdl_str)
## hparams 2 matrix
hp_params_matrix = self.mdl_str2hp_matrix(mdl_str)
##append arch + hparam vecs
feature_matrix = np.concatenate((one_hot_arch_matrix, hp_params_matrix),axis=1)
return feature_matrix
def parse_optimizer_string(self, opt_str):
"""
Parses the optimizer string and gets all its parameters
:param str opt_str: optimizer string for the model
:return list param_vector: python list of optimizer parameters
"""
params = self.vocab.hpopt2idx
curr_word = ''
param_vector = np.zeros(len(params))
for i in range(len(opt_str)):
#start a new substring if there is a space
if opt_str[i] == ' ':
curr_word = ''
else:
#add the next character to the substring
curr_word += opt_str[i]
for param in params.keys():
#check if our substring is a possible parameter
if curr_word in params.keys():
#if there is a match then add to the index corresponding to the parameter
if curr_word == param:
val = ''
i += 3
#loop through the string until the entire value is found
while opt_str[i] != ' ':
val += opt_str[i]
i += 1
if val == 'False':
param_vector[int(params[curr_word])] = int(0)
elif val == 'True':
param_vector[int(params[curr_word])] = int(1)
#if not true or false put the actual value
else:
try:
param_vector[int(params[curr_word])] = int(val)
except:
param_vector[int(params[curr_word])] = float(val)
return param_vector
def optimizer_feature_vec(self, opt_str, epochs):
"""
Makes a feature_vec for the optimizer used in the model.
param str opt_str: optimizer string for the model
return list feature_vector: vector of one-hot and hp_param data
TODO: its missing the epochs...
"""
        indices = self.optimizer_str2indices(opt_str)
opt_onehot = indices2onehot(indices, len(self.vocab.optimizer_vocab))
#parses optimizer info for its parameters
        params_vector = self.parse_optimizer_string(opt_str)
#add parameters to the one hot vector
feature_vector = np.concatenate( (opt_onehot, params_vector, [epochs]) )
return feature_vector
def calculate_weight_stats(self, weights):
"""
Calculates the Statistics for the weights.
param list weights: python list of weights (initial or final)
return list weight_stats: python list of the statistics of the weights
TODO: change these to torch_uu ops so that they are done on GPU
"""
length = len(weights)
new_weights = []
for i in range(length):
#flatten each tensor
flat_weights = weights[i].flatten()
#convert each tensor to a numpy array and concatenates it to a a list
new_weights.extend(flat_weights.cpu().detach().numpy())
#calculates the stats for the weights
sum_weights = np.sum(new_weights)
max_weight = np.max(new_weights)
min_weight = np.min(new_weights)
average_weight = np.mean(new_weights)
std_dev_weight = np.std(new_weights)
weight_stats = [sum_weights,max_weight,min_weight,average_weight,std_dev_weight]
return weight_stats
def mdl_str2onehot(self, mdl_str):
'''
Makes a one-hot matrix for the arch from the (whole) model string
:param str mdl_str: string of the model e.g. e.g. nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
:return np.array one_hot_arch_matrix: one-hot matrix representation of model (nb_layers, dim)
'''
indices = self.mdl_str2indices(mdl_str)
one_hot_arch_matrix = self.indices2arch_onehot(indices)
return one_hot_arch_matrix
def mdl_str2hp_matrix(self, mdl_str):
'''
Makes a matrix for the hps from the (whole) model string
:param str mdl_str: string of the model e.g. e.g. nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
:return np.array one_hot_arch_matrix: one hot vector representation of model
'''
data = mdl_str.split(':')[1:]
nb_layers = len(data)
hp_vocab_size = len(self.vocab.hparms_vocab)
hp_params_matrix = np.zeros((nb_layers,hp_vocab_size))
for i in range(nb_layers):
hparam_vector = self.arch_parse_string(data[i])
hp_params_matrix[i,:] = hparam_vector
return hp_params_matrix
def mdl_str2indices(self, mdl_str):
'''
Returns a list of indices corresponding to the model arch of given model string.
:param str mdl_str: string of the model e.g. e.g. nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
:return list arch_indices: list of corresponding indicies in vocab of each arch layer type
'''
data = mdl_str.split(':')[1:]
nb_layers = len(data)
arch_vocab_size = len(self.vocab.architecture_vocab)
arch_indices = []
for i in range(nb_layers):
layer_type = get_type(self.vocab.architecture_vocab, data[i] )
idx_layer_type = self.vocab.architecture2idx[layer_type]
arch_indices.append(idx_layer_type)
return arch_indices
def optimizer_str2indices(self, opt_str):
"""
Returns a list of indices corresponding to the optimization of given optimization string
param str opt_str: optimizer string for the model
return list opt_indices: list of corresponding indicies in vocab of each optimizer type
"""
#vocab
opt_indices = []
for opt_token in self.vocab.optimizer_vocab:
#if vocab is in the optimizer info then append a 1
if opt_token in opt_str:
opt_indx = self.vocab.opt2idx[opt_token]
opt_indices.append(opt_indx)
break
return opt_indices
def tokens2arch_indices(self, tokens):
'''
:param list tokens: list of (string) of tokens
:return list indicies: list of (ints) of indicies
TODO:
- add logic to receive things like torch_uu.nn.Conv2d etc
'''
## check if user passed a single string
if isinstance(str(tokens), str):
token_str = tokens
return self.vocab.architecture2idx[token_str]
indicies = [ self.vocab.architecture2idx[token_str] for token_str in tokens ]
return indicies
def indices2arch_onehot(self, indices):
if isinstance(indices, int):
return indices2onehot([indices], len(self.vocab.architecture_vocab))[0]
one_hot_arch_matrix = indices2onehot(indices, len(self.vocab.architecture_vocab))
return one_hot_arch_matrix
def indices2hp_matrix(self, indices):
'''
TODO implement but we need to also change mdl_str2hp_matrix
'''
if isinstance(indices, int):
return indices2onehot([indices], len(self.vocab.hparms_vocab))[0]
one_hot_arch_hp_matrix = indices2onehot(indices, len(self.vocab.hparms_vocab))
return one_hot_arch_hp_matrix
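
# --- Illustrative sketch (not part of the original module) ---
# The helper `indices2onehot` used by DataProcessor is assumed to map a list of
# integer indices to a (len(indices), vocab_size) one-hot matrix. A minimal
# NumPy version with that assumed behaviour:
def _example_indices2onehot(indices, vocab_size):
    one_hot = np.zeros((len(indices), vocab_size))
    one_hot[np.arange(len(indices)), indices] = 1
    return one_hot
# e.g. _example_indices2onehot([0, 2], 4) -> [[1, 0, 0, 0], [0, 0, 1, 0]]
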
####
class MetaLearningDataset(Dataset):
'''
Data set for meta learning. It contains the architecture, hyperparams,
optimizer, Weights init and final, and train & test error.
note:
__len__ so that len(dataset) returns the size of the dataset.
__getitem__ to support the indexing such that dataset[i] can be used to get ith sample
'''
def __init__(self, data_path, vocab):
'''
'''
self.path = Path(data_path).expanduser()
print(str(data_path))
self.model_folders = [ f for f in self.path.iterdir() if f.is_dir() ]
self.data_processor = DataProcessor(vocab)
def __len__(self):
'''
Returns the number of data points (size of data set).
'''
return len(self.model_folders)
def __getitem__(self, idx):
'''
Gets you data point at the given index idx.
'''
## look for the model indexed with idx
mdl_name = ''
for f in self.model_folders:
# TODO fix
mdl_names = str(f)
if f'_{idx}' in mdl_names: # is this model the model # idx?
mdl_name = mdl_names
break
## generate strings to paths
data_path = str(self.path)
data_filepath = os.path.join(data_path, mdl_name)
#
metadata_filepath = os.path.join(data_filepath, f'meta_data.yml')
otherdata_filepath = os.path.join(data_filepath, f'other_data.yml')
param_stats_filepath = os.path.join(data_filepath, f'param_stats.yml')
#tensor_filepath = os.path.join(data_filepath, f'tensors.npz')
##
data_item = {}
with open(metadata_filepath, 'r') as f:
# loader of data
yamldata = yaml.load(f, Loader=Loader)
# get raw data
data_item['mdl_str'] = yamldata['arch_and_hp']
mdl_str = data_item['mdl_str']
data_item['opt_str'] = yamldata['optimizer']
opt_str = data_item['opt_str']
data_item['epochs'] = yamldata['epochs']
epochs = data_item['epochs']
data_item['batch_size_train'] = yamldata['batch_size_train']
data_item['batch_size_test'] = yamldata['batch_size_test']
data_item['batch_size_val'] = yamldata['batch_size_val']
try:
criterion = yamldata['criteron']
except:
criterion = yamldata['criterion']
opt_hp = self.data_processor.parse_optimizer_string(opt_str)
opt_hp = np.concatenate(([epochs],opt_hp) )
#
data_item['train_error'] = yamldata['train_error']
data_item['test_error'] = yamldata['test_error']
data_item['train_loss'] = yamldata['train_loss']
data_item['test_loss'] = yamldata['test_loss']
## get arch indices and hyperparams
arch_indices = self.data_processor.mdl_str2indices(mdl_str)
data_item['arch_indices'] = arch_indices
arch_hp = self.data_processor.mdl_str2hp_matrix(mdl_str)
data_item['arch_hp'] = arch_hp
## get hyperparams indices and hyperparams
opt_indices = self.data_processor.optimizer_str2indices(opt_str)
data_item['opt_indices'] = opt_indices
opt_hp = self.data_processor.parse_optimizer_string(opt_str)
data_item['opt_hp'] = opt_hp
with open(otherdata_filepath, 'r') as f:
yamldata = yaml.load(f, Loader=Loader)
#
data_item['test_accs'] = yamldata['test_accs']
data_item['test_errors'] = yamldata['test_errors']
data_item['test_losses'] = yamldata['test_losses']
#
data_item['train_accs'] = yamldata['train_accs']
data_item['train_errors'] = yamldata['train_errors']
data_item['train_losses'] = yamldata['train_losses']
#
data_item['val_accs'] = yamldata['val_accs']
data_item['val_errors'] = yamldata['val_errors']
data_item['val_losses'] = yamldata['val_losses']
with open(param_stats_filepath, 'r') as f:
yamldata = yaml.load(f, Loader=Loader)
#
data_item['init_params_mu'] = yamldata['init_params_mu']
data_item['final_params_mu'] = yamldata['final_params_mu']
#
data_item['init_params_std'] = yamldata['init_params_std']
data_item['final_params_std'] = yamldata['final_params_std']
#
data_item['init_params_l2'] = yamldata['init_params_l2']
data_item['final_params_l2'] = yamldata['final_params_l2']
##
return data_item
class Collate_fn_onehot_general_features(object):
'''
Custom collate function that gets onehot representation for Arch blocks
and gets general features for the rest. General features are such that they
    are useful for any optimizer and initialization. e.g.
    the optimizer might be anything (even an RNN itself), so having a symbolic representation for it,
    even in onehot form, isn't general (especially if a new unknown optimizer is used that the model has never seen).
    Thus it's better to use the training/validation statistics during training (say the first 10).
Similarly for initialization (or final weights). If we use the actual weights
then we don't need a symbolic representation for the initialization algorithm.
For (space) efficiency reasons we only use statistics of the initial (and final)
weights. Mean, Std and L2 of the weights.
Custom collate function to return everything in per batch as follow:
- OneHot representation for symbols
- Arch representation concate of OneHot for Arch and Arch hyperparams [A;A_hp]
- Opt representation train history
- Net stats representation
'''
def __init__(self, device, batch_first, vocab, padding_value=-1):
'''
        NOTE: padding_value is -1 so as not to get confused with 0, which stands for special characters (TODO: check this is implemented correctly)
'''
self.device = device
self.batch_first = batch_first
self.data_processor = DataProcessor(vocab)
self.padding_value = padding_value
def arch2OneHot(self, indicies):
'''
Maps indices in the batch to tensor OneHot representation
'''
vocab_size = len(self.data_processor.vocab.architecture_vocab)
return torch.Tensor(indices2onehot(indicies, vocab_size)).to(self.device)
def opt2OneHot(self, indicies):
'''
        Maps optimizer indices in the batch to tensor OneHot representation
'''
vocab_size = len(self.data_processor.vocab.optimizer_vocab)
return torch.Tensor(indices2onehot(indicies, vocab_size)).to(self.device)
def Tensor(self, t):
'''
        Maps to a torch tensor on the proper device (cpu or gpu)
'''
return torch.Tensor(t).to(self.device)
def __call__(self, batch):
'''
        Gets the batch in dictionary form ready to be processed by a NN (i.e. it's a proper tensor)
        :param list batch: list of samples in a batch. Samples produced by Dataset, which is a dictionary with all the raw data of a data point model.
        :return torch.Tensor batch_arch_rep: OneHot for each layer type (batch_size, max_len, vocab_size)
        :return torch.Tensor arch_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
        :return torch.Tensor arch_mask: mask with zeros on padding and ones elsewhere (batch_size, max_len, vocab_size)
        :return torch.Tensor batch_arch_hp_rep: vector form for arch hp (batch_size, max_len, vocab_size)
        :return torch.Tensor arch_hp_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
        :return torch.Tensor arch_hp_mask: mask with zeros on padding and ones elsewhere (batch_size, max_len, vocab_size)
        :return torch.Tensor batch_opt: OneHot for which optimizer was used (batch_size, vocab_size)
        :return torch.Tensor batch_opt_hp: vector form for opt hp (batch_size, vocab_size)
        :return torch.Tensor batch_W_init_rep: tensor with mean and std for each weight in the sequence. (batch_size, max_len, 2)
        :return torch.Tensor W_init_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
        :return torch.Tensor W_init_mask: mask with zeros on padding and ones elsewhere (batch_size, max_len, vocab_size)
        :return torch.Tensor batch_W_final_rep: tensor with mean and std for each weight in the sequence. (batch_size, max_len, 2)
        :return torch.Tensor W_final_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
        :return torch.Tensor W_final_mask: mask with zeros on padding and ones elsewhere (batch_size, max_len, vocab_size)
        :return torch.Tensor batch_train_errorr: tensor with train errors for each sample in the batch (batch_size)
'''
all_batch_info = {}
##
batch_mdl_str = [ sample['mdl_str'] for sample in batch ]
batch_mdl_str = {'mdl_str':batch_mdl_str}
## get arch representation, A
batch_arch_rep, arch_lengths, arch_mask = self.get_arch_rep(batch)
arch = {'batch_arch_rep':batch_arch_rep, 'arch_lengths':arch_lengths, 'arch_mask':arch_mask}
## get arch hyper param representation, Ahp
batch_arch_hp_rep, arch_hp_lengths, arch_hp_mask = self.get_arch_hp_rep(batch)
arch_hp ={'batch_arch_hp_rep':batch_arch_hp_rep, 'arch_hp_lengths':arch_hp_lengths, 'arch_hp_mask':arch_hp_mask}
## get opt representation, O
# batch_opt = self.get_opt_rep(batch)
# opt = {'batch_opt':batch_opt}
## get opt hp, Ohp
# batch_opt_hp = self.get_opt_hp_rep(batch)
# opt_hp = {'batch_opt_hp':batch_opt_hp}
train_history, val_history = self.get_training_validation_history(batch)
opt, opt_hp = {'train_history':train_history}, {'val_history':val_history}
## get W representation
weight_stats = self.get_all_weight_stats(batch)
## get train errors for models
batch_train_errorr = self.Tensor([ float(sample['train_error']) for sample in batch ])
train_error = {'batch_train_error':batch_train_errorr}
##
batch_test_errorr = self.Tensor([ float(sample['test_error']) for sample in batch ])
#test_error = {'batch_test_error':batch_test_errorr}
test_error = batch_test_errorr
## collect return batch
new_batch = ({**batch_mdl_str, **arch, **arch_hp, **opt, **opt_hp, **weight_stats, **train_error}, test_error)
#print(new_batch['train_history'])
return new_batch
#return batch_arch_rep, batch_arch_hp_rep, batch_opt, batch_W_init, batch_W_final, batch_train_errorr
def get_arch_rep(self, batch):
'''
        Converts architecture indices to OneHot.
        :param list batch: list of samples in a batch (in dictionary form)
        :return torch.Tensor batch_arch_rep: OneHot for each layer type (batch_size, max_len, vocab_size)
        :return torch.Tensor arch_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
        :return torch.Tensor arch_mask: mask with zeros on padding and ones elsewhere (batch_size, max_len, vocab_size)
'''
## get lengths of sequences for each sample in the batch
arch_lengths = self.Tensor([ len(sample['arch_indices']) for sample in batch ]).long()
## make array of one hot tensors for each example in batch
batch = [ self.arch2OneHot(sample['arch_indices']) for sample in batch ]
        ## pad (and concatenate) the tensors in the whole batch
batch_arch_rep = torch.nn.utils.rnn.pad_sequence(batch, batch_first=self.batch_first, padding_value=self.padding_value)
## compute mask
arch_mask = (batch_arch_rep != self.padding_value)
##
return batch_arch_rep.to(self.device), arch_lengths.to(self.device), arch_mask.to(self.device)
def get_arch_hp_rep(self, batch):
'''
Converts architecture hyperparams to tensor form (not OneHot, just stacks values)
:param list batch: list of samples in a batch (in dictionary form)
        :return torch.Tensor batch_arch_hp_rep: vector form for arch hp (batch_size, max_len, vocab_size)
        :return torch.Tensor arch_hp_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
        :return torch.Tensor arch_hp_mask: mask with zeros on padding and ones elsewhere (batch_size, max_len, vocab_size)
'''
## get lengths of sequences for each sample in the batch
arch_hp_lengths = self.Tensor([ len(sample['arch_hp']) for sample in batch ]).long()
        ## pad
batch = [ self.Tensor(sample['arch_hp']) for sample in batch ]
batch_arch_hp_rep = torch.nn.utils.rnn.pad_sequence(batch, batch_first=self.batch_first, padding_value=self.padding_value)
## compute mask
arch_hp_mask = (batch_arch_hp_rep != self.padding_value)
##
return batch_arch_hp_rep.to(self.device), arch_hp_lengths.to(self.device), arch_hp_mask.to(self.device)
def get_opt_rep(self, batch):
'''
Get OneHot for optimizer.
:param list batch: list of samples in a batch. Samples produced by Dataset, which is a dictionary with all the raw data of a data point model.
        :return torch.Tensor batch_opt: OneHot for which optimizer was used (batch_size, vocab_size)
'''
batch = [ self.opt2OneHot(sample['opt_indices']) for sample in batch ]
batch_opt = torch.cat(batch,dim=0)
return batch_opt.to(self.device)
def get_opt_hp_rep(self, batch):
'''
Converts optimizer hyperparams to tensor form (not OneHot, just stacks values)
:param list batch: list of samples in a batch. Samples produced by Dataset, which is a dictionary with all the raw data of a data point model.
        :return torch.Tensor batch_opt_hp: vector form for opt hp (batch_size, vocab_size)
'''
batch = [ self.Tensor(sample['opt_hp']) for sample in batch ]
batch_opt_hp = torch.cat(batch, dim=0)
return batch_opt_hp.to(self.device)
def get_training_validation_history(self,batch):
Tensor = torch.Tensor
train_history_batch = []
val_history_batch = []
for sample in batch:
##
train_errors, train_losses = Tensor(sample['train_errors']), Tensor(sample['train_losses'])
train = torch.stack((train_errors,train_losses)) # (2,seq_len)
#train = train.unsqueeze(2) # so that convolution layers can take it (2,seq_len,1)
train_history_batch.append(train)
##
val_errors, val_losses = Tensor(sample['val_errors']), Tensor(sample['val_losses'])
val = torch.stack((val_errors,val_losses)) # (2,seq_len)
#val = val.unsqueeze(2) # so that convolution layers can take it (2,seq_len,1)
val_history_batch.append(val)
##
train_history_batch = torch.nn.utils.rnn.pad_sequence(train_history_batch, batch_first=self.batch_first, padding_value=self.padding_value)
val_history_batch = torch.nn.utils.rnn.pad_sequence(val_history_batch, batch_first=self.batch_first, padding_value=self.padding_value)
        #print(f'val_history_batch = {val_history_batch.size()}')
return train_history_batch.to(self.device), val_history_batch.to(self.device)
def get_all_weight_stats(self, batch):
'''
:param list batch: list of samples in a batch. Samples produced by Dataset, which is a dictionary with all the raw data of a data point model.
        :return torch.Tensor batch_W_rep: tensor with mean and std for each weight in the sequence. (batch_size, max_len, 2)
        :return torch.Tensor W_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
        :return torch.Tensor W_mask: mask with zeros on padding and ones elsewhere (batch_size, max_len, vocab_size)
'''
weight_stats = {}
with torch.no_grad():
##
batch_init_params_mu_rep, init_params_mu_lengths, init_params_mu_mask = self.get_weight_stat(batch,'init_params_mu')
batch_final_params_mu_rep, final_params_mu_lengths, final_params_mu_mask = self.get_weight_stat(batch,'final_params_mu')
new_weights_stats_init = {'batch_init_params_mu_rep':batch_init_params_mu_rep,'init_params_mu_lengths':init_params_mu_lengths, 'init_params_mu_mask':init_params_mu_mask}
new_weights_stats_final = {'batch_final_params_mu_rep':batch_final_params_mu_rep,'final_params_mu_lengths':final_params_mu_lengths,'final_params_mu_mask':final_params_mu_mask}
weight_stats = dict(weight_stats, **new_weights_stats_init)
weight_stats = dict(weight_stats, **new_weights_stats_final)
##
batch_init_params_std_rep, init_params_std_lengths, init_params_std_mask = self.get_weight_stat(batch,'init_params_std')
batch_final_params_std_rep, final_params_std_lengths, final_params_std_mask = self.get_weight_stat(batch,'final_params_std')
new_weights_stats_init = {'batch_init_params_std_rep':batch_init_params_std_rep,'init_params_std_lengths':init_params_std_lengths, 'init_params_std_mask':init_params_std_mask}
new_weights_stats_final = {'batch_final_params_std_rep':batch_final_params_std_rep,'final_params_std_lengths':final_params_std_lengths,'final_params_std_mask':final_params_std_mask}
weight_stats = dict(weight_stats, **new_weights_stats_init)
weight_stats = dict(weight_stats, **new_weights_stats_final)
##
batch_init_params_l2_rep, init_params_l2_lengths, init_params_l2_mask = self.get_weight_stat(batch,'init_params_l2')
batch_final_params_l2_rep, final_params_l2_lengths, final_params_l2_mask = self.get_weight_stat(batch,'final_params_l2')
new_weights_stats_init = {'batch_init_params_l2_rep':batch_init_params_l2_rep,'init_params_l2_lengths':init_params_l2_lengths, 'init_params_l2_mask':init_params_l2_mask}
new_weights_stats_final = {'batch_final_params_l2_rep':batch_final_params_l2_rep,'final_params_l2_lengths':final_params_l2_lengths,'final_params_l2_mask':final_params_l2_mask}
weight_stats = dict(weight_stats, **new_weights_stats_init)
weight_stats = dict(weight_stats, **new_weights_stats_final)
##
return weight_stats
def get_weight_stat(self, batch, W_type):
## get lengths of sequences for each sample in the batch
weight_lengths = self.Tensor([ len(sample[W_type]) for sample in batch ]).long()
        ## convert each sample's weight stats to a tensor
        new_batch = []
        for i, sample in enumerate(batch):
            try:
                tensor_sample = self.Tensor(sample[W_type])
                new_batch.append(tensor_sample)
            except Exception as e:
                raise RuntimeError(f'Failed to convert {W_type} of sample {i} to a tensor: {e}')
        ## pad batch sequences
batch_weight_rep = torch.nn.utils.rnn.pad_sequence(new_batch, batch_first=self.batch_first, padding_value=self.padding_value)
## compute mask
weight_mask = (batch_weight_rep != self.padding_value)
##
return batch_weight_rep.to(self.device), weight_lengths.to(self.device), weight_mask.to(self.device)
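
# --- Illustrative usage sketch (not part of the original module) ---
# Assuming a `vocab` object compatible with DataProcessor and a folder of saved
# model runs at `data_path`, the dataset and collate function above could be
# wired into a standard DataLoader roughly like this:
def _example_build_loader(data_path, vocab, device, batch_size=8):
    from torch.utils.data import DataLoader
    dataset = MetaLearningDataset(data_path, vocab)
    collate = Collate_fn_onehot_general_features(device, batch_first=True, vocab=vocab)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate)
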
def testing():
pass
if __name__ == '__main__':
testing()
|
the-stack_0_7123 | """
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
from collections import OrderedDict
import numpy as np
import scipy.misc
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.initializers as KI
import keras.engine as KE
import keras.models as KM
import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
    prints its shape, min, and max values.
"""
    if array is not None:
        text = text.ljust(25)
        text += ("shape: {:20}  ".format(str(array.shape)))
        if array.size:
            text += ("min: {:10.5f}  max: {:10.5f}".format(array.min(), array.max()))
        else:
            text += ("min: {:10}  max: {:10}".format("", ""))
print(text)
class BatchNorm(KL.BatchNormalization):
"""Batch Normalization class. Subclasses the Keras BN class and
hardcodes training=False so the BN layer doesn't update
during training.
Batch normalization has a negative effect on training if batches are small
so we disable it here.
"""
def call(self, inputs, training=None):
return super(self.__class__, self).call(inputs, training=False)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(axis=3, name=bn_name_base + '1')(shortcut)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False):
assert architecture in ["resnet50", "resnet101"]
print("using architecture:{}".format(architecture))
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(axis=3, name='bn_conv1')(x)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
else:
C5 = None
return [C1, C2, C3, C4, C5]
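
# --- Illustrative usage sketch (not part of the original file) ---
# The stage outputs returned above are what the rest of the model feeds into a
# Feature Pyramid Network; a minimal sketch of building the backbone on a Keras
# input tensor (the choice of resnet101 and stage5=True are assumptions):
def _example_backbone(input_image):
    C1, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True)
    # C2..C5 feed the FPN lateral connections; C1 is mainly useful for its stride.
    return [C2, C3, C4, C5]
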
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 4] where each row is y1, x1, y2, x2
deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
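
# --- Illustrative sketch (not part of the original file) ---
# The delta parameterization above, ported to plain NumPy for a single box: a
# box (y1, x1, y2, x2) = (0, 0, 10, 10) with deltas (0.1, 0.0, log(2), 0.0)
# keeps its width, doubles its height, and shifts its center down by
# 0.1 * height = 1 pixel, giving (-4, 0, 16, 10).
def _example_apply_box_delta_numpy(box, delta):
    y1, x1, y2, x2 = box
    h, w = y2 - y1, x2 - x1
    cy, cx = y1 + 0.5 * h, x1 + 0.5 * w
    cy += delta[0] * h
    cx += delta[1] * w
    h *= np.exp(delta[2])
    w *= np.exp(delta[3])
    return np.array([cy - 0.5 * h, cx - 0.5 * w, cy + 0.5 * h, cx + 0.5 * w])
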
def clip_boxes_graph(boxes, window):
"""
boxes: [N, 4] each row is y1, x1, y2, x2
window: [4] in the form y1, x1, y2, x2
"""
# Split corners
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, anchors, (bg prob, fg prob)]
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, anchors,
config=None, **kwargs):
"""
anchors: [N, (y1, x1, y2, x2)] anchors defined in image coordinates
"""
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
self.anchors = anchors.astype(np.float32)
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Base anchors
anchors = self.anchors
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = min(6000, self.anchors.shape[0])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
anchors = utils.batch_slice(ix, lambda x: tf.gather(anchors, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
height, width = self.config.IMAGE_SHAPE[:2]
window = np.array([0, 0, height, width]).astype(np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Normalize dimensions to range of 0 to 1.
normalized_boxes = boxes / np.array([[height, width, height, width]])
# Non-max suppression
def nms(normalized_boxes, scores):
indices = tf.image.non_max_suppression(
normalized_boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(normalized_boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
proposals = tf.pad(proposals, [(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([normalized_boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
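
# --- Illustrative usage sketch (not part of the original file) ---
# ProposalLayer is applied to the RPN outputs; assuming `anchors` is an [N, 4]
# array and `config` carries the fields referenced above, with hypothetical
# proposal_count/nms_threshold values:
def _example_proposal_layer(rpn_probs, rpn_bbox, anchors, config):
    return ProposalLayer(proposal_count=2000, nms_threshold=0.7,
                         anchors=anchors, config=config)([rpn_probs, rpn_bbox])
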
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementatin of Log2. TF doesn't have a native implemenation."""
return tf.log(x) / tf.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [height, width] of the output pooled regions. Usually [7, 7]
- image_shape: [height, width, channels]. Shape of input image in pixels
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- Feature maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, height, width, channels].
    The width and height are those specified in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, image_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
self.image_shape = tuple(image_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[1:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(
self.image_shape[0] * self.image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
            # Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
pooled = tf.expand_dims(pooled, 0)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[1][-1], )
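
# --- Illustrative sketch (not part of the original file) ---
# The level-assignment rule used in PyramidROIAlign (Equation 1 of the FPN
# paper) reduces to 4 + log2(roi_size / 224) for a square image: a 224-pixel
# ROI maps to P4, a 112-pixel ROI to P3, clamped to the range P2..P5.
def _example_roi_level(roi_size_pixels):
    level = 4 + int(round(math.log2(roi_size_pixels / 224.0)))
    return int(min(5, max(2, level)))
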
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
    # 1. Tile boxes2 and repeat boxes1. This allows us to compare
    # every boxes1 against every boxes2 without loops.
    # TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
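
# --- Illustrative sketch (not part of the original file) ---
# The IoU computed above, for a single pair of boxes in plain Python: two unit
# boxes that overlap on half their area give IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
def _example_iou(box1, box2):
    y1, x1 = max(box1[0], box2[0]), max(box1[1], box2[1])
    y2, x2 = min(box1[2], box2[2]), min(box1[3], box2[3])
    intersection = max(y2 - y1, 0) * max(x2 - x1, 0)
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    return intersection / (area1 + area2 - intersection)
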
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
Class-specific bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
    # Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
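
# --- Illustrative sketch (not part of the original file) ---
# The ROI subsampling arithmetic above in plain Python, with hypothetical
# config values (TRAIN_ROIS_PER_IMAGE=200, ROI_POSITIVE_RATIO=0.25): positives
# are capped at 25% of the ROI budget and negatives fill in the remaining 75%.
def _example_roi_counts(train_rois_per_image=200, roi_positive_ratio=0.25, num_positives_found=100):
    positive_count = min(int(train_rois_per_image * roi_positive_ratio), num_positives_found)
    negative_count = int((1.0 / roi_positive_ratio) * positive_count) - positive_count
    return positive_count, negative_count  # with the defaults above: (50, 150)
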
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
(dy, dx, log(dh), log(dw), class_id)]
Class-specific bbox refinements.
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, 1), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def clip_to_window(window, boxes):
"""
window: (y1, x1, y2, x2). The window in the image we want to clip to.
boxes: [N, (y1, x1, y2, x2)]
"""
boxes[:, 0] = np.maximum(np.minimum(boxes[:, 0], window[2]), window[0])
boxes[:, 1] = np.maximum(np.minimum(boxes[:, 1], window[3]), window[1])
boxes[:, 2] = np.maximum(np.minimum(boxes[:, 2], window[2]), window[0])
boxes[:, 3] = np.maximum(np.minimum(boxes[:, 3], window[3]), window[1])
return boxes
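
# --- Illustrative sketch (not part of the original file) ---
# Worked example of the in-place clipping above: with window (0, 0, 100, 100),
# the box (-5, 10, 120, 90) becomes (0, 10, 100, 90).
def _example_clip_to_window():
    boxes = np.array([[-5., 10., 120., 90.]])
    return clip_to_window((0, 0, 100, 100), boxes)  # -> [[0., 10., 100., 90.]]
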
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in image coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where
coordinates are in image domain.
"""
# Class IDs per ROI
class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
    # Convert coordinates to image domain
# TODO: better to keep them normalized until later
height, width = config.IMAGE_SHAPE[:2]
refined_rois *= tf.constant([height, width, height, width], dtype=tf.float32)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
    # Round and cast to int since we're dealing with pixels now
refined_rois = tf.to_int32(tf.rint(refined_rois))
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.to_float(tf.gather(pre_nms_rois, ixs)),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
class_keep = tf.pad(class_keep, [(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are in image domain.
detections = tf.concat([
tf.to_float(tf.gather(refined_rois, keep)),
tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
return detections
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are in image domain
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Run detection refinement graph on each item in the batch
_, _, window, _ = parse_image_meta_graph(image_meta)
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
# Region Proposal Network (RPN)
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the featuremap
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location, depth]
# where depth is [x, y, log(w), log(h)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
    rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
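
# --- Illustrative usage sketch (not part of the original file) ---
# The RPN model above is built once and shared across pyramid levels; assuming
# 3 anchors per location and 256-channel feature maps, applying it to a list of
# FPN levels looks roughly like this:
def _example_apply_rpn(rpn_feature_maps):
    rpn = build_rpn_model(anchor_stride=1, anchors_per_location=3, depth=256)
    # each element is [rpn_class_logits, rpn_probs, rpn_bbox] for one level
    return [rpn([p]) for p in rpn_feature_maps]
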
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps,
image_shape, pool_size, num_classes):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_shape: [height, width, depth]
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
Returns:
logits: [N, NUM_CLASSES] classifier logits (before softmax)
probs: [N, NUM_CLASSES] classifier probabilities
bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size], image_shape,
name="roi_align_classifier")([rois] + feature_maps)
# Two 1024 FC layers (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(axis=3), name='mrcnn_class_bn1')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_class_bn2')(x)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps,
image_shape, pool_size, num_classes):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_shape: [height, width, depth]
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
Returns: Masks [batch, roi_count, height, width, num_classes]
"""
# ROI Pooling
# Shape: [batch, boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size], image_shape,
name="roi_align_mask")([rois] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn1')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn2')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn3')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn4')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
    y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
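
# --- Illustrative sketch (not part of the original file) ---
# Scalar behaviour of the Smooth-L1 loss above: quadratic for |diff| < 1 and
# linear beyond, e.g. diff = 0.5 gives 0.125 while diff = 2.0 gives 1.5.
def _example_smooth_l1_scalar(diff):
    return 0.5 * diff ** 2 if abs(diff) < 1.0 else abs(diff) - 0.5
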
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Crossentropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
# TODO: use smooth_l1_loss() rather than reimplementing here
# to reduce code duplication
diff = K.abs(target_bbox - rpn_bbox)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
    # Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
loss = K.reshape(loss, [1, 1])
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
loss = K.reshape(loss, [1, 1])
return loss
############################################################
# Data Generator
############################################################
from keras.preprocessing.image import ImageDataGenerator
def argument_img_mask(image, mask, class_ids):
common_seed = 7
#print("origin image shape:{}, origina mask shape:{}".format(image.shape, mask.shape))
data_gen_args = dict(horizontal_flip=True,
vertical_flip=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1
)
xtr = np.expand_dims(image, 0)
image_datagen = ImageDataGenerator(**data_gen_args)
image_datagen.fit(xtr, seed=common_seed)
image_generator = image_datagen.flow(xtr, batch_size=1, seed=common_seed)
arg_image = image_generator.next()[0].astype(image.dtype)
arg_mask = np.zeros(mask.shape, dtype=mask.dtype)
for i in range(mask.shape[-1]):
mask_datagen = ImageDataGenerator(**data_gen_args)
masktr = np.expand_dims(mask[:,:,i], 0)
masktr = np.expand_dims(masktr, -1)
mask_datagen.fit(masktr, seed=common_seed)
mask_generator = mask_datagen.flow(masktr, batch_size=1, seed=common_seed)
arg_mask_ = np.squeeze(mask_generator.next()[0], axis=-1)
# print("arg_mask_ shape:{}".format(arg_mask_.shape))
arg_mask[:,:,i] = arg_mask_.astype(mask[:,:,i].dtype)
    # Remove mask instances that no longer contain any pixels after augmentation
non_zero_mask = arg_mask[:,:, ~np.all(arg_mask == 0, axis=(0, 1))]
class_ids = class_ids[:non_zero_mask.shape[-1]]
#print("arg_mask shape:{}, non_zero_mask shape:{}, class_ids shape:{}".format(arg_mask.shape, non_zero_mask.shape, class_ids.shape))
# print("arg_mask shape:{}".format(arg_mask.shape))
return (arg_image,non_zero_mask, class_ids)
import cv2
def data_augmentation0(input_images,
h_flip=True,
v_flip=True,
rotation=360,
zoom=1.5,
brightness=0.5,
crop=False):
# first is input all other are output
# Data augmentation
output_images = input_images.copy()
# random crop
# if crop and random.randint(0, 1):
# h, w, c = output_images[0].shape
# upper_h, new_h, upper_w, new_w = locs_for_random_crop(h, w)
# output_images = [input_image[upper_h:upper_h + new_h, upper_w:upper_w + new_w, :] for input_image in output_images]
# random flip
if h_flip and random.randint(0, 1):
output_images = [cv2.flip(input_image, 1) for input_image in output_images]
if v_flip and random.randint(0, 1):
output_images = [cv2.flip(input_image, 0) for input_image in output_images]
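    # Random brightness/gamma adjustment: build a lookup table for the gamma
    # curve and apply it to the image values with cv2.LUT below.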
factor = 1.0 + abs(random.gauss(mu=0.0, sigma=brightness))
if random.randint(0, 1):
factor = 1.0 / factor
table = np.array([((i / 255.0) ** factor) * 255 for i in np.arange(0, 256)]).astype(np.uint8)
output_images[0] = cv2.LUT(output_images[0], table)
if rotation:
angle = random.randint(0, rotation)
else:
angle = 0.0
if zoom:
scale = random.randint(50, zoom * 100) / 100
else:
scale = 1.0
# print(angle, scale)
if rotation or zoom:
for i, input_image in enumerate(output_images):
M = cv2.getRotationMatrix2D((input_image.shape[1] // 2, input_image.shape[0] // 2), angle, scale)
# M = cv2.getRotationMatrix2D((input_image.shape[1] // 2, input_image.shape[0] // 2), 45, 1)
output_images[i] = cv2.warpAffine(input_image, M, (input_image.shape[1], input_image.shape[0]))
# print('len of output %s' % len(output_images))
return [input_image.astype(np.uint8) for input_image in output_images]
def data_augmentation(input_image, masks,
h_flip=True,
v_flip=True,
rotation=360,
zoom=1.5,
brightness=0.5,
crop=False):
# first is input all other are output
# Data augmentation
output_image = input_image.copy()
output_masks = masks.copy()
# random crop
# if crop and random.randint(0, 1):
# h, w, c = output_images[0].shape
# upper_h, new_h, upper_w, new_w = locs_for_random_crop(h, w)
# output_images = [input_image[upper_h:upper_h + new_h, upper_w:upper_w + new_w, :] for input_image in output_images]
# random flip
if h_flip and random.randint(0, 1):
output_image = np.fliplr(output_image)
output_masks = np.fliplr(output_masks)
if v_flip and random.randint(0, 1):
output_image = np.flipud(output_image)
output_masks = np.flipud(output_masks)
factor = 1.0 + abs(random.gauss(mu=0.0, sigma=brightness))
if random.randint(0, 1):
factor = 1.0 / factor
table = np.array([((i / 255.0) ** factor) * 255 for i in np.arange(0, 256)]).astype(np.uint8)
output_image = cv2.LUT(output_image, table)
if rotation:
        rotate_times = random.randint(0, int(rotation // 90))
    else:
        rotate_times = 0
for r in range(0, rotate_times):
output_image = np.rot90(output_image)
output_masks = np.rot90(output_masks)
# if zoom:
# scale = random.randint(50, zoom * 100) / 100
# else:
# scale = 1.0
# # print(angle, scale)
# if rotation or zoom:
# for i, input_image in enumerate(output_images):
# M = cv2.getRotationMatrix2D((input_image.shape[1] // 2, input_image.shape[0] // 2), angle, scale)
# # M = cv2.getRotationMatrix2D((input_image.shape[1] // 2, input_image.shape[0] // 2), 45, 1)
# output_images[i] = cv2.warpAffine(input_image, M, (input_image.shape[1], input_image.shape[0]))
# # print('len of output %s' % len(output_images))
return output_image, output_masks
from skimage.transform import rescale, resize
def random_crop(img, mask, class_ids, width = 500, height = 500):
assert img.shape[0] == mask.shape[0]
assert img.shape[1] == mask.shape[1]
h, w, _ = img.shape
if h< height or w < width:
img = resize(img, (2*h, 2*w))
resized_mask = np.zeros((h*2, w*2, mask.shape[-1]), dtype=mask.dtype)
for i in range(0, mask.shape[-1]):
# resized_mask[:,:,i] = cv2.resize(mask[:, :, i], (2*w, 2*h))
resized_mask[:,:,i] = resize(mask[:, :, i], (2*h, 2*w))
mask = resized_mask
h, w, _ = img.shape
assert(h>=height)
assert(w>=width)
x = random.randint(0, img.shape[1] - width)
y = random.randint(0, img.shape[0] - height)
img = img[y:y+height, x:x+width]
mask = mask[y:y+height, x:x+width]
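    # Drop instance channels that became empty after cropping and trim
    # class_ids so it stays aligned with the remaining mask channels.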
mask = mask[:, :, ~np.all(mask==0, axis=(0,1))]
class_ids = class_ids[:mask.shape[-1]]
return img, mask, class_ids
def load_image_gt(dataset, config, image_id, augment=False,
use_mini_mask=False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: If true, apply random image augmentation. Currently, only
horizontal flipping is offered.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
# Random cropping.
if augment:
image, mask, class_ids = random_crop(image, mask, class_ids)
if random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
if random.randint(0, 1):
image = np.flipud(image)
mask = np.flipud(mask)
if random.randint(0, 1):
image = np.rot90(image)
mask = np.rot90(mask)
# #brightness
# brightness=0.5
# factor = 1.0 + abs(random.gauss(mu=0.0, sigma=brightness))
# if random.randint(0, 1):
# factor = 1.0 / factor
# table = np.array([((i / 255.0) ** factor) * 255 for i in np.arange(0, 256)]).astype(np.uint8)
# image = cv2.LUT(image, table)
shape = image.shape
image, window, scale, padding = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
max_dim=config.IMAGE_MAX_DIM,
padding=config.IMAGE_PADDING)
mask = utils.resize_mask(mask, scale, padding)
# Random horizontal flips.
# if augment:
# if random.randint(0, 1):
# image = np.fliplr(image)
# mask = np.fliplr(mask)
    # Remove mask instances that no longer contain any pixels after augmentation
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, shape, window, active_class_ids)
return image, image_meta, class_ids, bbox, mask
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks.
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(scipy.misc.imresize(class_mask.astype(float), (gt_h, gt_w),
interp='nearest') / 255.0).astype(bool)
# Place the mini batch in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = scipy.misc.imresize(
m.astype(float), config.MASK_SHAPE, interp='nearest') / 255.0
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
# All anchors don't intersect a crowd
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# TODO: If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argmax(overlaps, axis=0)
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
        # Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
def data_generator(dataset, config, shuffle=True, augment=True, random_rois=0,
batch_size=1, detection_targets=False):
"""A generator that returns images and corresponding target class ids,
bounding box deltas, and masks.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augment: If True, applies image augmentation to images (currently only
horizontal flips are supported)
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
batch_size: How many images to return in each call
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.
Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, size of image meta]
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
b = 0 # batch item index
image_index = -1
image_ids = np.copy(dataset.image_ids)
error_count = 0
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
    # Keras requires a generator to run indefinitely.
while True:
try:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(image_ids)
if shuffle and image_index == 0:
np.random.shuffle(image_ids)
# Get GT bounding boxes and masks for image.
image_id = image_ids[image_index]
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment,
use_mini_mask=config.USE_MINI_MASK)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
gt_class_ids, gt_boxes, config)
# Mask R-CNN Targets
if random_rois:
rpn_rois = generate_random_rois(
image.shape, random_rois, gt_class_ids, gt_boxes)
if detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
if config.USE_MINI_MASK:
batch_gt_masks = np.zeros((batch_size, config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1],
config.MAX_GT_INSTANCES))
else:
batch_gt_masks = np.zeros(
(batch_size, image.shape[0], image.shape[1], config.MAX_GT_INSTANCES))
if random_rois:
batch_rpn_rois = np.zeros(
(batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if detection_targets:
batch_rois = np.zeros(
(batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
# If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if random_rois:
batch_rpn_rois[b] = rpn_rois
if detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
# Batch full?
if b >= batch_size:
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if random_rois:
inputs.extend([batch_rpn_rois])
if detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
yield inputs, outputs
# start a new batch
b = 0
except (GeneratorExit, KeyboardInterrupt):
raise
except:
# Log it and skip the image
logging.exception("Error processing image {}".format(
dataset.image_info[image_id]))
error_count += 1
if error_count > 5:
raise
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
input_shape: The shape of the input image.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
        # Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
raise Exception("Image size must be dividable by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
# Inputs
input_image = KL.Input(
shape=config.IMAGE_SHAPE.tolist(), name="input_image")
input_image_meta = KL.Input(shape=[None], name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
h, w = K.shape(input_image)[1], K.shape(input_image)[2]
image_scale = K.cast(K.stack([h, w, h, w], axis=0), tf.float32)
gt_boxes = KL.Lambda(lambda x: x / image_scale)(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
        # Stage 5 is created here as well (stage5=True), so C5 is available for the FPN.
_, C2, C3, C4, C5 = resnet_graph(input_image, config.RESNET, stage5=True)
# Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Generate Anchors
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), 256)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
anchors=self.anchors,
config=config)([rpn_class, rpn_bbox])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
_, _, _, active_class_ids = KL.Lambda(lambda x: parse_image_meta_graph(x),
mask=[None, None, None, None])(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates to 0-1 range.
target_rois = KL.Lambda(lambda x: K.cast(
x, tf.float32) / image_scale[:4])(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
config.POOL_SIZE, config.NUM_CLASSES)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
config.IMAGE_SHAPE,
config.MASK_POOL_SIZE,
config.NUM_CLASSES)
            # TODO: clean up (use tf.identity if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
config.POOL_SIZE, config.NUM_CLASSES)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Convert boxes to normalized coordinates
# TODO: let DetectionLayer return normalized coordinates to avoid
# unnecessary conversions
h, w = config.IMAGE_SHAPE[:2]
detection_boxes = KL.Lambda(
lambda x: x[..., :4] / np.array([h, w, h, w]))(detections)
# Create masks for detections
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
config.IMAGE_SHAPE,
config.MASK_POOL_SIZE,
config.NUM_CLASSES)
model = KM.Model([input_image, input_image_meta],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
log_dir: The directory where events and weights are saved
checkpoint_path: the path to the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
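        # Directory names embed the config name plus a timestamp, so a plain
        # lexicographic sort puts the most recent training run last.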
if not dir_names:
return None, None
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
return dir_name, None
checkpoint = os.path.join(dir_name, checkpoints[-1])
return dir_name, checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the correspoding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
        exclude: list of layer names to exclude
"""
import h5py
from keras.engine import topology
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
topology.load_weights_from_hdf5_group_by_name(f, layers)
else:
topology.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
        optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum, decay=0.1,
clipnorm=5.0)
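        # NOTE: adam_opt below is an alternative optimizer kept for
        # experimentation; only the SGD optimizer above is passed to compile().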
adam_opt = keras.optimizers.Adam(lr=learning_rate)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = ["rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
self.keras_model.add_loss(
tf.reduce_mean(layer.output, keep_dims=True))
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
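        # All losses were registered via add_loss() above, so the per-output
        # loss list is just None placeholders.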
self.keras_model.compile(optimizer=optimizer, loss=[
None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
self.keras_model.metrics_tensors.append(tf.reduce_mean(
layer.output, keep_dims=True))
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
            # Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
            # Continue from where we left off. Get epoch and date from the file name
# A sample model path might look like:
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
self.epoch = int(m.group(6)) + 1
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
            are considered to be done already, so this actually determines
            the epochs to train in total rather than in this particular
call.
        layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
            heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE,
augment=False)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
        if os.name == 'nt':
workers = 0
else:
workers = max(self.config.BATCH_SIZE // 2, 2)
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=False,
)
self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
        images: List of image matrices [height,width,depth]. Images can have
different sizes.
        Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image to fit the model expected size
# TODO: move resizing to mold_image()
molded_image, window, scale, padding = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
max_dim=self.config.IMAGE_MAX_DIM,
padding=self.config.IMAGE_PADDING)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, window,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)]
mrcnn_mask: [N, height, width, num_classes]
image_shape: [height, width, depth] Original size of the image before resizing
window: [y1, x1, y2, x2] Box in the image where the real image is
excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
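        # For each detection, keep only the mask channel of its predicted class.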
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Compute scale and shift to translate coordinates to image domain.
h_scale = image_shape[0] / (window[2] - window[0])
w_scale = image_shape[1] / (window[3] - window[1])
scale = min(h_scale, w_scale)
shift = window[:2] # y, x
scales = np.array([scale, scale, scale, scale])
shifts = np.array([shift[0], shift[1], shift[0], shift[1]])
# Translate bounding boxes to image domain
boxes = np.multiply(boxes - shifts, scales).astype(np.int32)
# Filter out detections with zero area. Often only happens in early
# stages of training when the network weights are still a bit random.
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty((0,) + masks.shape[1:3])
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
# Run object detection
detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, \
rois, rpn_class, rpn_bbox =\
self.keras_model.predict([molded_images, image_metas], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs):
"""Runs a sub-set of the computation graph that computes the given
outputs.
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Run inference
molded_images, image_metas, windows = self.mold_inputs(images)
# TODO: support training mode?
# if TEST_MODE == "training":
# model_in = [molded_images, image_metas,
# target_rpn_match, target_rpn_bbox,
# gt_boxes, gt_masks]
# if not config.USE_RPN_ROIS:
# model_in.append(target_rois)
# if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
# model_in.append(1.)
# outputs_np = kf(model_in)
# else:
model_in = [molded_images, image_metas]
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, image_shape, window, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
image_shape: [height, width, channels]
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(image_shape) + # size=3
        list(window) +          # size=4 (y1, x1, y2, x2) in image coordinates
list(active_class_ids) # size=num_classes
)
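    # Example layout (hypothetical, 4 classes): a 128x128x3 image with id 3 and
    # a full window gives [3, 128, 128, 3, 0, 0, 128, 128, 1, 1, 1, 1].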
return meta
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
    window = meta[:, 4:8]   # (y1, x1, y2, x2) window of image in pixels
active_class_ids = meta[:, 8:]
return [image_id, image_shape, window, active_class_ids]
def mold_image(images, config):
"""Takes RGB images with 0-255 values and subtraces
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
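# A small round-trip sketch. The config object here is a stand-in that only
# provides the MEAN_PIXEL attribute used by mold_image()/unmold_image(); the
# mean values are illustrative, not taken from this file.
def _example_mold_unmold():
    class _StubConfig(object):
        MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
    img = np.array([[[10, 120, 250]]], dtype=np.uint8)  # a 1x1 RGB "image"
    molded = mold_image(img, _StubConfig())         # float, mean-subtracted
    restored = unmold_image(molded, _StubConfig())  # back to uint8 RGB
    # values match the input up to float truncation
    assert np.max(np.abs(img.astype(np.int16) - restored.astype(np.int16))) <= 1
    return molded, restored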
############################################################
# Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
"""Often boxes are represented with matricies of shape [N, 4] and
are padded with zeros. This removes zero boxes.
boxes: [N, 4] matrix of boxes.
non_zeros: [N] a 1D boolean mask identifying the rows to keep
"""
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
return boxes, non_zeros
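# A tiny graph-mode sketch, assuming the TF1-style Session API used elsewhere
# in this module: two real boxes plus one all-zero padding row go in, and the
# padding row is trimmed out.
def _example_trim_zeros_graph():
    boxes = tf.constant([[1., 2., 3., 4.],
                         [0., 0., 0., 0.],
                         [5., 6., 7., 8.]])
    trimmed, non_zeros = trim_zeros_graph(boxes, name="trim_example")
    with tf.Session() as sess:
        t, nz = sess.run([trimmed, non_zeros])
    # t.shape == (2, 4); nz == [True, False, True]
    return t, nz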
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
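# Illustrative sketch (again assuming a TF1 Session): pick the first counts[i]
# values from each row of x and flatten them into a single 1-D tensor.
def _example_batch_pack_graph():
    x = tf.constant([[1, 2, 3],
                     [4, 5, 6]])
    counts = tf.constant([2, 1])
    packed = batch_pack_graph(x, counts, num_rows=2)
    with tf.Session() as sess:
        result = sess.run(packed)  # -> [1, 2, 4]
    return result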
|
the-stack_0_7124 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.jar_task import JarTask
##
## See `Appendix A` in the 'publish' documentation:
##
## http://pantsbuild.github.io/publish.html
##
## for tips on how to adapt this example task for your own custom publishing needs.
##
class ExtraTestJarExample(JarTask):
"""Example of a pants publish plugin.
For every JavaLibrary target in the build graph, this plugin creates an 'example.txt' file and
packs it into an additional jar. During publishing, that extra jar is published along with the
target's other artifacts.
"""
def __init__(self, context, workdir):
# Constructor for custom task. Setup things that you need at pants initialization time.
super(ExtraTestJarExample, self).__init__(context, workdir)
# This method is called by pants, when the RoundEngine gets to the phase where your task is
# attached.
def execute(self):
# For each node in the graph that was selected below, create a jar, and store a reference to
# the jar in the product map.
def process(target):
self.context.log.info("Processing target %s" % target)
jar_name = "%s.%s-extra_example.jar" % (target.provides.org, target.provides.name)
# This is the path in .pants.d to write our new additional jar to. Note that we won't publish
# directly from this location.
jar_path = os.path.join(self.workdir, jar_name)
# A sample file to stuff into the jar.
example_file_name = os.path.join(self.workdir, "example.txt")
with open(example_file_name, 'wb') as f:
f.write("This is an example test file.\n")
# Create a jar file to be published along with other artifacts for this target.
# In principle, any extra file type could be created here, and published.
# Options in pants.ini allow specifying the file extension.
with self.open_jar(jar_path, overwrite=True, compressed=True) as open_jar:
# Write the sample file to the jar.
open_jar.write(os.path.join(self.workdir, example_file_name), "example.txt")
# For this target, add the path to the newly created jar to the product map, under the
# 'extra_test_jar_example' key.
#
# IMPORTANT: this string *must* match the string that you have set in pants.ini. Otherwise,
# the code in 'jar_publish.py' won't be able to find this addition to the product map.
self.context.products.get('extra_test_jar_example').add(target, self.workdir).append(jar_name)
self.context.log.info("Made a jar: %s" % jar_path)
# Loop over all of the targets in the graph, and select the ones that we wish to operate on.
# This example selects all JavaLibrary targets, but different criteria can be specified below.
for target in self.context.targets(lambda target: isinstance(target, JavaLibrary)):
process(target)
|
the-stack_0_7129 | #! coding:utf-8
"""
compiler tests.
These tests are among the very first that were written when SQLAlchemy
began in 2005. As a result the testing style here is very dense;
it's an ongoing job to break these into much smaller tests with correct pep8
styling and coherent test organization.
"""
from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import Integer, String, MetaData, Table, Column, select, \
func, not_, cast, text, tuple_, exists, update, bindparam,\
literal, and_, null, type_coerce, alias, or_, literal_column,\
Float, TIMESTAMP, Numeric, Date, Text, union, except_,\
intersect, union_all, Boolean, distinct, join, outerjoin, asc, desc,\
over, subquery, case, true
import decimal
from sqlalchemy.util import u
from sqlalchemy import exc, sql, util, types, schema
from sqlalchemy.sql import table, column, label
from sqlalchemy.sql.expression import ClauseList, _literal_as_text, HasPrefixes
from sqlalchemy.engine import default
from sqlalchemy.dialects import mysql, mssql, postgresql, oracle, \
sqlite, sybase
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import compiler
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
table2 = table(
'myothertable',
column('otherid', Integer),
column('othername', String),
)
table3 = table(
'thirdtable',
column('userid', Integer),
column('otherstuff', String),
)
metadata = MetaData()
# table with a schema
table4 = Table(
'remotetable', metadata,
Column('rem_id', Integer, primary_key=True),
Column('datatype_id', Integer),
Column('value', String(20)),
schema='remote_owner'
)
# table with a 'multipart' schema
table5 = Table(
'remotetable', metadata,
Column('rem_id', Integer, primary_key=True),
Column('datatype_id', Integer),
Column('value', String(20)),
schema='dbo.remote_owner'
)
users = table('users',
column('user_id'),
column('user_name'),
column('password'),
)
addresses = table('addresses',
column('address_id'),
column('user_id'),
column('street'),
column('city'),
column('state'),
column('zip')
)
keyed = Table('keyed', metadata,
Column('x', Integer, key='colx'),
Column('y', Integer, key='coly'),
Column('z', Integer),
)
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_attribute_sanity(self):
assert hasattr(table1, 'c')
assert hasattr(table1.select(), 'c')
assert not hasattr(table1.c.myid.self_group(), 'columns')
assert hasattr(table1.select().self_group(), 'columns')
assert not hasattr(table1.c.myid, 'columns')
assert not hasattr(table1.c.myid, 'c')
assert not hasattr(table1.select().c.myid, 'c')
assert not hasattr(table1.select().c.myid, 'columns')
assert not hasattr(table1.alias().c.myid, 'columns')
assert not hasattr(table1.alias().c.myid, 'c')
if util.compat.py32:
assert_raises_message(
exc.InvalidRequestError,
'Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.',
lambda: hasattr(
select([table1.c.myid]).as_scalar().self_group(),
'columns'))
assert_raises_message(
exc.InvalidRequestError,
'Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.',
lambda: hasattr(select([table1.c.myid]).as_scalar(),
'columns'))
else:
assert not hasattr(
select([table1.c.myid]).as_scalar().self_group(),
'columns')
assert not hasattr(select([table1.c.myid]).as_scalar(), 'columns')
def test_prefix_constructor(self):
class Pref(HasPrefixes):
def _generate(self):
return self
assert_raises(exc.ArgumentError,
Pref().prefix_with,
"some prefix", not_a_dialect=True
)
def test_table_select(self):
self.assert_compile(table1.select(),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable")
self.assert_compile(
select(
[
table1,
table2]),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable")
def test_invalid_col_argument(self):
assert_raises(exc.ArgumentError, select, table1)
assert_raises(exc.ArgumentError, select, table1.c.myid)
def test_int_limit_offset_coercion(self):
for given, exp in [
("5", 5),
(5, 5),
(5.2, 5),
(decimal.Decimal("5"), 5),
(None, None),
]:
eq_(select().limit(given)._limit, exp)
eq_(select().offset(given)._offset, exp)
eq_(select(limit=given)._limit, exp)
eq_(select(offset=given)._offset, exp)
assert_raises(ValueError, select().limit, "foo")
assert_raises(ValueError, select().offset, "foo")
assert_raises(ValueError, select, offset="foo")
assert_raises(ValueError, select, limit="foo")
def test_limit_offset(self):
for lim, offset, exp, params in [
(5, 10, "LIMIT :param_1 OFFSET :param_2",
{'param_1': 5, 'param_2': 10}),
(None, 10, "LIMIT -1 OFFSET :param_1", {'param_1': 10}),
(5, None, "LIMIT :param_1", {'param_1': 5}),
(0, 0, "LIMIT :param_1 OFFSET :param_2",
{'param_1': 0, 'param_2': 0}),
]:
self.assert_compile(
select([1]).limit(lim).offset(offset),
"SELECT 1 " + exp,
checkparams=params
)
def test_select_precol_compile_ordering(self):
s1 = select([column('x')]).select_from('a').limit(5).as_scalar()
s2 = select([s1]).limit(10)
class MyCompiler(compiler.SQLCompiler):
def get_select_precolumns(self, select):
result = ""
if select._limit:
result += "FIRST %s " % self.process(
literal(
select._limit))
if select._offset:
result += "SKIP %s " % self.process(
literal(
select._offset))
return result
def limit_clause(self, select):
return ""
dialect = default.DefaultDialect()
dialect.statement_compiler = MyCompiler
dialect.paramstyle = 'qmark'
dialect.positional = True
self.assert_compile(
s2,
"SELECT FIRST ? (SELECT FIRST ? x FROM a) AS anon_1",
checkpositional=(10, 5),
dialect=dialect
)
def test_from_subquery(self):
"""tests placing select statements in the column clause of
another select, for the
purposes of selecting from the exported columns of that select."""
s = select([table1], table1.c.name == 'jack')
self.assert_compile(
select(
[s],
s.c.myid == 7),
"SELECT myid, name, description FROM "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description AS description "
"FROM mytable "
"WHERE mytable.name = :name_1) WHERE myid = :myid_1")
sq = select([table1])
self.assert_compile(
sq.select(),
"SELECT myid, name, description FROM "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description "
"AS description FROM mytable)"
)
sq = select(
[table1],
).alias('sq')
self.assert_compile(
sq.select(sq.c.myid == 7),
"SELECT sq.myid, sq.name, sq.description FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) AS sq "
"WHERE sq.myid = :myid_1"
)
sq = select(
[table1, table2],
and_(table1.c.myid == 7, table2.c.otherid == table1.c.myid),
use_labels=True
).alias('sq')
sqstring = "SELECT mytable.myid AS mytable_myid, mytable.name AS "\
"mytable_name, mytable.description AS mytable_description, "\
"myothertable.otherid AS myothertable_otherid, "\
"myothertable.othername AS myothertable_othername FROM "\
"mytable, myothertable WHERE mytable.myid = :myid_1 AND "\
"myothertable.otherid = mytable.myid"
self.assert_compile(
sq.select(),
"SELECT sq.mytable_myid, sq.mytable_name, "
"sq.mytable_description, sq.myothertable_otherid, "
"sq.myothertable_othername FROM (%s) AS sq" % sqstring)
sq2 = select(
[sq],
use_labels=True
).alias('sq2')
self.assert_compile(
sq2.select(),
"SELECT sq2.sq_mytable_myid, sq2.sq_mytable_name, "
"sq2.sq_mytable_description, sq2.sq_myothertable_otherid, "
"sq2.sq_myothertable_othername FROM "
"(SELECT sq.mytable_myid AS "
"sq_mytable_myid, sq.mytable_name AS sq_mytable_name, "
"sq.mytable_description AS sq_mytable_description, "
"sq.myothertable_otherid AS sq_myothertable_otherid, "
"sq.myothertable_othername AS sq_myothertable_othername "
"FROM (%s) AS sq) AS sq2" % sqstring)
def test_select_from_clauselist(self):
self.assert_compile(
select([ClauseList(column('a'), column('b'))]
).select_from('sometable'),
'SELECT a, b FROM sometable'
)
def test_use_labels(self):
self.assert_compile(
select([table1.c.myid == 5], use_labels=True),
"SELECT mytable.myid = :myid_1 AS anon_1 FROM mytable"
)
self.assert_compile(
select([func.foo()], use_labels=True),
"SELECT foo() AS foo_1"
)
# this is native_boolean=False for default dialect
self.assert_compile(
select([not_(True)], use_labels=True),
"SELECT :param_1 = 0"
)
self.assert_compile(
select([cast("data", Integer)], use_labels=True),
"SELECT CAST(:param_1 AS INTEGER) AS anon_1"
)
self.assert_compile(
select([func.sum(
func.lala(table1.c.myid).label('foo')).label('bar')]),
"SELECT sum(lala(mytable.myid)) AS bar FROM mytable"
)
self.assert_compile(
select([keyed]),
"SELECT keyed.x, keyed.y"
", keyed.z FROM keyed"
)
self.assert_compile(
select([keyed]).apply_labels(),
"SELECT keyed.x AS keyed_x, keyed.y AS "
"keyed_y, keyed.z AS keyed_z FROM keyed"
)
def test_paramstyles(self):
stmt = text("select :foo, :bar, :bat from sometable")
self.assert_compile(
stmt,
"select ?, ?, ? from sometable",
dialect=default.DefaultDialect(paramstyle='qmark')
)
self.assert_compile(
stmt,
"select :foo, :bar, :bat from sometable",
dialect=default.DefaultDialect(paramstyle='named')
)
self.assert_compile(
stmt,
"select %s, %s, %s from sometable",
dialect=default.DefaultDialect(paramstyle='format')
)
self.assert_compile(
stmt,
"select :1, :2, :3 from sometable",
dialect=default.DefaultDialect(paramstyle='numeric')
)
self.assert_compile(
stmt,
"select %(foo)s, %(bar)s, %(bat)s from sometable",
dialect=default.DefaultDialect(paramstyle='pyformat')
)
def test_dupe_columns(self):
"""test that deduping is performed against clause
element identity, not rendered result."""
self.assert_compile(
select([column('a'), column('a'), column('a')]),
"SELECT a, a, a", dialect=default.DefaultDialect()
)
c = column('a')
self.assert_compile(
select([c, c, c]),
"SELECT a", dialect=default.DefaultDialect()
)
a, b = column('a'), column('b')
self.assert_compile(
select([a, b, b, b, a, a]),
"SELECT a, b", dialect=default.DefaultDialect()
)
# using alternate keys.
a, b, c = Column('a', Integer, key='b'), \
Column('b', Integer), \
Column('c', Integer, key='a')
self.assert_compile(
select([a, b, c, a, b, c]),
"SELECT a, b, c", dialect=default.DefaultDialect()
)
self.assert_compile(
select([bindparam('a'), bindparam('b'), bindparam('c')]),
"SELECT :a AS anon_1, :b AS anon_2, :c AS anon_3",
dialect=default.DefaultDialect(paramstyle='named')
)
self.assert_compile(
select([bindparam('a'), bindparam('b'), bindparam('c')]),
"SELECT ? AS anon_1, ? AS anon_2, ? AS anon_3",
dialect=default.DefaultDialect(paramstyle='qmark'),
)
self.assert_compile(
select(["a", "a", "a"]),
"SELECT a, a, a"
)
s = select([bindparam('a'), bindparam('b'), bindparam('c')])
s = s.compile(dialect=default.DefaultDialect(paramstyle='qmark'))
eq_(s.positiontup, ['a', 'b', 'c'])
def test_nested_label_targeting(self):
"""test nested anonymous label generation.
"""
s1 = table1.select()
s2 = s1.alias()
s3 = select([s2], use_labels=True)
s4 = s3.alias()
s5 = select([s4], use_labels=True)
self.assert_compile(s5,
'SELECT anon_1.anon_2_myid AS '
'anon_1_anon_2_myid, anon_1.anon_2_name AS '
'anon_1_anon_2_name, anon_1.anon_2_descript'
'ion AS anon_1_anon_2_description FROM '
'(SELECT anon_2.myid AS anon_2_myid, '
'anon_2.name AS anon_2_name, '
'anon_2.description AS anon_2_description '
'FROM (SELECT mytable.myid AS myid, '
'mytable.name AS name, mytable.description '
'AS description FROM mytable) AS anon_2) '
'AS anon_1')
def test_nested_label_targeting_keyed(self):
s1 = keyed.select()
s2 = s1.alias()
s3 = select([s2], use_labels=True)
self.assert_compile(s3,
"SELECT anon_1.x AS anon_1_x, "
"anon_1.y AS anon_1_y, "
"anon_1.z AS anon_1_z FROM "
"(SELECT keyed.x AS x, keyed.y "
"AS y, keyed.z AS z FROM keyed) AS anon_1")
s4 = s3.alias()
s5 = select([s4], use_labels=True)
self.assert_compile(s5,
"SELECT anon_1.anon_2_x AS anon_1_anon_2_x, "
"anon_1.anon_2_y AS anon_1_anon_2_y, "
"anon_1.anon_2_z AS anon_1_anon_2_z "
"FROM (SELECT anon_2.x AS anon_2_x, "
"anon_2.y AS anon_2_y, "
"anon_2.z AS anon_2_z FROM "
"(SELECT keyed.x AS x, keyed.y AS y, keyed.z "
"AS z FROM keyed) AS anon_2) AS anon_1"
)
def test_exists(self):
s = select([table1.c.myid]).where(table1.c.myid == 5)
self.assert_compile(exists(s),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)"
)
self.assert_compile(exists(s.as_scalar()),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)"
)
self.assert_compile(exists([table1.c.myid], table1.c.myid
== 5).select(),
'SELECT EXISTS (SELECT mytable.myid FROM '
'mytable WHERE mytable.myid = :myid_1)',
params={'mytable_myid': 5})
self.assert_compile(select([table1, exists([1],
from_obj=table2)]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, EXISTS (SELECT 1 '
'FROM myothertable) FROM mytable',
params={})
self.assert_compile(select([table1,
exists([1],
from_obj=table2).label('foo')]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, EXISTS (SELECT 1 '
'FROM myothertable) AS foo FROM mytable',
params={})
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT * FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT * FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)
).replace_selectable(
table2,
table2.alias()),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT * FROM myothertable AS '
'myothertable_1 WHERE myothertable_1.otheri'
'd = mytable.myid)')
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)).
select_from(
table1.join(
table2,
table1.c.myid == table2.c.otherid)).
replace_selectable(
table2,
table2.alias()),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable JOIN '
'myothertable AS myothertable_1 ON '
'mytable.myid = myothertable_1.otherid '
'WHERE EXISTS (SELECT * FROM myothertable '
'AS myothertable_1 WHERE '
'myothertable_1.otherid = mytable.myid)')
self.assert_compile(
select([
or_(
exists().where(table2.c.otherid == 'foo'),
exists().where(table2.c.otherid == 'bar')
)
]),
"SELECT (EXISTS (SELECT * FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)) "
"OR (EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = :otherid_2)) AS anon_1"
)
def test_where_subquery(self):
s = select([addresses.c.street], addresses.c.user_id
== users.c.user_id, correlate=True).alias('s')
# don't correlate in a FROM list
self.assert_compile(select([users, s.c.street], from_obj=s),
"SELECT users.user_id, users.user_name, "
"users.password, s.street FROM users, "
"(SELECT addresses.street AS street FROM "
"addresses, users WHERE addresses.user_id = "
"users.user_id) AS s")
self.assert_compile(table1.select(
table1.c.myid == select(
[table1.c.myid],
table1.c.name == 'jack')),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'mytable.myid = (SELECT mytable.myid FROM '
'mytable WHERE mytable.name = :name_1)')
self.assert_compile(
table1.select(
table1.c.myid == select(
[table2.c.otherid],
table1.c.name == table2.c.othername
)
),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'mytable.myid = (SELECT '
'myothertable.otherid FROM myothertable '
'WHERE mytable.name = myothertable.othernam'
'e)')
self.assert_compile(table1.select(exists([1], table2.c.otherid
== table1.c.myid)),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT 1 FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
talias = table1.alias('ta')
s = subquery('sq2', [talias], exists([1], table2.c.otherid
== talias.c.myid))
self.assert_compile(select([s, table1]),
'SELECT sq2.myid, sq2.name, '
'sq2.description, mytable.myid, '
'mytable.name, mytable.description FROM '
'(SELECT ta.myid AS myid, ta.name AS name, '
'ta.description AS description FROM '
'mytable AS ta WHERE EXISTS (SELECT 1 FROM '
'myothertable WHERE myothertable.otherid = '
'ta.myid)) AS sq2, mytable')
# test constructing the outer query via append_column(), which
# occurs in the ORM's Query object
s = select([], exists([1], table2.c.otherid == table1.c.myid),
from_obj=table1)
s.append_column(table1)
self.assert_compile(s,
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT 1 FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
def test_orderby_subquery(self):
self.assert_compile(
table1.select(
order_by=[
select(
[
table2.c.otherid],
table1.c.myid == table2.c.otherid)]),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable ORDER BY '
'(SELECT myothertable.otherid FROM '
'myothertable WHERE mytable.myid = '
'myothertable.otherid)')
self.assert_compile(table1.select(order_by=[
desc(select([table2.c.otherid],
table1.c.myid == table2.c.otherid))]),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable ORDER BY '
'(SELECT myothertable.otherid FROM '
'myothertable WHERE mytable.myid = '
'myothertable.otherid) DESC')
def test_scalar_select(self):
assert_raises_message(
exc.InvalidRequestError,
r"Select objects don't have a type\. Call as_scalar\(\) "
"on this Select object to return a 'scalar' "
"version of this Select\.",
func.coalesce, select([table1.c.myid])
)
s = select([table1.c.myid], correlate=False).as_scalar()
self.assert_compile(select([table1, s]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, (SELECT mytable.myid '
'FROM mytable) AS anon_1 FROM mytable')
s = select([table1.c.myid]).as_scalar()
self.assert_compile(select([table2, s]),
'SELECT myothertable.otherid, '
'myothertable.othername, (SELECT '
'mytable.myid FROM mytable) AS anon_1 FROM '
'myothertable')
s = select([table1.c.myid]).correlate(None).as_scalar()
self.assert_compile(select([table1, s]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, (SELECT mytable.myid '
'FROM mytable) AS anon_1 FROM mytable')
s = select([table1.c.myid]).as_scalar()
s2 = s.where(table1.c.myid == 5)
self.assert_compile(
s2,
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)"
)
self.assert_compile(
s, "(SELECT mytable.myid FROM mytable)"
)
# test that aliases use as_scalar() when used in an explicitly
# scalar context
s = select([table1.c.myid]).alias()
self.assert_compile(select([table1.c.myid]).where(table1.c.myid
== s),
'SELECT mytable.myid FROM mytable WHERE '
'mytable.myid = (SELECT mytable.myid FROM '
'mytable)')
self.assert_compile(select([table1.c.myid]).where(s
> table1.c.myid),
'SELECT mytable.myid FROM mytable WHERE '
'mytable.myid < (SELECT mytable.myid FROM '
'mytable)')
s = select([table1.c.myid]).as_scalar()
self.assert_compile(select([table2, s]),
'SELECT myothertable.otherid, '
'myothertable.othername, (SELECT '
'mytable.myid FROM mytable) AS anon_1 FROM '
'myothertable')
# test expressions against scalar selects
self.assert_compile(select([s - literal(8)]),
'SELECT (SELECT mytable.myid FROM mytable) '
'- :param_1 AS anon_1')
self.assert_compile(select([select([table1.c.name]).as_scalar()
+ literal('x')]),
'SELECT (SELECT mytable.name FROM mytable) '
'|| :param_1 AS anon_1')
self.assert_compile(select([s > literal(8)]),
'SELECT (SELECT mytable.myid FROM mytable) '
'> :param_1 AS anon_1')
self.assert_compile(select([select([table1.c.name]).label('foo'
)]),
'SELECT (SELECT mytable.name FROM mytable) '
'AS foo')
# scalar selects should not have any attributes on their 'c' or
# 'columns' attribute
s = select([table1.c.myid]).as_scalar()
try:
s.c.foo
except exc.InvalidRequestError as err:
assert str(err) \
== 'Scalar Select expression has no columns; use this '\
'object directly within a column-level expression.'
try:
s.columns.foo
except exc.InvalidRequestError as err:
assert str(err) \
== 'Scalar Select expression has no columns; use this '\
'object directly within a column-level expression.'
zips = table('zips',
column('zipcode'),
column('latitude'),
column('longitude'),
)
places = table('places',
column('id'),
column('nm')
)
zip = '12345'
qlat = select([zips.c.latitude], zips.c.zipcode == zip).\
correlate(None).as_scalar()
qlng = select([zips.c.longitude], zips.c.zipcode == zip).\
correlate(None).as_scalar()
q = select([places.c.id, places.c.nm, zips.c.zipcode,
func.latlondist(qlat, qlng).label('dist')],
zips.c.zipcode == zip,
order_by=['dist', places.c.nm]
)
self.assert_compile(q,
'SELECT places.id, places.nm, '
'zips.zipcode, latlondist((SELECT '
'zips.latitude FROM zips WHERE '
'zips.zipcode = :zipcode_1), (SELECT '
'zips.longitude FROM zips WHERE '
'zips.zipcode = :zipcode_2)) AS dist FROM '
'places, zips WHERE zips.zipcode = '
':zipcode_3 ORDER BY dist, places.nm')
zalias = zips.alias('main_zip')
qlat = select([zips.c.latitude], zips.c.zipcode == zalias.c.zipcode).\
as_scalar()
qlng = select([zips.c.longitude], zips.c.zipcode == zalias.c.zipcode).\
as_scalar()
q = select([places.c.id, places.c.nm, zalias.c.zipcode,
func.latlondist(qlat, qlng).label('dist')],
order_by=['dist', places.c.nm])
self.assert_compile(q,
'SELECT places.id, places.nm, '
'main_zip.zipcode, latlondist((SELECT '
'zips.latitude FROM zips WHERE '
'zips.zipcode = main_zip.zipcode), (SELECT '
'zips.longitude FROM zips WHERE '
'zips.zipcode = main_zip.zipcode)) AS dist '
'FROM places, zips AS main_zip ORDER BY '
'dist, places.nm')
a1 = table2.alias('t2alias')
s1 = select([a1.c.otherid], table1.c.myid == a1.c.otherid).as_scalar()
j1 = table1.join(table2, table1.c.myid == table2.c.otherid)
s2 = select([table1, s1], from_obj=j1)
self.assert_compile(s2,
'SELECT mytable.myid, mytable.name, '
'mytable.description, (SELECT '
't2alias.otherid FROM myothertable AS '
't2alias WHERE mytable.myid = '
't2alias.otherid) AS anon_1 FROM mytable '
'JOIN myothertable ON mytable.myid = '
'myothertable.otherid')
def test_label_comparison_one(self):
x = func.lala(table1.c.myid).label('foo')
self.assert_compile(select([x], x == 5),
'SELECT lala(mytable.myid) AS foo FROM '
'mytable WHERE lala(mytable.myid) = '
':param_1')
def test_label_comparison_two(self):
self.assert_compile(
label('bar', column('foo', type_=String)) + 'foo',
'foo || :param_1')
def test_order_by_labels_enabled(self):
lab1 = (table1.c.myid + 12).label('foo')
lab2 = func.somefunc(table1.c.name).label('bar')
dialect = default.DefaultDialect()
self.assert_compile(select([lab1, lab2]).order_by(lab1, desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY foo, bar DESC",
dialect=dialect
)
# the function embedded label renders as the function
self.assert_compile(
select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY hoho(mytable.myid + :myid_1), bar DESC",
dialect=dialect
)
# binary expressions render as the expression without labels
self.assert_compile(select([lab1, lab2]).order_by(lab1 + "test"),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid + :myid_1 + :param_1",
dialect=dialect
)
# labels within functions in the columns clause render
# with the expression
self.assert_compile(
select([lab1, func.foo(lab1)]).order_by(lab1, func.foo(lab1)),
"SELECT mytable.myid + :myid_1 AS foo, "
"foo(mytable.myid + :myid_1) AS foo_1 FROM mytable "
"ORDER BY foo, foo(mytable.myid + :myid_1)",
dialect=dialect
)
lx = (table1.c.myid + table1.c.myid).label('lx')
ly = (func.lower(table1.c.name) + table1.c.description).label('ly')
self.assert_compile(
select([lx, ly]).order_by(lx, ly.desc()),
"SELECT mytable.myid + mytable.myid AS lx, "
"lower(mytable.name) || mytable.description AS ly "
"FROM mytable ORDER BY lx, ly DESC",
dialect=dialect
)
def test_order_by_labels_disabled(self):
lab1 = (table1.c.myid + 12).label('foo')
lab2 = func.somefunc(table1.c.name).label('bar')
dialect = default.DefaultDialect()
dialect.supports_simple_order_by_label = False
self.assert_compile(
select(
[
lab1,
lab2]).order_by(
lab1,
desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid + :myid_1, somefunc(mytable.name) DESC",
dialect=dialect)
self.assert_compile(
select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY hoho(mytable.myid + :myid_1), "
"somefunc(mytable.name) DESC",
dialect=dialect
)
def test_conjunctions(self):
a, b, c = 'a', 'b', 'c'
x = and_(a, b, c)
assert isinstance(x.type, Boolean)
assert str(x) == 'a AND b AND c'
self.assert_compile(
select([x.label('foo')]),
'SELECT a AND b AND c AS foo'
)
self.assert_compile(
and_(table1.c.myid == 12, table1.c.name == 'asdf',
table2.c.othername == 'foo', "sysdate() = today()"),
"mytable.myid = :myid_1 AND mytable.name = :name_1 "
"AND myothertable.othername = "
":othername_1 AND sysdate() = today()"
)
self.assert_compile(
and_(
table1.c.myid == 12,
or_(table2.c.othername == 'asdf',
table2.c.othername == 'foo', table2.c.otherid == 9),
"sysdate() = today()",
),
'mytable.myid = :myid_1 AND (myothertable.othername = '
':othername_1 OR myothertable.othername = :othername_2 OR '
'myothertable.otherid = :otherid_1) AND sysdate() = '
'today()',
checkparams={'othername_1': 'asdf', 'othername_2': 'foo',
'otherid_1': 9, 'myid_1': 12}
)
# test a generator
self.assert_compile(
and_(
conj for conj in [
table1.c.myid == 12,
table1.c.name == 'asdf'
]
),
"mytable.myid = :myid_1 AND mytable.name = :name_1"
)
def test_nested_conjunctions_short_circuit(self):
"""test that empty or_(), and_() conjunctions are collapsed by
an enclosing conjunction."""
t = table('t', column('x'))
self.assert_compile(
select([t]).where(and_(t.c.x == 5,
or_(and_(or_(t.c.x == 7))))),
"SELECT t.x FROM t WHERE t.x = :x_1 AND t.x = :x_2"
)
self.assert_compile(
select([t]).where(and_(or_(t.c.x == 12,
and_(or_(t.c.x == 8))))),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2"
)
self.assert_compile(
select([t]).
where(
and_(
or_(
or_(t.c.x == 12),
and_(
or_(),
or_(and_(t.c.x == 8)),
and_()
)
)
)
),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2"
)
def test_true_short_circuit(self):
t = table('t', column('x'))
self.assert_compile(
select([t]).where(true()),
"SELECT t.x FROM t WHERE 1 = 1",
dialect=default.DefaultDialect(supports_native_boolean=False)
)
self.assert_compile(
select([t]).where(true()),
"SELECT t.x FROM t WHERE true",
dialect=default.DefaultDialect(supports_native_boolean=True)
)
self.assert_compile(
select([t]),
"SELECT t.x FROM t",
dialect=default.DefaultDialect(supports_native_boolean=True)
)
def test_distinct(self):
self.assert_compile(
select([table1.c.myid.distinct()]),
"SELECT DISTINCT mytable.myid FROM mytable"
)
self.assert_compile(
select([distinct(table1.c.myid)]),
"SELECT DISTINCT mytable.myid FROM mytable"
)
self.assert_compile(
select([table1.c.myid]).distinct(),
"SELECT DISTINCT mytable.myid FROM mytable"
)
self.assert_compile(
select([func.count(table1.c.myid.distinct())]),
"SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable"
)
self.assert_compile(
select([func.count(distinct(table1.c.myid))]),
"SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable"
)
def test_where_empty(self):
self.assert_compile(
select([table1.c.myid]).where(and_()),
"SELECT mytable.myid FROM mytable"
)
self.assert_compile(
select([table1.c.myid]).where(or_()),
"SELECT mytable.myid FROM mytable"
)
def test_multiple_col_binds(self):
self.assert_compile(
select(["*"], or_(table1.c.myid == 12, table1.c.myid == 'asdf',
table1.c.myid == 'foo')),
"SELECT * FROM mytable WHERE mytable.myid = :myid_1 "
"OR mytable.myid = :myid_2 OR mytable.myid = :myid_3"
)
def test_order_by_nulls(self):
self.assert_compile(
table2.select(order_by=[table2.c.otherid,
table2.c.othername.desc().nullsfirst()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC NULLS FIRST"
)
self.assert_compile(
table2.select(order_by=[
table2.c.otherid, table2.c.othername.desc().nullslast()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC NULLS LAST"
)
self.assert_compile(
table2.select(order_by=[
table2.c.otherid.nullslast(),
table2.c.othername.desc().nullsfirst()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS LAST, "
"myothertable.othername DESC NULLS FIRST"
)
self.assert_compile(
table2.select(order_by=[table2.c.otherid.nullsfirst(),
table2.c.othername.desc()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS FIRST, "
"myothertable.othername DESC"
)
self.assert_compile(
table2.select(order_by=[table2.c.otherid.nullsfirst(),
table2.c.othername.desc().nullslast()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS FIRST, "
"myothertable.othername DESC NULLS LAST"
)
def test_orderby_groupby(self):
self.assert_compile(
table2.select(order_by=[table2.c.otherid,
asc(table2.c.othername)]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername ASC"
)
self.assert_compile(
table2.select(order_by=[table2.c.otherid,
table2.c.othername.desc()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC"
)
# generative order_by
self.assert_compile(
table2.select().order_by(table2.c.otherid).
order_by(table2.c.othername.desc()),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC"
)
self.assert_compile(
table2.select().order_by(table2.c.otherid).
order_by(table2.c.othername.desc()
).order_by(None),
"SELECT myothertable.otherid, myothertable.othername "
"FROM myothertable"
)
self.assert_compile(
select(
[table2.c.othername, func.count(table2.c.otherid)],
group_by=[table2.c.othername]),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable GROUP BY myothertable.othername"
)
# generative group by
self.assert_compile(
select([table2.c.othername, func.count(table2.c.otherid)]).
group_by(table2.c.othername),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable GROUP BY myothertable.othername"
)
self.assert_compile(
select([table2.c.othername, func.count(table2.c.otherid)]).
group_by(table2.c.othername).group_by(None),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable"
)
self.assert_compile(
select([table2.c.othername, func.count(table2.c.otherid)],
group_by=[table2.c.othername],
order_by=[table2.c.othername]),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable "
"GROUP BY myothertable.othername ORDER BY myothertable.othername"
)
def test_for_update(self):
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE")
# NOWAIT is not supported by the default dialect; a plain FOR UPDATE should be rendered
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE")
assert_raises_message(
exc.ArgumentError,
"Unknown for_update argument: 'unknown_mode'",
table1.select, table1.c.myid == 7, for_update='unknown_mode'
)
def test_alias(self):
# test the alias for a table1. column names stay the same,
# table name "changes" to "foo".
self.assert_compile(
select([table1.alias('foo')]),
"SELECT foo.myid, foo.name, foo.description FROM mytable AS foo")
for dialect in (oracle.dialect(),):
self.assert_compile(
select([table1.alias('foo')]),
"SELECT foo.myid, foo.name, foo.description FROM mytable foo",
dialect=dialect)
self.assert_compile(
select([table1.alias()]),
"SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM mytable AS mytable_1")
# create a select for a join of two tables. use_labels
# means the column names will have labels tablename_columnname,
# which become the column keys accessible off the Selectable object.
# also, only use one column from the second table and all columns
# from the first table1.
q = select(
[table1, table2.c.otherid],
table1.c.myid == table2.c.otherid, use_labels=True
)
# make an alias of the "selectable". column names
# stay the same (i.e. the labels), table name "changes" to "t2view".
a = alias(q, 't2view')
# select from that alias, also using labels. two levels of labels
# should produce two underscores.
# also, reference the column "mytable_myid" off of the t2view alias.
self.assert_compile(
a.select(a.c.mytable_myid == 9, use_labels=True),
"SELECT t2view.mytable_myid AS t2view_mytable_myid, "
"t2view.mytable_name "
"AS t2view_mytable_name, "
"t2view.mytable_description AS t2view_mytable_description, "
"t2view.myothertable_otherid AS t2view_myothertable_otherid FROM "
"(SELECT mytable.myid AS mytable_myid, "
"mytable.name AS mytable_name, "
"mytable.description AS mytable_description, "
"myothertable.otherid AS "
"myothertable_otherid FROM mytable, myothertable "
"WHERE mytable.myid = "
"myothertable.otherid) AS t2view "
"WHERE t2view.mytable_myid = :mytable_myid_1"
)
def test_prefix(self):
self.assert_compile(
table1.select().prefix_with("SQL_CALC_FOUND_ROWS").
prefix_with("SQL_SOME_WEIRD_MYSQL_THING"),
"SELECT SQL_CALC_FOUND_ROWS SQL_SOME_WEIRD_MYSQL_THING "
"mytable.myid, mytable.name, mytable.description FROM mytable"
)
def test_prefix_dialect_specific(self):
self.assert_compile(
table1.select().prefix_with("SQL_CALC_FOUND_ROWS",
dialect='sqlite').
prefix_with("SQL_SOME_WEIRD_MYSQL_THING",
dialect='mysql'),
"SELECT SQL_SOME_WEIRD_MYSQL_THING "
"mytable.myid, mytable.name, mytable.description FROM mytable",
dialect=mysql.dialect()
)
@testing.emits_warning('.*empty sequence.*')
def test_render_binds_as_literal(self):
"""test a compiler that renders binds inline into
SQL in the columns clause."""
dialect = default.DefaultDialect()
class Compiler(dialect.statement_compiler):
ansi_bind_rules = True
dialect.statement_compiler = Compiler
self.assert_compile(
select([literal("someliteral")]),
"SELECT 'someliteral' AS anon_1",
dialect=dialect
)
self.assert_compile(
select([table1.c.myid + 3]),
"SELECT mytable.myid + 3 AS anon_1 FROM mytable",
dialect=dialect
)
self.assert_compile(
select([table1.c.myid.in_([4, 5, 6])]),
"SELECT mytable.myid IN (4, 5, 6) AS anon_1 FROM mytable",
dialect=dialect
)
self.assert_compile(
select([func.mod(table1.c.myid, 5)]),
"SELECT mod(mytable.myid, 5) AS mod_1 FROM mytable",
dialect=dialect
)
self.assert_compile(
select([literal("foo").in_([])]),
"SELECT 'foo' != 'foo' AS anon_1",
dialect=dialect
)
self.assert_compile(
select([literal(util.b("foo"))]),
"SELECT 'foo' AS anon_1",
dialect=dialect
)
# test callable
self.assert_compile(
select([table1.c.myid == bindparam("foo", callable_=lambda: 5)]),
"SELECT mytable.myid = 5 AS anon_1 FROM mytable",
dialect=dialect
)
assert_raises_message(
exc.CompileError,
"Bind parameter 'foo' without a "
"renderable value not allowed here.",
bindparam("foo").in_(
[]).compile,
dialect=dialect)
def test_literal(self):
self.assert_compile(select([literal('foo')]),
"SELECT :param_1 AS anon_1")
self.assert_compile(
select(
[
literal("foo") +
literal("bar")],
from_obj=[table1]),
"SELECT :param_1 || :param_2 AS anon_1 FROM mytable")
def test_calculated_columns(self):
value_tbl = table('values',
column('id', Integer),
column('val1', Float),
column('val2', Float),
)
self.assert_compile(
select([value_tbl.c.id, (value_tbl.c.val2 -
value_tbl.c.val1) / value_tbl.c.val1]),
"SELECT values.id, (values.val2 - values.val1) "
"/ values.val1 AS anon_1 FROM values"
)
self.assert_compile(
select([
value_tbl.c.id],
(value_tbl.c.val2 - value_tbl.c.val1) /
value_tbl.c.val1 > 2.0),
"SELECT values.id FROM values WHERE "
"(values.val2 - values.val1) / values.val1 > :param_1"
)
self.assert_compile(
select([value_tbl.c.id], value_tbl.c.val1 /
(value_tbl.c.val2 - value_tbl.c.val1) /
value_tbl.c.val1 > 2.0),
"SELECT values.id FROM values WHERE "
"(values.val1 / (values.val2 - values.val1)) "
"/ values.val1 > :param_1"
)
def test_percent_chars(self):
t = table("table%name",
column("percent%"),
column("%(oneofthese)s"),
column("spaces % more spaces"),
)
self.assert_compile(
t.select(use_labels=True),
'''SELECT "table%name"."percent%" AS "table%name_percent%", '''
'''"table%name"."%(oneofthese)s" AS '''
'''"table%name_%(oneofthese)s", '''
'''"table%name"."spaces % more spaces" AS '''
'''"table%name_spaces % '''
'''more spaces" FROM "table%name"'''
)
def test_joins(self):
self.assert_compile(
join(table2, table1, table1.c.myid == table2.c.otherid).select(),
"SELECT myothertable.otherid, myothertable.othername, "
"mytable.myid, mytable.name, mytable.description FROM "
"myothertable JOIN mytable ON mytable.myid = myothertable.otherid"
)
self.assert_compile(
select(
[table1],
from_obj=[join(table1, table2, table1.c.myid
== table2.c.otherid)]
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable JOIN myothertable ON mytable.myid = myothertable.otherid")
self.assert_compile(
select(
[join(join(table1, table2, table1.c.myid == table2.c.otherid),
table3, table1.c.myid == table3.c.userid)]
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid, "
"thirdtable.otherstuff FROM mytable JOIN myothertable "
"ON mytable.myid ="
" myothertable.otherid JOIN thirdtable ON "
"mytable.myid = thirdtable.userid"
)
self.assert_compile(
join(users, addresses, users.c.user_id ==
addresses.c.user_id).select(),
"SELECT users.user_id, users.user_name, users.password, "
"addresses.address_id, addresses.user_id, addresses.street, "
"addresses.city, addresses.state, addresses.zip "
"FROM users JOIN addresses "
"ON users.user_id = addresses.user_id"
)
self.assert_compile(
select([table1, table2, table3],
from_obj=[join(table1, table2,
table1.c.myid == table2.c.otherid).
outerjoin(table3,
table1.c.myid == table3.c.userid)]
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid,"
" thirdtable.otherstuff FROM mytable "
"JOIN myothertable ON mytable.myid "
"= myothertable.otherid LEFT OUTER JOIN thirdtable "
"ON mytable.myid ="
" thirdtable.userid"
)
self.assert_compile(
select([table1, table2, table3],
from_obj=[outerjoin(table1,
join(table2, table3, table2.c.otherid
== table3.c.userid),
table1.c.myid == table2.c.otherid)]
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid,"
" thirdtable.otherstuff FROM mytable LEFT OUTER JOIN "
"(myothertable "
"JOIN thirdtable ON myothertable.otherid = "
"thirdtable.userid) ON "
"mytable.myid = myothertable.otherid"
)
query = select(
[table1, table2],
or_(
table1.c.name == 'fred',
table1.c.myid == 10,
table2.c.othername != 'jack',
"EXISTS (select yay from foo where boo = lar)"
),
from_obj=[outerjoin(table1, table2,
table1.c.myid == table2.c.otherid)]
)
self.assert_compile(
query, "SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername "
"FROM mytable LEFT OUTER JOIN myothertable ON mytable.myid = "
"myothertable.otherid WHERE mytable.name = :name_1 OR "
"mytable.myid = :myid_1 OR myothertable.othername != :othername_1 "
"OR EXISTS (select yay from foo where boo = lar)", )
def test_compound_selects(self):
assert_raises_message(
exc.ArgumentError,
"All selectables passed to CompoundSelect "
"must have identical numbers of columns; "
"select #1 has 2 columns, select #2 has 3",
union, table3.select(), table1.select()
)
x = union(
select([table1], table1.c.myid == 5),
select([table1], table1.c.myid == 12),
order_by=[table1.c.myid],
)
self.assert_compile(
x, "SELECT mytable.myid, mytable.name, "
"mytable.description "
"FROM mytable WHERE "
"mytable.myid = :myid_1 UNION "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_2 "
"ORDER BY mytable.myid")
x = union(
select([table1]),
select([table1])
)
x = union(x, select([table1]))
self.assert_compile(
x, "(SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable UNION SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable) UNION SELECT mytable.myid,"
" mytable.name, mytable.description FROM mytable")
u1 = union(
select([table1.c.myid, table1.c.name]),
select([table2]),
select([table3])
)
self.assert_compile(
u1, "SELECT mytable.myid, mytable.name "
"FROM mytable UNION SELECT myothertable.otherid, "
"myothertable.othername FROM myothertable "
"UNION SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable")
assert u1.corresponding_column(table2.c.otherid) is u1.c.myid
self.assert_compile(
union(
select([table1.c.myid, table1.c.name]),
select([table2]),
order_by=['myid'],
offset=10,
limit=5
),
"SELECT mytable.myid, mytable.name "
"FROM mytable UNION SELECT myothertable.otherid, "
"myothertable.othername "
"FROM myothertable ORDER BY myid LIMIT :param_1 OFFSET :param_2",
{'param_1': 5, 'param_2': 10}
)
self.assert_compile(
union(
select([table1.c.myid, table1.c.name,
func.max(table1.c.description)],
table1.c.name == 'name2',
group_by=[table1.c.myid, table1.c.name]),
table1.select(table1.c.name == 'name1')
),
"SELECT mytable.myid, mytable.name, "
"max(mytable.description) AS max_1 "
"FROM mytable WHERE mytable.name = :name_1 "
"GROUP BY mytable.myid, "
"mytable.name UNION SELECT mytable.myid, mytable.name, "
"mytable.description "
"FROM mytable WHERE mytable.name = :name_2"
)
self.assert_compile(
union(
select([literal(100).label('value')]),
select([literal(200).label('value')])
),
"SELECT :param_1 AS value UNION SELECT :param_2 AS value"
)
self.assert_compile(
union_all(
select([table1.c.myid]),
union(
select([table2.c.otherid]),
select([table3.c.userid]),
)
),
"SELECT mytable.myid FROM mytable UNION ALL "
"(SELECT myothertable.otherid FROM myothertable UNION "
"SELECT thirdtable.userid FROM thirdtable)"
)
s = select([column('foo'), column('bar')])
# ORDER BY's even though not supported by
# all DB's, are rendered if requested
self.assert_compile(
union(
s.order_by("foo"),
s.order_by("bar")),
"SELECT foo, bar ORDER BY foo UNION SELECT foo, bar ORDER BY bar")
# self_group() is honored
self.assert_compile(
union(s.order_by("foo").self_group(),
s.order_by("bar").limit(10).self_group()),
"(SELECT foo, bar ORDER BY foo) UNION (SELECT foo, "
"bar ORDER BY bar LIMIT :param_1)",
{'param_1': 10}
)
def test_compound_grouping(self):
s = select([column('foo'), column('bar')]).select_from('bat')
self.assert_compile(
union(union(union(s, s), s), s),
"((SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
"UNION SELECT foo, bar FROM bat) UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
union(s, s, s, s),
"SELECT foo, bar FROM bat UNION SELECT foo, bar "
"FROM bat UNION SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
union(s, union(s, union(s, s))),
"SELECT foo, bar FROM bat UNION (SELECT foo, bar FROM bat "
"UNION (SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat))"
)
self.assert_compile(
select([s.alias()]),
'SELECT anon_1.foo, anon_1.bar FROM '
'(SELECT foo, bar FROM bat) AS anon_1'
)
self.assert_compile(
select([union(s, s).alias()]),
'SELECT anon_1.foo, anon_1.bar FROM '
'(SELECT foo, bar FROM bat UNION '
'SELECT foo, bar FROM bat) AS anon_1'
)
self.assert_compile(
select([except_(s, s).alias()]),
'SELECT anon_1.foo, anon_1.bar FROM '
'(SELECT foo, bar FROM bat EXCEPT '
'SELECT foo, bar FROM bat) AS anon_1'
)
# this query sqlite specifically chokes on
self.assert_compile(
union(
except_(s, s),
s
),
"(SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat) "
"UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
union(
s,
except_(s, s),
),
"SELECT foo, bar FROM bat "
"UNION (SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat)"
)
# this solves it
self.assert_compile(
union(
except_(s, s).alias().select(),
s
),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat EXCEPT "
"SELECT foo, bar FROM bat) AS anon_1 "
"UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
except_(
union(s, s),
union(s, s)
),
"(SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
"EXCEPT (SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat)"
)
s2 = union(s, s)
s3 = union(s2, s2)
self.assert_compile(s3, "(SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat) "
"UNION (SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat)")
self.assert_compile(
union(
intersect(s, s),
intersect(s, s)
),
"(SELECT foo, bar FROM bat INTERSECT SELECT foo, bar FROM bat) "
"UNION (SELECT foo, bar FROM bat INTERSECT "
"SELECT foo, bar FROM bat)"
)
def test_binds(self):
for (
stmt,
expected_named_stmt,
expected_positional_stmt,
expected_default_params_dict,
expected_default_params_list,
test_param_dict,
expected_test_params_dict,
expected_test_params_list
) in [
(
select(
[table1, table2],
and_(
table1.c.myid == table2.c.otherid,
table1.c.name == bindparam('mytablename')
)),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid "
"AND mytable.name = :mytablename",
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid AND "
"mytable.name = ?",
{'mytablename': None}, [None],
{'mytablename': 5}, {'mytablename': 5}, [5]
),
(
select([table1], or_(table1.c.myid == bindparam('myid'),
table2.c.otherid == bindparam('myid'))),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = :myid "
"OR myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{'myid': None}, [None, None],
{'myid': 5}, {'myid': 5}, [5, 5]
),
(
text("SELECT mytable.myid, mytable.name, "
"mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid"),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? OR "
"myothertable.otherid = ?",
{'myid': None}, [None, None],
{'myid': 5}, {'myid': 5}, [5, 5]
),
(
select([table1], or_(table1.c.myid ==
bindparam('myid', unique=True),
table2.c.otherid ==
bindparam('myid', unique=True))),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{'myid_1': None, 'myid_2': None}, [None, None],
{'myid_1': 5, 'myid_2': 6}, {'myid_1': 5, 'myid_2': 6}, [5, 6]
),
(
bindparam('test', type_=String, required=False) + text("'hi'"),
":test || 'hi'",
"? || 'hi'",
{'test': None}, [None],
{}, {'test': None}, [None]
),
(
# testing select.params() here - bindparam() objects
# must get required flag set to False
select(
[table1],
or_(
table1.c.myid == bindparam('myid'),
table2.c.otherid == bindparam('myotherid')
)).params({'myid': 8, 'myotherid': 7}),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid OR myothertable.otherid = :myotherid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{'myid': 8, 'myotherid': 7}, [8, 7],
{'myid': 5}, {'myid': 5, 'myotherid': 7}, [5, 7]
),
(
select([table1], or_(table1.c.myid ==
bindparam('myid', value=7, unique=True),
table2.c.otherid ==
bindparam('myid', value=8, unique=True))),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{'myid_1': 7, 'myid_2': 8}, [7, 8],
{'myid_1': 5, 'myid_2': 6}, {'myid_1': 5, 'myid_2': 6}, [5, 6]
),
]:
self.assert_compile(stmt, expected_named_stmt,
params=expected_default_params_dict)
self.assert_compile(stmt, expected_positional_stmt,
dialect=sqlite.dialect())
nonpositional = stmt.compile()
positional = stmt.compile(dialect=sqlite.dialect())
pp = positional.params
eq_([pp[k] for k in positional.positiontup],
expected_default_params_list)
eq_(nonpositional.construct_params(test_param_dict),
expected_test_params_dict)
pp = positional.construct_params(test_param_dict)
eq_(
[pp[k] for k in positional.positiontup],
expected_test_params_list
)
# check that params() doesn't modify original statement
s = select([table1], or_(table1.c.myid == bindparam('myid'),
table2.c.otherid ==
bindparam('myotherid')))
s2 = s.params({'myid': 8, 'myotherid': 7})
s3 = s2.params({'myid': 9})
assert s.compile().params == {'myid': None, 'myotherid': None}
assert s2.compile().params == {'myid': 8, 'myotherid': 7}
assert s3.compile().params == {'myid': 9, 'myotherid': 7}
# test using same 'unique' param object twice in one compile
s = select([table1.c.myid]).where(table1.c.myid == 12).as_scalar()
s2 = select([table1, s], table1.c.myid == s)
self.assert_compile(
s2, "SELECT mytable.myid, mytable.name, mytable.description, "
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = "
":myid_1) AS anon_1 FROM mytable WHERE mytable.myid = "
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)")
positional = s2.compile(dialect=sqlite.dialect())
pp = positional.params
assert [pp[k] for k in positional.positiontup] == [12, 12]
# check that conflicts with "unique" params are caught
s = select([table1], or_(table1.c.myid == 7,
table1.c.myid == bindparam('myid_1')))
assert_raises_message(exc.CompileError,
"conflicts with unique bind parameter "
"of the same name",
str, s)
s = select([table1], or_(table1.c.myid == 7, table1.c.myid == 8,
table1.c.myid == bindparam('myid_1')))
assert_raises_message(exc.CompileError,
"conflicts with unique bind parameter "
"of the same name",
str, s)
def _test_binds_no_hash_collision(self):
"""test that construct_params doesn't corrupt dict
due to hash collisions"""
total_params = 100000
in_clause = [':in%d' % i for i in range(total_params)]
params = dict(('in%d' % i, i) for i in range(total_params))
t = text('text clause %s' % ', '.join(in_clause))
eq_(len(t.bindparams), total_params)
c = t.compile()
pp = c.construct_params(params)
eq_(len(set(pp)), total_params, '%s %s' % (len(set(pp)), len(pp)))
eq_(len(set(pp.values())), total_params)
def test_bind_as_col(self):
t = table('foo', column('id'))
s = select([t, literal('lala').label('hoho')])
self.assert_compile(s, "SELECT foo.id, :param_1 AS hoho FROM foo")
assert [str(c) for c in s.c] == ["id", "hoho"]
def test_bind_callable(self):
expr = column('x') == bindparam("key", callable_=lambda: 12)
self.assert_compile(
expr,
"x = :key",
{'x': 12}
)
def test_bind_params_missing(self):
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x'",
select(
[table1]).where(
and_(
table1.c.myid == bindparam("x", required=True),
table1.c.name == bindparam("y", required=True)
)
).compile().construct_params,
params=dict(y=5)
)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x'",
select(
[table1]).where(
table1.c.myid == bindparam(
"x",
required=True)).compile().construct_params)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x', "
"in parameter group 2",
select(
[table1]).where(
and_(
table1.c.myid == bindparam("x", required=True),
table1.c.name == bindparam("y", required=True)
)
).compile().construct_params,
params=dict(y=5), _group_number=2)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x', "
"in parameter group 2",
select(
[table1]).where(
table1.c.myid == bindparam(
"x",
required=True)).compile().construct_params,
_group_number=2)
def test_tuple(self):
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
[(1, 'foo'), (5, 'bar')]),
"(mytable.myid, mytable.name) IN "
"((:param_1, :param_2), (:param_3, :param_4))"
)
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
[tuple_(table2.c.otherid, table2.c.othername)]
),
"(mytable.myid, mytable.name) IN "
"((myothertable.otherid, myothertable.othername))"
)
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
select([table2.c.otherid, table2.c.othername])
),
"(mytable.myid, mytable.name) IN (SELECT "
"myothertable.otherid, myothertable.othername FROM myothertable)"
)
def test_cast(self):
tbl = table('casttest',
column('id', Integer),
column('v1', Float),
column('v2', Float),
column('ts', TIMESTAMP),
)
def check_results(dialect, expected_results, literal):
eq_(len(expected_results), 5,
'Incorrect number of expected results')
eq_(str(cast(tbl.c.v1, Numeric).compile(dialect=dialect)),
'CAST(casttest.v1 AS %s)' % expected_results[0])
eq_(str(cast(tbl.c.v1, Numeric(12, 9)).compile(dialect=dialect)),
'CAST(casttest.v1 AS %s)' % expected_results[1])
eq_(str(cast(tbl.c.ts, Date).compile(dialect=dialect)),
'CAST(casttest.ts AS %s)' % expected_results[2])
eq_(str(cast(1234, Text).compile(dialect=dialect)),
'CAST(%s AS %s)' % (literal, expected_results[3]))
eq_(str(cast('test', String(20)).compile(dialect=dialect)),
'CAST(%s AS %s)' % (literal, expected_results[4]))
# fixme: shoving all of this dialect-specific stuff in one test
        # is now officially completely ridiculous AND non-obviously omits
# coverage on other dialects.
sel = select([tbl, cast(tbl.c.v1, Numeric)]).compile(
dialect=dialect)
if isinstance(dialect, type(mysql.dialect())):
eq_(str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, "
"CAST(casttest.v1 AS DECIMAL) AS anon_1 \nFROM casttest")
else:
eq_(str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, CAST(casttest.v1 AS NUMERIC) AS "
"anon_1 \nFROM casttest")
# first test with PostgreSQL engine
check_results(
postgresql.dialect(), [
'NUMERIC', 'NUMERIC(12, 9)', 'DATE', 'TEXT', 'VARCHAR(20)'],
'%(param_1)s')
# then the Oracle engine
check_results(
oracle.dialect(), [
'NUMERIC', 'NUMERIC(12, 9)', 'DATE',
'CLOB', 'VARCHAR2(20 CHAR)'],
':param_1')
# then the sqlite engine
check_results(sqlite.dialect(), ['NUMERIC', 'NUMERIC(12, 9)',
'DATE', 'TEXT', 'VARCHAR(20)'], '?')
# then the MySQL engine
check_results(mysql.dialect(), ['DECIMAL', 'DECIMAL(12, 9)',
'DATE', 'CHAR', 'CHAR(20)'], '%s')
self.assert_compile(cast(text('NULL'), Integer),
'CAST(NULL AS INTEGER)',
dialect=sqlite.dialect())
self.assert_compile(cast(null(), Integer),
'CAST(NULL AS INTEGER)',
dialect=sqlite.dialect())
self.assert_compile(cast(literal_column('NULL'), Integer),
'CAST(NULL AS INTEGER)',
dialect=sqlite.dialect())
def test_over(self):
self.assert_compile(
func.row_number().over(),
"row_number() OVER ()"
)
self.assert_compile(
func.row_number().over(
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (ORDER BY mytable.name, mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name, "
"mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name],
order_by=[table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=table1.c.name,
order_by=table1.c.description
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=table1.c.name,
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.name, mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[],
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (ORDER BY mytable.name, mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name, table1.c.description],
order_by=[]
),
"row_number() OVER (PARTITION BY mytable.name, "
"mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[],
order_by=[]
),
"row_number() OVER ()"
)
self.assert_compile(
select([func.row_number().over(
order_by=table1.c.description
).label('foo')]),
"SELECT row_number() OVER (ORDER BY mytable.description) "
"AS foo FROM mytable"
)
# test from_obj generation.
# from func:
self.assert_compile(
select([
func.max(table1.c.name).over(
partition_by=['foo']
)
]),
"SELECT max(mytable.name) OVER (PARTITION BY foo) "
"AS anon_1 FROM mytable"
)
# from partition_by
self.assert_compile(
select([
func.row_number().over(
partition_by=[table1.c.name]
)
]),
"SELECT row_number() OVER (PARTITION BY mytable.name) "
"AS anon_1 FROM mytable"
)
# from order_by
self.assert_compile(
select([
func.row_number().over(
order_by=table1.c.name
)
]),
"SELECT row_number() OVER (ORDER BY mytable.name) "
"AS anon_1 FROM mytable"
)
# this tests that _from_objects
        # concatenates OK
self.assert_compile(
select([column("x") + over(func.foo())]),
"SELECT x + foo() OVER () AS anon_1"
)
def test_date_between(self):
import datetime
table = Table('dt', metadata,
Column('date', Date))
self.assert_compile(
table.select(table.c.date.between(datetime.date(2006, 6, 1),
datetime.date(2006, 6, 5))),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={'date_1': datetime.date(2006, 6, 1),
'date_2': datetime.date(2006, 6, 5)})
self.assert_compile(
table.select(sql.between(table.c.date, datetime.date(2006, 6, 1),
datetime.date(2006, 6, 5))),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={'date_1': datetime.date(2006, 6, 1),
'date_2': datetime.date(2006, 6, 5)})
def test_delayed_col_naming(self):
my_str = Column(String)
sel1 = select([my_str])
assert_raises_message(
exc.InvalidRequestError,
"Cannot initialize a sub-selectable with this Column",
lambda: sel1.c
)
# calling label or as_scalar doesn't compile
# anything.
sel2 = select([func.substr(my_str, 2, 3)]).label('my_substr')
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
str, sel2
)
sel3 = select([my_str]).as_scalar()
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
str, sel3
)
my_str.name = 'foo'
self.assert_compile(
sel1,
"SELECT foo",
)
self.assert_compile(
sel2,
'(SELECT substr(foo, :substr_2, :substr_3) AS substr_1)',
)
self.assert_compile(
sel3,
"(SELECT foo)"
)
def test_naming(self):
        # TODO: the parts where we check c.keys() are not "compile" tests;
        # they probably belong in test_selectable, or in some broken-up
        # version of that suite
f1 = func.hoho(table1.c.name)
s1 = select([table1.c.myid, table1.c.myid.label('foobar'),
f1,
func.lala(table1.c.name).label('gg')])
eq_(
list(s1.c.keys()),
['myid', 'foobar', str(f1), 'gg']
)
meta = MetaData()
t1 = Table('mytable', meta, Column('col1', Integer))
exprs = (
table1.c.myid == 12,
func.hoho(table1.c.myid),
cast(table1.c.name, Numeric),
literal('x'),
)
for col, key, expr, lbl in (
(table1.c.name, 'name', 'mytable.name', None),
(exprs[0], str(exprs[0]), 'mytable.myid = :myid_1', 'anon_1'),
(exprs[1], str(exprs[1]), 'hoho(mytable.myid)', 'hoho_1'),
(exprs[2], str(exprs[2]),
'CAST(mytable.name AS NUMERIC)', 'anon_1'),
(t1.c.col1, 'col1', 'mytable.col1', None),
(column('some wacky thing'), 'some wacky thing',
'"some wacky thing"', ''),
(exprs[3], exprs[3].key, ":param_1", "anon_1")
):
if getattr(col, 'table', None) is not None:
t = col.table
else:
t = table1
s1 = select([col], from_obj=t)
assert list(s1.c.keys()) == [key], list(s1.c.keys())
if lbl:
self.assert_compile(
s1, "SELECT %s AS %s FROM mytable" %
(expr, lbl))
else:
self.assert_compile(s1, "SELECT %s FROM mytable" % (expr,))
s1 = select([s1])
if lbl:
self.assert_compile(
s1, "SELECT %s FROM (SELECT %s AS %s FROM mytable)" %
(lbl, expr, lbl))
elif col.table is not None:
# sqlite rule labels subquery columns
self.assert_compile(
s1, "SELECT %s FROM (SELECT %s AS %s FROM mytable)" %
(key, expr, key))
else:
self.assert_compile(s1,
"SELECT %s FROM (SELECT %s FROM mytable)" %
(expr, expr))
def test_hints(self):
s = select([table1.c.myid]).with_hint(table1, "test hint %(name)s")
s2 = select([table1.c.myid]).\
with_hint(table1, "index(%(name)s idx)", 'oracle').\
with_hint(table1, "WITH HINT INDEX idx", 'sybase')
a1 = table1.alias()
s3 = select([a1.c.myid]).with_hint(a1, "index(%(name)s hint)")
subs4 = select([
table1, table2
]).select_from(
table1.join(table2, table1.c.myid == table2.c.otherid)).\
with_hint(table1, 'hint1')
s4 = select([table3]).select_from(
table3.join(
subs4,
subs4.c.othername == table3.c.otherstuff
)
).\
with_hint(table3, 'hint3')
t1 = table('QuotedName', column('col1'))
s6 = select([t1.c.col1]).where(t1.c.col1 > 10).\
with_hint(t1, '%(name)s idx1')
a2 = t1.alias('SomeName')
s7 = select([a2.c.col1]).where(a2.c.col1 > 10).\
with_hint(a2, '%(name)s idx1')
mysql_d, oracle_d, sybase_d = \
mysql.dialect(), \
oracle.dialect(), \
sybase.dialect()
for stmt, dialect, expected in [
(s, mysql_d,
"SELECT mytable.myid FROM mytable test hint mytable"),
(s, oracle_d,
"SELECT /*+ test hint mytable */ mytable.myid FROM mytable"),
(s, sybase_d,
"SELECT mytable.myid FROM mytable test hint mytable"),
(s2, mysql_d,
"SELECT mytable.myid FROM mytable"),
(s2, oracle_d,
"SELECT /*+ index(mytable idx) */ mytable.myid FROM mytable"),
(s2, sybase_d,
"SELECT mytable.myid FROM mytable WITH HINT INDEX idx"),
(s3, mysql_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)"),
(s3, oracle_d,
"SELECT /*+ index(mytable_1 hint) */ mytable_1.myid FROM "
"mytable mytable_1"),
(s3, sybase_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)"),
(s4, mysql_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 INNER JOIN (SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid, "
"myothertable.othername FROM mytable hint1 INNER "
"JOIN myothertable ON mytable.myid = myothertable.otherid) "
"ON othername = thirdtable.otherstuff"),
(s4, sybase_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 JOIN (SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid, "
"myothertable.othername FROM mytable hint1 "
"JOIN myothertable ON mytable.myid = myothertable.otherid) "
"ON othername = thirdtable.otherstuff"),
(s4, oracle_d,
"SELECT /*+ hint3 */ thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable JOIN (SELECT /*+ hint1 */ mytable.myid,"
" mytable.name, mytable.description, myothertable.otherid,"
" myothertable.othername FROM mytable JOIN myothertable ON"
" mytable.myid = myothertable.otherid) ON othername ="
" thirdtable.otherstuff"),
# TODO: figure out dictionary ordering solution here
# (s5, oracle_d,
# "SELECT /*+ hint3 */ /*+ hint1 */ thirdtable.userid, "
# "thirdtable.otherstuff "
# "FROM thirdtable JOIN (SELECT mytable.myid,"
# " mytable.name, mytable.description, myothertable.otherid,"
# " myothertable.othername FROM mytable JOIN myothertable ON"
# " mytable.myid = myothertable.otherid) ON othername ="
# " thirdtable.otherstuff"),
(s6, oracle_d,
"""SELECT /*+ "QuotedName" idx1 */ "QuotedName".col1 """
"""FROM "QuotedName" WHERE "QuotedName".col1 > :col1_1"""),
(s7, oracle_d,
"""SELECT /*+ SomeName idx1 */ "SomeName".col1 FROM """
""""QuotedName" "SomeName" WHERE "SomeName".col1 > :col1_1"""),
]:
self.assert_compile(
stmt,
expected,
dialect=dialect
)
def test_literal_as_text_fromstring(self):
self.assert_compile(
and_("a", "b"),
"a AND b"
)
def test_literal_as_text_nonstring_raise(self):
assert_raises(exc.ArgumentError,
and_, ("a",), ("b",)
)
class UnsupportedTest(fixtures.TestBase):
def test_unsupported_element_str_visit_name(self):
from sqlalchemy.sql.expression import ClauseElement
class SomeElement(ClauseElement):
__visit_name__ = 'some_element'
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.SQLCompiler .*"
r"can't render element of type <class '.*SomeElement'>",
SomeElement().compile
)
def test_unsupported_element_meth_visit_name(self):
from sqlalchemy.sql.expression import ClauseElement
class SomeElement(ClauseElement):
@classmethod
def __visit_name__(cls):
return "some_element"
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.SQLCompiler .*"
r"can't render element of type <class '.*SomeElement'>",
SomeElement().compile
)
def test_unsupported_operator(self):
from sqlalchemy.sql.expression import BinaryExpression
def myop(x, y):
pass
binary = BinaryExpression(column("foo"), column("bar"), myop)
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.SQLCompiler .*"
r"can't render element of type <function.*",
binary.compile
)
class KwargPropagationTest(fixtures.TestBase):
@classmethod
def setup_class(cls):
from sqlalchemy.sql.expression import ColumnClause, TableClause
class CatchCol(ColumnClause):
pass
class CatchTable(TableClause):
pass
cls.column = CatchCol("x")
cls.table = CatchTable("y")
cls.criterion = cls.column == CatchCol('y')
@compiles(CatchCol)
def compile_col(element, compiler, **kw):
assert "canary" in kw
return compiler.visit_column(element)
@compiles(CatchTable)
def compile_table(element, compiler, **kw):
assert "canary" in kw
return compiler.visit_table(element)
def _do_test(self, element):
d = default.DefaultDialect()
d.statement_compiler(d, element,
compile_kwargs={"canary": True})
def test_binary(self):
self._do_test(self.column == 5)
def test_select(self):
s = select([self.column]).select_from(self.table).\
where(self.column == self.criterion).\
order_by(self.column)
self._do_test(s)
def test_case(self):
c = case([(self.criterion, self.column)], else_=self.column)
self._do_test(c)
def test_cast(self):
c = cast(self.column, Integer)
self._do_test(c)
class CRUDTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_correlated_update(self):
# test against a straight text subquery
u = update(
table1,
values={
table1.c.name:
text("(select name from mytable where id=mytable.id)")
}
)
self.assert_compile(
u,
"UPDATE mytable SET name=(select name from mytable "
"where id=mytable.id)")
mt = table1.alias()
u = update(table1, values={
table1.c.name:
select([mt.c.name], mt.c.myid == table1.c.myid)
})
self.assert_compile(
u, "UPDATE mytable SET name=(SELECT mytable_1.name FROM "
"mytable AS mytable_1 WHERE "
"mytable_1.myid = mytable.myid)")
# test against a regular constructed subquery
s = select([table2], table2.c.otherid == table1.c.myid)
u = update(table1, table1.c.name == 'jack', values={table1.c.name: s})
self.assert_compile(
u, "UPDATE mytable SET name=(SELECT myothertable.otherid, "
"myothertable.othername FROM myothertable WHERE "
"myothertable.otherid = mytable.myid) "
"WHERE mytable.name = :name_1")
# test a non-correlated WHERE clause
s = select([table2.c.othername], table2.c.otherid == 7)
u = update(table1, table1.c.name == s)
self.assert_compile(u,
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description WHERE mytable.name = "
"(SELECT myothertable.othername FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)")
# test one that is actually correlated...
s = select([table2.c.othername], table2.c.otherid == table1.c.myid)
u = table1.update(table1.c.name == s)
self.assert_compile(u,
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description WHERE mytable.name = "
"(SELECT myothertable.othername FROM myothertable "
"WHERE myothertable.otherid = mytable.myid)")
# test correlated FROM implicit in WHERE and SET clauses
u = table1.update().values(name=table2.c.othername)\
.where(table2.c.otherid == table1.c.myid)
self.assert_compile(
u, "UPDATE mytable SET name=myothertable.othername "
"FROM myothertable WHERE myothertable.otherid = mytable.myid")
u = table1.update().values(name='foo')\
.where(table2.c.otherid == table1.c.myid)
self.assert_compile(
u, "UPDATE mytable SET name=:name "
"FROM myothertable WHERE myothertable.otherid = mytable.myid")
self.assert_compile(u,
"UPDATE mytable SET name=:name "
"FROM mytable, myothertable WHERE "
"myothertable.otherid = mytable.myid",
dialect=mssql.dialect())
self.assert_compile(u.where(table2.c.othername == mt.c.name),
"UPDATE mytable SET name=:name "
"FROM mytable, myothertable, mytable AS mytable_1 "
"WHERE myothertable.otherid = mytable.myid "
"AND myothertable.othername = mytable_1.name",
dialect=mssql.dialect())
def test_binds_that_match_columns(self):
"""test bind params named after column names
replace the normal SET/VALUES generation."""
t = table('foo', column('x'), column('y'))
u = t.update().where(t.c.x == bindparam('x'))
assert_raises(exc.CompileError, u.compile)
self.assert_compile(u, "UPDATE foo SET WHERE foo.x = :x", params={})
assert_raises(exc.CompileError, u.values(x=7).compile)
self.assert_compile(u.values(y=7),
"UPDATE foo SET y=:y WHERE foo.x = :x")
assert_raises(exc.CompileError,
u.values(x=7).compile, column_keys=['x', 'y'])
assert_raises(exc.CompileError, u.compile, column_keys=['x', 'y'])
self.assert_compile(
u.values(
x=3 +
bindparam('x')),
"UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x")
self.assert_compile(
u.values(
x=3 +
bindparam('x')),
"UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x",
params={
'x': 1})
self.assert_compile(
u.values(
x=3 +
bindparam('x')),
"UPDATE foo SET x=(:param_1 + :x), y=:y WHERE foo.x = :x",
params={
'x': 1,
'y': 2})
i = t.insert().values(x=3 + bindparam('x'))
self.assert_compile(i,
"INSERT INTO foo (x) VALUES ((:param_1 + :x))")
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x), :y)",
params={
'x': 1,
'y': 2})
i = t.insert().values(x=bindparam('y'))
self.assert_compile(i, "INSERT INTO foo (x) VALUES (:y)")
i = t.insert().values(x=bindparam('y'), y=5)
assert_raises(exc.CompileError, i.compile)
i = t.insert().values(x=3 + bindparam('y'), y=5)
assert_raises(exc.CompileError, i.compile)
i = t.insert().values(x=3 + bindparam('x2'))
self.assert_compile(i,
"INSERT INTO foo (x) VALUES ((:param_1 + :x2))")
self.assert_compile(
i,
"INSERT INTO foo (x) VALUES ((:param_1 + :x2))",
params={})
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x2), :y)",
params={
'x': 1,
'y': 2})
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x2), :y)",
params={
'x2': 1,
'y': 2})
def test_unconsumed_names(self):
t = table("t", column("x"), column("y"))
t2 = table("t2", column("q"), column("z"))
assert_raises_message(
exc.CompileError,
"Unconsumed column names: z",
t.insert().values(x=5, z=5).compile,
)
assert_raises_message(
exc.CompileError,
"Unconsumed column names: z",
t.update().values(x=5, z=5).compile,
)
assert_raises_message(
exc.CompileError,
"Unconsumed column names: j",
t.update().values(x=5, j=7).values({t2.c.z: 5}).
where(t.c.x == t2.c.q).compile,
)
# bindparam names don't get counted
i = t.insert().values(x=3 + bindparam('x2'))
self.assert_compile(
i,
"INSERT INTO t (x) VALUES ((:param_1 + :x2))"
)
# even if in the params list
i = t.insert().values(x=3 + bindparam('x2'))
self.assert_compile(
i,
"INSERT INTO t (x) VALUES ((:param_1 + :x2))",
params={"x2": 1}
)
assert_raises_message(
exc.CompileError,
"Unconsumed column names: j",
t.update().values(x=5, j=7).compile,
column_keys=['j']
)
def test_labels_no_collision(self):
t = table('foo', column('id'), column('foo_id'))
self.assert_compile(
t.update().where(t.c.id == 5),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :id_1"
)
self.assert_compile(
t.update().where(t.c.id == bindparam(key=t.c.id._label)),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :foo_id_1"
)
class DDLTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def _illegal_type_fixture(self):
class MyType(types.TypeEngine):
pass
@compiles(MyType)
def compile(element, compiler, **kw):
raise exc.CompileError("Couldn't compile type")
return MyType
def test_reraise_of_column_spec_issue(self):
MyType = self._illegal_type_fixture()
t1 = Table('t', MetaData(),
Column('x', MyType())
)
assert_raises_message(
exc.CompileError,
r"\(in table 't', column 'x'\): Couldn't compile type",
schema.CreateTable(t1).compile
)
def test_reraise_of_column_spec_issue_unicode(self):
MyType = self._illegal_type_fixture()
t1 = Table('t', MetaData(),
Column(u('méil'), MyType())
)
assert_raises_message(
exc.CompileError,
u(r"\(in table 't', column 'méil'\): Couldn't compile type"),
schema.CreateTable(t1).compile
)
def test_system_flag(self):
m = MetaData()
t = Table('t', m, Column('x', Integer),
Column('y', Integer, system=True),
Column('z', Integer))
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE t (x INTEGER, z INTEGER)"
)
m2 = MetaData()
t2 = t.tometadata(m2)
self.assert_compile(
schema.CreateTable(t2),
"CREATE TABLE t (x INTEGER, z INTEGER)"
)
class InlineDefaultTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_insert(self):
m = MetaData()
foo = Table('foo', m,
Column('id', Integer))
t = Table('test', m,
Column('col1', Integer, default=func.foo(1)),
Column('col2', Integer, default=select(
[func.coalesce(func.max(foo.c.id))])),
)
self.assert_compile(
t.insert(
inline=True, values={}),
"INSERT INTO test (col1, col2) VALUES (foo(:foo_1), "
"(SELECT coalesce(max(foo.id)) AS coalesce_1 FROM "
"foo))")
def test_update(self):
m = MetaData()
foo = Table('foo', m,
Column('id', Integer))
t = Table('test', m,
Column('col1', Integer, onupdate=func.foo(1)),
Column('col2', Integer, onupdate=select(
[func.coalesce(func.max(foo.c.id))])),
Column('col3', String(30))
)
self.assert_compile(t.update(inline=True, values={'col3': 'foo'}),
"UPDATE test SET col1=foo(:foo_1), col2=(SELECT "
"coalesce(max(foo.id)) AS coalesce_1 FROM foo), "
"col3=:col3")
class SchemaTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_select(self):
self.assert_compile(table4.select(),
"SELECT remote_owner.remotetable.rem_id, "
"remote_owner.remotetable.datatype_id,"
" remote_owner.remotetable.value "
"FROM remote_owner.remotetable")
self.assert_compile(
table4.select(
and_(
table4.c.datatype_id == 7,
table4.c.value == 'hi')),
"SELECT remote_owner.remotetable.rem_id, "
"remote_owner.remotetable.datatype_id,"
" remote_owner.remotetable.value "
"FROM remote_owner.remotetable WHERE "
"remote_owner.remotetable.datatype_id = :datatype_id_1 AND"
" remote_owner.remotetable.value = :value_1")
s = table4.select(and_(table4.c.datatype_id == 7,
table4.c.value == 'hi'), use_labels=True)
self.assert_compile(
s, "SELECT remote_owner.remotetable.rem_id AS"
" remote_owner_remotetable_rem_id, "
"remote_owner.remotetable.datatype_id AS"
" remote_owner_remotetable_datatype_id, "
"remote_owner.remotetable.value "
"AS remote_owner_remotetable_value FROM "
"remote_owner.remotetable WHERE "
"remote_owner.remotetable.datatype_id = :datatype_id_1 AND "
"remote_owner.remotetable.value = :value_1")
# multi-part schema name
self.assert_compile(table5.select(),
'SELECT "dbo.remote_owner".remotetable.rem_id, '
'"dbo.remote_owner".remotetable.datatype_id, '
'"dbo.remote_owner".remotetable.value '
'FROM "dbo.remote_owner".remotetable'
)
# multi-part schema name labels - convert '.' to '_'
self.assert_compile(table5.select(use_labels=True),
'SELECT "dbo.remote_owner".remotetable.rem_id AS'
' dbo_remote_owner_remotetable_rem_id, '
'"dbo.remote_owner".remotetable.datatype_id'
' AS dbo_remote_owner_remotetable_datatype_id,'
' "dbo.remote_owner".remotetable.value AS '
'dbo_remote_owner_remotetable_value FROM'
' "dbo.remote_owner".remotetable'
)
def test_alias(self):
a = alias(table4, 'remtable')
self.assert_compile(a.select(a.c.datatype_id == 7),
"SELECT remtable.rem_id, remtable.datatype_id, "
"remtable.value FROM"
" remote_owner.remotetable AS remtable "
"WHERE remtable.datatype_id = :datatype_id_1")
def test_update(self):
self.assert_compile(
table4.update(table4.c.value == 'test',
values={table4.c.datatype_id: 12}),
"UPDATE remote_owner.remotetable SET datatype_id=:datatype_id "
"WHERE remote_owner.remotetable.value = :value_1")
def test_insert(self):
self.assert_compile(table4.insert(values=(2, 5, 'test')),
"INSERT INTO remote_owner.remotetable "
"(rem_id, datatype_id, value) VALUES "
"(:rem_id, :datatype_id, :value)")
class CorrelateTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_dont_overcorrelate(self):
self.assert_compile(select([table1], from_obj=[table1,
table1.select()]),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable, (SELECT "
"mytable.myid AS myid, mytable.name AS "
"name, mytable.description AS description "
"FROM mytable)")
def _fixture(self):
t1 = table('t1', column('a'))
t2 = table('t2', column('a'))
return t1, t2, select([t1]).where(t1.c.a == t2.c.a)
def _assert_where_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a FROM t2 WHERE t2.a = "
"(SELECT t1.a FROM t1 WHERE t1.a = t2.a)")
def _assert_where_all_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a FROM t1, t2 WHERE t2.a = "
"(SELECT t1.a WHERE t1.a = t2.a)")
# note there's no more "backwards" correlation after
# we've done #2746
# def _assert_where_backwards_correlated(self, stmt):
# self.assert_compile(
# stmt,
# "SELECT t2.a FROM t2 WHERE t2.a = "
# "(SELECT t1.a FROM t2 WHERE t1.a = t2.a)")
# def _assert_column_backwards_correlated(self, stmt):
# self.assert_compile(stmt,
# "SELECT t2.a, (SELECT t1.a FROM t2 WHERE t1.a = t2.a) "
# "AS anon_1 FROM t2")
def _assert_column_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a, (SELECT t1.a FROM t1 WHERE t1.a = t2.a) "
"AS anon_1 FROM t2")
def _assert_column_all_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a, "
"(SELECT t1.a WHERE t1.a = t2.a) AS anon_1 FROM t1, t2")
def _assert_having_correlated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a FROM t2 HAVING t2.a = "
"(SELECT t1.a FROM t1 WHERE t1.a = t2.a)")
def _assert_from_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a, anon_1.a FROM t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1")
def _assert_from_all_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a, anon_1.a FROM t1, t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1")
def _assert_where_uncorrelated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a FROM t2 WHERE t2.a = "
"(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)")
def _assert_column_uncorrelated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a, (SELECT t1.a FROM t1, t2 "
"WHERE t1.a = t2.a) AS anon_1 FROM t2")
def _assert_having_uncorrelated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a FROM t2 HAVING t2.a = "
"(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)")
def _assert_where_single_full_correlated(self, stmt):
self.assert_compile(stmt,
"SELECT t1.a FROM t1 WHERE t1.a = (SELECT t1.a)")
def test_correlate_semiauto_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1.correlate(t2)))
def test_correlate_semiauto_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.correlate(t2).as_scalar()]))
def test_correlate_semiauto_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate(t2).alias()]))
def test_correlate_semiauto_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1.correlate(t2)))
def test_correlate_except_inclusion_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1.correlate_except(t1)))
def test_correlate_except_exclusion_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_uncorrelated(
select([t2]).where(t2.c.a == s1.correlate_except(t2)))
def test_correlate_except_inclusion_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.correlate_except(t1).as_scalar()]))
def test_correlate_except_exclusion_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_uncorrelated(
select([t2, s1.correlate_except(t2).as_scalar()]))
def test_correlate_except_inclusion_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate_except(t1).alias()]))
def test_correlate_except_exclusion_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate_except(t2).alias()]))
def test_correlate_except_none(self):
t1, t2, s1 = self._fixture()
self._assert_where_all_correlated(
select([t1, t2]).where(t2.c.a == s1.correlate_except(None)))
def test_correlate_except_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1.correlate_except(t1)))
def test_correlate_auto_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1))
def test_correlate_auto_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.as_scalar()]))
def test_correlate_auto_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.alias()]))
def test_correlate_auto_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1))
def test_correlate_disabled_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_uncorrelated(
select([t2]).where(t2.c.a == s1.correlate(None)))
def test_correlate_disabled_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_uncorrelated(
select([t2, s1.correlate(None).as_scalar()]))
def test_correlate_disabled_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate(None).alias()]))
def test_correlate_disabled_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_uncorrelated(
select([t2]).having(t2.c.a == s1.correlate(None)))
def test_correlate_all_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_all_correlated(
select([t1, t2]).where(t2.c.a == s1.correlate(t1, t2)))
def test_correlate_all_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_all_correlated(
select([t1, t2, s1.correlate(t1, t2).as_scalar()]))
def test_correlate_all_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_all_uncorrelated(
select([t1, t2, s1.correlate(t1, t2).alias()]))
def test_correlate_where_all_unintentional(self):
t1, t2, s1 = self._fixture()
assert_raises_message(
exc.InvalidRequestError,
"returned no FROM clauses due to auto-correlation",
select([t1, t2]).where(t2.c.a == s1).compile
)
def test_correlate_from_all_ok(self):
t1, t2, s1 = self._fixture()
self.assert_compile(
select([t1, t2, s1]),
"SELECT t1.a, t2.a, a FROM t1, t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a)"
)
def test_correlate_auto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s)
self.assert_compile(s2,
"SELECT t1.a FROM t1 WHERE t1.a = "
"(SELECT t1.a FROM t1)")
def test_correlate_semiauto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s.correlate(t1))
self._assert_where_single_full_correlated(s2)
def test_correlate_except_semiauto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s.correlate_except(t2))
self._assert_where_single_full_correlated(s2)
def test_correlate_alone_noeffect(self):
# new as of #2668
t1, t2, s1 = self._fixture()
self.assert_compile(s1.correlate(t1, t2),
"SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a")
def test_correlate_except_froms(self):
# new as of #2748
t1 = table('t1', column('a'))
t2 = table('t2', column('a'), column('b'))
s = select([t2.c.b]).where(t1.c.a == t2.c.a)
s = s.correlate_except(t2).alias('s')
s2 = select([func.foo(s.c.b)]).as_scalar()
s3 = select([t1], order_by=s2)
self.assert_compile(
s3, "SELECT t1.a FROM t1 ORDER BY "
"(SELECT foo(s.b) AS foo_1 FROM "
"(SELECT t2.b AS b FROM t2 WHERE t1.a = t2.a) AS s)")
def test_multilevel_froms_correlation(self):
# new as of #2748
p = table('parent', column('id'))
c = table('child', column('id'), column('parent_id'), column('pos'))
s = c.select().where(
c.c.parent_id == p.c.id).order_by(
c.c.pos).limit(1)
s = s.correlate(p)
s = exists().select_from(s).where(s.c.id == 1)
s = select([p]).where(s)
self.assert_compile(
s, "SELECT parent.id FROM parent WHERE EXISTS (SELECT * "
"FROM (SELECT child.id AS id, child.parent_id AS parent_id, "
"child.pos AS pos FROM child WHERE child.parent_id = parent.id "
"ORDER BY child.pos LIMIT :param_1) WHERE id = :id_1)")
def test_no_contextless_correlate_except(self):
# new as of #2748
t1 = table('t1', column('x'))
t2 = table('t2', column('y'))
t3 = table('t3', column('z'))
s = select([t1]).where(t1.c.x == t2.c.y).\
where(t2.c.y == t3.c.z).correlate_except(t1)
self.assert_compile(
s,
"SELECT t1.x FROM t1, t2, t3 WHERE t1.x = t2.y AND t2.y = t3.z")
def test_multilevel_implicit_correlation_disabled(self):
# test that implicit correlation with multilevel WHERE correlation
# behaves like 0.8.1, 0.7 (i.e. doesn't happen)
t1 = table('t1', column('x'))
t2 = table('t2', column('y'))
t3 = table('t3', column('z'))
s = select([t1.c.x]).where(t1.c.x == t2.c.y)
s2 = select([t3.c.z]).where(t3.c.z == s.as_scalar())
s3 = select([t1]).where(t1.c.x == s2.as_scalar())
self.assert_compile(s3,
"SELECT t1.x FROM t1 "
"WHERE t1.x = (SELECT t3.z "
"FROM t3 "
"WHERE t3.z = (SELECT t1.x "
"FROM t1, t2 "
"WHERE t1.x = t2.y))"
)
def test_from_implicit_correlation_disabled(self):
# test that implicit correlation with immediate and
# multilevel FROM clauses behaves like 0.8.1 (i.e. doesn't happen)
t1 = table('t1', column('x'))
t2 = table('t2', column('y'))
t3 = table('t3', column('z'))
s = select([t1.c.x]).where(t1.c.x == t2.c.y)
s2 = select([t2, s])
s3 = select([t1, s2])
self.assert_compile(s3,
"SELECT t1.x, y, x FROM t1, "
"(SELECT t2.y AS y, x FROM t2, "
"(SELECT t1.x AS x FROM t1, t2 WHERE t1.x = t2.y))"
)
class CoercionTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
def _fixture(self):
m = MetaData()
return Table('foo', m,
Column('id', Integer))
bool_table = table('t', column('x', Boolean))
def test_coerce_bool_where(self):
self.assert_compile(
select([self.bool_table]).where(self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x"
)
def test_coerce_bool_where_non_native(self):
self.assert_compile(
select([self.bool_table]).where(self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x = 1",
dialect=default.DefaultDialect(supports_native_boolean=False)
)
self.assert_compile(
select([self.bool_table]).where(~self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x = 0",
dialect=default.DefaultDialect(supports_native_boolean=False)
)
def test_null_constant(self):
self.assert_compile(_literal_as_text(None), "NULL")
def test_false_constant(self):
self.assert_compile(_literal_as_text(False), "false")
def test_true_constant(self):
self.assert_compile(_literal_as_text(True), "true")
def test_val_and_false(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, False),
"false")
def test_val_and_true_coerced(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, True),
"foo.id = :id_1")
def test_val_is_null_coerced(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == None),
"foo.id IS NULL")
def test_val_and_None(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, None),
"foo.id = :id_1 AND NULL")
def test_None_and_val(self):
t = self._fixture()
self.assert_compile(and_(None, t.c.id == 1),
"NULL AND foo.id = :id_1")
def test_None_and_nothing(self):
        # current convention is that None in and_()
        # returns None. May want
# to revise this at some point.
self.assert_compile(
and_(None), "NULL")
def test_val_and_null(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, null()),
"foo.id = :id_1 AND NULL")
class ResultMapTest(fixtures.TestBase):
"""test the behavior of the 'entry stack' and the determination
when the result_map needs to be populated.
"""
def test_compound_populates(self):
t = Table('t', MetaData(), Column('a', Integer), Column('b', Integer))
stmt = select([t]).union(select([t]))
comp = stmt.compile()
eq_(
comp.result_map,
{'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type),
'b': ('b', (t.c.b, 'b', 'b'), t.c.b.type)}
)
def test_compound_not_toplevel_doesnt_populate(self):
t = Table('t', MetaData(), Column('a', Integer), Column('b', Integer))
subq = select([t]).union(select([t]))
stmt = select([t.c.a]).select_from(t.join(subq, t.c.a == subq.c.a))
comp = stmt.compile()
eq_(
comp.result_map,
{'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type)}
)
def test_compound_only_top_populates(self):
t = Table('t', MetaData(), Column('a', Integer), Column('b', Integer))
stmt = select([t.c.a]).union(select([t.c.b]))
comp = stmt.compile()
eq_(
comp.result_map,
{'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type)},
)
def test_label_plus_element(self):
t = Table('t', MetaData(), Column('a', Integer))
l1 = t.c.a.label('bar')
tc = type_coerce(t.c.a, String)
stmt = select([t.c.a, l1, tc])
comp = stmt.compile()
tc_anon_label = comp.result_map['a_1'][1][0]
eq_(
comp.result_map,
{
'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type),
'bar': ('bar', (l1, 'bar'), l1.type),
'a_1': ('%%(%d a)s' % id(tc), (tc_anon_label, 'a_1'), tc.type),
},
)
def test_label_conflict_union(self):
t1 = Table('t1', MetaData(), Column('a', Integer),
Column('b', Integer))
t2 = Table('t2', MetaData(), Column('t1_a', Integer))
union = select([t2]).union(select([t2])).alias()
t1_alias = t1.alias()
stmt = select([t1, t1_alias]).select_from(
t1.join(union, t1.c.a == union.c.t1_a)).apply_labels()
comp = stmt.compile()
eq_(
set(comp.result_map),
set(['t1_1_b', 't1_1_a', 't1_a', 't1_b'])
)
is_(
comp.result_map['t1_a'][1][2], t1.c.a
)
def test_insert_with_select_values(self):
astring = Column('a', String)
aint = Column('a', Integer)
m = MetaData()
Table('t1', m, astring)
t2 = Table('t2', m, aint)
stmt = t2.insert().values(a=select([astring])).returning(aint)
comp = stmt.compile(dialect=postgresql.dialect())
eq_(
comp.result_map,
{'a': ('a', (aint, 'a', 'a'), aint.type)}
)
def test_insert_from_select(self):
astring = Column('a', String)
aint = Column('a', Integer)
m = MetaData()
Table('t1', m, astring)
t2 = Table('t2', m, aint)
stmt = t2.insert().from_select(['a'], select([astring])).\
returning(aint)
comp = stmt.compile(dialect=postgresql.dialect())
eq_(
comp.result_map,
{'a': ('a', (aint, 'a', 'a'), aint.type)}
)
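
# The tests above drive parameter handling through the test harness; the
# guarded sketch below calls the same compile()/construct_params() API
# directly. It reuses names already imported at the top of this module
# (select, table, column, bindparam, sqlite); the table and values are made
# up for illustration and this is not part of the test suite.
if __name__ == '__main__':
    foo = table('foo', column('x'), column('y'))
    stmt = select([foo]).where(foo.c.x == bindparam('x'))
    # Named-parameter form: the default compiler keeps :x as a named bind.
    compiled = stmt.compile()
    print(compiled)
    print(compiled.construct_params({'x': 5}))   # {'x': 5}
    # Positional form: the SQLite dialect renders '?' placeholders and
    # exposes their ordering via positiontup.
    positional = stmt.compile(dialect=sqlite.dialect())
    pp = positional.construct_params({'x': 5})
    print([pp[k] for k in positional.positiontup])   # [5]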
|
the-stack_0_7130 | import math
import torch as th
from torch import nn
from torch.nn import functional as F
from . import torch_util as tu
from gym3.types import Real, TensorType
REAL = Real()
class Encoder(nn.Module):
"""
Takes in seq of observations and outputs sequence of codes
Encoders can be stateful, meaning that you pass in one observation at a
time and update the state, which is a separate object. (This object
doesn't store any state except parameters)
"""
def __init__(self, obtype, codetype):
super().__init__()
self.obtype = obtype
self.codetype = codetype
def initial_state(self, batchsize):
raise NotImplementedError
def empty_state(self):
return None
def stateless_forward(self, obs):
"""
inputs:
obs: array or dict, all with preshape (B, T)
returns:
codes: array or dict, all with preshape (B, T)
"""
code, _state = self(obs, None, self.empty_state())
return code
def forward(self, obs, first, state_in):
"""
inputs:
obs: array or dict, all with preshape (B, T)
first: float array shape (B, T)
state_in: array or dict, all with preshape (B,)
returns:
codes: array or dict
state_out: array or dict
"""
raise NotImplementedError
class CnnBasicBlock(nn.Module):
"""
Residual basic block (without batchnorm), as in ImpalaCNN
Preserves channel number and shape
"""
def __init__(self, inchan, scale=1, batch_norm=False):
super().__init__()
self.inchan = inchan
self.batch_norm = batch_norm
s = math.sqrt(scale)
self.conv0 = tu.NormedConv2d(self.inchan, self.inchan, 3, padding=1, scale=s)
self.conv1 = tu.NormedConv2d(self.inchan, self.inchan, 3, padding=1, scale=s)
if self.batch_norm:
self.bn0 = nn.BatchNorm2d(self.inchan)
self.bn1 = nn.BatchNorm2d(self.inchan)
def residual(self, x):
# inplace should be False for the first relu, so that it does not change the input,
# which will be used for skip connection.
# getattr is for backwards compatibility with loaded models
if getattr(self, "batch_norm", False):
x = self.bn0(x)
x = F.relu(x, inplace=False)
x = self.conv0(x)
if getattr(self, "batch_norm", False):
x = self.bn1(x)
x = F.relu(x, inplace=True)
x = self.conv1(x)
return x
def forward(self, x):
return x + self.residual(x)
class CnnDownStack(nn.Module):
"""
Downsampling stack from Impala CNN
"""
def __init__(self, inchan, nblock, outchan, scale=1, pool=True, **kwargs):
super().__init__()
self.inchan = inchan
self.outchan = outchan
self.pool = pool
self.firstconv = tu.NormedConv2d(inchan, outchan, 3, padding=1)
s = scale / math.sqrt(nblock)
self.blocks = nn.ModuleList(
[CnnBasicBlock(outchan, scale=s, **kwargs) for _ in range(nblock)]
)
def forward(self, x):
x = self.firstconv(x)
if getattr(self, "pool", True):
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
for block in self.blocks:
x = block(x)
return x
def output_shape(self, inshape):
c, h, w = inshape
assert c == self.inchan
if getattr(self, "pool", True):
return (self.outchan, (h + 1) // 2, (w + 1) // 2)
else:
return (self.outchan, h, w)
class ImpalaCNN(nn.Module):
name = "ImpalaCNN" # put it here to preserve pickle compat
def __init__(
self, inshape, chans, outsize, scale_ob, nblock, final_relu=True, **kwargs
):
super().__init__()
self.scale_ob = scale_ob
h, w, c = inshape
curshape = (c, h, w)
s = 1 / math.sqrt(len(chans)) # per stack scale
self.stacks = nn.ModuleList()
for outchan in chans:
stack = CnnDownStack(
curshape[0], nblock=nblock, outchan=outchan, scale=s, **kwargs
)
self.stacks.append(stack)
curshape = stack.output_shape(curshape)
self.dense = tu.NormedLinear(tu.intprod(curshape), outsize, scale=1.4)
self.outsize = outsize
self.final_relu = final_relu
def forward(self, x):
"""
Forward input through model, x should be of shape [B, T, *state_shape]
"""
x = x.to(dtype=th.float32) / self.scale_ob
b, t = x.shape[:-3]
x = x.reshape(b * t, *x.shape[-3:])
x = tu.transpose(x, "bhwc", "bchw")
x = tu.sequential(self.stacks, x, diag_name=self.name)
x = x.reshape(b, t, *x.shape[1:])
x = tu.flatten_image(x)
x = th.relu(x)
x = self.dense(x)
if self.final_relu:
x = th.relu(x)
return x
class ImpalaEncoder(Encoder):
def __init__(
self,
inshape,
outsize=256,
chans=(16, 32, 32),
scale_ob=255.0,
nblock=2,
**kwargs
):
codetype = TensorType(eltype=REAL, shape=(outsize,))
obtype = TensorType(eltype=REAL, shape=inshape)
super().__init__(codetype=codetype, obtype=obtype)
self.cnn = ImpalaCNN(
inshape=inshape,
chans=chans,
scale_ob=scale_ob,
nblock=nblock,
outsize=outsize,
**kwargs
)
def forward(self, x, first, state_in):
x = self.cnn(x)
return x, state_in
def initial_state(self, batchsize):
return tu.zeros(batchsize, 0)
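
# Minimal usage sketch, not part of the original module: the batch size,
# sequence length and 64x64x3 observation shape below are made-up example
# values. They follow the (H, W, C) inshape convention consumed by
# ImpalaCNN.forward above.
if __name__ == "__main__":
    enc = ImpalaEncoder(inshape=(64, 64, 3), outsize=256)
    obs = th.randint(0, 255, (8, 16, 64, 64, 3), dtype=th.uint8)  # (B, T, H, W, C)
    first = th.zeros(8, 16)                  # episode-start flags, unused by this encoder
    state = enc.initial_state(batchsize=8)   # Impala encoder carries no real state
    codes, state = enc(obs, first, state)
    print(codes.shape)                       # expected: torch.Size([8, 16, 256])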
|
the-stack_0_7131 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from urlparse import urlparse
import subprocess
import threading
from git_checkout import local_git_parsers
from libs.gitiles.git_repository import GitRepository
import script_util
_CHANGELOG_FORMAT_STRING = ('commit %H%n'
'author %an%n'
'author-mail %ae%n'
'author-time %ad%n%n'
'committer %cn%n'
'committer-mail %ce%n'
'committer-time %cd%n%n'
'--Message start--%n%B%n--Message end--%n')
_CHANGELOGS_FORMAT_STRING = ('**Changelog start**%%n%s' %
_CHANGELOG_FORMAT_STRING)
CHECKOUT_ROOT_DIR = os.path.join(os.path.expanduser('~'), '.local_checkouts')
def ConvertRemoteCommitToLocal(revision):
"""Converts remote commit from gitile to local git checkout revision."""
return 'HEAD' if revision == 'master' else revision
class LocalGitRepository(GitRepository):
"""Represents local checkout of git repository on chromium host.
Note, to automatically check out internal repos which you have access to,
follow the instructions in 'go/internal-repo-checkout-setup'.
"""
lock = threading.Lock()
  # Keep track of all the updated repos, so every repo only gets updated once.
_updated_repos = set()
def __init__(self, repo_url=None):
self._host = None
self._repo_path = None
self._repo_url = repo_url
if repo_url is not None:
parsed_url = urlparse(repo_url)
self._host = parsed_url.netloc
# Remove the / in the front of path.
self._repo_path = parsed_url.path[1:]
self._CloneOrUpdateRepoIfNeeded()
self.changelog_parser = local_git_parsers.GitChangeLogParser()
self.changelogs_parser = local_git_parsers.GitChangeLogsParser()
self.blame_parser = local_git_parsers.GitBlameParser()
self.diff_parser = local_git_parsers.GitDiffParser()
@classmethod
def Factory(cls): # pragma: no cover
"""Construct a factory for creating ``LocalGitRepository`` instances.
Returns:
A function from repo urls to ``LocalGitRepository`` instances. All
instances produced by the returned function are novel (i.e., newly
allocated).
"""
return lambda repo_url: cls(repo_url) # pylint: disable=W0108
@property
def repo_path(self):
return self._repo_path
@property
def real_repo_path(self):
"""Absolute path of the local repository."""
return os.path.join(CHECKOUT_ROOT_DIR, self._host, self.repo_path)
@property
def repo_url(self):
"""Url of remote repository which the local repo checks out from."""
return self._repo_url
def _CloneOrUpdateRepoIfNeeded(self):
"""Clones repo, or update it if it didn't got updated before."""
with LocalGitRepository.lock:
if self.repo_url in LocalGitRepository._updated_repos:
return
# Clone the repo if needed.
if not os.path.exists(self.real_repo_path):
try:
subprocess.check_call(['git', 'clone',
self.repo_url, self.real_repo_path])
except subprocess.CalledProcessError as e: # pragma: no cover.
raise Exception(
'Exception while cloning %s: %s' % (self.repo_url, e))
# Update repo if it's already cloned.
else:
try:
          # Suppress the output of cd and git pull.
with open(os.devnull, 'w') as null_handle:
subprocess.check_call(
'cd %s && git pull' % self.real_repo_path,
stdout=null_handle, stderr=null_handle, shell=True)
except subprocess.CalledProcessError as e: # pragma: no cover.
raise Exception(
'Exception while updating %s: %s' % (self.repo_path, e))
LocalGitRepository._updated_repos.add(self.repo_url)
def _GetFinalCommand(self, command, utc=False):
# Change local time to utc time.
if utc:
command = 'TZ=UTC %s --date=format-local:"%s"' % (
command, local_git_parsers.DATETIME_FORMAT)
return 'cd %s && %s' % (self.real_repo_path, command)
def GetChangeLog(self, revision):
"""Returns the change log of the given revision."""
command = ('git log --pretty=format:"%s" --max-count=1 --raw '
'--no-abbrev %s' % (_CHANGELOG_FORMAT_STRING,
ConvertRemoteCommitToLocal(revision)))
output = script_util.GetCommandOutput(self._GetFinalCommand(command, True))
return self.changelog_parser(output, self.repo_url)
def GetChangeLogs(self, start_revision, end_revision): # pylint: disable=W
"""Returns change log list in (start_revision, end_revision]."""
command = ('git log --pretty=format:"%s" --raw --no-abbrev %s' % (
_CHANGELOGS_FORMAT_STRING,
'%s..%s' % (ConvertRemoteCommitToLocal(start_revision),
ConvertRemoteCommitToLocal(end_revision))))
output = script_util.GetCommandOutput(self._GetFinalCommand(command, True))
return self.changelogs_parser(output, self.repo_url)
def GetChangeDiff(self, revision, path=None): # pylint: disable=W
"""Returns the diff of the given revision."""
command = ('git log --format="" --max-count=1 %s' %
ConvertRemoteCommitToLocal(revision))
if path:
command += ' -p %s' % path
output = script_util.GetCommandOutput(self._GetFinalCommand(command))
return self.diff_parser(output)
def GetBlame(self, path, revision):
"""Returns blame of the file at ``path`` of the given revision."""
command = 'git blame --incremental %s -- %s' % (
ConvertRemoteCommitToLocal(revision), path)
output = script_util.GetCommandOutput(self._GetFinalCommand(command))
return self.blame_parser(output, path, revision)
def GetSource(self, path, revision):
"""Returns source code of the file at ``path`` of the given revision."""
    # Check whether the requested file exists or not.
command = 'git show %s:%s' % (ConvertRemoteCommitToLocal(revision), path)
output = script_util.GetCommandOutput(self._GetFinalCommand(command))
return output
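
# Illustrative usage sketch, not part of the original module. The URL is an
# arbitrary example, and constructing LocalGitRepository clones or updates a
# checkout under ~/.local_checkouts, so the code is guarded behind __main__.
if __name__ == '__main__':
  repo = LocalGitRepository(
      'https://chromium.googlesource.com/chromium/src.git')
  # 'master' is mapped to the local HEAD by ConvertRemoteCommitToLocal.
  print(repo.GetChangeLog('master'))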
|
the-stack_0_7133 | from tornado.web import RequestHandler
from swampdragon.default_settings import SwampDragonSettings
from django.conf import settings as django_settings
from .same_origin import set_origin_cookie
def get_host():
host = django_settings.DRAGON_URL
if host.endswith('/'):
return host[:-1]
return host
class SettingsHandler(RequestHandler):
def set_default_headers(self):
self.set_header("Content-Type", "application/javascript")
set_origin_cookie(self)
def get(self, *args, **kwargs):
data = '''window.swampdragon_settings = {settings};
window.swampdragon_host = "{host}";
'''.format(**{
'settings': SwampDragonSettings().to_dict(),
'host': get_host()
})
self.write(data)
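
# Hedged example, not part of the original module: one way to expose this
# handler from a plain Tornado application. SwampDragon normally registers it
# in its own server setup; the '/settings.js' route name here is arbitrary.
def _example_app():
    from tornado.web import Application  # local import keeps the sketch self-contained
    return Application([
        (r'/settings.js', SettingsHandler),
    ])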
|
the-stack_0_7134 | import os
import ycm_core
from clang_helpers import PrepareClangFlags
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# These are the compilation flags that will be used in case there's no
# compilation database set.
flags = [
'-Wall',
'-std=c++11',
'-stdlib=libc++',
'-x',
'c++',
'-I',
'.',
'-isystem',
'/usr/lib/c++/v1'
]
if compilation_database_folder:
database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
database = None
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return flags
new_flags = []
make_next_absolute = False
path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith('/'):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[len(path_flag):]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
def FlagsForFile(filename):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = database.GetCompilationInfoForFile(filename)
final_flags = PrepareClangFlags(
MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_),
filename)
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
return {
'flags': final_flags,
'do_cache': True}
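
# Quick self-check sketch, not part of the stock .ycm_extra_conf.py: running
# this file directly prints the flags YCM would receive. The source path is
# made up for illustration.
if __name__ == '__main__':
    import pprint
    pprint.pprint(FlagsForFile('/tmp/example/main.cpp'))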
|
the-stack_0_7135 | from itertools import count
import pytest
import numpy as np
import astropy.units as u
from astropy._erfa import DJM0
from astropy.time import Time, TimeFormat
from astropy.time.utils import day_frac
class SpecificException(ValueError):
pass
@pytest.fixture
def custom_format_name():
for i in count():
if not i:
custom = f"custom_format_name"
else:
custom = f"custom_format_name_{i}"
if custom not in Time.FORMATS:
break
yield custom
Time.FORMATS.pop(custom, None)
def test_custom_time_format_set_jds_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
raise SpecificException
try:
Time(7.0, format=custom_format_name)
except ValueError as e:
assert hasattr(e, "__cause__") and isinstance(e.__cause__, SpecificException)
def test_custom_time_format_val_type_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def _check_val_type(self, val, val2):
raise SpecificException
try:
Time(7.0, format=custom_format_name)
except ValueError as e:
assert hasattr(e, "__cause__") and isinstance(e.__cause__, SpecificException)
def test_custom_time_format_value_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
raise SpecificException
t = Time.now()
with pytest.raises(SpecificException):
getattr(t, custom_format_name)
def test_custom_time_format_fine(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
return self.jd1 + self.jd2
t = Time.now()
getattr(t, custom_format_name)
t2 = Time(7, 9, format=custom_format_name)
getattr(t2, custom_format_name)
def test_custom_time_format_forgot_property(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
def value(self):
return self.jd1, self.jd2
t = Time.now()
with pytest.raises(AttributeError):
getattr(t, custom_format_name)
t.format = custom_format_name
with pytest.raises(AttributeError):
t.value
with pytest.raises(AttributeError):
Time(7, 9, format=custom_format_name).value
def test_custom_time_format_problematic_name():
assert "sort" not in Time.FORMATS, "problematic name in default FORMATS!"
assert hasattr(Time, "sort")
try:
class Custom(TimeFormat):
name = "sort"
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
return self.jd1, self.jd2
t = Time.now()
assert t.sort() == t, "bogus time format clobbers everyone's Time objects"
t.format = "sort"
if not isinstance(t.value, tuple):
pytest.xfail("No good way to detect that `sort` is invalid")
assert Time(7, 9, format="sort").value == (7, 9)
finally:
Time.FORMATS.pop("sort", None)
def test_mjd_longdouble_preserves_precision(custom_format_name):
class CustomMJD(TimeFormat):
name = custom_format_name
def _check_val_type(self, val, val2):
val = np.longdouble(val)
if val2 is not None:
raise ValueError("Only one value permitted")
return val, 0
def set_jds(self, val, val2):
mjd1 = np.float64(np.floor(val))
mjd2 = np.float64(val - mjd1)
self.jd1, self.jd2 = day_frac(mjd1 + DJM0, mjd2)
@property
def value(self):
mjd1, mjd2 = day_frac(self.jd1 - DJM0, self.jd2)
return np.longdouble(mjd1) + np.longdouble(mjd2)
m = 58000.0
t = Time(m, format=custom_format_name)
t2 = Time(m + 2 * m * np.finfo(np.longdouble).eps, format=custom_format_name)
assert t != t2
assert isinstance(getattr(t, custom_format_name), np.longdouble)
assert getattr(t, custom_format_name) != getattr(t2, custom_format_name)
def test_mjd_unit_validation():
with pytest.raises(u.UnitConversionError):
Time(58000 * u.m, format="mjd")
def test_mjd_unit_conversion():
assert Time(58000 * u.day, format="mjd") == Time(58000 * u.day, format="mjd")
assert Time(58000 * u.day, format="mjd") != Time(58000 * u.s, format="mjd")
assert Time(58000 * u.day, format="mjd") == Time(58000 * 86400 * u.s, format="mjd")
@pytest.mark.parametrize("f", ["mjd", "unix", "cxcsec"])
def test_existing_types_refuse_longdoubles(f):
t = np.longdouble(getattr(Time(58000, format="mjd"), f))
t2 = t + np.finfo(np.longdouble).eps * 2 * t
try:
tm = Time(np.longdouble(t), format=f)
except ValueError:
# Time processing makes it always ValueError not TypeError
return
else:
# accepts long doubles, better preserve accuracy!
assert Time(np.longdouble(t2), format=f) != tm
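
# Stand-alone sketch, not one of the tests above: the smallest complete
# TimeFormat subclass of the kind the tests exercise piecewise. The name
# "jd_sum_example" is made up, and defining the class registers it in
# Time.FORMATS, so it is kept behind __main__ to avoid affecting the tests.
if __name__ == "__main__":
    class JDSumExample(TimeFormat):
        name = "jd_sum_example"

        def set_jds(self, val, val2):
            self.jd1, self.jd2 = day_frac(val, val2)

        @property
        def value(self):
            return self.jd1 + self.jd2

    t = Time(2458000.0, 0.5, format="jd_sum_example")
    print(t.jd_sum_example)  # expected: 2458000.5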
|