filename | text
---|---
the-stack_0_26180
|
import tkinter as tk
from PIL import Image, ImageTk
class CameraPane(tk.Frame):
placeholder = Image.open("./resources/placeholder.jpg")
CAMERA_DIM = (160, 120)
def __init__(self, master, **kw):
super().__init__(master, **kw)
self.grid(row=0, column=0)
self._cam = None
self._debug = None
self.cm_label = tk.Label(self)
self.dg_label = tk.Label(self)
self.cm_label.grid(sticky=tk.N)
self.dg_label.grid(row=1, sticky=tk.N)
self.set(self.placeholder, self.placeholder)
def set(self, camera, debug=None):
camera = camera.copy()
        camera.thumbnail(self.CAMERA_DIM, Image.LANCZOS)  # LANCZOS is the current name for the removed ANTIALIAS alias
self._cam = ImageTk.PhotoImage(camera)
self.cm_label.configure(image=self._cam)
if debug is not None:
debug = debug.copy()
            debug.thumbnail(self.CAMERA_DIM, Image.LANCZOS)
self._debug = ImageTk.PhotoImage(debug)
self.dg_label.configure(image=self._debug)
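

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): embed CameraPane in a
    # Tk window and push a frame into it. The image path is the same placeholder
    # the class already assumes to exist.
    root = tk.Tk()
    pane = CameraPane(root)
    frame = Image.open("./resources/placeholder.jpg")
    pane.set(frame)          # update only the camera view
    pane.set(frame, frame)   # update both the camera and debug views
    root.mainloop()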
|
the-stack_0_26181
|
#!/usr/bin/env python
from setuptools import setup
import os
import glob
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
requirements = []
with open(os.path.join(this_directory, 'requirements.txt'), encoding='utf-8') as r:
    for line in r:
        requirements.append(line.strip())
def getFiles(path):
files = []
(root, dirNames, fileNames) = next(os.walk(path))
fileNames.sort()
dirFiles = []
for fileName in fileNames:
dirFiles.append(os.path.join(path, fileName))
files.append((path, dirFiles))
dirNames.sort()
for dirName in dirNames:
files.extend(getFiles(os.path.join(root, dirName)))
return files
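

# Illustration (hypothetical layout, not taken from this repository): for a tree
#   workitems/
#       metadata.json
#       caf/
#           01_workitem.json
# getFiles('workitems') would return data_files-style tuples such as
#   [('workitems', ['workitems/metadata.json']),
#    ('workitems/caf', ['workitems/caf/01_workitem.json'])]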
setup(
name='azbacklog',
author="Joshua Davis",
author_email="[email protected]",
url='https://github.com/Azure/Azure-Backlog-Generator',
version='0.1.12',
description='The Azure Backlog Generator (ABG) is designed to build backlogs for complex processes based on proven practices. The backlogs can be generated in either Azure DevOps or GitHub.',
long_description=long_description,
long_description_content_type='text/markdown',
package_dir={'': 'src'},
packages=[
'azbacklog',
'azbacklog.entities',
'azbacklog.helpers',
'azbacklog.services'
],
data_files=getFiles('workitems'),
install_requires=[
'pygithub'
],
extras_require={
'dev': requirements
},
    entry_points={
        'console_scripts': [
            'azbacklog = azbacklog.azbacklog:main'
        ]
    },
python_requires='>=3.6'
)
|
the-stack_0_26182
|
import os
import time
from glob import glob
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
class Runner():
def __init__(self, model_name, net, optim, torch_device, criterion, epochs, logger, save_dir, save_interval, scheduler, resume_file):
self.torch_device = torch_device
self.model_name = model_name
self.net = net
self.criterion = criterion
self.optim = optim
self.scheduler = scheduler
self.epochs = epochs
self.logger = logger
self.save_dir = save_dir
self.save_interval = save_interval
self.start_epoch = 0
self.best_metric = -1
self.best_epoch = -1
self.resume(resume_file)
def save(self, epoch, filename="train"):
"""Save current epoch model
Save Elements:
model_type : model name
start_epoch : current epoch
network : network parameters
optimizer: optimizer parameters
best_metric : current best score
Parameters:
epoch : current epoch
filename : model save file name
"""
torch.save({"model_type": self.model_name,
"start_epoch": epoch + 1,
"network": self.net.state_dict(),
"optimizer": self.optim.state_dict(),
"best_metric": self.best_metric
}, self.save_dir + "/%s.pth.tar" % (filename))
print("Model saved %d epoch" % (epoch))
def resume(self, filename=""):
""" Model load. same with save"""
if filename == "":
# load last epoch model
filenames = sorted(glob(self.save_dir + "/*.pth.tar"))
if len(filenames) == 0:
print("Not resume")
return
else:
filename = os.path.basename(filenames[-1])
file_path = self.save_dir + "/" + filename
if os.path.exists(file_path):
print("Load %s to %s File" % (self.save_dir, filename))
ckpoint = torch.load(file_path)
if ckpoint["model_type"] != self.model_name:
raise ValueError("Ckpoint Model Type is %s" %
(ckpoint["model_type"]))
self.net.load_state_dict(ckpoint['network'])
self.optim.load_state_dict(ckpoint['optimizer'])
self.start_epoch = ckpoint['start_epoch']
self.best_metric = ckpoint["best_metric"]
print("Load Model Type : %s, epoch : %d acc : %f" %
(ckpoint["model_type"], self.start_epoch, self.best_metric))
else:
print("Resume Failed, not exists file")
def train(self, train_loader, val_loader=None):
print("\nStart Train len :", len(train_loader.dataset))
self.net.train()
for epoch in range(self.start_epoch, self.epochs):
for i, (input_, target_) in enumerate(tqdm(train_loader)):
input_ = input_.to(self.torch_device)
target_ = target_.to(self.torch_device)
                out = self.net(input_)
                loss = self.criterion(out, target_)
                self.optim.zero_grad()
                loss.backward()
                self.optim.step()
                # Step the per-iteration LR scheduler after the optimizer update
                # (the ordering required since PyTorch 1.1).
                if self.scheduler:
                    self.scheduler.step()
if (i % 50) == 0:
self.logger.log_write("train", epoch=epoch, loss=loss.item())
acc = self._get_acc(train_loader)
self.logger.log_write("train", epoch=epoch, acc=acc)
if val_loader is not None:
self.valid(epoch, val_loader)
def _get_acc(self, loader):
self.net.eval()
correct = 0
with torch.no_grad():
            for input_, target_ in loader:
                input_ = input_.to(self.torch_device)
                target_ = target_.to(self.torch_device)  # keep labels on the same device as predictions
                out = self.net(input_)
                out = F.softmax(out, dim=1)
                _, idx = out.max(dim=1)
                correct += (target_ == idx).sum().item()
self.net.train()
return correct / len(loader.dataset)
    def valid(self, epoch, val_loader):
        acc = self._get_acc(val_loader)
        self.logger.log_write("valid", epoch=epoch, acc=acc)
        # Save when a new best accuracy is reached, and at the regular save interval.
        is_best = acc > self.best_metric
        if is_best:
            self.best_metric = acc
            self.best_epoch = epoch
        if is_best or (epoch + 1) % self.save_interval == 0:
            self.save(epoch, "epoch[%05d]_acc[%.4f]" % (epoch, acc))
def test(self, train_loader, val_loader):
print("\n Start Test")
self.resume()
train_acc = self._get_acc(train_loader)
valid_acc = self._get_acc(val_loader)
self.logger.log_write("test", fname="test", train_acc=train_acc, valid_acc=valid_acc)
return train_acc, valid_acc
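

if __name__ == "__main__":
    # Hedged usage sketch (not from the original project): wire Runner to a tiny
    # model and synthetic data. _PrintLogger is a stand-in that only implements
    # the log_write interface used above; all names here are illustrative.
    from torch.utils.data import DataLoader, TensorDataset

    class _PrintLogger:
        def log_write(self, split, **kwargs):
            print(split, kwargs)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = nn.Linear(4, 2).to(device)
    dataset = TensorDataset(torch.randn(64, 4), torch.randint(0, 2, (64,)))
    loader = DataLoader(dataset, batch_size=8)
    runner = Runner("linear", net, torch.optim.SGD(net.parameters(), lr=0.1),
                    device, nn.CrossEntropyLoss(), epochs=1, logger=_PrintLogger(),
                    save_dir=".", save_interval=1, scheduler=None, resume_file="")
    # Writes a checkpoint file into the current directory after validation.
    runner.train(loader, loader)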
|
the-stack_0_26185
|
"""This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
    -- <__getitem__>: return a data point from the dataset.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
from torch.utils import data
def find_dataset_using_name(dataset_name):
"""Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
"""
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
dataset_class = find_dataset_using_name(opt.dataset_mode)
dataset = dataset_class(opt)
return dataset
def create_dataloader(opt, rank, dataset):
data_loader = CustomDatasetDataLoader(opt,rank,dataset)
dataset = data_loader.load_data()
return dataset
def collate_fn(batch):
batch = list(filter(lambda x: x is not None, batch))
if len(batch) > 0:
return torch.utils.data.dataloader.default_collate(batch)
else:
return None
class CustomDatasetDataLoader():
"""Wrapper class of Dataset class that performs multi-threaded data loading"""
    def __init__(self, opt, rank, dataset):
"""Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
"""
self.opt = opt
self.dataset = dataset
        if rank == 0:
            print("dataset [%s] was created" % type(self.dataset).__name__)
        if len(opt.gpu_ids) > 1:
            world_size = len(opt.gpu_ids)
            sampler = data.distributed.DistributedSampler(
                self.dataset,
                num_replicas=world_size,
                rank=rank,
                shuffle=not opt.serial_batches)
            shuffle = False
        else:
            sampler = None
            shuffle = not opt.serial_batches
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            sampler=sampler,
            shuffle=shuffle,
            num_workers=int(opt.num_threads),
            collate_fn=collate_fn)
def load_data(self):
return self
def __len__(self):
"""Return the number of data in the dataset"""
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
"""Return a batch of data"""
for i, data in enumerate(self.dataloader):
if data is None:
continue
if i * self.opt.batch_size >= self.opt.max_dataset_size:
break
yield data
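

# Hedged usage sketch (assumes an `opt` namespace providing the attributes used
# above: dataset_mode, gpu_ids, serial_batches, batch_size, num_threads and
# max_dataset_size):
#
#     dataset = create_dataset(opt)
#     loader = create_dataloader(opt, rank=0, dataset=dataset)
#     for batch in loader:
#         ...  # feed `batch` to the model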
|
the-stack_0_26187
|
"""Optimize a 3-qubit circuit to be a toffoli gate."""
from __future__ import annotations
import logging
import numpy as np
from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import VariableUnitaryGate
from bqskit.ir.opt.cost import HilbertSchmidtCostGenerator
from bqskit.qis.unitary import UnitaryMatrix
# The next line starts bqskit's logger.
logging.getLogger('bqskit').setLevel(logging.INFO)
# We will optimize towards the toffoli unitary.
toffoli = np.array([
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
])
toffoli = UnitaryMatrix(toffoli)
# Start with the circuit structure
circuit = Circuit(3)
circuit.append_gate(VariableUnitaryGate(2), [1, 2])
circuit.append_gate(VariableUnitaryGate(2), [0, 2])
circuit.append_gate(VariableUnitaryGate(2), [1, 2])
circuit.append_gate(VariableUnitaryGate(2), [0, 2])
circuit.append_gate(VariableUnitaryGate(2), [0, 1])
# Instantiate the circuit template with qfactor
circuit.instantiate(
toffoli,
method='qfactor',
diff_tol_a=1e-12, # Stopping criteria for distance change
diff_tol_r=1e-6, # Relative criteria for distance change
dist_tol=1e-12, # Stopping criteria for distance
max_iters=100000, # Maximum number of iterations
min_iters=1000, # Minimum number of iterations
slowdown_factor=0, # Larger numbers slowdown optimization
# to avoid local minima
)
# Calculate and print final distance
dist = HilbertSchmidtCostGenerator().calc_cost(circuit, toffoli)
print('Final Distance: ', dist)
# If you would like to convert the unitary operations to native gates,
# you should use the KAK decomposition for 2 qubit unitaries, or
# qsearch or qfast for 3+ qubit unitaries.
|
the-stack_0_26188
|
import grpc
import HelloDroidTLS_pb2
import HelloDroidTLS_pb2_grpc
with open('server.crt', 'rb') as f:
trusted_certs = f.read()
creds = grpc.ssl_channel_credentials(root_certificates=trusted_certs)
channel = grpc.secure_channel('localhost:8443', creds)
stub = HelloDroidTLS_pb2_grpc.GreeterStub(channel)
response = stub.SayHello(HelloDroidTLS_pb2.HelloRequest(name='yourname'))
print("Greeter client received: " + response.message)
|
the-stack_0_26189
|
# The MIT License (MIT)
#
# Copyright (c) 2020 Kevin Matocha
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`kmatch98_textmap`
================================================================================
Text graphics handling for CircuitPython, including text boxes
* Author(s): Kevin Matocha
Implementation Notes
--------------------
**Hardware:**
.. todo:: Add links to any specific hardware product page(s), or category page(s). Use unordered list & hyperlink rST
inline format: "* `Link Text <url>`_"
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
.. todo:: Uncomment or remove the Bus Device and/or the Register library dependencies based on the library's use of either.
# * Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
# * Adafruit's Register library: https://github.com/adafruit/Adafruit_CircuitPython_Register
"""
# imports
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/kmatch98/CircuitPython_textMap.git"
def bounding_box(text, font, lineSpacing, scale=1):
    # bounding_box - determines the bounding box size around the given text.
    # It can be used to calculate whether the text will fit within a region,
    # for example to determine character-wrapping or word-wrapping for a
    # text terminal box, prior to actually printing the text into the bitmap.
#
# Note: Scale is not implemented at this time
boxHeight = boxWidth = 0
fontHeight = font.get_glyph(ord("M")).height
for char in text:
myGlyph = font.get_glyph(ord(char))
width = myGlyph.width
height = myGlyph.height
dx = myGlyph.dx
dy = myGlyph.dy
shift_x = myGlyph.shift_x
        shift_y = myGlyph.shift_y
# Not working yet***
# This offset is used to match the label.py function from Adafruit_Display_Text library
# y_offset = int(
# (
# self._font.get_glyph(ord("M")).height
# - new_text.count("\n") * self.height * self.line_spacing
# )
# / 2 )
# yOffset = int( (fontHeight-height*lineSpacing)/2 )
yOffset = fontHeight - height
boxWidth = boxWidth + shift_x
boxHeight = max(boxHeight, height - dy + yOffset)
return (boxWidth, boxHeight)
def placeText(
bitmap, text, font, lineSpacing, xPosition, yPosition, paletteIndex=1, scale=1
):
# placeText - Writes text into a bitmap at the specified location.
#
# (xPosition, yPosition) correspond to upper left corner of the height of the 'M' glyph
# To Do: Add anchor positions, and adjust the default baseline position to match
# the current "label" function
# Verify paletteIndex is working properly with * operator, especially if accommodating multicolored fonts
#
# Note: Scale is not implemented at this time
import terminalio
fontHeight = font.get_glyph(ord("M")).height
if font == terminalio.FONT:
print("terminalio.FONT Found - BuiltinFont not handled by this function")
# handle this differently
else:
bitmapWidth = bitmap.width
bitmapHeight = bitmap.height
for char in text:
myGlyph = font.get_glyph(ord(char))
width = myGlyph.width
height = myGlyph.height
# print('glyph width: {}, height: {}'.format(width, height))
dx = myGlyph.dx
dy = myGlyph.dy
shift_x = myGlyph.shift_x
            shift_y = myGlyph.shift_y
# Not working yet***
# This offset is used to match the label.py function from Adafruit_Display_Text library
# y_offset = int(
# (
# self._font.get_glyph(ord("M")).height
# - new_text.count("\n") * self.height * self.line_spacing
# )
# / 2 )
# position_y = y - glyph.height - glyph.dy + y_offset
# yOffset = int( (fontHeight-height*lineSpacing)/2 )
yOffset = fontHeight - height
for y in range(height):
for x in range(width):
xPlacement = x + xPosition + dx
# yPlacement=y+yPosition-height-dy+yOffset
yPlacement = y + yPosition - dy + yOffset
if (
(xPlacement >= 0)
and (yPlacement >= 0)
and (xPlacement < bitmapWidth)
and (yPlacement < bitmapHeight)
):
# print('x: {}, y: {}, value: {}'.format(xPlacement, yPlacement, myGlyph.bitmap[x,y]))
bitmap[xPlacement, yPlacement] = (
myGlyph.bitmap[x, y] * paletteIndex
)
xPosition = xPosition + shift_x
return (xPosition, yPosition)
class textBox:
def __init__(
self, text, width, height, backgroundColor, textColor, font, lineSpacing=1.25
):
import displayio
# import terminalio
self._text = text # text on the display
self._font = font
self._lineSpacing = lineSpacing
self._fontHeight = self._font.get_glyph(ord("M")).height
self._width = width # in pixels
self._height = height # in pixels
self._backgroundColor = backgroundColor
self._textColor = textColor
self.bitmap = displayio.Bitmap(self._width, self._height, 2)
self.palette = displayio.Palette(2)
self.palette[0] = self._backgroundColor
self.palette[1] = self._textColor
self._cursorX = 1 # controls insertion point for text
self._cursorY = 1
self._startX = self._cursorX # the left column start position
self._startY = self._cursorY # the top row start position
self.addText(self._text)
import gc
gc.collect()
def addText(self, newText): # add text to a textBox
# print('startX: {}'.format(self._cursorX) )
import gc
for char in newText:
(charWidth, charHeight) = bounding_box(char, self._font, self._lineSpacing)
if (self._cursorX + charWidth >= self._width - 1) or (char == "\n"):
# make a newline
self.setCursor(
self._startX,
self._cursorY + int(self._fontHeight * self._lineSpacing),
)
(newX, newY) = placeText(
self.bitmap,
char,
self._font,
self._lineSpacing,
self._cursorX,
self._cursorY,
)
# print('newX: {}'.format(newX) )
self.setCursor(newX, newY)
self._text = self._text + newText
gc.collect()
return self.getCursor() # return tuple: (self._cursorX , self._cursorY)
def setCursor(self, newCursorX, newCursorY): # set cursor position
self._cursorX = newCursorX
self._cursorY = newCursorY
def getCursor(self): # get current cursor position, tuple
return (self._cursorX, self._cursorY)
def clearBitmap(self):
import gc
for x in range(self._width):
for y in range(self._height):
self.bitmap[x, y] = 0
gc.collect()
self.setCursor(self._startX, self._startY)
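

# Hedged usage sketch (CircuitPython; assumes a board with a built-in display and
# a BDF font on the filesystem; the paths and sizes below are illustrative):
#
#     import board
#     import displayio
#     from adafruit_bitmap_font import bitmap_font
#
#     font = bitmap_font.load_font("/fonts/Helvetica-Bold-16.bdf")
#     box = textBox("Hello", 160, 64, backgroundColor=0x000000,
#                   textColor=0xFFFFFF, font=font)
#     group = displayio.Group()
#     group.append(displayio.TileGrid(box.bitmap, pixel_shader=box.palette))
#     board.DISPLAY.show(group)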
|
the-stack_0_26191
|
from numpy import inf, nan
from sklearn.linear_model import SGDRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class SGDRegressorImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for SGDRegressor Linear model fitted by minimizing a regularized empirical loss with SGD",
"allOf": [
{
"type": "object",
"required": [
"loss",
"penalty",
"alpha",
"l1_ratio",
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"verbose",
"epsilon",
"random_state",
"learning_rate",
"eta0",
"power_t",
"early_stopping",
"validation_fraction",
"n_iter_no_change",
"warm_start",
"average",
],
"relevantToOptimizer": [
"loss",
"penalty",
"alpha",
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"epsilon",
"learning_rate",
"eta0",
],
"additionalProperties": False,
"properties": {
"loss": {
"enum": [
"epsilon_insensitive",
"huber",
"squared_epsilon_insensitive",
"squared_loss",
],
"default": "squared_loss",
"description": "The loss function to be used",
},
"penalty": {
"XXX TODO XXX": "str, 'none', 'l2', 'l1', or 'elasticnet'",
"description": "The penalty (aka regularization term) to be used",
"enum": ["elasticnet", "l1", "l2", "none"],
"default": "l2",
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.0001,
"description": "Constant that multiplies the regularization term",
},
"l1_ratio": {
"type": "number",
"default": 0.15,
"description": "The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether the intercept should be estimated or not",
},
"max_iter": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "The maximum number of passes over the training data (aka epochs)",
},
"tol": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"distribution": "loguniform",
},
{"enum": [None]},
],
"default": None,
"description": "The stopping criterion",
},
"shuffle": {
"type": "boolean",
"default": True,
"description": "Whether or not the training data should be shuffled after each epoch",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "The verbosity level.",
},
"epsilon": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 1.35,
"distribution": "loguniform",
"default": 0.1,
"description": "Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator to use when shuffling the data",
},
"learning_rate": {
"enum": ["adaptive", "constant", "optimal", "invscaling"],
"default": "invscaling",
"description": "The learning rate schedule: 'constant': eta = eta0 'optimal': eta = 1.0 / (alpha * (t + t0)) where t0 is chosen by a heuristic proposed by Leon Bottou",
},
"eta0": {
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.01,
"description": "The initial learning rate for the 'constant', 'invscaling' or 'adaptive' schedules",
},
"power_t": {
"type": "number",
"default": 0.25,
"description": "The exponent for inverse scaling learning rate [default 0.5].",
},
"early_stopping": {
"type": "boolean",
"default": False,
"description": "Whether to use early stopping to terminate training when validation score is not improving",
},
"validation_fraction": {
"type": "number",
"default": 0.1,
"description": "The proportion of training data to set aside as validation set for early stopping",
},
"n_iter_no_change": {
"type": "integer",
"default": 5,
"description": "Number of iterations with no improvement to wait before early stopping",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution",
},
"average": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute",
},
},
},
{
"XXX TODO XXX": "Parameter: max_iter > only impacts the behavior in the fit method, and not the partial_fit"
},
{
"description": "epsilon, only if loss is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'",
"anyOf": [
{"type": "object", "properties": {"epsilon": {"enum": [0.1]}}},
{
"type": "object",
"properties": {
"loss": {
"enum": [
"huber",
"epsilon_insensitive",
"squared_epsilon_insensitive",
]
}
},
},
],
},
{
"description": "validation_fraction, only used if early_stopping is true",
"anyOf": [
{
"type": "object",
"properties": {"validation_fraction": {"enum": [0.1]}},
},
{"type": "object", "properties": {"early_stopping": {"enum": [True]}}},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model with Stochastic Gradient Descent.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values",
},
"coef_init": {
"type": "array",
"items": {"type": "number"},
"description": "The initial coefficients to warm-start the optimization.",
},
"intercept_init": {
"type": "array",
"items": {"type": "number"},
"description": "The initial intercept to warm-start the optimization.",
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "Weights applied to individual samples (1",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted target values per element in X.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.SGDRegressor#sklearn-linear_model-sgdregressor",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
set_docstrings(SGDRegressorImpl, _combined_schemas)
SGDRegressor = make_operator(SGDRegressorImpl, _combined_schemas)
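

# Hedged usage sketch (assumes scikit-learn and lale are installed; the data is
# synthetic and purely illustrative):
#
#     import numpy as np
#     X, y = np.random.rand(20, 3), np.random.rand(20)
#     trained = SGDRegressor(max_iter=1000, tol=1e-3).fit(X, y)
#     predictions = trained.predict(X)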
|
the-stack_0_26193
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""object_detection_evaluation module.
ObjectDetectionEvaluation is a class which manages ground truth information of an
object detection dataset, and computes frequently used detection metrics such as
Precision, Recall, and CorLoc of the provided detection results.
It supports the following operations:
1) Add ground truth information of images sequentially.
2) Add detection result of images sequentially.
3) Evaluate detection metrics on already inserted detection results.
4) Write evaluation result into a pickle file for future processing or
visualization.
Note: This module operates on numpy boxes and box lists.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import collections
import logging
import unicodedata
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.utils import label_map_util
from object_detection.utils import metrics
from object_detection.utils import per_image_evaluation
class DetectionEvaluator(six.with_metaclass(ABCMeta, object)):
"""Interface for object detection evalution classes.
Example usage of the Evaluator:
------------------------------
evaluator = DetectionEvaluator(categories)
# Detections and groundtruth for image 1.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
# Detections and groundtruth for image 2.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
metrics_dict = evaluator.evaluate()
"""
def __init__(self, categories):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
"""
self._categories = categories
def observe_result_dict_for_single_example(self, eval_dict):
"""Observes an evaluation result dict for a single example.
When executing eagerly, once all observations have been observed by this
method you can use `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
raise NotImplementedError('Not implemented for this evaluator!')
@abstractmethod
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary of groundtruth numpy arrays required for
evaluations.
"""
pass
@abstractmethod
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary of detection numpy arrays required for
evaluation.
"""
pass
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`tf.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
pass
@abstractmethod
def evaluate(self):
"""Evaluates detections and returns a dictionary of metrics."""
pass
@abstractmethod
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
pass
class ObjectDetectionEvaluator(DetectionEvaluator):
"""A class to evaluate detections."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
recall_lower_bound=0.0,
recall_upper_bound=1.0,
evaluate_corlocs=False,
evaluate_precision_recall=False,
metric_prefix=None,
use_weighted_mean_ap=False,
evaluate_masks=False,
group_of_weight=0.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
recall_lower_bound: lower bound of recall operating area.
recall_upper_bound: upper bound of recall operating area.
evaluate_corlocs: (optional) boolean which determines if corloc scores are
to be returned or not.
evaluate_precision_recall: (optional) boolean which determines if
precision and recall values are to be returned or not.
metric_prefix: (optional) string prefix for metric name; if None, no
prefix is used.
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
evaluate_masks: If False, evaluation will be performed based on boxes. If
True, mask evaluation will be performed instead.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
Raises:
ValueError: If the category ids are not 1-indexed.
"""
super(ObjectDetectionEvaluator, self).__init__(categories)
self._num_classes = max([cat['id'] for cat in categories])
if min(cat['id'] for cat in categories) < 1:
raise ValueError('Classes should be 1-indexed.')
self._matching_iou_threshold = matching_iou_threshold
self._recall_lower_bound = recall_lower_bound
self._recall_upper_bound = recall_upper_bound
self._use_weighted_mean_ap = use_weighted_mean_ap
self._label_id_offset = 1
self._evaluate_masks = evaluate_masks
self._group_of_weight = group_of_weight
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
recall_lower_bound=self._recall_lower_bound,
recall_upper_bound=self._recall_upper_bound,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset,
group_of_weight=self._group_of_weight)
self._image_ids = set([])
self._evaluate_corlocs = evaluate_corlocs
self._evaluate_precision_recall = evaluate_precision_recall
self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_difficult,
standard_fields.InputDataFields.groundtruth_instance_masks,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
standard_fields.DetectionResultFields.detection_masks
])
self._build_metric_names()
def get_internal_state(self):
"""Returns internal state and image ids that lead to the state.
    Note that only evaluation results will be returned (e.g. not raw predictions
    or groundtruth).
"""
return self._evaluation.get_internal_state(), self._image_ids
def merge_internal_state(self, image_ids, state_tuple):
"""Merges internal state with the existing state of evaluation.
If image_id is already seen by evaluator, an error will be thrown.
Args:
image_ids: list of images whose state is stored in the tuple.
state_tuple: state.
"""
for image_id in image_ids:
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
self._evaluation.merge_internal_state(state_tuple)
def _build_metric_names(self):
"""Builds a list with metric names."""
if self._recall_lower_bound > 0.0 or self._recall_upper_bound < 1.0:
self._metric_names = [
self._metric_prefix +
'Precision/mAP@{}IOU@[{:.1f},{:.1f}]Recall'.format(
self._matching_iou_threshold, self._recall_lower_bound,
self._recall_upper_bound)
]
else:
self._metric_names = [
self._metric_prefix +
'Precision/mAP@{}IOU'.format(self._matching_iou_threshold)
]
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix +
'Precision/meanCorLoc@{}IOU'.format(self._matching_iou_threshold))
category_index = label_map_util.create_category_index(self._categories)
for idx in range(self._num_classes):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
try:
category_name = six.text_type(category_name, 'utf-8')
except TypeError:
pass
category_name = unicodedata.normalize('NFKD', category_name)
if six.PY2:
category_name = category_name.encode('ascii', 'ignore')
self._metric_names.append(
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix +
'PerformanceByCategory/CorLoc@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_difficult: Optional length M
numpy boolean array denoting whether a ground truth box is a difficult
instance or not. This field is optional to support the case that no
boxes are difficult.
standard_fields.InputDataFields.groundtruth_instance_masks: Optional
numpy array of shape [num_boxes, height, width] with values in {0, 1}.
Raises:
ValueError: On adding groundtruth for an image more than once. Will also
raise error if instance masks are not in groundtruth dictionary.
"""
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
    # Use the difficult flag from groundtruth_dict when the key is present and
    # the array is non-empty (or when the image has no groundtruth boxes at all);
    # otherwise fall back to None.
if (standard_fields.InputDataFields.groundtruth_difficult in six.viewkeys(
groundtruth_dict) and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_difficult]
.size or not groundtruth_classes.size)):
groundtruth_difficult = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_difficult]
else:
groundtruth_difficult = None
if not len(self._image_ids) % 1000:
logging.warning(
'image %s does not have groundtruth difficult flag specified',
image_id)
groundtruth_masks = None
if self._evaluate_masks:
if (standard_fields.InputDataFields.groundtruth_instance_masks not in
groundtruth_dict):
raise ValueError('Instance masks not in groundtruth dictionary.')
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
self._evaluation.add_single_ground_truth_image_info(
image_key=image_id,
groundtruth_boxes=groundtruth_dict[
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_class_labels=groundtruth_classes,
groundtruth_is_difficult_list=groundtruth_difficult,
groundtruth_masks=groundtruth_masks)
self._image_ids.update([image_id])
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
standard_fields.DetectionResultFields.detection_masks: uint8 numpy array
of shape [num_boxes, height, width] containing `num_boxes` masks of
values ranging between 0 and 1.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
detection_masks = None
if self._evaluate_masks:
if (standard_fields.DetectionResultFields.detection_masks not in
detections_dict):
raise ValueError('Detection masks not in detections dictionary.')
detection_masks = detections_dict[
standard_fields.DetectionResultFields.detection_masks]
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detections_dict[
standard_fields.DetectionResultFields.detection_boxes],
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores],
detected_class_labels=detection_classes,
detected_masks=detection_masks)
def evaluate(self):
"""Compute evaluation result.
Returns:
A dictionary of metrics with the following fields -
1. summary_metrics:
'<prefix if not empty>_Precision/mAP@<matching_iou_threshold>IOU': mean
average precision at the specified IOU threshold.
2. per_category_ap: category specific results with keys of the form
'<prefix if not empty>_PerformanceByCategory/
mAP@<matching_iou_threshold>IOU/category'.
"""
(per_class_ap, mean_ap, per_class_precision, per_class_recall,
per_class_corloc, mean_corloc) = (
self._evaluation.evaluate())
pascal_metrics = {self._metric_names[0]: mean_ap}
if self._evaluate_corlocs:
pascal_metrics[self._metric_names[1]] = mean_corloc
category_index = label_map_util.create_category_index(self._categories)
for idx in range(per_class_ap.size):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
try:
category_name = str(category_name, 'utf-8')
except TypeError:
pass
category_name = unicodedata.normalize('NFKD', category_name)
if six.PY2:
category_name = category_name.encode('ascii', 'ignore')
display_name = (
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_ap[idx]
# Optionally add precision and recall values
if self._evaluate_precision_recall:
display_name = (
self._metric_prefix +
'PerformanceByCategory/Precision@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_precision[idx]
display_name = (
self._metric_prefix +
'PerformanceByCategory/Recall@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_recall[idx]
        # Optionally add CorLoc metrics.
if self._evaluate_corlocs:
display_name = (
self._metric_prefix +
'PerformanceByCategory/CorLoc@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_corloc[idx]
return pascal_metrics
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset)
self._image_ids.clear()
def add_eval_dict(self, eval_dict):
"""Observes an evaluation result dict for a single example.
When executing eagerly, once all observations have been observed by this
method you can use `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
# remove unexpected fields
eval_dict_filtered = dict()
for key, value in eval_dict.items():
if key in self._expected_keys:
eval_dict_filtered[key] = value
eval_dict_keys = list(eval_dict_filtered.keys())
def update_op(image_id, *eval_dict_batched_as_list):
"""Update operation that adds batch of images to ObjectDetectionEvaluator.
Args:
image_id: image id (single id or an array)
*eval_dict_batched_as_list: the values of the dictionary of tensors.
"""
if np.isscalar(image_id):
single_example_dict = dict(
zip(eval_dict_keys, eval_dict_batched_as_list))
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
else:
for unzipped_tuple in zip(*eval_dict_batched_as_list):
single_example_dict = dict(zip(eval_dict_keys, unzipped_tuple))
image_id = single_example_dict[standard_fields.InputDataFields.key]
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
args = [eval_dict_filtered[standard_fields.InputDataFields.key]]
args.extend(six.itervalues(eval_dict_filtered))
return tf.py_func(update_op, args, [])
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`tf.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example(). It must contain
standard_fields.InputDataFields.key.
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
update_op = self.add_eval_dict(eval_dict)
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[self._metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {self._metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in self._metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalBoxes',
use_weighted_mean_ap=False)
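

# Hedged usage sketch (synthetic boxes, illustrative only): evaluating a single
# image with PASCAL metrics using the fields expected by the evaluator.
#
#     categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
#     evaluator = PascalDetectionEvaluator(categories)
#     evaluator.add_single_ground_truth_image_info(
#         'image1',
#         {standard_fields.InputDataFields.groundtruth_boxes:
#              np.array([[10., 10., 100., 100.]], dtype=np.float32),
#          standard_fields.InputDataFields.groundtruth_classes:
#              np.array([1], dtype=np.int32)})
#     evaluator.add_single_detected_image_info(
#         'image1',
#         {standard_fields.DetectionResultFields.detection_boxes:
#              np.array([[12., 12., 98., 98.]], dtype=np.float32),
#          standard_fields.DetectionResultFields.detection_scores:
#              np.array([0.9], dtype=np.float32),
#          standard_fields.DetectionResultFields.detection_classes:
#              np.array([1], dtype=np.int32)})
#     metrics = evaluator.evaluate()
#     # e.g. metrics['PascalBoxes_Precision/mAP@0.5IOU'], plus per-category AP keys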
class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using weighted PASCAL metrics.
Weighted PASCAL metrics computes the mean average precision as the average
precision given the scores and tp_fp_labels of all classes. In comparison,
PASCAL metrics computes the mean average precision as the mean of the
per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
same as the average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalBoxes',
use_weighted_mean_ap=True)
class PrecisionAtRecallDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using precision@recall metrics."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
recall_lower_bound=0.0,
recall_upper_bound=1.0):
super(PrecisionAtRecallDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
recall_lower_bound=recall_lower_bound,
recall_upper_bound=recall_upper_bound,
evaluate_corlocs=False,
metric_prefix='PrecisionAtRecallBoxes',
use_weighted_mean_ap=False)
class PascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalMasks',
use_weighted_mean_ap=False,
evaluate_masks=True)
class WeightedPascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using weighted PASCAL metrics.
Weighted PASCAL metrics computes the mean average precision as the average
precision given the scores and tp_fp_labels of all classes. In comparison,
PASCAL metrics computes the mean average precision as the mean of the
per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
same as the average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalMasks',
use_weighted_mean_ap=True,
evaluate_masks=True)
class OpenImagesDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using Open Images V2 metrics.
  Open Images V2 introduces the group_of type of bounding boxes, and this metric
  handles those boxes appropriately.
"""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_masks=False,
evaluate_corlocs=False,
metric_prefix='OpenImagesV2',
group_of_weight=0.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_masks: if True, evaluator evaluates masks.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
metric_prefix: Prefix name of the metric.
group_of_weight: Weight of the group-of bounding box. If set to 0 (default
for Open Images V2 detection protocol), detections of the correct class
within a group-of box are ignored. If weight is > 0, then if at least
one detection falls within a group-of box with matching_iou_threshold,
weight group_of_weight is added to true positives. Consequently, if no
detection falls within a group-of box, weight group_of_weight is added
to false negatives.
"""
super(OpenImagesDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_corlocs,
metric_prefix=metric_prefix,
group_of_weight=group_of_weight,
evaluate_masks=evaluate_masks)
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_group_of,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
])
if evaluate_masks:
self._expected_keys.add(
standard_fields.InputDataFields.groundtruth_instance_masks)
self._expected_keys.add(
standard_fields.DetectionResultFields.detection_masks)
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_group_of: Optional length M
numpy boolean array denoting whether a groundtruth box contains a
group of instances.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
    # Use the group_of flag from groundtruth_dict when the key is present and
    # the array is non-empty (or when the image has no groundtruth boxes at all);
    # otherwise fall back to None.
if (standard_fields.InputDataFields.groundtruth_group_of in six.viewkeys(
groundtruth_dict) and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_group_of]
.size or not groundtruth_classes.size)):
groundtruth_group_of = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_group_of]
else:
groundtruth_group_of = None
if not len(self._image_ids) % 1000:
logging.warning(
'image %s does not have groundtruth group_of flag specified',
image_id)
if self._evaluate_masks:
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
else:
groundtruth_masks = None
self._evaluation.add_single_ground_truth_image_info(
image_id,
groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_classes,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=groundtruth_group_of,
groundtruth_masks=groundtruth_masks)
self._image_ids.update([image_id])
class OpenImagesChallengeEvaluator(OpenImagesDetectionEvaluator):
"""A class implements Open Images Challenge metrics.
Both Detection and Instance Segmentation evaluation metrics are implemented.
Open Images Challenge Detection metric has two major changes in comparison
with Open Images V2 detection metric:
- a custom weight might be specified for detecting an object contained in
a group-of box.
- verified image-level labels should be explicitelly provided for
evaluation: in case in image has neither positive nor negative image level
label of class c, all detections of this class on this image will be
ignored.
Open Images Challenge Instance Segmentation metric allows to measure per
formance of models in case of incomplete annotations: some instances are
annotations only on box level and some - on image-level. In addition,
image-level labels are taken into account as in detection metric.
Open Images Challenge Detection metric default parameters:
evaluate_masks = False
group_of_weight = 1.0
Open Images Challenge Instance Segmentation metric default parameters:
evaluate_masks = True
(group_of_weight will not matter)
"""
def __init__(self,
categories,
evaluate_masks=False,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
group_of_weight=1.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
evaluate_masks: set to true for instance segmentation metric and to false
for detection metric.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
"""
if not evaluate_masks:
metrics_prefix = 'OpenImagesDetectionChallenge'
else:
metrics_prefix = 'OpenImagesInstanceSegmentationChallenge'
super(OpenImagesChallengeEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_masks=evaluate_masks,
evaluate_corlocs=evaluate_corlocs,
group_of_weight=group_of_weight,
metric_prefix=metrics_prefix)
self._evaluatable_labels = {}
# Only one of the two has to be provided, but both options are given
# for compatibility with previous codebase.
self._expected_keys.update([
standard_fields.InputDataFields.groundtruth_image_classes,
standard_fields.InputDataFields.groundtruth_labeled_classes])
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_image_classes: integer 1D
numpy array containing all classes for which labels are verified.
standard_fields.InputDataFields.groundtruth_group_of: Optional length M
numpy boolean array denoting whether a groundtruth box contains a
group of instances.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
super(OpenImagesChallengeEvaluator,
self).add_single_ground_truth_image_info(image_id, groundtruth_dict)
input_fields = standard_fields.InputDataFields
groundtruth_classes = (
groundtruth_dict[input_fields.groundtruth_classes] -
self._label_id_offset)
image_classes = np.array([], dtype=int)
if input_fields.groundtruth_image_classes in groundtruth_dict:
image_classes = groundtruth_dict[input_fields.groundtruth_image_classes]
elif input_fields.groundtruth_labeled_classes in groundtruth_dict:
image_classes = groundtruth_dict[input_fields.groundtruth_labeled_classes]
image_classes -= self._label_id_offset
self._evaluatable_labels[image_id] = np.unique(
np.concatenate((image_classes, groundtruth_classes)))
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
if image_id not in self._image_ids:
      # The evaluator assumes that groundtruth is inserted first; if it is
      # not, fall back to an empty set of evaluatable labels for this image.
self._image_ids.update([image_id])
self._evaluatable_labels[image_id] = np.array([])
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
allowed_classes = np.where(
np.isin(detection_classes, self._evaluatable_labels[image_id]))
detection_classes = detection_classes[allowed_classes]
detected_boxes = detections_dict[
standard_fields.DetectionResultFields.detection_boxes][allowed_classes]
detected_scores = detections_dict[
standard_fields.DetectionResultFields.detection_scores][allowed_classes]
if self._evaluate_masks:
detection_masks = detections_dict[standard_fields.DetectionResultFields
.detection_masks][allowed_classes]
else:
detection_masks = None
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detection_classes,
detected_masks=detection_masks)
def clear(self):
"""Clears stored data."""
super(OpenImagesChallengeEvaluator, self).clear()
self._evaluatable_labels.clear()
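# Hedged usage sketch (not part of the original module): feeding the challenge
# evaluator through its dict-based interface. The category id, boxes, and score
# below are invented for illustration; `standard_fields` and `np` come from
# this module's imports.
# evaluator = OpenImagesChallengeEvaluator(categories=[{'id': 1, 'name': 'cat'}])
# evaluator.add_single_ground_truth_image_info('img0', {
#     standard_fields.InputDataFields.groundtruth_boxes:
#         np.array([[0., 0., 10., 10.]], dtype=np.float32),
#     standard_fields.InputDataFields.groundtruth_classes:
#         np.array([1], dtype=int),
#     standard_fields.InputDataFields.groundtruth_image_classes:
#         np.array([1], dtype=int)})
# evaluator.add_single_detected_image_info('img0', {
#     standard_fields.DetectionResultFields.detection_boxes:
#         np.array([[0., 0., 9., 9.]], dtype=np.float32),
#     standard_fields.DetectionResultFields.detection_scores:
#         np.array([0.9], dtype=np.float32),
#     standard_fields.DetectionResultFields.detection_classes:
#         np.array([1], dtype=int)})
# metrics_dict = evaluator.evaluate()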
ObjectDetectionEvalMetrics = collections.namedtuple(
'ObjectDetectionEvalMetrics', [
'average_precisions', 'mean_ap', 'precisions', 'recalls', 'corlocs',
'mean_corloc'
])
class OpenImagesDetectionChallengeEvaluator(OpenImagesChallengeEvaluator):
"""A class implements Open Images Detection Challenge metric."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
"""
super(OpenImagesDetectionChallengeEvaluator, self).__init__(
categories=categories,
evaluate_masks=False,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
group_of_weight=1.0)
class OpenImagesInstanceSegmentationChallengeEvaluator(
OpenImagesChallengeEvaluator):
"""A class implements Open Images Instance Segmentation Challenge metric."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
"""
super(OpenImagesInstanceSegmentationChallengeEvaluator, self).__init__(
categories=categories,
evaluate_masks=True,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
group_of_weight=0.0)
ObjectDetectionEvaluationState = collections.namedtuple(
'ObjectDetectionEvaluationState', [
'num_gt_instances_per_class',
'scores_per_class',
'tp_fp_labels_per_class',
'num_gt_imgs_per_class',
'num_images_correctly_detected_per_class',
])
class ObjectDetectionEvaluation(object):
"""Internal implementation of Pascal object detection metrics."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000,
recall_lower_bound=0.0,
recall_upper_bound=1.0,
use_weighted_mean_ap=False,
label_id_offset=0,
group_of_weight=0.0,
per_image_eval_class=per_image_evaluation.PerImageEvaluation):
"""Constructor.
Args:
num_groundtruth_classes: Number of ground-truth classes.
matching_iou_threshold: IOU threshold used for matching detected boxes to
ground-truth boxes.
nms_iou_threshold: IOU threshold used for non-maximum suppression.
nms_max_output_boxes: Maximum number of boxes returned by non-maximum
suppression.
recall_lower_bound: lower bound of recall operating area
recall_upper_bound: upper bound of recall operating area
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
label_id_offset: The label id offset.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
per_image_eval_class: The class that contains functions for computing per
image metrics.
Raises:
ValueError: if num_groundtruth_classes is smaller than 1.
"""
if num_groundtruth_classes < 1:
raise ValueError('Need at least 1 groundtruth class for evaluation.')
self.per_image_eval = per_image_eval_class(
num_groundtruth_classes=num_groundtruth_classes,
matching_iou_threshold=matching_iou_threshold,
nms_iou_threshold=nms_iou_threshold,
nms_max_output_boxes=nms_max_output_boxes,
group_of_weight=group_of_weight)
self.recall_lower_bound = recall_lower_bound
self.recall_upper_bound = recall_upper_bound
self.group_of_weight = group_of_weight
self.num_class = num_groundtruth_classes
self.use_weighted_mean_ap = use_weighted_mean_ap
self.label_id_offset = label_id_offset
self.groundtruth_boxes = {}
self.groundtruth_class_labels = {}
self.groundtruth_masks = {}
self.groundtruth_is_difficult_list = {}
self.groundtruth_is_group_of_list = {}
self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=float)
self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
self._initialize_detections()
def _initialize_detections(self):
"""Initializes internal data structures."""
self.detection_keys = set()
self.scores_per_class = [[] for _ in range(self.num_class)]
self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
self.average_precision_per_class = np.empty(self.num_class, dtype=float)
self.average_precision_per_class.fill(np.nan)
self.precisions_per_class = [np.nan] * self.num_class
self.recalls_per_class = [np.nan] * self.num_class
self.sum_tp_class = [np.nan] * self.num_class
self.corloc_per_class = np.ones(self.num_class, dtype=float)
def clear_detections(self):
self._initialize_detections()
def get_internal_state(self):
"""Returns internal state of the evaluation.
NOTE: that only evaluation results will be returned
(e.g. no raw predictions or groundtruth).
Returns:
internal state of the evaluation.
"""
return ObjectDetectionEvaluationState(
self.num_gt_instances_per_class, self.scores_per_class,
self.tp_fp_labels_per_class, self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
def merge_internal_state(self, state_tuple):
"""Merges internal state of the evaluation with the current state.
Args:
state_tuple: state tuple representing evaluation state: should be of type
ObjectDetectionEvaluationState.
"""
(num_gt_instances_per_class, scores_per_class, tp_fp_labels_per_class,
num_gt_imgs_per_class, num_images_correctly_detected_per_class) = (
state_tuple)
assert self.num_class == len(num_gt_instances_per_class)
assert self.num_class == len(scores_per_class)
assert self.num_class == len(tp_fp_labels_per_class)
for i in range(self.num_class):
self.scores_per_class[i].extend(scores_per_class[i])
self.tp_fp_labels_per_class[i].extend(tp_fp_labels_per_class[i])
self.num_gt_instances_per_class[i] += num_gt_instances_per_class[i]
self.num_gt_imgs_per_class[i] += num_gt_imgs_per_class[i]
self.num_images_correctly_detected_per_class[
i] += num_images_correctly_detected_per_class[i]
def add_single_ground_truth_image_info(self,
image_key,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=None,
groundtruth_masks=None):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
groundtruth_boxes: float32 numpy array of shape [num_boxes, 4] containing
`num_boxes` groundtruth boxes of the format [ymin, xmin, ymax, xmax] in
absolute image coordinates.
groundtruth_class_labels: integer numpy array of shape [num_boxes]
containing 0-indexed groundtruth classes for the boxes.
groundtruth_is_difficult_list: A length M numpy boolean array denoting
whether a ground truth box is a difficult instance or not. To support
the case that no boxes are difficult, it is by default set as None.
groundtruth_is_group_of_list: A length M numpy boolean array denoting
whether a ground truth box is a group-of box or not. To support the case
that no boxes are groups-of, it is by default set as None.
groundtruth_masks: uint8 numpy array of shape [num_boxes, height, width]
containing `num_boxes` groundtruth masks. The mask values range from 0
to 1.
"""
if image_key in self.groundtruth_boxes:
logging.warning(
'image %s has already been added to the ground truth database.',
image_key)
return
self.groundtruth_boxes[image_key] = groundtruth_boxes
self.groundtruth_class_labels[image_key] = groundtruth_class_labels
self.groundtruth_masks[image_key] = groundtruth_masks
if groundtruth_is_difficult_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
self.groundtruth_is_difficult_list[
image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
if groundtruth_is_group_of_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_group_of_list = np.zeros(num_boxes, dtype=bool)
if groundtruth_masks is None:
num_boxes = groundtruth_boxes.shape[0]
mask_presence_indicator = np.zeros(num_boxes, dtype=bool)
else:
mask_presence_indicator = (np.sum(groundtruth_masks,
axis=(1, 2)) == 0).astype(dtype=bool)
self.groundtruth_is_group_of_list[
image_key] = groundtruth_is_group_of_list.astype(dtype=bool)
self._update_ground_truth_statistics(
groundtruth_class_labels,
groundtruth_is_difficult_list.astype(dtype=bool)
| mask_presence_indicator, # ignore boxes without masks
groundtruth_is_group_of_list.astype(dtype=bool))
def add_single_detected_image_info(self,
image_key,
detected_boxes,
detected_scores,
detected_class_labels,
detected_masks=None):
"""Adds detections for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
detected_boxes: float32 numpy array of shape [num_boxes, 4] containing
`num_boxes` detection boxes of the format [ymin, xmin, ymax, xmax] in
absolute image coordinates.
detected_scores: float32 numpy array of shape [num_boxes] containing
detection scores for the boxes.
detected_class_labels: integer numpy array of shape [num_boxes] containing
0-indexed detection classes for the boxes.
detected_masks: np.uint8 numpy array of shape [num_boxes, height, width]
containing `num_boxes` detection masks with values ranging between 0 and
1.
Raises:
ValueError: if the number of boxes, scores and class labels differ in
length.
"""
if (len(detected_boxes) != len(detected_scores) or
len(detected_boxes) != len(detected_class_labels)):
      raise ValueError(
          'detected_boxes, detected_scores and '
          'detected_class_labels should all have same lengths. Got '
          '[%d, %d, %d]' % (len(detected_boxes), len(detected_scores),
                            len(detected_class_labels)))
if image_key in self.detection_keys:
logging.warning(
'image %s has already been added to the detection result database',
image_key)
return
self.detection_keys.add(image_key)
if image_key in self.groundtruth_boxes:
groundtruth_boxes = self.groundtruth_boxes[image_key]
groundtruth_class_labels = self.groundtruth_class_labels[image_key]
# Masks are popped instead of look up. The reason is that we do not want
# to keep all masks in memory which can cause memory overflow.
groundtruth_masks = self.groundtruth_masks.pop(image_key)
groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[
image_key]
groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[
image_key]
else:
groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
groundtruth_class_labels = np.array([], dtype=int)
if detected_masks is None:
groundtruth_masks = None
else:
groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
groundtruth_is_difficult_list = np.array([], dtype=bool)
groundtruth_is_group_of_list = np.array([], dtype=bool)
scores, tp_fp_labels, is_class_correctly_detected_in_image = (
self.per_image_eval.compute_object_detection_metrics(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks))
for i in range(self.num_class):
if scores[i].shape[0] > 0:
self.scores_per_class[i].append(scores[i])
self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
(self.num_images_correctly_detected_per_class
) += is_class_correctly_detected_in_image
def _update_ground_truth_statistics(self, groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list):
"""Update grouth truth statitistics.
1. Difficult boxes are ignored when counting the number of ground truth
instances as done in Pascal VOC devkit.
2. Difficult boxes are treated as normal boxes when computing CorLoc related
    statistics.
Args:
groundtruth_class_labels: An integer numpy array of length M, representing
M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box is a group-of box or not
"""
for class_index in range(self.num_class):
num_gt_instances = np.sum(groundtruth_class_labels[
~groundtruth_is_difficult_list
& ~groundtruth_is_group_of_list] == class_index)
num_groupof_gt_instances = self.group_of_weight * np.sum(
groundtruth_class_labels[
groundtruth_is_group_of_list
& ~groundtruth_is_difficult_list] == class_index)
self.num_gt_instances_per_class[
class_index] += num_gt_instances + num_groupof_gt_instances
if np.any(groundtruth_class_labels == class_index):
self.num_gt_imgs_per_class[class_index] += 1
def evaluate(self):
"""Compute evaluation result.
Returns:
A named tuple with the following fields -
average_precision: float numpy array of average precision for
each class.
mean_ap: mean average precision of all classes, float scalar
precisions: List of precisions, each precision is a float numpy
array
recalls: List of recalls, each recall is a float numpy array
        corlocs: numpy float array of per-class CorLoc scores
        mean_corloc: mean CorLoc score over all classes, float scalar
"""
if (self.num_gt_instances_per_class == 0).any():
logging.warning(
'The following classes have no ground truth examples: %s',
np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
self.label_id_offset)
if self.use_weighted_mean_ap:
all_scores = np.array([], dtype=float)
all_tp_fp_labels = np.array([], dtype=bool)
for class_index in range(self.num_class):
if self.num_gt_instances_per_class[class_index] == 0:
continue
if not self.scores_per_class[class_index]:
scores = np.array([], dtype=float)
tp_fp_labels = np.array([], dtype=float)
else:
scores = np.concatenate(self.scores_per_class[class_index])
tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
if self.use_weighted_mean_ap:
all_scores = np.append(all_scores, scores)
all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
precision, recall = metrics.compute_precision_recall(
scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
recall_within_bound_indices = [
index for index, value in enumerate(recall) if
value >= self.recall_lower_bound and value <= self.recall_upper_bound
]
recall_within_bound = recall[recall_within_bound_indices]
precision_within_bound = precision[recall_within_bound_indices]
self.precisions_per_class[class_index] = precision_within_bound
self.recalls_per_class[class_index] = recall_within_bound
self.sum_tp_class[class_index] = tp_fp_labels.sum()
average_precision = metrics.compute_average_precision(
precision_within_bound, recall_within_bound)
self.average_precision_per_class[class_index] = average_precision
logging.info('average_precision: %f', average_precision)
self.corloc_per_class = metrics.compute_cor_loc(
self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
if self.use_weighted_mean_ap:
num_gt_instances = np.sum(self.num_gt_instances_per_class)
precision, recall = metrics.compute_precision_recall(
all_scores, all_tp_fp_labels, num_gt_instances)
recall_within_bound_indices = [
index for index, value in enumerate(recall) if
value >= self.recall_lower_bound and value <= self.recall_upper_bound
]
recall_within_bound = recall[recall_within_bound_indices]
precision_within_bound = precision[recall_within_bound_indices]
mean_ap = metrics.compute_average_precision(precision_within_bound,
recall_within_bound)
else:
mean_ap = np.nanmean(self.average_precision_per_class)
mean_corloc = np.nanmean(self.corloc_per_class)
return ObjectDetectionEvalMetrics(self.average_precision_per_class, mean_ap,
self.precisions_per_class,
self.recalls_per_class,
self.corloc_per_class, mean_corloc)
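# Hedged usage sketch (not part of the original module): driving the internal
# ObjectDetectionEvaluation class directly. The boxes, score, and single class
# id are invented for illustration.
# evaluator = ObjectDetectionEvaluation(num_groundtruth_classes=1)
# evaluator.add_single_ground_truth_image_info(
#     image_key='img0',
#     groundtruth_boxes=np.array([[0., 0., 10., 10.]], dtype=np.float32),
#     groundtruth_class_labels=np.array([0], dtype=int))
# evaluator.add_single_detected_image_info(
#     image_key='img0',
#     detected_boxes=np.array([[0., 0., 9., 9.]], dtype=np.float32),
#     detected_scores=np.array([0.9], dtype=np.float32),
#     detected_class_labels=np.array([0], dtype=int))
# print(evaluator.evaluate().mean_ap)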
|
the-stack_0_26195
|
from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from otp.level import BasicEntities
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
import LiftConstants
import MovingPlatform
class DistributedLift(BasicEntities.DistributedNodePathEntity):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLift')
def __init__(self, cr):
BasicEntities.DistributedNodePathEntity.__init__(self, cr)
def generateInit(self):
self.notify.debug('generateInit')
BasicEntities.DistributedNodePathEntity.generateInit(self)
self.moveSnd = base.loader.loadSfx('phase_9/audio/sfx/CHQ_FACT_elevator_up_down.mp3')
        self.fsm = ClassicFSM.ClassicFSM('DistributedLift', [
            State.State('off', self.enterOff, self.exitOff, ['moving']),
            State.State('moving', self.enterMoving, self.exitMoving, ['waiting']),
            State.State('waiting', self.enterWaiting, self.exitWaiting, ['moving'])],
            'off', 'off')
self.fsm.enterInitialState()
def generate(self):
self.notify.debug('generate')
BasicEntities.DistributedNodePathEntity.generate(self)
self.platform = self.attachNewNode('platParent')
def setStateTransition(self, toState, fromState, arrivalTimestamp):
self.notify.debug('setStateTransition: %s->%s' % (fromState, toState))
if not self.isGenerated():
self.initialState = toState
self.initialFromState = fromState
self.initialStateTimestamp = arrivalTimestamp
else:
self.fsm.request('moving', [toState, fromState, arrivalTimestamp])
def announceGenerate(self):
self.notify.debug('announceGenerate')
BasicEntities.DistributedNodePathEntity.announceGenerate(self)
self.initPlatform()
self.state = None
self.fsm.request('moving', [self.initialState, self.initialFromState, self.initialStateTimestamp])
del self.initialState
del self.initialStateTimestamp
return
def disable(self):
self.notify.debug('disable')
self.ignoreAll()
self.fsm.requestFinalState()
BasicEntities.DistributedNodePathEntity.disable(self)
def delete(self):
self.notify.debug('delete')
del self.moveSnd
del self.fsm
self.destroyPlatform()
self.platform.removeNode()
del self.platform
BasicEntities.DistributedNodePathEntity.delete(self)
def initPlatform(self):
model = loader.loadModel(self.modelPath)
if model is None:
return
model.setScale(self.modelScale)
if self.floorName is None:
return
self.platformModel = MovingPlatform.MovingPlatform()
self.platformModel.setupCopyModel(self.getParentToken(), model, self.floorName)
self.accept(self.platformModel.getEnterEvent(), self.localToonEntered)
self.accept(self.platformModel.getExitEvent(), self.localToonLeft)
self.startGuard = None
self.endGuard = None
zoneNp = self.getZoneNode()
if len(self.startGuardName):
self.startGuard = zoneNp.find('**/%s' % self.startGuardName)
if len(self.endGuardName):
self.endGuard = zoneNp.find('**/%s' % self.endGuardName)
side2srch = {'front': '**/wall_front',
'back': '**/wall_back',
'left': '**/wall_left',
'right': '**/wall_right'}
for side in side2srch.values():
np = self.platformModel.find(side)
if not np.isEmpty():
np.setScale(1.0, 1.0, 2.0)
np.setZ(-10)
np.flattenLight()
self.startBoardColl = NodePathCollection()
self.endBoardColl = NodePathCollection()
for side in self.startBoardSides:
np = self.platformModel.find(side2srch[side])
if np.isEmpty():
                self.notify.warning("couldn't find %s board collision" % side)
else:
self.startBoardColl.addPath(np)
for side in self.endBoardSides:
np = self.platformModel.find(side2srch[side])
if np.isEmpty():
                self.notify.warning("couldn't find %s board collision" % side)
else:
self.endBoardColl.addPath(np)
self.platformModel.reparentTo(self.platform)
return
def destroyPlatform(self):
if hasattr(self, 'platformModel'):
self.ignore(self.platformModel.getEnterEvent())
self.ignore(self.platformModel.getExitEvent())
self.platformModel.destroy()
del self.platformModel
if self.startGuard is not None:
self.startGuard.unstash()
if self.endGuard is not None:
self.endGuard.unstash()
del self.startGuard
del self.endGuard
del self.startBoardColl
del self.endBoardColl
return
def localToonEntered(self):
self.sendUpdate('setAvatarEnter')
def localToonLeft(self):
self.sendUpdate('setAvatarLeave')
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def getPosition(self, state):
if state is LiftConstants.Down:
return self.startPos
else:
return self.endPos
def getGuard(self, state):
if state is LiftConstants.Down:
return self.startGuard
else:
return self.endGuard
def getBoardColl(self, state):
if state is LiftConstants.Down:
return self.startBoardColl
else:
return self.endBoardColl
def enterMoving(self, toState, fromState, arrivalTimestamp):
self.notify.debug('enterMoving, %s->%s' % (fromState, toState))
if self.state == toState:
self.notify.warning('already in state %s' % toState)
startPos = self.getPosition(fromState)
endPos = self.getPosition(toState)
startGuard = self.getGuard(fromState)
endGuard = self.getGuard(toState)
startBoardColl = self.getBoardColl(fromState)
endBoardColl = self.getBoardColl(toState)
def startMoving(self = self, guard = startGuard, boardColl = startBoardColl):
if guard is not None and not guard.isEmpty():
guard.unstash()
boardColl.unstash()
self.soundIval = SoundInterval(self.moveSnd, node=self.platform)
self.soundIval.loop()
return
def doneMoving(self = self, guard = endGuard, boardColl = endBoardColl, newState = toState):
self.state = newState
if hasattr(self, 'soundIval'):
self.soundIval.pause()
del self.soundIval
if guard is not None and not guard.isEmpty():
guard.stash()
boardColl.stash()
self.fsm.request('waiting')
return
        self.moveIval = Sequence(
            Func(startMoving),
            LerpPosInterval(self.platform, self.duration, endPos,
                            startPos=startPos, blendType='easeInOut',
                            name='lift-%s-move' % self.entId, fluid=1),
            Func(doneMoving))
ivalStartT = globalClockDelta.networkToLocalTime(arrivalTimestamp, bits=32) - self.moveIval.getDuration()
self.moveIval.start(globalClock.getFrameTime() - ivalStartT)
def exitMoving(self):
if hasattr(self, 'soundIval'):
self.soundIval.pause()
del self.soundIval
self.moveIval.pause()
del self.moveIval
def enterWaiting(self):
self.notify.debug('enterWaiting')
def exitWaiting(self):
pass
if __dev__:
def attribChanged(self, *args):
BasicEntities.DistributedNodePathEntity.attribChanged(self, *args)
self.destroyPlatform()
self.initPlatform()
|
the-stack_0_26196
|
# -*- coding: utf-8 -*-
"""
Provides classes representing different transform types suitable for
use with visuals and scenes.
Adapted from vispy.visuals.transforms
Copyright (c) Vispy Development Team. All Rights Reserved.
Distributed under the (new) BSD License. See vispy/LICENSE.txt for more info.
"""
from .base_transform import BaseTransform, InverseTransform
from .linear import NullTransform, TTransform, STTransform, AffineTransform
from .nonlinear import LogTransform, PolarTransform
from .composite import CompositeTransform, SimplifiedCompositeTransform
from ._util import arg_to_array, arg_to_vec, as_vec, TransformCache
def transform_types():
typs = [BaseTransform]
i = 0
while i < len(typs):
typs.extend(typs[i].__subclasses__())
i += 1
return typs[1:]
_cached_types = None
def create_transform(type, params):
global _cached_types
if _cached_types is None or type not in _cached_types:
_cached_types = {tr.__name__: tr for tr in transform_types()}
if type not in _cached_types:
raise TypeError('Unknown transform type %r' % type)
return _cached_types[type](**params)
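# Hedged usage sketch (not part of the original module): constructing a
# transform by class name. NullTransform takes no parameters, which keeps the
# example free of assumptions about constructor signatures.
# tr = create_transform('NullTransform', {})
# assert isinstance(tr, NullTransform)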
|
the-stack_0_26198
|
import logging
import sqlite3
import pandas as pd
class DB:
logger = logging.getLogger(__name__)
db_file = "db/mixpanel3_exports.sqlite"
def __init__(self):
self.logger.info("Starting db client")
self.conn = sqlite3.connect(self.db_file)
self.logger.info("Starting db client - done")
def append(self, date_from, date_to, event_name, file_size):
"""
:return: Logs all events including file_size
"""
df = pd.DataFrame({'date_from': pd.Timestamp(date_from),
'date_to': pd.Timestamp(date_to),
'event': event_name,
'file_size': file_size},
index=[0])
try:
df.to_sql("exports", self.conn, if_exists="append", index=False)
except pd.io.sql.DatabaseError as e:
            self.logger.warning("DatabaseError: table 'exports' most probably does not exist: %s", e)
return None
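# Hedged usage sketch (not part of the original module): logging a single
# export. The dates, event name, and file size are invented for illustration;
# the sqlite file path comes from DB.db_file.
# if __name__ == "__main__":
#     db = DB()
#     db.append("2020-01-01", "2020-01-31", "page_view", 1024)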
|
the-stack_0_26199
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to hold a library of OpDefs and use it to create Brain operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_callbacks
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
def _Attr(op_def, name):
for attr in op_def.attr:
if attr.name == name:
return attr
raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
(op_def.name, name))
def _AttrValue(attr_protos, name):
if name in attr_protos:
return attr_protos[name]
raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
(name, attr_protos))
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
if attr_def.HasField("allowed_values"):
allowed_list = attr_def.allowed_values.list.type
if dtype not in allowed_list:
raise TypeError(
"Value passed to parameter '%s' has DataType %s not in list of "
"allowed values: %s" %
(param_name, dtypes.as_dtype(dtype).name,
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
def _IsListParameter(arg):
if arg.number_attr:
return True
elif arg.type_list_attr:
return True
return False
def _NumTypeFields(arg):
num = 0
if arg.type != types_pb2.DT_INVALID: num += 1
if arg.type_attr: num += 1
if arg.type_list_attr: num += 1
return num
def _IsListValue(v):
return isinstance(v, (list, tuple))
def _Flatten(l):
"""Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5]."""
# [1, 2, [3, 4], [5]] -> [[1], [2], [3, 4], [5]]
l_of_l = [x if _IsListValue(x) else [x] for x in l]
# [[1], [2], [3, 4], [5]] -> [1, 2, 3, 4, 5]
return [item for sublist in l_of_l for item in sublist]
def _Restructure(l, structure):
"""Returns the elements of list l structured according to the given structure.
A structure is represented by a list whose elements are either
`None` or a non-negative integer. `None` corresponds to a single
element in the output list, and an integer N corresponds to a nested
list of length N.
The function returns a data structure whose shape is given by
`structure`, and whose elements are taken from `l`. If `structure`
is a singleton, the function returns the single data structure
implied by the 0th element of `structure`. For example:
_Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
-> ["foo", ["bar", "baz"], "qux"]
_Restructure(["foo"], [None]) -> "foo"
_Restructure(["foo"], [1]) -> ["foo"]
_Restructure([], [0]) -> []
Args:
l: A list.
structure: A list whose elements are either `None` or a non-negative
integer.
Returns:
The elements of `l`, restructured according to `structure`. If
`structure` is a list of length 1, this function returns the
single data structure implied by `structure[0]`.
"""
result = []
current_index = 0
for element in structure:
if element is None:
result.append(l[current_index])
current_index += 1
else:
result.append(l[current_index:current_index+element])
current_index += element
if len(result) == 1:
return result[0]
else:
return tuple(result)
def _MakeFloat(v, arg_name):
if not isinstance(v, compat.real_types):
raise TypeError("Expected float for argument '%s' not %s." %
(arg_name, repr(v)))
return float(v)
def _MakeInt(v, arg_name):
if isinstance(v, six.string_types):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
try:
return int(v)
except (ValueError, TypeError):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
def _MakeStr(v, arg_name):
if not isinstance(v, compat.bytes_or_text_types):
raise TypeError("Expected string for argument '%s' not %s." %
(arg_name, repr(v)))
return compat.as_bytes(v) # Convert unicode strings to bytes.
def _MakeBool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def _MakeType(v, attr_def):
try:
v = dtypes.as_dtype(v).base_dtype
except TypeError:
raise TypeError("Expected DataType for argument '%s' not %s." %
(attr_def.name, repr(v)))
i = v.as_datatype_enum
_SatisfiesTypeConstraint(i, attr_def, param_name=attr_def.name)
return i
def _MakeShape(v, arg_name):
"""Convert v into a TensorShapeProto."""
# Args:
# v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
# arg_name: String, for error messages.
# Returns:
# A TensorShapeProto.
if isinstance(v, tensor_shape_pb2.TensorShapeProto):
for d in v.dim:
if d.name:
logging.warning("Warning: TensorShapeProto with a named dimension: %s",
str(v))
break
return v
try:
return tensor_shape.as_shape(v).as_proto()
except TypeError as e:
raise TypeError("Error converting %s to a TensorShape: %s" % (arg_name, e))
except ValueError as e:
raise ValueError("Error converting %s to a TensorShape: %s" % (arg_name, e))
def _MakeTensor(v, arg_name):
"""Ensure v is a TensorProto."""
if isinstance(v, tensor_pb2.TensorProto):
return v
raise TypeError(
"Don't know how to convert %s to a TensorProto for argument '%s'" %
(repr(v), arg_name))
def _MakeFunc(v, arg_name):
"""Ensure v is a func."""
if isinstance(v, attr_value_pb2.NameAttrList):
return v
fn_attr = attr_value_pb2.NameAttrList()
if isinstance(v, compat.bytes_or_text_types):
fn_attr.name = v
elif hasattr(v, "add_to_graph"):
v.add_to_graph(ops.get_default_graph())
fn_attr.name = v.name
else:
raise TypeError("Don't know how to convert {} to a func for "
"argument {}".format(v, arg_name))
return fn_attr
class _OpInfo(object):
"""All per-Op state we would like to precompute/validate."""
def __init__(self, op_def):
self.op_def = op_def
# TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it
# here, instead of these checks.
for arg in list(op_def.input_arg) + list(op_def.output_arg):
num_type_fields = _NumTypeFields(arg)
if num_type_fields != 1:
raise TypeError("Arg '%s' of '%s' must have one type field not %d" %
(arg.name, op_def.name, num_type_fields))
if arg.type_attr:
attr_type = _Attr(op_def, arg.type_attr).type
if attr_type != "type":
raise TypeError("Attr '%s' of '%s' used as a type_attr "
"but has type %s" %
(arg.type_attr, op_def.name, attr_type))
if arg.type_list_attr:
attr_type = _Attr(op_def, arg.type_list_attr).type
if attr_type != "list(type)":
raise TypeError(
"Attr '%s' of '%s' used as a type_list_attr but has type %s" %
              (arg.type_list_attr, op_def.name, attr_type))
if arg.number_attr:
attr_type = _Attr(op_def, arg.number_attr).type
if attr_type != "int":
raise TypeError(
"Attr '%s' of '%s' used as a number_attr but has type %s" %
(arg.number_attr, op_def.name, attr_type))
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _MaybeColocateWith(inputs):
"""A context manager for (maybe) colocating with a list of input tensors.
Args:
inputs: A list of `Tensor` or `Operation` objects.
Returns:
A context manager.
"""
if not inputs:
yield
else:
# NOTE(mrry): The `ops.colocate_with()` function accepts only a single
# op or tensor, so we create one context manager per element in the list.
with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
yield
# pylint: enable=g-doc-return-or-yield
class OpDefLibrary(object):
"""Holds a collection of OpDefs, can add the corresponding Ops to a graph."""
def __init__(self):
self._ops = {}
# pylint: disable=invalid-name
def add_op(self, op_def):
"""Register an OpDef. May call apply_op with the name afterwards."""
if not isinstance(op_def, op_def_pb2.OpDef):
raise TypeError("%s is %s, not an op_def_pb2.OpDef" %
(op_def, type(op_def)))
if not op_def.name:
raise ValueError("%s missing name." % op_def)
if op_def.name in self._ops:
raise RuntimeError("Op name %s registered twice." % op_def.name)
self._ops[op_def.name] = _OpInfo(op_def)
def add_op_list(self, op_list):
"""Register the OpDefs from an OpList."""
if not isinstance(op_list, op_def_pb2.OpList):
raise TypeError("%s is %s, not an op_def_pb2.OpList" %
(op_list, type(op_list)))
for op_def in op_list.op:
self.add_op(op_def)
def apply_op(self, op_type_name, name=None, **keywords):
# pylint: disable=g-doc-args
"""Add a node invoking a registered Op to a graph.
Example usage:
# input1 and input2 can be Tensors or anything ops.convert_to_tensor()
# will convert to a Tensor.
op_def_library.apply_op("op", input1=input1, input2=input2)
# Can specify a node name.
op_def_library.apply_op("op", input1=input1, name="node_name")
# Must use keyword arguments, with the names specified in the OpDef.
op_def_library.apply_op("op", input_name=input, attr_name=attr)
All attrs must either be inferred from an input or specified.
(If inferred, the attr must not be specified.) If an attr has a default
value specified in the Op's OpDef, then you may pass None as the value
of that attr to get the default.
Args:
op_type_name: string. Must match the name field of a registered Op.
name: string. Optional name of the created op.
**keywords: input Tensor and attr arguments specified by name,
and optional parameters to pass when constructing the Operation.
Returns:
The Tensor(s) representing the output of the operation, or the Operation
itself if there are no outputs.
Raises:
RuntimeError: On some errors.
TypeError: On some errors.
ValueError: On some errors.
"""
output_structure, is_stateful, op = self._apply_op_helper(
op_type_name, name, **keywords)
if output_structure:
outputs = op.outputs
res = _Restructure(ops.convert_n_to_tensor(outputs), output_structure)
if isinstance(res, list) and not res and is_stateful:
return op
else:
return res
else:
return op
def _apply_op_helper(self, op_type_name, name=None, **keywords):
"""Implementation of apply_op that returns output_structure, op."""
op_info = self._ops.get(op_type_name, None)
if op_info is None:
raise RuntimeError("Unrecognized Op name " + op_type_name)
op_def = op_info.op_def
# Determine the graph context.
try:
# Need to flatten all the arguments into a list.
# pylint: disable=protected-access
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
# pylint: enable=protected-access
except AssertionError as e:
raise RuntimeError(
"Cannot determine graph for Op '%s' due to: %s"
% (op_type_name, e.message))
# Default name if not specified.
if name is None:
name = op_type_name
# Check for deprecation
deprecation_version = op_def.deprecation.version
if deprecation_version:
producer = g.graph_def_versions.producer
if producer >= deprecation_version:
raise NotImplementedError(
("Op %s is not available in GraphDef version %d. "
"It has been removed in version %d. %s.") %
(op_type_name, producer, deprecation_version,
op_def.deprecation.explanation))
# Fill in the list of default types for all "type" attrs. This
# will be used to choose a preferred dtype to convert to in the
# absence of input type information.
#
# TODO(b/31302892): Currently the defaults don't work in the right
# way if you have two inputs, one of whose type resolution depends
# on the other. Handling this will require restructuring this code
# significantly.
default_type_attr_map = {}
for attr_def in op_def.attr:
if attr_def.type != "type":
continue
key = attr_def.name
if attr_def.HasField("default_value"):
default_type_attr_map[key] = dtypes.as_dtype(
attr_def.default_value.type)
# Requires that op_def has passed validation (using the C++
# ValidateOpDef() from ../framework/op_def_util.h).
attrs = {}
inputs = []
input_types = []
with g.as_default(), ops.name_scope(name) as scope:
# Perform input type inference
inferred_from = {}
for input_arg in op_def.input_arg:
input_name = input_arg.name
if input_name in keywords:
values = keywords.pop(input_name)
elif input_name + "_" in keywords:
# Handle the case where the name is a keyword or built-in
# for Python so we use the name + _ instead.
input_name += "_"
values = keywords.pop(input_name)
else:
raise TypeError("No argument for input " + input_name)
# Goals:
# * Convert values to Tensors if it contains constants.
# * Verify that values is a list if that matches the input_arg's
# type.
# * If the input_arg's type is determined by attrs, either set
# those attrs and validate those attr values are legal (if
# they have not yet been set) or validate the input matches
# the type indicated by the attrs (if they have already been
# inferred via an earlier input).
# * If the input_arg has an explicit type, make sure the input
# conforms.
if _IsListParameter(input_arg):
if not _IsListValue(values):
raise TypeError(
"Expected list for '%s' argument to '%s' Op, not %s." %
(input_name, op_type_name, values))
# In cases where we expect all elements of the list to have the
# same dtype, try to cast non-Tensor elements to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.number_attr:
if input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
else:
for t in values:
if isinstance(t, ops.Tensor):
dtype = t.dtype
break
# dtype still not found, prefer using the default dtype
# from the attr.
if dtype is None and input_arg.type_attr in default_type_attr_map:
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
if not input_arg.is_ref and dtype:
dtype = dtypes.as_dtype(dtype).base_dtype
values = ops.internal_convert_n_to_tensor(
values,
name=input_arg.name,
dtype=dtype if dtype else None,
preferred_dtype=default_dtype,
as_ref=input_arg.is_ref)
if input_arg.number_attr and len(
set(v.dtype.base_dtype for v in values)) > 1:
raise TypeError() # All types should match.
except (TypeError, ValueError):
# What types does the conversion function think values have?
observed_types = []
for value in values:
try:
converted_value = ops.internal_convert_to_tensor(
value, as_ref=input_arg.is_ref)
observed_types.append(converted_value.dtype.base_dtype.name)
except (TypeError, ValueError):
observed_types.append("<NOT CONVERTIBLE TO TENSOR>")
observed = ", ".join(observed_types)
prefix = (
"Tensors in list passed to '%s' of '%s' Op have types [%s]" %
(input_name, op_type_name, observed))
if input_arg.number_attr:
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s that do not match expected type %s." %
(prefix, dtype.name))
elif input_arg.type_attr in attrs:
raise TypeError("%s that do not match type %s inferred from "
"earlier arguments." %
(prefix, dtype.name))
else:
raise TypeError("%s that don't all match." % prefix)
else:
raise TypeError(
"%s that are invalid. Tensors: %s" % (prefix, values))
types = [x.dtype for x in values]
inputs.extend(values)
else:
# In cases where we have an expected type, try to convert non-Tensor
# arguments to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
elif input_arg.type_attr in default_type_attr_map:
# The dtype could not be inferred solely from the inputs,
# so we prefer the attr's default, so code that adds a new attr
# with a default is backwards compatible.
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
values = ops.internal_convert_to_tensor(
values,
name=input_arg.name,
dtype=dtype,
as_ref=input_arg.is_ref,
preferred_dtype=default_dtype)
except TypeError as err:
if dtype is None:
raise err
else:
raise TypeError(
"Expected %s passed to parameter '%s' of op '%s', got %s of "
"type '%s' instead. Error: %s" %
(dtypes.as_dtype(dtype).name, input_arg.name, op_type_name,
repr(values), type(values).__name__, err))
except ValueError:
# What type does convert_to_tensor think it has?
try:
observed = ops.internal_convert_to_tensor(
values, as_ref=input_arg.is_ref).dtype.name
except ValueError as err:
raise ValueError(
"Tried to convert '%s' to a tensor and failed. Error: %s" %
(input_name, err))
prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
(input_name, op_type_name, observed))
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s expected type of %s." %
(prefix, dtypes.as_dtype(input_arg.type).name))
else:
# Update the maps with the default, if needed.
k = input_arg.type_attr
if k in default_type_attr_map:
if k not in attrs:
attrs[k] = default_type_attr_map[k]
if k not in inferred_from:
inferred_from[k] = "Default in OpDef"
raise TypeError(
"%s type %s of argument '%s'." %
(prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
types = [values.dtype]
inputs.append(values)
base_types = [x.base_dtype for x in types]
if input_arg.number_attr:
# <number-attr> * <type> or <number-attr> * <type-attr>
if input_arg.number_attr in attrs:
if len(values) != attrs[input_arg.number_attr]:
raise ValueError(
"List argument '%s' to '%s' Op with length %d must match "
"length %d of argument '%s'." %
(input_name, op_type_name, len(values),
attrs[input_arg.number_attr],
inferred_from[input_arg.number_attr]))
else:
attrs[input_arg.number_attr] = len(values)
inferred_from[input_arg.number_attr] = input_name
num_attr = _Attr(op_def, input_arg.number_attr)
if num_attr.has_minimum and len(values) < num_attr.minimum:
raise ValueError(
"List argument '%s' to '%s' Op with length %d shorter "
"than minimum length %d." %
(input_name, op_type_name, len(values), num_attr.minimum))
# All tensors must have the same base type.
if any(bt != base_types[0] for bt in base_types):
raise TypeError(
"All tensors passed to '%s' of '%s' Op "
"must have the same type." %
(input_name, op_type_name))
if input_arg.type != types_pb2.DT_INVALID:
# <number-attr> * <type> case
if base_types and base_types[0] != input_arg.type:
assert False, "Unreachable"
elif input_arg.type_attr in attrs:
# <number-attr> * <type-attr> case, where <type-attr> already
# has an inferred value.
if base_types and base_types[0] != attrs[input_arg.type_attr]:
assert False, "Unreachable"
else:
# <number-attr> * <type-attr> case, where we are now setting
# the <type-attr> based on this input
if not base_types:
raise TypeError(
"Don't know how to infer type variable from empty input "
"list passed to input '%s' of '%s' Op." %
(input_name, op_type_name))
attrs[input_arg.type_attr] = base_types[0]
inferred_from[input_arg.type_attr] = input_name
type_attr = _Attr(op_def, input_arg.type_attr)
_SatisfiesTypeConstraint(base_types[0], type_attr,
param_name=input_name)
elif input_arg.type_attr:
# <type-attr>
attr_value = base_types[0]
if input_arg.type_attr in attrs:
if attrs[input_arg.type_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type %s that does not "
"match type %s of argument '%s'." %
(input_name, op_type_name, dtypes.as_dtype(attr_value).name,
dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_attr),
param_name=input_name)
attrs[input_arg.type_attr] = attr_value
inferred_from[input_arg.type_attr] = input_name
elif input_arg.type_list_attr:
# <type-list-attr>
attr_value = base_types
if input_arg.type_list_attr in attrs:
if attrs[input_arg.type_list_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type list of %s that does not "
"match type list %s of argument '%s'." %
(input_name, op_type_name,
", ".join(dtypes.as_dtype(x).name for x in attr_value),
", ".join(dtypes.as_dtype(x).name
for x in attrs[input_arg.type_list_attr]),
inferred_from[input_arg.type_list_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_list_attr),
param_name=input_name)
attrs[input_arg.type_list_attr] = attr_value
inferred_from[input_arg.type_list_attr] = input_name
else:
# single Tensor with specified type
if base_types[0] != input_arg.type:
assert False, "Unreachable"
if input_arg.is_ref:
if not all(x._is_ref_dtype for x in types): # pylint: disable=protected-access
raise TypeError(
("'%s' Op requires that input '%s' be a mutable tensor "
"(e.g.: a tf.Variable)") % (op_type_name, input_name))
input_types.extend(types)
else:
input_types.extend(base_types)
# Process remaining attrs
for attr in op_def.attr:
# Skip attrs that have already had their values inferred
if attr.name in attrs:
if attr.name in keywords:
raise TypeError(
"Should not specify value for inferred attr '%s'." % attr.name)
continue
if attr.name in keywords:
attrs[attr.name] = keywords.pop(attr.name)
elif attr.name + "_" in keywords:
# Attrs whose names match Python keywords have an extra '_'
# appended, so we must check for that as well.
attrs[attr.name] = keywords.pop(attr.name + "_")
else:
raise TypeError("No argument for attr " + attr.name)
# Convert attr values to AttrValue protos.
attr_protos = {}
for attr_def in op_def.attr:
key = attr_def.name
value = attrs[key]
attr_value = attr_value_pb2.AttrValue()
if attr_def.HasField("default_value") and value is None:
attr_value.CopyFrom(attr_def.default_value)
attr_protos[key] = attr_value
continue
if attr_def.type.startswith("list("):
if not _IsListValue(value):
raise TypeError("Expected list for attr " + key)
if attr_def.has_minimum:
if len(value) < attr_def.minimum:
raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
"less than minimum %d." %
(key, op_type_name, len(value),
attr_def.minimum))
attr_value.list.SetInParent()
if attr_def.type == "string":
attr_value.s = _MakeStr(value, key)
if attr_def.HasField("allowed_values"):
if attr_value.s not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(attr_value.s),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "list(string)":
attr_value.list.s.extend([_MakeStr(x, key) for x in value])
if attr_def.HasField("allowed_values"):
for x in attr_value.list.s:
if x not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(x),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "int":
attr_value.i = _MakeInt(value, key)
if attr_def.has_minimum:
if attr_value.i < attr_def.minimum:
raise ValueError(
"Attr '%s' of '%s' Op passed %d less than minimum %d." %
(key, op_type_name, attr_value.i, attr_def.minimum))
elif attr_def.type == "list(int)":
attr_value.list.i.extend([_MakeInt(x, key) for x in value])
elif attr_def.type == "float":
attr_value.f = _MakeFloat(value, key)
elif attr_def.type == "list(float)":
attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
elif attr_def.type == "bool":
attr_value.b = _MakeBool(value, key)
elif attr_def.type == "list(bool)":
attr_value.list.b.extend([_MakeBool(x, key) for x in value])
elif attr_def.type == "type":
attr_value.type = _MakeType(value, attr_def)
elif attr_def.type == "list(type)":
attr_value.list.type.extend(
[_MakeType(x, attr_def) for x in value])
elif attr_def.type == "shape":
attr_value.shape.CopyFrom(_MakeShape(value, key))
elif attr_def.type == "list(shape)":
attr_value.list.shape.extend(
[_MakeShape(x, key) for x in value])
elif attr_def.type == "tensor":
attr_value.tensor.CopyFrom(_MakeTensor(value, key))
elif attr_def.type == "list(tensor)":
attr_value.list.tensor.extend(
[_MakeTensor(x, key) for x in value])
elif attr_def.type == "func":
attr_value.func.CopyFrom(_MakeFunc(value, key))
elif attr_def.type == "list(func)":
attr_value.list.func.extend([_MakeFunc(x, key) for x in value])
else:
raise TypeError("Unrecognized Attr type " + attr_def.type)
attr_protos[key] = attr_value
del attrs # attrs is no longer authoritative, use attr_protos instead
# Determine output types (possibly using attrs)
output_structure = []
for arg in op_def.output_arg:
if arg.number_attr:
n = _AttrValue(attr_protos, arg.number_attr).i
output_structure.append(n)
elif arg.type_attr:
t = _AttrValue(attr_protos, arg.type_attr)
output_structure.append(None)
elif arg.type_list_attr:
t = _AttrValue(attr_protos, arg.type_list_attr)
output_structure.append(len(t.list.type))
else:
output_structure.append(None)
if keywords:
raise TypeError("apply_op() got unexpected keyword arguments: " +
", ".join(sorted(keywords.keys())))
# NOTE(mrry): We add an explicit colocation constraint between
# the newly created op and any of its reference-typed inputs.
must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
if arg.is_ref]
with _MaybeColocateWith(must_colocate_inputs):
# Add Op to graph
# pylint: disable=protected-access
op = g._create_op_internal(op_type_name, inputs, dtypes=None,
name=scope, input_types=input_types,
attrs=attr_protos, op_def=op_def)
# Conditionally invoke tfdbg v2's op callback(s).
if op_callbacks.should_invoke_op_callbacks():
callback_outputs = op_callbacks.invoke_op_callbacks(
op.node_def.op, tuple(op.inputs), attr_protos, tuple(op.outputs),
op_name=op.name, graph=g)
if callback_outputs is not None:
for slot_index, callback_output in enumerate(callback_outputs):
op.outputs[slot_index] = callback_output
return output_structure, op_def.is_stateful, op
# pylint: enable=invalid-name
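# Hedged usage sketch (not part of the original module): registering ops and
# instantiating one. `some_op_list` (an op_def_pb2.OpList), the op name, and
# its input are assumptions for illustration; apply_op only works for ops whose
# OpDef has been registered via add_op/add_op_list.
# op_def_lib = OpDefLibrary()
# op_def_lib.add_op_list(some_op_list)
# y = op_def_lib.apply_op("Identity", input=x, name="identity_example")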
|
the-stack_0_26200
|
# Copyright (c) 2017-2019 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2007 The Regents of The University of Michigan
# Copyright (c) 2010 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Andreas Sandberg
from __future__ import print_function
from __future__ import absolute_import
import m5
import math
import sys
import _m5.stats
from m5.objects import Root
from m5.params import isNullPointer
from m5.util import attrdict, fatal
import m5.mcpat as mcpat
from m5.mcpat.autogen import generate_xml
import m5.vpi_shm as vpi_shm
import time
# Stat exports
from _m5.stats import schedStatEvent as schedEvent
from _m5.stats import periodicStatDump
outputList = []
# Dictionary of stat visitor factories populated by the _url_factory
# visitor.
factories = { }
# List of all factories. Contains tuples of (factory, schemes,
# enabled).
all_factories = []
def _url_factory(schemes, enable=True):
"""Wrap a plain Python function with URL parsing helpers
Wrap a plain Python function f(fn, **kwargs) to expect a URL that
has been split using urlparse.urlsplit. First positional argument
is assumed to be a filename, this is created as the concatenation
of the netloc (~hostname) and path in the parsed URL. Keyword
arguments are derived from the query values in the URL.
Arguments:
schemes: A list of URL schemes to use for this function.
Keyword arguments:
enable: Enable/disable this factory. Typically used when the
presence of a function depends on some runtime property.
For example:
wrapped_f(urlparse.urlsplit("text://stats.txt?desc=False")) ->
f("stats.txt", desc=False)
"""
from functools import wraps
def decorator(func):
@wraps(func)
def wrapper(url):
try:
from urllib.parse import parse_qs
except ImportError:
# Python 2 fallback
from urlparse import parse_qs
from ast import literal_eval
qs = parse_qs(url.query, keep_blank_values=True)
# parse_qs returns a list of values for each parameter. Only
# use the last value since kwargs don't allow multiple values
# per parameter. Use literal_eval to transform string param
# values into proper Python types.
def parse_value(key, values):
if len(values) == 0 or (len(values) == 1 and not values[0]):
fatal("%s: '%s' doesn't have a value." % (
url.geturl(), key))
elif len(values) > 1:
fatal("%s: '%s' has multiple values." % (
url.geturl(), key))
else:
try:
return key, literal_eval(values[0])
except ValueError:
fatal("%s: %s isn't a valid Python literal" \
% (url.geturl(), values[0]))
kwargs = dict([ parse_value(k, v) for k, v in qs.items() ])
try:
return func("%s%s" % (url.netloc, url.path), **kwargs)
except TypeError:
fatal("Illegal stat visitor parameter specified")
all_factories.append((wrapper, schemes, enable))
for scheme in schemes:
assert scheme not in factories
factories[scheme] = wrapper if enable else None
return wrapper
return decorator
@_url_factory([ None, "", "text", "file", ])
def _textFactory(fn, desc=True):
from m5 import options
"""Output stats in text format.
Text stat files contain one stat per line with an optional
description. The description is enabled by default, but can be
disabled by setting the desc parameter to False.
Parameters:
* desc (bool): Output stat descriptions (default: True)
Example:
text://stats.txt?desc=False
"""
return _m5.stats.initText(fn, desc, not options.stats_disable_file_io,
options.write_stripped_stats)
@_url_factory([ "h5", ], enable=hasattr(_m5.stats, "initHDF5"))
def _hdf5Factory(fn, chunking=10, desc=True, formulas=True):
"""Output stats in HDF5 format.
The HDF5 file format is a structured binary file format. It has
the multiple benefits over traditional text stat files:
* Efficient storage of time series (multiple stat dumps)
* Fast lookup of stats
* Plenty of existing tooling (e.g., Python libraries and graphical
viewers)
* File format can be used to store frame buffers together with
normal stats.
There are some drawbacks compared to the default text format:
* Large startup cost (single stat dump larger than text equivalent)
* Stat dumps are slower than text
Known limitations:
* Distributions and histograms currently unsupported.
* No support for forking.
Parameters:
* chunking (unsigned): Number of time steps to pre-allocate (default: 10)
* desc (bool): Output stat descriptions (default: True)
* formulas (bool): Output derived stats (default: True)
Example:
h5://stats.h5?desc=False;chunking=100;formulas=False
"""
return _m5.stats.initHDF5(fn, chunking, desc, formulas)
def addStatVisitor(url):
"""Add a stat visitor specified using a URL string
Stat visitors are specified using URLs on the following format:
format://path[?param=value[;param=value]]
The available formats are listed in the factories list. Factories
are called with the path as the first positional parameter and the
parameters are keyword arguments. Parameter values must be valid
Python literals.
"""
try:
from urllib.parse import urlsplit
except ImportError:
# Python 2 fallback
from urlparse import urlsplit
parsed = urlsplit(url)
try:
factory = factories[parsed.scheme]
except KeyError:
fatal("Illegal stat file type '%s' specified." % parsed.scheme)
if factory is None:
fatal("Stat type '%s' disabled at compile time" % parsed.scheme)
outputList.append(factory(parsed))
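# Illustrative sketch: how a simulation script would typically register stat
# visitors using the URL syntax documented in the factory docstrings above.
# This assumes the module is importable as `m5.stats` inside a gem5 Python
# environment; the file names below are placeholders.
#
#     import m5.stats
#     m5.stats.addStatVisitor("text://stats.txt?desc=False")
#     m5.stats.addStatVisitor("h5://stats.h5?chunking=100;formulas=False")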
def printStatVisitorTypes():
"""List available stat visitors and their documentation"""
import inspect
def print_doc(doc):
for line in doc.splitlines():
print("| %s" % line)
print()
enabled_visitors = [ x for x in all_factories if x[2] ]
for factory, schemes, _ in enabled_visitors:
print("%s:" % ", ".join(filter(lambda x: x is not None, schemes)))
# Try to extract the factory doc string
print_doc(inspect.getdoc(factory))
def initSimStats():
_m5.stats.initSimStats()
_m5.stats.registerPythonStatsHandlers()
def _visit_groups(visitor, root=None):
if root is None:
root = Root.getInstance()
for group in root.getStatGroups().values():
visitor(group)
_visit_groups(visitor, root=group)
def _visit_stats(visitor, root=None):
def for_each_stat(g):
for stat in g.getStats():
visitor(g, stat)
_visit_groups(for_each_stat, root=root)
def _bindStatHierarchy(root):
def _bind_obj(name, obj):
if isNullPointer(obj):
return
if m5.SimObject.isSimObjectVector(obj):
if len(obj) == 1:
_bind_obj(name, obj[0])
else:
for idx, obj in enumerate(obj):
_bind_obj("{}{}".format(name, idx), obj)
else:
            # We need this check because not every obj.getCCObject() is an
            # instance of Stat::Group. For example, sc_core::sc_module, the C++
            # class of SystemC_ScModule, is not a subclass of Stat::Group, so
            # calling addStatGroup() would raise a type error if obj were a
            # SystemC_ScModule.
if isinstance(obj.getCCObject(), _m5.stats.Group):
parent = root
while parent:
if hasattr(parent, 'addStatGroup'):
parent.addStatGroup(name, obj.getCCObject())
break
parent = parent.get_parent();
_bindStatHierarchy(obj)
for name, obj in root._children.items():
_bind_obj(name, obj)
names = []
stats_dict = {}
stats_list = []
def enable():
'''Enable the statistics package. Before the statistics package is
enabled, all statistics must be created and initialized and once
the package is enabled, no more statistics can be created.'''
def check_stat(group, stat):
if not stat.check() or not stat.baseCheck():
fatal("statistic '%s' (%d) was not properly initialized " \
"by a regStats() function\n", stat.name, stat.id)
if not (stat.flags & flags.display):
stat.name = "__Stat%06d" % stat.id
# Legacy stat
global stats_list
stats_list = list(_m5.stats.statsList())
for stat in stats_list:
check_stat(None, stat)
stats_list.sort(key=lambda s: s.name.split('.'))
for stat in stats_list:
stats_dict[stat.name] = stat
stat.enable()
# New stats
_visit_stats(check_stat)
_visit_stats(lambda g, s: s.enable())
_m5.stats.enable();
def prepare():
'''Prepare all stats for data access. This must be done before
dumping and serialization.'''
# Legacy stats
for stat in stats_list:
stat.prepare()
# New stats
_visit_stats(lambda g, s: s.prepare())
stat_strings = []
def _dump_to_visitor(visitor, root=None):
global stat_strings
# Legacy stats
if root is None:
for stat in stats_list:
stat_strings.append(stat.visit(visitor))
# New stats
def dump_group(group):
global stat_strings
for stat in group.getStats():
stat_strings.append(stat.visit(visitor))
for n, g in group.getStatGroups().items():
visitor.beginGroup(n) #visitor is type output
dump_group(g)
visitor.endGroup()
if root is not None:
for p in root.path_list():
visitor.beginGroup(p)
dump_group(root if root is not None else Root.getInstance())
if root is not None:
for p in reversed(root.path_list()):
visitor.endGroup()
lastDump = 0
numDump = 0
init_ncsim = True
lastVoltage = 0
lastCurrent = 0
runtime_begin_profile=False
profiling = False
lv = 0
committedInstrs = 0
def beginProfile():
global runtime_begin_profile
runtime_begin_profile = True
def get_current():
if not profiling:
return 0
global lastCurrent
return lastCurrent
def get_core_runtime_dynamic(core_id = 0):
path = "Processor:Core"+str(core_id)
return mcpat.get_runtime_dynamic(path)
def get_total_runtime_dynamic():
path = "Processor"
return mcpat.get_runtime_dynamic(path)
def get_voltage():
if not profiling:
return 1
global lastVoltage
return lastVoltage
def get_profiling():
return runtime_begin_profile
def setCommittedInstr(num):
global committedInstrs
committedInstrs = num
def dump(root=None, exit=False):
'''Dump all statistics data to the registered outputs'''
from m5 import options
now = m5.curTick()
global lastDump
global numDump
assert lastDump <= now
global stat_strings
stat_strings = []
new_dump = lastDump != now
lastDump = now
# Don't allow multiple global stat dumps in the same tick. It's
    # still possible to dump multiple sub-trees.
if not new_dump and root is None:
return
numDump+=1
if new_dump:
_m5.stats.processDumpQueue()
sim_root = Root.getInstance()
if sim_root:
sim_root.preDumpStats();
prepare()
for output in outputList:
if output.valid():
output.begin()
_dump_to_visitor(output, root=root)
output.end()
# max_instr = options.power_profile_instrs
max_dump = options.power_profile_duration
print("Num Dumps: ",numDump)
if(numDump == max_dump or exit):
print("Ending after "+str(numDump)+
" datapoints")
sys.exit()
def dump_verilog(root=None, exit=False):
print("******************DUMP_VERILOG******************")
'''Dump all statistics data to the registered outputs'''
from m5 import options
now = m5.curTick()
global lastDump
global numDump
global init_ncsim
global lastVoltage
global lastCurrent
global runtime_begin_profile
global committedInstrs
global profiling
global lv
assert lastDump <= now
global stat_strings
stat_strings = []
new_dump = lastDump != now
lastDump = now
# Don't allow multiple global stat dumps in the same tick. It's
    # still possible to dump multiple sub-trees.
# if not new_dump and root is None:
# return 0
if(options.mcpat_enable):
if((options.power_profile_start != -1 and
now >= options.power_profile_start) or
runtime_begin_profile):
mcpat.set_flags(options.mcpat_use_fg_pg, \
options.mcpat_scale_factor)
# profiling = True
# numDump += 1
new_dump = True
if new_dump:
_m5.stats.processDumpQueue()
sim_root = Root.getInstance()
if sim_root:
sim_root.preDumpStats(); #predumping does nothing unless overriden
prepare() #prepare for scalar stats does nothing: statistic.h - class statstor.prepare()
for output in outputList:
if output.valid():
stat_strings.append(output.begin())
_dump_to_visitor(output, root=root)
stat_strings.append(output.end())
#print("".join(stat_strings))
#sys.exit(1)
# Initialilze the Verilog Sim:
power = 0
resistance = 0
voltage = 0
current = 0
mp_v = vpi_shm.mp_get_voltage_set()
mp_f = []
for i in range(vpi_shm.mp_get_ncores()):
mp_f.append(vpi_shm.mp_get_freq(i))
if(options.ncverilog_enable):
if init_ncsim:
# Run an Initial McPAT stats run with 1.0v
mcpat.m5_to_mcpat(stat_strings,\
options.stats_read_from_file, mp_v, mp_f, \
380.0, options.mcpat_device_type)
resistance = mcpat.get_last_r(mp_v, \
options.mcpat_use_fg_pg, \
options.mcpat_scale_factor)
current = mcpat.get_last_i(mp_v, \
options.mcpat_use_fg_pg, \
options.mcpat_scale_factor)
power = mcpat.get_last_p(mp_v, \
options.mcpat_use_fg_pg, \
options.mcpat_scale_factor)
# Run Init and warmup PowerSupply
vpi_shm.initialize(options.mcpat_testname)
for i in range(int(options.ncverilog_warmup)):
vpi_shm.set_driver_signals(current, 0)
lv = vpi_shm.get_voltage()
lastVoltage=lv
lastCurrent=vpi_shm.get_current()
vpi_shm.ack_supply()
init_ncsim = False
else:
if options.ncverilog_feedback:
mcpat.m5_to_mcpat(stat_strings,\
options.stats_read_from_file, lv, mp_f, \
380.0, options.mcpat_device_type)
resistance = mcpat.get_last_r(lv, \
options.mcpat_use_fg_pg, \
options.mcpat_scale_factor)
current = mcpat.get_last_i(lv, \
options.mcpat_use_fg_pg, \
options.mcpat_scale_factor)
power = mcpat.get_last_p(lv, \
options.mcpat_use_fg_pg, \
options.mcpat_scale_factor)
else:
mcpat.m5_to_mcpat(stat_strings,\
options.stats_read_from_file, mp_v, mp_f, \
380.0, options.mcpat_device_type)
resistance = mcpat.get_last_r(mp_v, \
options.mcpat_use_fg_pg, \
options.mcpat_scale_factor)
current = mcpat.get_last_i(mp_v, \
options.mcpat_use_fg_pg, \
options.mcpat_scale_factor)
power = mcpat.get_last_p(mp_v, \
options.mcpat_use_fg_pg, \
options.mcpat_scale_factor)
vpi_shm.set_driver_signals(current, 0)
lv = vpi_shm.get_voltage()
lastVoltage=lv
lastCurrent=vpi_shm.get_current()
vpi_shm.ack_supply()
else:
mcpat.m5_to_mcpat(stat_strings,\
options.stats_read_from_file, 1.4, [4000], \
380.0, options.mcpat_device_type)
# max_dump = options.power_profile_duration
# max_instr = options.power_profile_instrs
# if(numDump == max_dump or exit or committedInstrs >= max_instr):
# mcpat.dump()
# runtime_begin_profile = False
# print("Ending after "+str(numDump)+
# " stat dumps")
# # Clean up simulation:
# if(options.ncverilog_enable):
# current = mcpat.get_last_i(mp_v)
# vpi_shm.set_driver_signals(current, 1)
# lastVoltage=vpi_shm.get_voltage()
# lastCurrent=vpi_shm.get_current()
# vpi_shm.ack_supply()
# sys.exit()
else:
if new_dump:
_m5.stats.processDumpQueue()
sim_root = Root.getInstance()
if sim_root:
sim_root.preDumpStats();
prepare()
for output in outputList:
if output.valid():
output.begin()
_dump_to_visitor(output, root=root)
output.end()
power_ret = mcpat.get_last_p(voltage=1.4, power_gating=True, scale_factor=1.0)
if power_ret is None:
return 0
return power_ret
def create_xml(frequency):
from m5 import options
import os
global stat_strings
stat_strings = []
_m5.stats.processDumpQueue()
sim_root = Root.getInstance()
if sim_root:
sim_root.preDumpStats(); #predumping does nothing unless overriden
prepare() #prepare for scalar stats does nothing: statistic.h - class statstor.prepare()
for output in outputList:
if output.valid():
stat_strings.append(output.begin())
_dump_to_visitor(output, None)
stat_strings.append(output.end())
m5_stats_file = os.path.join(options.outdir, options.stats_file)
m5_config_file = os.path.join(options.outdir, options.dump_config)
mcpat_output_path = os.path.join(options.mcpat_out, options.mcpat_testname)
if not os.path.isdir(mcpat_output_path):
os.mkdir(mcpat_output_path)
#TODO jimmy change these stats from hardcoded to params
fr = [4E9 / 1E6]
i_f = os.path.join(mcpat_output_path,"serial_mp.xml")
generate_xml(m5_stats_file, m5_config_file, i_f, stat_strings, \
True, voltage=1.4, frequency=fr, temperature=380.0, \
device_type=options.mcpat_device_type)
def reset():
'''Reset all statistics to the base state'''
# call reset stats on all SimObjects
root = Root.getInstance()
if root:
root.resetStats()
# call any other registered legacy stats reset callbacks
for stat in stats_list:
stat.reset()
_m5.stats.processResetQueue()
flags = attrdict({
'none' : 0x0000,
'init' : 0x0001,
'display' : 0x0002,
'total' : 0x0010,
'pdf' : 0x0020,
'cdf' : 0x0040,
'dist' : 0x0080,
'nozero' : 0x0100,
'nonan' : 0x0200,
})
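# A minimal sketch of how these bit flags are typically combined and tested,
# following the `stat.flags & flags.display` usage in enable() above; the
# variable names here are purely illustrative.
#
#     display_total = flags.display | flags.total
#     if stat.flags & flags.nozero:
#         pass  # e.g. skip zero-valued entries when formatting output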
|
the-stack_0_26202
|
#!/usr/bin/env python3
from include.common import *
analyze_up_to = 5000
print_table_up_to = 10
def print_builtin_decision(csv_file, id, ids, i, print_latex_table, decision):
query = "SELECT ID, BUILTIN_NAME, count FROM UniqueBuiltinCounts WHERE ID = ?"
result = c.execute(query, (id,)).fetchone()
total_count_percentage_query = "SELECT 100-COUNT(DISTINCT GITHUB_PROJECT_ID) * 100.0 / (SELECT COUNT(DISTINCT GITHUB_PROJECT_ID) FROM TempUniqueBuiltinsPerProject), (SELECT COUNT(DISTINCT GITHUB_PROJECT_ID) FROM TempUniqueBuiltinsPerProject) - COUNT(DISTINCT GITHUB_PROJECT_ID) FROM TempUniqueBuiltinsPerProject WHERE ID NOT IN (" + "?," * (len(ids)-1) + "?)"
total_count_percentage = c.execute(total_count_percentage_query, ids).fetchone()
if i <= print_table_up_to and print_latex_table:
print("%s & %0.2f & %d \\\\" % (escape_latex(result[1]), total_count_percentage[0], total_count_percentage[1]))
csv_file.write("%d;%0.2f;%s;%s\n" % (i, total_count_percentage[0], decision, result[1]))
csv_file.flush()
def get_nr_unsupported_projects_temp(project_ids):
if len(project_ids) == 0:
count_unsupported_projects_query = "SELECT COUNT(DISTINCT GITHUB_PROJECT_ID) FROM TempUniqueBuiltinsPerProject"
else:
count_unsupported_projects_query = "SELECT COUNT(DISTINCT GITHUB_PROJECT_ID) FROM TempUniqueBuiltinsPerProject WHERE ID NOT IN (" + "?," * (len(project_ids)-1) + "?)"
count_unsupported_projects = c.execute(count_unsupported_projects_query, project_ids).fetchone()[0]
return count_unsupported_projects
def create_temp_table():
c.execute("""
DROP TABLE IF EXISTS TempUniqueBuiltinsPerProject
""")
c.execute("""
CREATE TABLE TempUniqueBuiltinsPerProject AS
SELECT * FROM UniqueBuiltinsPerProject WHERE BUILTIN_CATEGORY IS NOT 'Unknown'
""")
conn.commit()
def drop_temp_table():
c.execute("""
DROP TABLE TempUniqueBuiltinsPerProject
""")
conn.commit()
def compute(file_name, assume_libc_builtins_to_already_be_implemented, assume_platform_specific_builtins_to_already_be_implemented, print_latex_table=False):
csv_file = open(file_name, 'w')
csv_file.write('nr_builtins;perc_projects;decision;name\n')
create_temp_table()
builtin_ids, selected_ids = get_start_configuration(assume_libc_builtins_to_already_be_implemented, assume_platform_specific_builtins_to_already_be_implemented)
if print_latex_table:
print_tabular_start(name="implementationordertable", columns=3, caption="Greedy order of implementing builtins and cumulative percentage and number of supported projects")
print("Builtin & \% projects & \# projects \\\\")
print("\\midrule{}%")
min_progress = 1
for i in range(1, analyze_up_to+1):
if len(builtin_ids) == 0:
break
min_unsupported_projects_count = None
min_unsupported_projects_id = None
unsupported_without_new_project = get_nr_unsupported_projects_temp(selected_ids)
for builtin_id in builtin_ids:
test_selected_ids = selected_ids + [builtin_id]
count_unsupported_projects = get_nr_unsupported_projects_temp(test_selected_ids)
if (min_unsupported_projects_count is None or count_unsupported_projects < min_unsupported_projects_count) and (unsupported_without_new_project >= count_unsupported_projects - min_progress):
min_unsupported_projects_id = builtin_id
min_unsupported_projects_count = count_unsupported_projects
no_greedy_candidate = min_unsupported_projects_id is None
if no_greedy_candidate:
min_unsupported_projects_id = c.execute("SELECT ID FROM TempUniqueBuiltinsPerProject WHERE ID NOT IN (" + "?," * (len(selected_ids)-1) + "?) ORDER BY github_project_count DESC LIMIT 1 ", selected_ids).fetchone()[0]
selected_ids += [min_unsupported_projects_id]
print_builtin_decision(csv_file, min_unsupported_projects_id, selected_ids, i, print_latex_table, 'most-frequent' if no_greedy_candidate else 'greedy')
builtin_ids.remove(min_unsupported_projects_id)
if print_latex_table:
print_tabular_end(label="tbl:implementationorder")
csv_file.close()
drop_temp_table()
def get_start_configuration(assume_libc_builtins_to_already_be_implemented, assume_platform_specific_builtins_to_already_be_implemented):
if assume_platform_specific_builtins_to_already_be_implemented and assume_libc_builtins_to_already_be_implemented:
builtin_candidates = 'SELECT ID FROM UniqueBuiltinCounts WHERE BUILTIN_CATEGORY IS NOT "Unknown" AND MACHINE_SPECIFIC IS NOT 1 AND BUILTIN_CATEGORY IS NOT "other-libc"'
start_ids = [row[0] for row in c.execute('SELECT ID FROM UniqueBuiltinCounts WHERE MACHINE_SPECIFIC IS NOT 1 OR BUILTIN_CATEGORY IS "other-libc"').fetchall()]
elif assume_platform_specific_builtins_to_already_be_implemented:
builtin_candidates = 'SELECT ID FROM UniqueBuiltinCounts WHERE BUILTIN_CATEGORY IS NOT "Unknown" AND MACHINE_SPECIFIC IS NOT 1'
start_ids = [row[0] for row in c.execute('SELECT ID FROM UniqueBuiltinCounts WHERE MACHINE_SPECIFIC IS 1').fetchall()]
elif assume_libc_builtins_to_already_be_implemented:
builtin_candidates = 'SELECT ID FROM UniqueBuiltinCounts WHERE BUILTIN_CATEGORY IS NOT "Unknown" AND BUILTIN_CATEGORY IS NOT "other-libc"'
start_ids = [row[0] for row in c.execute('SELECT ID FROM UniqueBuiltinCounts WHERE BUILTIN_CATEGORY IS "other-libc"').fetchall()]
else:
builtin_candidates = 'SELECT ID FROM UniqueBuiltinCounts WHERE BUILTIN_CATEGORY IS NOT "Unknown"'
start_ids = []
builtin_candidate_ids = [row[0] for row in c.execute(builtin_candidates).fetchall()]
return (builtin_candidate_ids, start_ids)
# Selects the implementation order by choosing the most frequent builtin
def by_frequency(file_name, assume_libc_builtins_to_already_be_implemented, assume_platform_specific_builtins_to_already_be_implemented):
create_temp_table()
csv_file = open(file_name, 'w')
csv_file.write('nr_builtins;perc_projects;decision;name\n')
builtin_ids, selected_ids = get_start_configuration(assume_libc_builtins_to_already_be_implemented, assume_platform_specific_builtins_to_already_be_implemented)
for i in range(1, analyze_up_to+1):
if len(builtin_ids) == 0:
break
current_id = builtin_ids.pop(0)
selected_ids += [current_id]
count_unsupported_projects = get_nr_unsupported_projects_temp(selected_ids)
print_builtin_decision(csv_file, current_id, selected_ids, i, False, 'most-frequent')
csv_file.close()
drop_temp_table()
by_frequency(current_dir + '/../../generated/most-frequent-all.csv', False, False)
by_frequency(current_dir + '/../../generated/most-frequent-machine-independent.csv', False, True)
compute(current_dir + '/../../generated/greedy-all.csv', False, False, print_latex_table=True)
compute(current_dir + '/../../generated/greedy-machine-independent.csv', False, True)
|
the-stack_0_26204
|
"""
Custom singly linked list
"""
# link list node
class Node:
def __init__(self, elem, _next=None):
self.elem = elem
self.next = _next
class SingleLinkList:
def __init__(self):
self._head = None
def is_empty(self):
return self._head is None
def length(self):
cur = self._head
count = 0
while cur is not None:
count += 1
cur = cur.next
return count
def travel(self):
cur = self._head
while cur is not None:
print(cur.elem, end='\t')
cur = cur.next
print()
def add(self, elem):
"""
        Add an element at the head of the list
:param elem:
:return:
"""
node = Node(elem)
node.next = self._head
self._head = node
def append(self, elem):
"""
        Append an element at the tail of the list
:param elem:
:return:
"""
node = Node(elem)
if self.is_empty():
self._head = node
else:
cur = self._head
while cur.next is not None:
cur = cur.next
cur.next = node
def insert(self, elem, pos):
"""
指定位置添加
:param elem:
:param pos:
:return:
"""
# pos < 0 时
if pos <= 0:
self.add(elem)
# pos > max length 时
elif pos > (self.length() - 1):
self.append(elem)
# 插入指定位置
else:
node = Node(elem)
count = 0
pre = self._head
while count < (pos - 1):
count += 1
pre = pre.next
node.next = pre.next
pre.next = node
def remove(self, elem):
cur = self._head
pre = None
while cur is not None:
            # found the target element
            if cur.elem == elem:
                # if the first node is the target node
                if not pre:
                    self._head = cur.next
                else:
                    pre.next = cur.next
                break
            else:
                # move on to the next node
                pre = cur
cur = cur.next
def search(self, elem):
cur = self._head
while cur is not None:
if cur.elem == elem:
return True
cur = cur.next
return False
if __name__ == '__main__':
# test
ll = SingleLinkList()
ll.add(1)
ll.add(2)
ll.add(3)
ll.insert(4, 2)
ll.travel()
print(ll.search(4))
ll.remove(1)
ll.travel()
|
the-stack_0_26205
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#!/usr/bin/env python
import glob
import os
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "maskrcnn_benchmark", "csrc")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"maskrcnn_benchmark._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="maskrcnn_benchmark",
description="object detection in pytorch",
packages=find_packages(exclude=("configs", "tests",)),
# install_requires=requirements,
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension.with_options(use_ninja=False)},
)
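# Usage note (a sketch, not part of the package metadata above): with a
# setup.py like this, the C++/CUDA extension is normally built with standard
# setuptools/pip commands, for example:
#
#     python setup.py build_ext --inplace
#     pip install -e .
#
# Whether the CUDA sources are compiled depends on torch.cuda.is_available()
# and CUDA_HOME being set when get_extensions() runs, as implemented above.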
|
the-stack_0_26206
|
# Copyright (C) 2021 Open Source Robotics Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# You can plot the data generated by this program by following these
# steps.
#
# 1. Run this program and save the output to a file:
# ./rand_example normal > normal.data
# ./rand_example uniform > uniform.data
#
# 2. Use gnuplot to create a plot:
# gnuplot -c rand_view_normal.gp > normal.jpg
# gnuplot -c rand_view_uniform.gp > uniform.jpg
from ignition.math import Rand
import sys
if len(sys.argv) < 2:
    print("python rand_example [normal, uniform]")
else:
    for i in range(100000):
        value = 0
        if sys.argv[1] == "uniform":
            value = Rand.dbl_uniform(0, 1000)
        elif sys.argv[1] == "normal":
            value = Rand.dbl_normal(0, 100)
        print(value)
|
the-stack_0_26208
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_EllipsoidTestMicroprice [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_EllipsoidTestMicroprice&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=IIDHFmicroprice).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import arange, interp, floor, diff
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import save_plot
from autocorrelation import autocorrelation
from InvarianceTestEllipsoid import InvarianceTestEllipsoid
# -
# ## Load the database generated by the script S_HighFreqVolumeTime
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_HighFreqVolumeTime'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_HighFreqVolumeTime'),
squeeze_me=True) # generated by S_HighFreqVolumeTime
p_mic = db['p_mic'].reshape(1,-1)
t_k = db['t_k']
t_ms = db['t_ms']
q_t = db['q_t']
q = db['q']
# -
# ## Compute microprice time series
# +
delta_t = 2000 # selecting observations every 2 seconds
p_mic_t = p_mic[[0],::delta_t]
delta_q = floor((np.nanmax(q_t) - np.nanmin(q_t)) / p_mic_t.shape[1]) # width of activity time bins
volume_time = arange(np.nanmin(q_t),np.nanmax(q_t)+delta_q,delta_q).reshape(1,-1) # vector of volume times
t_q = interp(volume_time, q, t_k) # vector of wall clock time as a function of volume time
p_mic_q = interp(t_q, t_ms, p_mic[0]) # time changed microprice, i.e. microprice as a function of volume time
# -
# ## Compute microprice increments
delta_p_mic_t = diff(p_mic_t)
delta_p_mic_q = diff(p_mic_q)
# ## Compute autocorrelations at different lags
lag_ = 10
acf_t = autocorrelation(delta_p_mic_t, lag_)
acf_q = autocorrelation(delta_p_mic_q, lag_)
# ## Plot the results of the IID test
# +
lag = 10 # lag to be printed
ell_scale = 2 # ellipsoid radius coefficient
fit = 0 # normal fitting
f = figure(figsize=(12,6))  # increments of microprice
InvarianceTestEllipsoid(delta_p_mic_t, acf_t[0,1:], lag, fit, ell_scale, [], 'IID test on the increments of microprice');
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
f = figure(figsize=(12,6))  # increments of time-changed microprice
pos = {}
pos[0]= [.2, .45, .3866, .43] # scatter plot
pos[1]= [.2905, .12, .205, .2157] # epsi
pos[2]= [.045, .45, .1437, .43] # epsi_lagged
pos[3]= [.6, .45, .3366, .43] # autocorrelation
pos[4]= [.085, .228, .11, .1] # leg
InvarianceTestEllipsoid(delta_p_mic_q,acf_q[0,1:], lag, fit, ell_scale, pos, 'IID test on the increments of time-changed microprice');
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
|
the-stack_0_26210
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
from pathlib import Path
from typing import Callable, Dict, List, Optional
import nox
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
# DO NOT EDIT THIS FILE EVER!
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
BLACK_VERSION = "black==19.10b0"
# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
# with `noxfile_config.py`. Users will copy `noxfile_config.py` into
# their directory and modify it.
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
"ignored_versions": [],
# Old samples are opted out of enforcing Python type hints
# All new samples should feature them
"enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# If you need to use a specific version of pip,
# change pip_version_override to the string representation
# of the version number, for example, "20.2.4"
"pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {},
}
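# A minimal sketch of a user-supplied noxfile_config.py, using only the keys
# defined in TEST_CONFIG above; the concrete values are illustrative
# assumptions, and the env var name in "envs" is hypothetical.
#
#     TEST_CONFIG_OVERRIDE = {
#         "ignored_versions": ["3.6"],
#         "enforce_type_hints": True,
#         "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
#         "envs": {"EXAMPLE_BUCKET": "my-test-bucket"},
#     }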
try:
# Ensure we can import noxfile_config in the project's directory.
sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
TEST_CONFIG_OVERRIDE = {}
# Update the TEST_CONFIG with the user supplied values.
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE",
False) in (
"True",
"true",
)
# Error if a python version is missing
nox.options.error_on_missing_interpreters = True
#
# Style Checks
#
def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
    This is used when running the linter to ensure that import order is
properly checked.
"""
file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]
return [
basename
for basename, extension in file_ext_pairs
if extension == ".py"
or os.path.isdir(os.path.join(start_dir, basename))
and basename not in ("__pycache__")
]
# Linting with flake8.
#
# We ignore the following rules:
# E203: whitespace before ‘:’
# E266: too many leading ‘#’ for block comment
# E501: line too long
# I202: Additional newline in a section of imports
#
# We also need to specify the rules which are ignored by default:
# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
FLAKE8_COMMON_ARGS = [
"--show-source",
"--builtin=gettext",
"--max-complexity=20",
"--import-order-style=google",
"--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
"--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
"--max-line-length=88",
]
@nox.session
def lint(session: nox.sessions.Session) -> None:
if not TEST_CONFIG["enforce_type_hints"]:
session.install("flake8", "flake8-import-order")
else:
session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
".",
]
session.run("flake8", *args)
#
# Black
#
@nox.session
def blacken(session: nox.sessions.Session) -> None:
session.install(BLACK_VERSION)
python_files = [path for path in os.listdir(".") if path.endswith(".py")]
session.run("black", *python_files)
#
# Sample Tests
#
PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
def _session_tests(
session: nox.sessions.Session, post_install: Callable = None
) -> None:
if TEST_CONFIG["pip_version_override"]:
pip_version = TEST_CONFIG["pip_version_override"]
session.install(f"pip=={pip_version}")
"""Runs py.test for a particular project."""
if os.path.exists("requirements.txt"):
if os.path.exists("constraints.txt"):
session.install("-r", "requirements.txt", "-c", "constraints.txt")
else:
session.install("-r", "requirements.txt")
if os.path.exists("requirements-test.txt"):
if os.path.exists("constraints-test.txt"):
session.install("-r", "requirements-test.txt", "-c",
"constraints-test.txt")
else:
session.install("-r", "requirements-test.txt")
if INSTALL_LIBRARY_FROM_SOURCE:
session.install("-e", _get_repo_root())
if post_install:
post_install(session)
session.run(
"pytest",
*(PYTEST_COMMON_ARGS + session.posargs),
# Pytest will return 5 when no tests are collected. This can happen
# on travis where slow and flaky tests are excluded.
# See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
success_codes=[0, 5],
env=get_pytest_env_vars(),
)
@nox.session(python=ALL_VERSIONS)
def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
session.skip(
"SKIPPED: {} tests are disabled for this sample.".format(
session.python)
)
#
# Readmegen
#
def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
for i in range(10):
if p is None:
break
if Path(p / ".git").exists():
return str(p)
# .git is not available in repos cloned via Cloud Build
# setup.py is always in the library's root, so use that instead
# https://github.com/googleapis/synthtool/issues/792
if Path(p / "setup.py").exists():
return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")])
@nox.session
@nox.parametrize("path", GENERATED_READMES)
def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
if os.path.exists(os.path.join(dir_, "requirements.txt")):
session.install("-r", os.path.join(dir_, "requirements.txt"))
in_file = os.path.join(dir_, "README.rst.in")
session.run(
"python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py",
in_file
)
|
the-stack_0_26216
|
"""
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
from django.conf import settings
from django.http import HttpResponseForbidden
from django.utils.safestring import mark_safe
import md5
import re
import itertools
_ERROR_MSG = mark_safe('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"><body><h1>403 Forbidden</h1><p>Cross Site Request Forgery detected. Request aborted.</p></body></html>')
_POST_FORM_RE = \
re.compile(r'(<form\W[^>]*\bmethod=(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
_HTML_TYPES = ('text/html', 'application/xhtml+xml')
def _make_token(session_id):
return md5.new(settings.SECRET_KEY + session_id).hexdigest()
class CsrfMiddleware(object):
"""Django middleware that adds protection against Cross Site
Request Forgeries by adding hidden form fields to POST forms and
checking requests for the correct value.
In the list of middlewares, SessionMiddleware is required, and must come
after this middleware. CsrfMiddleWare must come after compression
middleware.
If a session ID cookie is present, it is hashed with the SECRET_KEY
setting to create an authentication token. This token is added to all
outgoing POST forms and is expected on all incoming POST requests that
have a session ID cookie.
If you are setting cookies directly, instead of using Django's session
framework, this middleware will not work.
"""
def process_request(self, request):
if request.method == 'POST':
try:
session_id = request.COOKIES[settings.SESSION_COOKIE_NAME]
except KeyError:
# No session, no check required
return None
csrf_token = _make_token(session_id)
# check incoming token
try:
request_csrf_token = request.POST['csrfmiddlewaretoken']
except KeyError:
return HttpResponseForbidden(_ERROR_MSG)
if request_csrf_token != csrf_token:
return HttpResponseForbidden(_ERROR_MSG)
return None
def process_response(self, request, response):
csrf_token = None
try:
cookie = response.cookies[settings.SESSION_COOKIE_NAME]
csrf_token = _make_token(cookie.value)
except KeyError:
# No outgoing cookie to set session, but
# a session might already exist.
try:
session_id = request.COOKIES[settings.SESSION_COOKIE_NAME]
csrf_token = _make_token(session_id)
except KeyError:
# no incoming or outgoing cookie
pass
if csrf_token is not None and \
response['Content-Type'].split(';')[0] in _HTML_TYPES:
# ensure we don't add the 'id' attribute twice (HTML validity)
idattributes = itertools.chain(("id='csrfmiddlewaretoken'",),
itertools.repeat(''))
def add_csrf_field(match):
"""Returns the matched <form> tag plus the added <input> element"""
return mark_safe(match.group() + "<div style='display:none;'>" + \
"<input type='hidden' " + idattributes.next() + \
" name='csrfmiddlewaretoken' value='" + csrf_token + \
"' /></div>")
# Modify any POST forms
response.content = _POST_FORM_RE.sub(add_csrf_field, response.content)
return response
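# A hedged configuration sketch following the ordering requirement in the
# class docstring: this middleware must come after any compression middleware
# and before SessionMiddleware. The dotted path to CsrfMiddleware is a
# placeholder for wherever this module lives in a project.
#
#     MIDDLEWARE_CLASSES = (
#         "django.middleware.gzip.GZipMiddleware",
#         "path.to.this.module.CsrfMiddleware",
#         "django.contrib.sessions.middleware.SessionMiddleware",
#     )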
|
the-stack_0_26217
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from extensions.front.Pack import Pack
from extensions.front.TransposeOrderNormalizer import TransposeOrderNormalizer
from extensions.front.eltwise_n import EltwiseNReplacement
from extensions.front.tf.pad_tf_to_pad import PadTFToPad
from extensions.ops.DetectionOutput import DetectionOutput
from extensions.ops.activation_ops import Sigmoid
from extensions.ops.priorbox_clustered import PriorBoxClusteredOp
from mo.front.common.partial_infer.utils import int64_array
from mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral
from mo.graph.graph import Graph, Node
from mo.middle.passes.convert_data_type import data_type_str_to_np
from mo.ops.concat import Concat
from mo.ops.const import Const
from mo.ops.reshape import Reshape
from mo.ops.result import Result
class EfficientDet(FrontReplacementFromConfigFileGeneral):
replacement_id = 'AutomlEfficientDet'
run_not_recursively = True
def run_before(self):
from extensions.front.ExpandDimsToUnsqueeze import ExpandDimsToUnsqueeze
return [ExpandDimsToUnsqueeze, Pack, TransposeOrderNormalizer, PadTFToPad, EltwiseNReplacement]
class AnchorGenerator:
def __init__(self, min_level, aspect_ratios, num_scales, anchor_scale):
self.min_level = min_level
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.scales = [2 ** (float(s) / num_scales) for s in range(num_scales)]
def get(self, layer_id):
widths = []
heights = []
for s in self.scales:
for a in self.aspect_ratios:
base_anchor_size = 2 ** (self.min_level + layer_id) * self.anchor_scale
heights.append(base_anchor_size * s * a[1])
widths.append(base_anchor_size * s * a[0])
return widths, heights
def transform_graph(self, graph: Graph, replacement_descriptions: dict):
parameter_node = graph.get_op_nodes(op='Parameter')[0]
parameter_node['data_type'] = data_type_str_to_np(parameter_node.graph.graph['cmd_params'].data_type)
parameter_node.out_port(0).disconnect()
# remove existing Result operations to remove unsupported sub-graph
graph.remove_nodes_from([node.id for node in graph.get_op_nodes(op='Result')] + ['detections'])
        # find the op that is the final result of applying the mean value and scale to the input tensor,
        # then connect it to the input of the first convolution of the model, so we remove the image
        # pre-processing (padding and resizing) from the model
preprocessing_input_node_id = replacement_descriptions['preprocessing_input_node']
assert preprocessing_input_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \
'should be a last node before image normalization and is specified' \
' in the json file.'.format(preprocessing_input_node_id)
preprocessing_input_node = Node(graph, preprocessing_input_node_id)
consumer_node = preprocessing_input_node.out_port(0).get_connection().get_destination().node
consumer_node.in_port(0).get_connection().set_source(parameter_node.out_port(0))
preprocessing_output_node_id = replacement_descriptions['preprocessing_output_node']
assert preprocessing_output_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \
'node should provide scaled image output and is specified' \
' in the json file.'.format(preprocessing_output_node_id)
preprocessing_output_node = Node(graph, preprocessing_output_node_id)
preprocessing_output_node.out_port(0).disconnect()
convolution_nodes = [n for n in graph.pseudo_topological_sort() if n.soft_get('type') == 'Convolution']
convolution_nodes[0].in_port(0).get_connection().set_source(preprocessing_output_node.out_port(0))
# create prior boxes (anchors) generator
aspect_ratios = replacement_descriptions['aspect_ratios']
assert len(aspect_ratios) % 2 == 0
aspect_ratios = list(zip(aspect_ratios[::2], aspect_ratios[1::2]))
priors_generator = self.AnchorGenerator(min_level=int(replacement_descriptions['min_level']),
aspect_ratios=aspect_ratios,
num_scales=int(replacement_descriptions['num_scales']),
anchor_scale=replacement_descriptions['anchor_scale'])
prior_boxes = []
for i in range(100):
inp_name = 'box_net/box-predict{}/BiasAdd'.format('_%d' % i if i else '')
if inp_name not in graph:
break
widths, heights = priors_generator.get(i)
prior_box_op = PriorBoxClusteredOp(graph, {'width': np.array(widths),
'height': np.array(heights),
'clip': 0, 'flip': 0,
'variance': replacement_descriptions['variance'],
'offset': 0.5})
prior_boxes.append(prior_box_op.create_node([Node(graph, inp_name), parameter_node]))
# concatenate prior box operations
concat_prior_boxes = Concat(graph, {'axis': -1}).create_node()
for idx, node in enumerate(prior_boxes):
concat_prior_boxes.add_input_port(idx)
concat_prior_boxes.in_port(idx).connect(node.out_port(0))
conf = Sigmoid(graph, dict(name='concat/sigmoid')).create_node([Node(graph, 'concat')])
reshape_size_node = Const(graph, {'value': int64_array([0, -1])}).create_node([])
logits = Reshape(graph, dict(name=conf.name + '/Flatten')).create_node([conf, reshape_size_node])
deltas = Reshape(graph, dict(name='concat_1/Flatten')).create_node([Node(graph, 'concat_1'), reshape_size_node])
# revert convolution boxes prediction weights from yxYX to xyXY (convolutions share weights and bias)
weights = Node(graph, 'box_net/box-predict/pointwise_kernel')
weights.value = weights.value.reshape(-1, 4)[:, [1, 0, 3, 2]].reshape(weights.shape)
bias = Node(graph, 'box_net/box-predict/bias')
bias.value = bias.value.reshape(-1, 4)[:, [1, 0, 3, 2]].reshape(bias.shape)
detection_output_node = DetectionOutput(graph, dict(
name='detections',
share_location=1,
background_label_id=int(replacement_descriptions['num_classes']) + 1,
nms_threshold=replacement_descriptions['nms_threshold'],
confidence_threshold=replacement_descriptions['confidence_threshold'],
top_k=100,
keep_top_k=100,
code_type='caffe.PriorBoxParameter.CENTER_SIZE',
)).create_node([deltas, logits, concat_prior_boxes])
output_op = Result(graph, dict(name='output'))
output_op.create_node([detection_output_node])
|
the-stack_0_26218
|
import json
from datetime import date, timedelta
from django import forms
from django.utils.translation import ugettext_lazy as _lazy, ugettext as _
from kitsune.questions.marketplace import submit_ticket
from kitsune.questions.models import Answer
# labels and help text
SITE_AFFECTED_LABEL = _lazy(u"URL of affected site")
CRASH_ID_LABEL = _lazy(u"Crash ID(s)")
CRASH_ID_HELP = _lazy(
u"If you submit information to Mozilla when you crash, "
u"you'll be given a crash ID which uniquely identifies "
u"your crash and lets us look at details that may help "
u"identify the cause. To find your recently submitted "
u"crash IDs, go to <strong>about:crashes</strong> in "
u"your location bar. <a href='https://support.mozilla."
u"com/en-US/kb/Firefox+crashes#Getting_the_most_"
u"accurate_help_with_your_Firefox_crash' "
u"target='_blank'>Click for detailed instructions</a>."
)
TROUBLESHOOTING_LABEL = _lazy(u"Troubleshooting Information")
TROUBLESHOOTING_HELP = _lazy(
u"This information gives details about the "
u"internal workings of your browser that will "
u"help in answering your question."
)
FREQUENCY_LABEL = _lazy(u"This happens")
FREQUENCY_CHOICES = [
(u"", u""),
(u"NOT_SURE", _lazy(u"Not sure how often")),
(u"ONCE_OR_TWICE", _lazy(u"Just once or twice")),
(u"FEW_TIMES_WEEK", _lazy(u"A few times a week")),
(u"EVERY_TIME", _lazy(u"Every time Firefox opened")),
]
STARTED_LABEL = _lazy(u"This started when...")
TITLE_LABEL = _lazy(u"Question")
CONTENT_LABEL = _lazy(u"Details")
EMAIL_LABEL = _lazy(u"Email")
EMAIL_HELP = _lazy(
u"A confirmation email will be sent to this address in "
u"order to post your question."
)
FF_VERSION_LABEL = _lazy(u"Firefox version")
OS_LABEL = _lazy(u"Operating system")
PLUGINS_LABEL = _lazy(u"Installed plugins")
ADDON_LABEL = _lazy(u"Extension/plugin you are having trouble with")
DEVICE_LABEL = _lazy(u"Mobile device")
# Validation error messages
MSG_TITLE_REQUIRED = _lazy(u"Please provide a question.")
MSG_TITLE_SHORT = _lazy(
u"Your question is too short (%(show_value)s "
u"characters). It must be at least %(limit_value)s "
u"characters."
)
MSG_TITLE_LONG = _lazy(
u"Please keep the length of your question to "
u"%(limit_value)s characters or less. It is currently "
u"%(show_value)s characters."
)
MSG_CONTENT_REQUIRED = _lazy(u"Please provide content.")
MSG_CONTENT_SHORT = _lazy(
u"Your content is too short (%(show_value)s "
u"characters). It must be at least %(limit_value)s "
u"characters."
)
MSG_CONTENT_LONG = _lazy(
u"Please keep the length of your content to "
u"%(limit_value)s characters or less. It is "
u"currently %(show_value)s characters."
)
REPLY_PLACEHOLDER = _lazy(u"Enter your reply here.")
# Marketplace AAQ form
EMAIL_PLACEHOLDER = _lazy(u"Enter your email address here.")
SUBJECT_PLACEHOLDER = _lazy(u"Enter a subject here.")
SUBJECT_CONTENT_REQUIRED = _lazy(u"Please provide a subject.")
SUBJECT_CONTENT_SHORT = _lazy(
u"The subject is too short (%(show_value)s "
u"characters). It must be at least %(limit_value)s "
u"characters."
)
SUBJECT_CONTENT_LONG = _lazy(
u"Please keep the length of the subject to "
u"%(limit_value)s characters or less. It is "
u"currently %(show_value)s characters."
)
BODY_PLACEHOLDER = _lazy(u"Describe your issue here.")
BODY_CONTENT_REQUIRED = _lazy(u"Please describe your issue in the body.")
BODY_CONTENT_SHORT = _lazy(
u"The body content is too short (%(show_value)s "
u"characters). It must be at least %(limit_value)s "
u"characters."
)
BODY_CONTENT_LONG = _lazy(
u"Please keep the length of the body content to "
u"%(limit_value)s characters or less. It is "
u"currently %(show_value)s characters."
)
CATEGORY_CHOICES = [
(u"account", _lazy(u"Account Issues")),
(u"installation", _lazy(u"Installation Issues")),
(u"payment", _lazy(u"Payment Issues")),
(u"application", _lazy(u"Application Issues")),
]
# Marketplace Request Refund form
TRANSACTION_ID_PLACEHOLDER = _lazy(u"Enter the Transaction ID here.")
TRANSACTION_ID_REQUIRED = _lazy(u"Please provide the Transaction ID.")
REFUND_CATEGORY_CHOICES = [
(u"Defective", _lazy(u"Defective")),
(u"Malware", _lazy(u"Malware")),
(u"Did not work as expected", _lazy(u"Did not work as expected")),
(u"Seller will not provide support", _lazy(u"Seller will not provide support")),
]
# Marketplace Developer Request form
DEVELOPER_REQUEST_CATEGORY_CHOICES = [
(u"Account Administration", _lazy(u"Account Administration")),
(u"Review Process", _lazy(u"Review Process")),
(u"Payments/Settlement", _lazy(u"Payments/Settlement")),
]
class EditQuestionForm(forms.Form):
"""Form to edit an existing question"""
def __init__(self, product=None, category=None, *args, **kwargs):
"""Init the form.
We are adding fields here and not declaratively because the
form fields to include depend on the selected product/category.
"""
super(EditQuestionForm, self).__init__(*args, **kwargs)
# Extra fields required by product/category selected
extra_fields = []
if product:
extra_fields += product.get("extra_fields", [])
if category:
extra_fields += category.get("extra_fields", [])
# Add the fields to the form
title_error_messages = {
"required": MSG_TITLE_REQUIRED,
"min_length": MSG_TITLE_SHORT,
"max_length": MSG_TITLE_LONG,
}
title_field = forms.CharField(
label=TITLE_LABEL,
min_length=5,
max_length=160,
widget=forms.TextInput(),
error_messages=title_error_messages,
)
self.fields["title"] = title_field
content_error_messages = {
"required": MSG_CONTENT_REQUIRED,
"min_length": MSG_CONTENT_SHORT,
"max_length": MSG_CONTENT_LONG,
}
field = forms.CharField(
label=CONTENT_LABEL,
min_length=5,
max_length=10000,
widget=forms.Textarea(),
error_messages=content_error_messages,
)
self.fields["content"] = field
if "sites_affected" in extra_fields:
field = forms.CharField(
label=SITE_AFFECTED_LABEL,
initial="http://",
required=False,
max_length=255,
widget=forms.TextInput(),
)
self.fields["sites_affected"] = field
if "crash_id" in extra_fields:
field = forms.CharField(
label=CRASH_ID_LABEL,
help_text=CRASH_ID_HELP,
required=False,
max_length=255,
widget=forms.TextInput(),
)
self.fields["crash_id"] = field
if "frequency" in extra_fields:
field = forms.ChoiceField(
label=FREQUENCY_LABEL, choices=FREQUENCY_CHOICES, required=False
)
self.fields["frequency"] = field
if "started" in extra_fields:
field = forms.CharField(
label=STARTED_LABEL,
required=False,
max_length=255,
widget=forms.TextInput(),
)
self.fields["started"] = field
if "addon" in extra_fields:
field = forms.CharField(
label=ADDON_LABEL,
required=False,
max_length=255,
widget=forms.TextInput(),
)
self.fields["addon"] = field
if "troubleshooting" in extra_fields:
widget = forms.Textarea(attrs={"class": "troubleshooting"})
field = forms.CharField(
label=TROUBLESHOOTING_LABEL,
help_text=TROUBLESHOOTING_HELP,
required=False,
max_length=655360,
widget=widget,
)
self.fields["troubleshooting"] = field
if "ff_version" in extra_fields:
self.fields["ff_version"] = forms.CharField(
label=FF_VERSION_LABEL, required=False,
)
if "device" in extra_fields:
self.fields["device"] = forms.CharField(label=DEVICE_LABEL, required=False,)
if "os" in extra_fields:
self.fields["os"] = forms.CharField(label=OS_LABEL, required=False,)
if "plugins" in extra_fields:
widget = forms.Textarea(attrs={"class": "plugins"})
self.fields["plugins"] = forms.CharField(
label=PLUGINS_LABEL, required=False, widget=widget,
)
@property
def metadata_field_keys(self):
"""Returns the keys of the metadata fields for the current
form instance"""
non_metadata_fields = ["title", "content", "email"]
def metadata_filter(x):
return x not in non_metadata_fields
return filter(metadata_filter, self.fields.keys())
@property
def cleaned_metadata(self):
"""Returns a dict with cleaned metadata values. Omits
fields with empty string value."""
clean = {}
for key in self.metadata_field_keys:
if key in self.data and self.data[key] != u"":
clean[key] = self.cleaned_data[key]
# Clean up the troubleshooting data if we have it.
troubleshooting = clean.get("troubleshooting")
if troubleshooting:
try:
parsed = json.loads(troubleshooting)
except ValueError:
parsed = None
if parsed:
# Clean out unwanted garbage preferences.
if "modifiedPreferences" in parsed and isinstance(
parsed["modifiedPreferences"], dict
):
for pref in parsed["modifiedPreferences"].keys():
if pref.startswith("print.macosx.pagesetup"):
del parsed["modifiedPreferences"][pref]
clean["troubleshooting"] = json.dumps(parsed)
# Override ff_version with the version in troubleshooting
# which is more precise for the dot releases.
version = parsed.get("application", {}).get("version")
if version:
clean["ff_version"] = version
return clean
class NewQuestionForm(EditQuestionForm):
"""Form to start a new question"""
def __init__(self, product=None, category=None, *args, **kwargs):
"""Add fields particular to new questions."""
super(NewQuestionForm, self).__init__(
product=product, category=category, *args, **kwargs
)
# Collect user agent only when making a question for the first time.
# Otherwise, we could grab moderators' user agents.
self.fields["useragent"] = forms.CharField(
widget=forms.HiddenInput(), required=False
)
class AnswerForm(forms.Form):
"""Form for replying to a question."""
content = forms.CharField(
label=_lazy("Content:"),
min_length=5,
max_length=10000,
widget=forms.Textarea(attrs={"placeholder": REPLY_PLACEHOLDER}),
error_messages={
"required": MSG_CONTENT_REQUIRED,
"min_length": MSG_CONTENT_SHORT,
"max_length": MSG_CONTENT_LONG,
},
)
class Meta:
model = Answer
fields = ("content",)
class WatchQuestionForm(forms.Form):
"""Form to subscribe to question updates."""
EVENT_TYPE_CHOICES = (
("reply", "when anybody replies."),
("solution", "when a solution is found."),
)
email = forms.EmailField(
required=False, widget=forms.TextInput(attrs={"placeholder": EMAIL_PLACEHOLDER})
)
event_type = forms.ChoiceField(choices=EVENT_TYPE_CHOICES, widget=forms.RadioSelect)
def __init__(self, user, *args, **kwargs):
# Initialize with logged in user's email.
self.user = user
super(WatchQuestionForm, self).__init__(*args, **kwargs)
def clean_email(self):
if not self.user.is_authenticated() and not self.cleaned_data["email"]:
raise forms.ValidationError(_("Please provide an email."))
elif not self.user.is_authenticated():
return self.cleaned_data["email"]
# Clear out the email for logged in users, we don't want to use it.
return None
class BaseZendeskForm(forms.Form):
"""Base Form class for all Zendesk forms."""
def __init__(self, user, *args, **kwargs):
super(BaseZendeskForm, self).__init__(*args, **kwargs)
self.user = user
# Add email field for users not logged in.
if not user.is_authenticated():
email = forms.EmailField(
label=_lazy(u"Email:"),
widget=forms.TextInput(attrs={"placeholder": EMAIL_PLACEHOLDER}),
)
self.fields["email"] = email
subject = forms.CharField(
label=_lazy(u"Subject:"),
min_length=4,
max_length=255,
widget=forms.TextInput(attrs={"placeholder": SUBJECT_PLACEHOLDER}),
error_messages={
"required": SUBJECT_CONTENT_REQUIRED,
"min_length": SUBJECT_CONTENT_SHORT,
"max_length": SUBJECT_CONTENT_LONG,
},
)
body = forms.CharField(
label=_lazy(u"Body:"),
min_length=5,
max_length=10000,
widget=forms.Textarea(attrs={"placeholder": BODY_PLACEHOLDER}),
error_messages={
"required": BODY_CONTENT_REQUIRED,
"min_length": BODY_CONTENT_SHORT,
"max_length": BODY_CONTENT_LONG,
},
)
def ticket_body(self, email):
"""Body of the ticket to submit to Zendesk."""
return "Email: {email}\n{body}".format(
email=email, body=self.cleaned_data["body"]
)
def submit_ticket(self):
"""Submit the ticket to Zendesk."""
if self.user.is_authenticated():
email = self.user.email
else:
email = self.cleaned_data["email"]
submit_ticket(
email,
self.cleaned_data["category"],
self.cleaned_data["subject"],
self.ticket_body(email),
[],
)
class MarketplaceAaqForm(BaseZendeskForm):
category = forms.ChoiceField(label=_lazy(u"Category:"), choices=CATEGORY_CHOICES)
class MarketplaceRefundForm(BaseZendeskForm):
transaction_id = forms.CharField(
label=_lazy(u"Transaction ID:"),
widget=forms.TextInput(attrs={"placeholder": TRANSACTION_ID_PLACEHOLDER}),
error_messages={"required": TRANSACTION_ID_REQUIRED},
)
category = forms.ChoiceField(
label=_lazy(u"Category:"), choices=REFUND_CATEGORY_CHOICES
)
def ticket_body(self, email):
"""Body of the ticket to submit to Zendesk."""
return "Email: {email}\nTransaction ID: {id}\nCategory: {category}\n{body}".format(
email=email,
id=self.cleaned_data["transaction_id"],
category=self.cleaned_data["category"],
body=self.cleaned_data["body"],
)
class MarketplaceDeveloperRequestForm(BaseZendeskForm):
category = forms.ChoiceField(
label=_lazy(u"Category:"), choices=DEVELOPER_REQUEST_CATEGORY_CHOICES
)
def ticket_body(self, email):
"""Body of the ticket to submit to Zendesk."""
return "Email: {email}\nCategory: {category}\n{body}".format(
email=email,
category=self.cleaned_data["category"],
body=self.cleaned_data["body"],
)
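# (days, label) pairs used as the <select> choices for the StatsForm interval field below.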
bucket_choices = [(1, "1 day"), (7, "1 week"), (30, "1 month")]
class StatsForm(forms.Form):
bucket = forms.IntegerField(
min_value=1,
required=False,
label=_lazy(u"Interval"),
widget=forms.Select(choices=bucket_choices),
)
start = forms.DateField(required=False, label=_lazy(u"Start"))
end = forms.DateField(required=False, label=_lazy(u"End"))
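    # Missing values default to 1-day buckets over the 30 days ending today.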
def clean_bucket(self):
if self.cleaned_data.get("bucket") is None:
return 1
return self.cleaned_data["bucket"]
def clean_start(self):
if self.cleaned_data.get("start") is None:
return date.today() - timedelta(days=30)
return self.cleaned_data["start"]
def clean_end(self):
if self.cleaned_data.get("end") is None:
return date.today()
return self.cleaned_data["end"]
def clean(self):
start = self.cleaned_data.get("start")
end = self.cleaned_data.get("end")
if start and end and start > end:
raise forms.ValidationError("Start must be less than end.")
return self.cleaned_data
|
the-stack_0_26219
|
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.compat
import copy
import io
import os
import sys
import unittest
from collections import UserDict as UD, UserList as UL
import TestCmd
import TestUnit
from SCons.Environment import *
import SCons.Warnings
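# Render two construction environments side by side so failed comparisons show which keys differ.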
def diff_env(env1, env2):
s1 = "env1 = {\n"
s2 = "env2 = {\n"
d = {}
for k in list(env1._dict.keys()) + list(env2._dict.keys()):
d[k] = None
for k in sorted(d.keys()):
if k in env1:
if k in env2:
if env1[k] != env2[k]:
s1 = s1 + " " + repr(k) + " : " + repr(env1[k]) + "\n"
s2 = s2 + " " + repr(k) + " : " + repr(env2[k]) + "\n"
else:
s1 = s1 + " " + repr(k) + " : " + repr(env1[k]) + "\n"
elif k in env2:
s2 = s2 + " " + repr(k) + " : " + repr(env2[k]) + "\n"
s1 = s1 + "}\n"
s2 = s2 + "}\n"
return s1 + s2
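# Same idea as diff_env(), but for plain dictionaries (e.g. BUILDERS).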
def diff_dict(d1, d2):
s1 = "d1 = {\n"
s2 = "d2 = {\n"
d = {}
for k in list(d1.keys()) + list(d2.keys()):
d[k] = None
for k in sorted(d.keys()):
if k in d1:
if k in d2:
if d1[k] != d2[k]:
s1 = s1 + " " + repr(k) + " : " + repr(d1[k]) + "\n"
s2 = s2 + " " + repr(k) + " : " + repr(d2[k]) + "\n"
else:
s1 = s1 + " " + repr(k) + " : " + repr(d1[k]) + "\n"
elif k in d2:
s2 = s2 + " " + repr(k) + " : " + repr(d2[k]) + "\n"
s1 = s1 + "}\n"
s2 = s2 + "}\n"
return s1 + s2
called_it = {}
built_it = {}
class Builder(SCons.Builder.BuilderBase):
"""A dummy Builder class for testing purposes. "Building"
a target is simply setting a value in the dictionary.
"""
def __init__(self, name = None):
self.name = name
def __call__(self, env, target=None, source=None, **kw):
global called_it
called_it['target'] = target
called_it['source'] = source
called_it.update(kw)
def execute(self, target = None, **kw):
global built_it
built_it[target] = 1
scanned_it = {}
class Scanner(object):
"""A dummy Scanner class for testing purposes. "Scanning"
a target is simply setting a value in the dictionary.
"""
def __init__(self, name, skeys=[]):
self.name = name
self.skeys = skeys
def __call__(self, filename):
global scanned_it
scanned_it[filename] = 1
def __eq__(self, other):
try:
return self.__dict__ == other.__dict__
except AttributeError:
return False
def get_skeys(self, env):
return self.skeys
def __str__(self):
return self.name
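# Test stand-in mimicking SCons.Util.CLVar: strings are split on whitespace and + keeps list semantics.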
class CLVar(UL):
def __init__(self, seq):
if isinstance(seq, str):
seq = seq.split()
UL.__init__(self, seq)
def __add__(self, other):
return UL.__add__(self, CLVar(other))
def __radd__(self, other):
return UL.__radd__(self, CLVar(other))
class DummyNode(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def rfile(self):
return self
def get_subst_proxy(self):
return self
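# Minimal tool function; it defines $_F77INCFLAGS so the autogeneration tests can expand it.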
def test_tool( env ):
env['_F77INCFLAGS'] = '$( ${_concat(INCPREFIX, F77PATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
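# Mixin that creates Environments pre-loaded with the test tool, default CC/CCFLAGS/ENV values,
# and a fake Object builder, so individual tests don't have to repeat that setup.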
class TestEnvironmentFixture(object):
def TestEnvironment(self, *args, **kw):
if not kw or 'tools' not in kw:
kw['tools'] = [test_tool]
default_keys = { 'CC' : 'cc',
'CCFLAGS' : '-DNDEBUG',
'ENV' : { 'TMP' : '/tmp' } }
for key, value in default_keys.items():
if key not in kw:
kw[key] = value
if 'BUILDERS' not in kw:
static_obj = SCons.Builder.Builder(action = {},
emitter = {},
suffix = '.o',
single_source = 1)
kw['BUILDERS'] = {'Object' : static_obj}
static_obj.add_action('.cpp', 'fake action')
env = Environment(*args, **kw)
return env
class SubstitutionTestCase(unittest.TestCase):
def test___init__(self):
"""Test initializing a SubstitutionEnvironment
"""
env = SubstitutionEnvironment()
assert '__env__' not in env
def test___cmp__(self):
"""Test comparing SubstitutionEnvironments
"""
env1 = SubstitutionEnvironment(XXX = 'x')
env2 = SubstitutionEnvironment(XXX = 'x')
env3 = SubstitutionEnvironment(XXX = 'xxx')
env4 = SubstitutionEnvironment(XXX = 'x', YYY = 'x')
assert env1 == env2
assert env1 != env3
assert env1 != env4
def test___delitem__(self):
"""Test deleting a variable from a SubstitutionEnvironment
"""
env1 = SubstitutionEnvironment(XXX = 'x', YYY = 'y')
env2 = SubstitutionEnvironment(XXX = 'x')
del env1['YYY']
assert env1 == env2
def test___getitem__(self):
"""Test fetching a variable from a SubstitutionEnvironment
"""
env = SubstitutionEnvironment(XXX = 'x')
assert env['XXX'] == 'x', env['XXX']
def test___setitem__(self):
"""Test setting a variable in a SubstitutionEnvironment
"""
env1 = SubstitutionEnvironment(XXX = 'x')
env2 = SubstitutionEnvironment(XXX = 'x', YYY = 'y')
env1['YYY'] = 'y'
assert env1 == env2
def test_get(self):
"""Test the SubstitutionEnvironment get() method
"""
env = SubstitutionEnvironment(XXX = 'x')
assert env.get('XXX') == 'x', env.get('XXX')
assert env.get('YYY') is None, env.get('YYY')
def test_has_key(self):
"""Test the SubstitutionEnvironment has_key() method
"""
env = SubstitutionEnvironment(XXX = 'x')
assert 'XXX' in env
assert 'YYY' not in env
def test_contains(self):
"""Test the SubstitutionEnvironment __contains__() method
"""
env = SubstitutionEnvironment(XXX = 'x')
assert 'XXX' in env
assert not 'YYY' in env
def test_items(self):
"""Test the SubstitutionEnvironment items() method
"""
env = SubstitutionEnvironment(XXX = 'x', YYY = 'y')
items = list(env.items())
assert len(items) == 2 and ('XXX','x') in items and ('YYY','y') in items, items
        # The original, stricter assertion below is commented out: it fails under py3 because dict ordering changes.
# assert items == [('XXX','x'), ('YYY','y')], items
def test_arg2nodes(self):
"""Test the arg2nodes method
"""
env = SubstitutionEnvironment()
dict = {}
class X(SCons.Node.Node):
pass
def Factory(name, directory = None, create = 1, dict=dict, X=X):
if name not in dict:
dict[name] = X()
dict[name].name = name
return dict[name]
nodes = env.arg2nodes("Util.py UtilTests.py", Factory)
assert len(nodes) == 1, nodes
assert isinstance(nodes[0], X)
assert nodes[0].name == "Util.py UtilTests.py", nodes[0].name
nodes = env.arg2nodes(u"Util.py UtilTests.py", Factory)
assert len(nodes) == 1, nodes
assert isinstance(nodes[0], X)
assert nodes[0].name == u"Util.py UtilTests.py", nodes[0].name
nodes = env.arg2nodes(["Util.py", "UtilTests.py"], Factory)
assert len(nodes) == 2, nodes
assert isinstance(nodes[0], X)
assert isinstance(nodes[1], X)
assert nodes[0].name == "Util.py", nodes[0].name
assert nodes[1].name == "UtilTests.py", nodes[1].name
n1 = Factory("Util.py")
nodes = env.arg2nodes([n1, "UtilTests.py"], Factory)
assert len(nodes) == 2, nodes
assert isinstance(nodes[0], X)
assert isinstance(nodes[1], X)
assert nodes[0].name == "Util.py", nodes[0].name
assert nodes[1].name == "UtilTests.py", nodes[1].name
class SConsNode(SCons.Node.Node):
pass
nodes = env.arg2nodes(SConsNode())
assert len(nodes) == 1, nodes
assert isinstance(nodes[0], SConsNode), nodes[0]
class OtherNode(object):
pass
nodes = env.arg2nodes(OtherNode())
assert len(nodes) == 1, nodes
assert isinstance(nodes[0], OtherNode), nodes[0]
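        # Lookup functions: each one claims names starting with a given letter and tags the node it creates.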
def lookup_a(str, F=Factory):
if str[0] == 'a':
n = F(str)
n.a = 1
return n
else:
return None
def lookup_b(str, F=Factory):
if str[0] == 'b':
n = F(str)
n.b = 1
return n
else:
return None
env_ll = SubstitutionEnvironment()
env_ll.lookup_list = [lookup_a, lookup_b]
nodes = env_ll.arg2nodes(['aaa', 'bbb', 'ccc'], Factory)
assert len(nodes) == 3, nodes
assert nodes[0].name == 'aaa', nodes[0]
assert nodes[0].a == 1, nodes[0]
assert not hasattr(nodes[0], 'b'), nodes[0]
assert nodes[1].name == 'bbb'
assert not hasattr(nodes[1], 'a'), nodes[1]
assert nodes[1].b == 1, nodes[1]
assert nodes[2].name == 'ccc'
assert not hasattr(nodes[2], 'a'), nodes[1]
assert not hasattr(nodes[2], 'b'), nodes[1]
def lookup_bbbb(str, F=Factory):
if str == 'bbbb':
n = F(str)
n.bbbb = 1
return n
else:
return None
def lookup_c(str, F=Factory):
if str[0] == 'c':
n = F(str)
n.c = 1
return n
else:
return None
nodes = env.arg2nodes(['bbbb', 'ccc'], Factory,
[lookup_c, lookup_bbbb, lookup_b])
assert len(nodes) == 2, nodes
assert nodes[0].name == 'bbbb'
assert not hasattr(nodes[0], 'a'), nodes[1]
assert not hasattr(nodes[0], 'b'), nodes[1]
assert nodes[0].bbbb == 1, nodes[1]
assert not hasattr(nodes[0], 'c'), nodes[0]
assert nodes[1].name == 'ccc'
assert not hasattr(nodes[1], 'a'), nodes[1]
assert not hasattr(nodes[1], 'b'), nodes[1]
assert not hasattr(nodes[1], 'bbbb'), nodes[0]
assert nodes[1].c == 1, nodes[1]
def test_arg2nodes_target_source(self):
"""Test the arg2nodes method with target= and source= keywords
"""
targets = [DummyNode('t1'), DummyNode('t2')]
sources = [DummyNode('s1'), DummyNode('s2')]
env = SubstitutionEnvironment()
nodes = env.arg2nodes(['${TARGET}-a',
'${SOURCE}-b',
'${TARGETS[1]}-c',
'${SOURCES[1]}-d'],
DummyNode,
target=targets,
source=sources)
names = [n.name for n in nodes]
assert names == ['t1-a', 's1-b', 't2-c', 's2-d'], names
def test_gvars(self):
"""Test the base class gvars() method"""
env = SubstitutionEnvironment()
gvars = env.gvars()
assert gvars == {}, gvars
def test_lvars(self):
"""Test the base class lvars() method"""
env = SubstitutionEnvironment()
lvars = env.lvars()
assert lvars == {}, lvars
def test_subst(self):
"""Test substituting construction variables within strings
Check various combinations, including recursive expansion
of variables into other variables.
"""
env = SubstitutionEnvironment(AAA = 'a', BBB = 'b')
mystr = env.subst("$AAA ${AAA}A $BBBB $BBB")
assert mystr == "a aA b", mystr
# Changed the tests below to reflect a bug fix in
# subst()
env = SubstitutionEnvironment(AAA = '$BBB', BBB = 'b', BBBA = 'foo')
mystr = env.subst("$AAA ${AAA}A ${AAA}B $BBB")
assert mystr == "b bA bB b", mystr
env = SubstitutionEnvironment(AAA = '$BBB', BBB = '$CCC', CCC = 'c')
mystr = env.subst("$AAA ${AAA}A ${AAA}B $BBB")
assert mystr == "c cA cB c", mystr
# Lists:
env = SubstitutionEnvironment(AAA = ['a', 'aa', 'aaa'])
mystr = env.subst("$AAA")
assert mystr == "a aa aaa", mystr
# Tuples:
env = SubstitutionEnvironment(AAA = ('a', 'aa', 'aaa'))
mystr = env.subst("$AAA")
assert mystr == "a aa aaa", mystr
t1 = DummyNode('t1')
t2 = DummyNode('t2')
s1 = DummyNode('s1')
s2 = DummyNode('s2')
env = SubstitutionEnvironment(AAA = 'aaa')
s = env.subst('$AAA $TARGET $SOURCES', target=[t1, t2], source=[s1, s2])
assert s == "aaa t1 s1 s2", s
s = env.subst('$AAA $TARGETS $SOURCE', target=[t1, t2], source=[s1, s2])
assert s == "aaa t1 t2 s1", s
# Test callables in the SubstitutionEnvironment
def foo(target, source, env, for_signature):
assert str(target) == 't', target
assert str(source) == 's', source
return env["FOO"]
env = SubstitutionEnvironment(BAR=foo, FOO='baz')
t = DummyNode('t')
s = DummyNode('s')
subst = env.subst('test $BAR', target=t, source=s)
assert subst == 'test baz', subst
# Test not calling callables in the SubstitutionEnvironment
if 0:
# This will take some serious surgery to subst() and
# subst_list(), so just leave these tests out until we can
# do that.
def bar(arg):
pass
env = SubstitutionEnvironment(BAR=bar, FOO='$BAR')
subst = env.subst('$BAR', call=None)
assert subst is bar, subst
subst = env.subst('$FOO', call=None)
assert subst is bar, subst
def test_subst_kw(self):
"""Test substituting construction variables within dictionaries"""
env = SubstitutionEnvironment(AAA = 'a', BBB = 'b')
kw = env.subst_kw({'$AAA' : 'aaa', 'bbb' : '$BBB'})
assert len(kw) == 2, kw
assert kw['a'] == 'aaa', kw['a']
assert kw['bbb'] == 'b', kw['bbb']
def test_subst_list(self):
"""Test substituting construction variables in command lists
"""
env = SubstitutionEnvironment(AAA = 'a', BBB = 'b')
l = env.subst_list("$AAA ${AAA}A $BBBB $BBB")
assert l == [["a", "aA", "b"]], l
# Changed the tests below to reflect a bug fix in
# subst()
env = SubstitutionEnvironment(AAA = '$BBB', BBB = 'b', BBBA = 'foo')
l = env.subst_list("$AAA ${AAA}A ${AAA}B $BBB")
assert l == [["b", "bA", "bB", "b"]], l
env = SubstitutionEnvironment(AAA = '$BBB', BBB = '$CCC', CCC = 'c')
l = env.subst_list("$AAA ${AAA}A ${AAA}B $BBB")
assert l == [["c", "cA", "cB", "c"]], l
env = SubstitutionEnvironment(AAA = '$BBB', BBB = '$CCC', CCC = [ 'a', 'b\nc' ])
lst = env.subst_list([ "$AAA", "B $CCC" ])
assert lst == [[ "a", "b"], ["c", "B a", "b"], ["c"]], lst
t1 = DummyNode('t1')
t2 = DummyNode('t2')
s1 = DummyNode('s1')
s2 = DummyNode('s2')
env = SubstitutionEnvironment(AAA = 'aaa')
s = env.subst_list('$AAA $TARGET $SOURCES', target=[t1, t2], source=[s1, s2])
assert s == [["aaa", "t1", "s1", "s2"]], s
s = env.subst_list('$AAA $TARGETS $SOURCE', target=[t1, t2], source=[s1, s2])
assert s == [["aaa", "t1", "t2", "s1"]], s
# Test callables in the SubstitutionEnvironment
def foo(target, source, env, for_signature):
assert str(target) == 't', target
assert str(source) == 's', source
return env["FOO"]
env = SubstitutionEnvironment(BAR=foo, FOO='baz')
t = DummyNode('t')
s = DummyNode('s')
lst = env.subst_list('test $BAR', target=t, source=s)
assert lst == [['test', 'baz']], lst
# Test not calling callables in the SubstitutionEnvironment
if 0:
# This will take some serious surgery to subst() and
# subst_list(), so just leave these tests out until we can
# do that.
def bar(arg):
pass
env = SubstitutionEnvironment(BAR=bar, FOO='$BAR')
subst = env.subst_list('$BAR', call=None)
assert subst is bar, subst
subst = env.subst_list('$FOO', call=None)
assert subst is bar, subst
def test_subst_path(self):
"""Test substituting a path list
"""
class MyProxy(object):
def __init__(self, val):
self.val = val
def get(self):
return self.val + '-proxy'
class MyNode(object):
def __init__(self, val):
self.val = val
def get_subst_proxy(self):
return self
def __str__(self):
return self.val
class MyObj(object):
def get(self):
return self
env = SubstitutionEnvironment(FOO='foo',
BAR='bar',
LIST=['one', 'two'],
PROXY=MyProxy('my1'))
r = env.subst_path('$FOO')
assert r == ['foo'], r
r = env.subst_path(['$FOO', 'xxx', '$BAR'])
assert r == ['foo', 'xxx', 'bar'], r
r = env.subst_path(['$FOO', '$LIST', '$BAR'])
assert list(map(str, r)) == ['foo', 'one two', 'bar'], r
r = env.subst_path(['$FOO', '$TARGET', '$SOURCE', '$BAR'])
assert r == ['foo', '', '', 'bar'], r
r = env.subst_path(['$FOO', '$TARGET', '$BAR'], target=MyNode('ttt'))
assert list(map(str, r)) == ['foo', 'ttt', 'bar'], r
r = env.subst_path(['$FOO', '$SOURCE', '$BAR'], source=MyNode('sss'))
assert list(map(str, r)) == ['foo', 'sss', 'bar'], r
n = MyObj()
r = env.subst_path(['$PROXY', MyProxy('my2'), n])
assert r == ['my1-proxy', 'my2-proxy', n], r
class StringableObj(object):
def __init__(self, s):
self.s = s
def __str__(self):
return self.s
env = SubstitutionEnvironment(FOO=StringableObj("foo"),
BAR=StringableObj("bar"))
r = env.subst_path([ "${FOO}/bar", "${BAR}/baz" ])
assert r == [ "foo/bar", "bar/baz" ], r
r = env.subst_path([ "bar/${FOO}", "baz/${BAR}" ])
assert r == [ "bar/foo", "baz/bar" ], r
r = env.subst_path([ "bar/${FOO}/bar", "baz/${BAR}/baz" ])
assert r == [ "bar/foo/bar", "baz/bar/baz" ], r
def test_subst_target_source(self):
"""Test the base environment subst_target_source() method"""
env = SubstitutionEnvironment(AAA = 'a', BBB = 'b')
mystr = env.subst_target_source("$AAA ${AAA}A $BBBB $BBB")
assert mystr == "a aA b", mystr
def test_backtick(self):
"""Test the backtick() method for capturing command output"""
env = SubstitutionEnvironment()
test = TestCmd.TestCmd(workdir = '')
test.write('stdout.py', """\
import sys
sys.stdout.write('this came from stdout.py\\n')
sys.exit(0)
""")
test.write('stderr.py', """\
import sys
sys.stderr.write('this came from stderr.py\\n')
sys.exit(0)
""")
test.write('fail.py', """\
import sys
sys.exit(1)
""")
test.write('echo.py', """\
import os, sys
sys.stdout.write(os.environ['ECHO'] + '\\n')
sys.exit(0)
""")
save_stderr = sys.stderr
python = '"' + sys.executable + '"'
try:
sys.stderr = io.StringIO()
cmd = '%s %s' % (python, test.workpath('stdout.py'))
output = env.backtick(cmd)
errout = sys.stderr.getvalue()
assert output == 'this came from stdout.py\n', output
assert errout == '', errout
sys.stderr = io.StringIO()
cmd = '%s %s' % (python, test.workpath('stderr.py'))
output = env.backtick(cmd)
errout = sys.stderr.getvalue()
assert output == '', output
assert errout == 'this came from stderr.py\n', errout
sys.stderr = io.StringIO()
cmd = '%s %s' % (python, test.workpath('fail.py'))
try:
env.backtick(cmd)
except OSError as e:
assert str(e) == "'%s' exited 1" % cmd, str(e)
else:
self.fail("did not catch expected OSError")
sys.stderr = io.StringIO()
cmd = '%s %s' % (python, test.workpath('echo.py'))
env['ENV'] = os.environ.copy()
env['ENV']['ECHO'] = 'this came from ECHO'
output = env.backtick(cmd)
errout = sys.stderr.getvalue()
assert output == 'this came from ECHO\n', output
assert errout == '', errout
finally:
sys.stderr = save_stderr
def test_AddMethod(self):
"""Test the AddMethod() method"""
env = SubstitutionEnvironment(FOO = 'foo')
def func(self):
return 'func-' + self['FOO']
assert not hasattr(env, 'func')
env.AddMethod(func)
r = env.func()
assert r == 'func-foo', r
assert not hasattr(env, 'bar')
env.AddMethod(func, 'bar')
r = env.bar()
assert r == 'func-foo', r
def func2(self, arg=''):
return 'func2-' + self['FOO'] + arg
env.AddMethod(func2)
r = env.func2()
assert r == 'func2-foo', r
r = env.func2('-xxx')
assert r == 'func2-foo-xxx', r
env.AddMethod(func2, 'func')
r = env.func()
assert r == 'func2-foo', r
r = env.func('-yyy')
assert r == 'func2-foo-yyy', r
# Test that clones of clones correctly re-bind added methods.
env1 = Environment(FOO = '1')
env1.AddMethod(func2)
env2 = env1.Clone(FOO = '2')
env3 = env2.Clone(FOO = '3')
env4 = env3.Clone(FOO = '4')
r = env1.func2()
assert r == 'func2-1', r
r = env2.func2()
assert r == 'func2-2', r
r = env3.func2()
assert r == 'func2-3', r
r = env4.func2()
assert r == 'func2-4', r
        # Test that clones don't re-bind an attribute that the user
        # set on the environment directly.
env1 = Environment(FOO = '1')
env1.AddMethod(func2)
def replace_func2():
return 'replace_func2'
env1.func2 = replace_func2
env2 = env1.Clone(FOO = '2')
r = env2.func2()
assert r == 'replace_func2', r
def test_Override(self):
"Test overriding construction variables"
env = SubstitutionEnvironment(ONE=1, TWO=2, THREE=3, FOUR=4)
assert env['ONE'] == 1, env['ONE']
assert env['TWO'] == 2, env['TWO']
assert env['THREE'] == 3, env['THREE']
assert env['FOUR'] == 4, env['FOUR']
env2 = env.Override({'TWO' : '10',
'THREE' :'x $THREE y',
'FOUR' : ['x', '$FOUR', 'y']})
assert env2['ONE'] == 1, env2['ONE']
assert env2['TWO'] == '10', env2['TWO']
assert env2['THREE'] == 'x 3 y', env2['THREE']
assert env2['FOUR'] == ['x', 4, 'y'], env2['FOUR']
assert env['ONE'] == 1, env['ONE']
assert env['TWO'] == 2, env['TWO']
assert env['THREE'] == 3, env['THREE']
assert env['FOUR'] == 4, env['FOUR']
env2.Replace(ONE = "won")
assert env2['ONE'] == "won", env2['ONE']
assert env['ONE'] == 1, env['ONE']
def test_ParseFlags(self):
"""Test the ParseFlags() method
"""
env = SubstitutionEnvironment()
empty = {
'ASFLAGS' : [],
'CFLAGS' : [],
'CCFLAGS' : [],
'CXXFLAGS' : [],
'CPPDEFINES' : [],
'CPPFLAGS' : [],
'CPPPATH' : [],
'FRAMEWORKPATH' : [],
'FRAMEWORKS' : [],
'LIBPATH' : [],
'LIBS' : [],
'LINKFLAGS' : [],
'RPATH' : [],
}
d = env.ParseFlags(None)
assert d == empty, d
d = env.ParseFlags('')
assert d == empty, d
d = env.ParseFlags([])
assert d == empty, d
s = "-I/usr/include/fum -I bar -X\n" + \
'-I"C:\\Program Files\\ASCEND\\include" ' + \
"-L/usr/fax -L foo -lxxx -l yyy " + \
'-L"C:\\Program Files\\ASCEND" -lascend ' + \
"-Wa,-as -Wl,-link " + \
"-Wl,-rpath=rpath1 " + \
"-Wl,-R,rpath2 " + \
"-Wl,-Rrpath3 " + \
"-Wp,-cpp " + \
"-std=c99 " + \
"-std=c++0x " + \
"-framework Carbon " + \
"-frameworkdir=fwd1 " + \
"-Ffwd2 " + \
"-F fwd3 " + \
"-dylib_file foo-dylib " + \
"-pthread " + \
"-fopenmp " + \
"-mno-cygwin -mwindows " + \
"-arch i386 -isysroot /tmp " + \
"-iquote /usr/include/foo1 " + \
"-isystem /usr/include/foo2 " + \
"-idirafter /usr/include/foo3 " + \
"+DD64 " + \
"-DFOO -DBAR=value -D BAZ "
d = env.ParseFlags(s)
assert d['ASFLAGS'] == ['-as'], d['ASFLAGS']
assert d['CFLAGS'] == ['-std=c99']
assert d['CCFLAGS'] == ['-X', '-Wa,-as',
'-pthread', '-fopenmp', '-mno-cygwin',
('-arch', 'i386'), ('-isysroot', '/tmp'),
('-iquote', '/usr/include/foo1'),
('-isystem', '/usr/include/foo2'),
('-idirafter', '/usr/include/foo3'),
'+DD64'], repr(d['CCFLAGS'])
assert d['CXXFLAGS'] == ['-std=c++0x'], repr(d['CXXFLAGS'])
assert d['CPPDEFINES'] == ['FOO', ['BAR', 'value'], 'BAZ'], d['CPPDEFINES']
assert d['CPPFLAGS'] == ['-Wp,-cpp'], d['CPPFLAGS']
assert d['CPPPATH'] == ['/usr/include/fum',
'bar',
'C:\\Program Files\\ASCEND\\include'], d['CPPPATH']
assert d['FRAMEWORKPATH'] == ['fwd1', 'fwd2', 'fwd3'], d['FRAMEWORKPATH']
assert d['FRAMEWORKS'] == ['Carbon'], d['FRAMEWORKS']
assert d['LIBPATH'] == ['/usr/fax',
'foo',
'C:\\Program Files\\ASCEND'], d['LIBPATH']
LIBS = list(map(str, d['LIBS']))
assert LIBS == ['xxx', 'yyy', 'ascend'], (d['LIBS'], LIBS)
assert d['LINKFLAGS'] == ['-Wl,-link',
'-dylib_file', 'foo-dylib',
'-pthread', '-fopenmp',
'-mno-cygwin', '-mwindows',
('-arch', 'i386'),
('-isysroot', '/tmp'),
'+DD64'], repr(d['LINKFLAGS'])
assert d['RPATH'] == ['rpath1', 'rpath2', 'rpath3'], d['RPATH']
def test_MergeFlags(self):
"""Test the MergeFlags() method
"""
env = SubstitutionEnvironment()
env.MergeFlags('')
assert 'CCFLAGS' not in env, env['CCFLAGS']
env.MergeFlags('-X')
assert env['CCFLAGS'] == ['-X'], env['CCFLAGS']
env.MergeFlags('-X')
assert env['CCFLAGS'] == ['-X'], env['CCFLAGS']
env = SubstitutionEnvironment(CCFLAGS=None)
env.MergeFlags('-Y')
assert env['CCFLAGS'] == ['-Y'], env['CCFLAGS']
env = SubstitutionEnvironment()
env.MergeFlags({'A':['aaa'], 'B':['bbb']})
assert env['A'] == ['aaa'], env['A']
assert env['B'] == ['bbb'], env['B']
class BaseTestCase(unittest.TestCase,TestEnvironmentFixture):
reserved_variables = [
'CHANGED_SOURCES',
'CHANGED_TARGETS',
'SOURCE',
'SOURCES',
'TARGET',
'TARGETS',
'UNCHANGED_SOURCES',
'UNCHANGED_TARGETS',
]
def test___init__(self):
"""Test construction Environment creation
Create two with identical arguments and check that
they compare the same.
"""
env1 = self.TestEnvironment(XXX = 'x', YYY = 'y')
env2 = self.TestEnvironment(XXX = 'x', YYY = 'y')
assert env1 == env2, diff_env(env1, env2)
assert '__env__' not in env1
assert '__env__' not in env2
def test_variables(self):
"""Test that variables only get applied once."""
class FakeOptions(object):
def __init__(self, key, val):
self.calls = 0
self.key = key
self.val = val
def keys(self):
return [self.key]
def Update(self, env):
env[self.key] = self.val
self.calls = self.calls + 1
o = FakeOptions('AAA', 'fake_opt')
env = Environment(variables=o, AAA='keyword_arg')
assert o.calls == 1, o.calls
assert env['AAA'] == 'fake_opt', env['AAA']
def test_get(self):
"""Test the get() method."""
env = self.TestEnvironment(aaa = 'AAA')
x = env.get('aaa')
assert x == 'AAA', x
x = env.get('aaa', 'XXX')
assert x == 'AAA', x
x = env.get('bbb')
assert x is None, x
x = env.get('bbb', 'XXX')
assert x == 'XXX', x
def test_Builder_calls(self):
"""Test Builder calls through different environments
"""
global called_it
b1 = Builder()
b2 = Builder()
env = Environment()
env.Replace(BUILDERS = { 'builder1' : b1,
'builder2' : b2 })
called_it = {}
env.builder1('in1')
assert called_it['target'] is None, called_it
assert called_it['source'] == ['in1'], called_it
called_it = {}
env.builder2(source = 'in2', xyzzy = 1)
assert called_it['target'] is None, called_it
assert called_it['source'] == ['in2'], called_it
assert called_it['xyzzy'] == 1, called_it
called_it = {}
env.builder1(foo = 'bar')
assert called_it['foo'] == 'bar', called_it
assert called_it['target'] is None, called_it
assert called_it['source'] is None, called_it
def test_BuilderWrapper_attributes(self):
"""Test getting and setting of BuilderWrapper attributes
"""
b1 = Builder()
b2 = Builder()
e1 = Environment()
e2 = Environment()
e1.Replace(BUILDERS = {'b' : b1})
bw = e1.b
assert bw.env is e1
bw.env = e2
assert bw.env is e2
assert bw.builder is b1
bw.builder = b2
assert bw.builder is b2
self.assertRaises(AttributeError, getattr, bw, 'foobar')
bw.foobar = 42
assert bw.foobar == 42
# This unit test is currently disabled because we don't think the
# underlying method it tests (Environment.BuilderWrapper.execute())
# is necessary, but we're leaving the code here for now in case
# that's mistaken.
def _DO_NOT_test_Builder_execs(self):
"""Test Builder execution through different environments
One environment is initialized with a single
Builder object, one with a list of a single Builder
object, and one with a list of two Builder objects.
"""
global built_it
b1 = Builder()
b2 = Builder()
built_it = {}
env3 = Environment()
env3.Replace(BUILDERS = { 'builder1' : b1,
'builder2' : b2 })
env3.builder1.execute(target = 'out1')
env3.builder2.execute(target = 'out2')
env3.builder1.execute(target = 'out3')
assert built_it['out1']
assert built_it['out2']
assert built_it['out3']
env4 = env3.Clone()
assert env4.builder1.env is env4, "builder1.env (%s) == env3 (%s)?" % (
env4.builder1.env, env3)
assert env4.builder2.env is env4, "builder2.env (%s) == env3 (%s)?" % (
            env4.builder2.env, env3)
# Now test BUILDERS as a dictionary.
built_it = {}
env5 = self.TestEnvironment(BUILDERS={ 'foo' : b1 })
env5['BUILDERS']['bar'] = b2
env5.foo.execute(target='out1')
env5.bar.execute(target='out2')
assert built_it['out1']
assert built_it['out2']
built_it = {}
env6 = Environment()
env6['BUILDERS'] = { 'foo' : b1,
'bar' : b2 }
env6.foo.execute(target='out1')
env6.bar.execute(target='out2')
assert built_it['out1']
assert built_it['out2']
def test_Scanners(self):
"""Test setting SCANNERS in various ways
One environment is initialized with a single
Scanner object, one with a list of a single Scanner
object, and one with a list of two Scanner objects.
"""
global scanned_it
s1 = Scanner(name = 'scanner1', skeys = [".c", ".cc"])
s2 = Scanner(name = 'scanner2', skeys = [".m4"])
s3 = Scanner(name = 'scanner3', skeys = [".m4", ".m5"])
s4 = Scanner(name = 'scanner4', skeys = [None])
# XXX Tests for scanner execution through different environments,
# XXX if we ever want to do that some day
# scanned_it = {}
# env1 = self.TestEnvironment(SCANNERS = s1)
# env1.scanner1(filename = 'out1')
# assert scanned_it['out1']
#
# scanned_it = {}
# env2 = self.TestEnvironment(SCANNERS = [s1])
# env1.scanner1(filename = 'out1')
# assert scanned_it['out1']
#
# scanned_it = {}
# env3 = Environment()
# env3.Replace(SCANNERS = [s1])
# env3.scanner1(filename = 'out1')
# env3.scanner2(filename = 'out2')
# env3.scanner1(filename = 'out3')
# assert scanned_it['out1']
# assert scanned_it['out2']
# assert scanned_it['out3']
suffixes = [".c", ".cc", ".cxx", ".m4", ".m5"]
env = Environment()
try: del env['SCANNERS']
except KeyError: pass
s = list(map(env.get_scanner, suffixes))
assert s == [None, None, None, None, None], s
env = self.TestEnvironment(SCANNERS = [])
s = list(map(env.get_scanner, suffixes))
assert s == [None, None, None, None, None], s
env.Replace(SCANNERS = [s1])
s = list(map(env.get_scanner, suffixes))
assert s == [s1, s1, None, None, None], s
env.Append(SCANNERS = [s2])
s = list(map(env.get_scanner, suffixes))
assert s == [s1, s1, None, s2, None], s
env.AppendUnique(SCANNERS = [s3])
s = list(map(env.get_scanner, suffixes))
assert s == [s1, s1, None, s2, s3], s
env = env.Clone(SCANNERS = [s2])
s = list(map(env.get_scanner, suffixes))
assert s == [None, None, None, s2, None], s
env['SCANNERS'] = [s1]
s = list(map(env.get_scanner, suffixes))
assert s == [s1, s1, None, None, None], s
env.PrependUnique(SCANNERS = [s2, s1])
s = list(map(env.get_scanner, suffixes))
assert s == [s1, s1, None, s2, None], s
env.Prepend(SCANNERS = [s3])
s = list(map(env.get_scanner, suffixes))
assert s == [s1, s1, None, s3, s3], s
# Verify behavior of case-insensitive suffix matches on Windows.
uc_suffixes = [_.upper() for _ in suffixes]
env = Environment(SCANNERS = [s1, s2, s3],
PLATFORM = 'linux')
s = list(map(env.get_scanner, suffixes))
assert s == [s1, s1, None, s2, s3], s
s = list(map(env.get_scanner, uc_suffixes))
assert s == [None, None, None, None, None], s
env['PLATFORM'] = 'win32'
s = list(map(env.get_scanner, uc_suffixes))
assert s == [s1, s1, None, s2, s3], s
# Verify behavior for a scanner returning None (on Windows
# where we might try to perform case manipulation on None).
env.Replace(SCANNERS = [s4])
s = list(map(env.get_scanner, suffixes))
assert s == [None, None, None, None, None], s
def test_ENV(self):
"""Test setting the external ENV in Environments
"""
env = Environment()
assert 'ENV' in env.Dictionary()
env = self.TestEnvironment(ENV = { 'PATH' : '/foo:/bar' })
assert env.Dictionary('ENV')['PATH'] == '/foo:/bar'
def test_ReservedVariables(self):
"""Test warning generation when reserved variable names are set"""
reserved_variables = [
'CHANGED_SOURCES',
'CHANGED_TARGETS',
'SOURCE',
'SOURCES',
'TARGET',
'TARGETS',
'UNCHANGED_SOURCES',
'UNCHANGED_TARGETS',
]
warning = SCons.Warnings.ReservedVariableWarning
SCons.Warnings.enableWarningClass(warning)
old = SCons.Warnings.warningAsException(1)
try:
env4 = Environment()
for kw in self.reserved_variables:
exc_caught = None
try:
env4[kw] = 'xyzzy'
except warning:
exc_caught = 1
assert exc_caught, "Did not catch ReservedVariableWarning for `%s'" % kw
assert kw not in env4, "`%s' variable was incorrectly set" % kw
finally:
SCons.Warnings.warningAsException(old)
def test_FutureReservedVariables(self):
"""Test warning generation when future reserved variable names are set"""
future_reserved_variables = []
warning = SCons.Warnings.FutureReservedVariableWarning
SCons.Warnings.enableWarningClass(warning)
old = SCons.Warnings.warningAsException(1)
try:
env4 = Environment()
for kw in future_reserved_variables:
exc_caught = None
try:
env4[kw] = 'xyzzy'
except warning:
exc_caught = 1
assert exc_caught, "Did not catch FutureReservedVariableWarning for `%s'" % kw
assert kw in env4, "`%s' variable was not set" % kw
finally:
SCons.Warnings.warningAsException(old)
def test_IllegalVariables(self):
"""Test that use of illegal variables raises an exception"""
env = Environment()
def test_it(var, env=env):
exc_caught = None
try:
env[var] = 1
except SCons.Errors.UserError:
exc_caught = 1
assert exc_caught, "did not catch UserError for '%s'" % var
env['aaa'] = 1
assert env['aaa'] == 1, env['aaa']
test_it('foo/bar')
test_it('foo.bar')
test_it('foo-bar')
def test_autogenerate(self):
"""Test autogenerating variables in a dictionary."""
drive, p = os.path.splitdrive(os.getcwd())
def normalize_path(path, drive=drive):
if path[0] in '\\/':
path = drive + path
path = os.path.normpath(path)
drive, path = os.path.splitdrive(path)
return drive.lower() + path
env = self.TestEnvironment(LIBS = [ 'foo', 'bar', 'baz' ],
LIBLINKPREFIX = 'foo',
LIBLINKSUFFIX = 'bar')
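        # RDirs expands search-path entries relative to the 'xx' directory (and any attached repositories).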
def RDirs(pathlist, fs=env.fs):
return fs.Dir('xx').Rfindalldirs(pathlist)
env['RDirs'] = RDirs
flags = env.subst_list('$_LIBFLAGS', 1)[0]
assert flags == ['foobar', 'foobar', 'foobazbar'], flags
blat = env.fs.Dir('blat')
env.Replace(CPPPATH = [ 'foo', '$FOO/bar', blat ],
INCPREFIX = 'foo ',
INCSUFFIX = 'bar',
FOO = 'baz')
flags = env.subst_list('$_CPPINCFLAGS', 1)[0]
expect = [ '$(',
normalize_path('foo'),
normalize_path('xx/foobar'),
normalize_path('foo'),
normalize_path('xx/baz/bar'),
normalize_path('foo'),
normalize_path('blatbar'),
'$)',
]
assert flags == expect, flags
env.Replace(F77PATH = [ 'foo', '$FOO/bar', blat ],
INCPREFIX = 'foo ',
INCSUFFIX = 'bar',
FOO = 'baz')
flags = env.subst_list('$_F77INCFLAGS', 1)[0]
expect = [ '$(',
normalize_path('foo'),
normalize_path('xx/foobar'),
normalize_path('foo'),
normalize_path('xx/baz/bar'),
normalize_path('foo'),
normalize_path('blatbar'),
'$)',
]
assert flags == expect, flags
env.Replace(CPPPATH = '', F77PATH = '', LIBPATH = '')
l = env.subst_list('$_CPPINCFLAGS')
assert l == [[]], l
l = env.subst_list('$_F77INCFLAGS')
assert l == [[]], l
l = env.subst_list('$_LIBDIRFLAGS')
assert l == [[]], l
env.fs.Repository('/rep1')
env.fs.Repository('/rep2')
env.Replace(CPPPATH = [ 'foo', '/__a__/b', '$FOO/bar', blat],
INCPREFIX = '-I ',
INCSUFFIX = 'XXX',
FOO = 'baz')
flags = env.subst_list('$_CPPINCFLAGS', 1)[0]
expect = [ '$(',
'-I', normalize_path('xx/fooXXX'),
'-I', normalize_path('/rep1/xx/fooXXX'),
'-I', normalize_path('/rep2/xx/fooXXX'),
'-I', normalize_path('/__a__/bXXX'),
'-I', normalize_path('xx/baz/barXXX'),
'-I', normalize_path('/rep1/xx/baz/barXXX'),
'-I', normalize_path('/rep2/xx/baz/barXXX'),
'-I', normalize_path('blatXXX'),
'$)'
]
def normalize_if_path(arg, np=normalize_path):
if arg not in ('$(','$)','-I'):
return np(str(arg))
return arg
flags = list(map(normalize_if_path, flags))
assert flags == expect, flags
def test_platform(self):
"""Test specifying a platform callable when instantiating."""
class platform(object):
def __str__(self): return "TestPlatform"
def __call__(self, env): env['XYZZY'] = 777
def tool(env):
env['SET_TOOL'] = 'initialized'
assert env['PLATFORM'] == "TestPlatform"
env = self.TestEnvironment(platform = platform(), tools = [tool])
assert env['XYZZY'] == 777, env
assert env['PLATFORM'] == "TestPlatform"
assert env['SET_TOOL'] == "initialized"
def test_Default_PLATFORM(self):
"""Test overriding the default PLATFORM variable"""
class platform(object):
def __str__(self): return "DefaultTestPlatform"
def __call__(self, env): env['XYZZY'] = 888
def tool(env):
env['SET_TOOL'] = 'abcde'
assert env['PLATFORM'] == "DefaultTestPlatform"
import SCons.Defaults
save = SCons.Defaults.ConstructionEnvironment.copy()
try:
import SCons.Defaults
SCons.Defaults.ConstructionEnvironment.update({
'PLATFORM' : platform(),
})
env = self.TestEnvironment(tools = [tool])
assert env['XYZZY'] == 888, env
assert env['PLATFORM'] == "DefaultTestPlatform"
assert env['SET_TOOL'] == "abcde"
finally:
SCons.Defaults.ConstructionEnvironment = save
def test_tools(self):
"""Test specifying a tool callable when instantiating."""
def t1(env):
env['TOOL1'] = 111
def t2(env):
env['TOOL2'] = 222
def t3(env):
env['AAA'] = env['XYZ']
def t4(env):
env['TOOL4'] = 444
env = self.TestEnvironment(tools = [t1, t2, t3], XYZ = 'aaa')
assert env['TOOL1'] == 111, env['TOOL1']
assert env['TOOL2'] == 222, env
assert env['AAA'] == 'aaa', env
t4(env)
assert env['TOOL4'] == 444, env
test = TestCmd.TestCmd(workdir = '')
test.write('faketool.py', """\
def generate(env, **kw):
for k, v in kw.items():
env[k] = v
def exists(env):
return 1
""")
env = self.TestEnvironment(tools = [('faketool', {'a':1, 'b':2, 'c':3})],
toolpath = [test.workpath('')])
assert env['a'] == 1, env['a']
assert env['b'] == 2, env['b']
assert env['c'] == 3, env['c']
def test_Default_TOOLS(self):
"""Test overriding the default TOOLS variable"""
def t5(env):
env['TOOL5'] = 555
def t6(env):
env['TOOL6'] = 666
def t7(env):
env['BBB'] = env['XYZ']
def t8(env):
env['TOOL8'] = 888
import SCons.Defaults
save = SCons.Defaults.ConstructionEnvironment.copy()
try:
SCons.Defaults.ConstructionEnvironment.update({
'TOOLS' : [t5, t6, t7],
})
env = Environment(XYZ = 'bbb')
assert env['TOOL5'] == 555, env['TOOL5']
assert env['TOOL6'] == 666, env
assert env['BBB'] == 'bbb', env
t8(env)
assert env['TOOL8'] == 888, env
finally:
SCons.Defaults.ConstructionEnvironment = save
def test_null_tools(self):
"""Test specifying a tool of None is OK."""
def t1(env):
env['TOOL1'] = 111
def t2(env):
env['TOOL2'] = 222
env = self.TestEnvironment(tools = [t1, None, t2], XYZ = 'aaa')
assert env['TOOL1'] == 111, env['TOOL1']
assert env['TOOL2'] == 222, env
assert env['XYZ'] == 'aaa', env
env = self.TestEnvironment(tools = [None], XYZ = 'xyz')
assert env['XYZ'] == 'xyz', env
env = self.TestEnvironment(tools = [t1, '', t2], XYZ = 'ddd')
assert env['TOOL1'] == 111, env['TOOL1']
assert env['TOOL2'] == 222, env
assert env['XYZ'] == 'ddd', env
def test_concat(self):
"Test _concat()"
e1 = self.TestEnvironment(PRE='pre', SUF='suf', STR='a b', LIST=['a', 'b'])
s = e1.subst
x = s("${_concat('', '', '', __env__)}")
assert x == '', x
x = s("${_concat('', [], '', __env__)}")
assert x == '', x
x = s("${_concat(PRE, '', SUF, __env__)}")
assert x == '', x
x = s("${_concat(PRE, STR, SUF, __env__)}")
assert x == 'prea bsuf', x
x = s("${_concat(PRE, LIST, SUF, __env__)}")
assert x == 'preasuf prebsuf', x
def test_concat_nested(self):
"Test _concat() on a nested substitution strings."
e = self.TestEnvironment(PRE='pre', SUF='suf',
L1=['a', 'b'],
L2=['c', 'd'],
L3=['$L2'])
x = e.subst('$( ${_concat(PRE, L1, SUF, __env__)} $)')
assert x == 'preasuf prebsuf', x
e.AppendUnique(L1 = ['$L2'])
x = e.subst('$( ${_concat(PRE, L1, SUF, __env__)} $)')
assert x == 'preasuf prebsuf precsuf predsuf', x
e.AppendUnique(L1 = ['$L3'])
x = e.subst('$( ${_concat(PRE, L1, SUF, __env__)} $)')
assert x == 'preasuf prebsuf precsuf predsuf precsuf predsuf', x
def test_gvars(self):
"""Test the Environment gvars() method"""
env = self.TestEnvironment(XXX = 'x', YYY = 'y', ZZZ = 'z')
gvars = env.gvars()
assert gvars['XXX'] == 'x', gvars['XXX']
assert gvars['YYY'] == 'y', gvars['YYY']
assert gvars['ZZZ'] == 'z', gvars['ZZZ']
def test__update(self):
"""Test the _update() method"""
env = self.TestEnvironment(X = 'x', Y = 'y', Z = 'z')
assert env['X'] == 'x', env['X']
assert env['Y'] == 'y', env['Y']
assert env['Z'] == 'z', env['Z']
env._update({'X' : 'xxx',
'TARGET' : 't',
'TARGETS' : 'ttt',
'SOURCE' : 's',
'SOURCES' : 'sss',
'Z' : 'zzz'})
assert env['X'] == 'xxx', env['X']
assert env['Y'] == 'y', env['Y']
assert env['Z'] == 'zzz', env['Z']
assert env['TARGET'] == 't', env['TARGET']
assert env['TARGETS'] == 'ttt', env['TARGETS']
assert env['SOURCE'] == 's', env['SOURCE']
assert env['SOURCES'] == 'sss', env['SOURCES']
def test_Append(self):
"""Test appending to construction variables in an Environment
"""
b1 = Environment()['BUILDERS']
b2 = Environment()['BUILDERS']
assert b1 == b2, diff_dict(b1, b2)
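        # Each case below is a triple: initial value, value to Append, expected result.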
cases = [
'a1', 'A1', 'a1A1',
'a2', ['A2'], ['a2', 'A2'],
'a3', UL(['A3']), UL(['a', '3', 'A3']),
'a4', '', 'a4',
'a5', [], ['a5'],
'a6', UL([]), UL(['a', '6']),
'a7', [''], ['a7', ''],
'a8', UL(['']), UL(['a', '8', '']),
['e1'], 'E1', ['e1', 'E1'],
['e2'], ['E2'], ['e2', 'E2'],
['e3'], UL(['E3']), UL(['e3', 'E3']),
['e4'], '', ['e4'],
['e5'], [], ['e5'],
['e6'], UL([]), UL(['e6']),
['e7'], [''], ['e7', ''],
['e8'], UL(['']), UL(['e8', '']),
UL(['i1']), 'I1', UL(['i1', 'I', '1']),
UL(['i2']), ['I2'], UL(['i2', 'I2']),
UL(['i3']), UL(['I3']), UL(['i3', 'I3']),
UL(['i4']), '', UL(['i4']),
UL(['i5']), [], UL(['i5']),
UL(['i6']), UL([]), UL(['i6']),
UL(['i7']), [''], UL(['i7', '']),
UL(['i8']), UL(['']), UL(['i8', '']),
{'d1':1}, 'D1', {'d1':1, 'D1':None},
{'d2':1}, ['D2'], {'d2':1, 'D2':None},
{'d3':1}, UL(['D3']), {'d3':1, 'D3':None},
{'d4':1}, {'D4':1}, {'d4':1, 'D4':1},
{'d5':1}, UD({'D5':1}), UD({'d5':1, 'D5':1}),
UD({'u1':1}), 'U1', UD({'u1':1, 'U1':None}),
UD({'u2':1}), ['U2'], UD({'u2':1, 'U2':None}),
UD({'u3':1}), UL(['U3']), UD({'u3':1, 'U3':None}),
UD({'u4':1}), {'U4':1}, UD({'u4':1, 'U4':1}),
UD({'u5':1}), UD({'U5':1}), UD({'u5':1, 'U5':1}),
'', 'M1', 'M1',
'', ['M2'], ['M2'],
'', UL(['M3']), UL(['M3']),
'', '', '',
'', [], [],
'', UL([]), UL([]),
'', [''], [''],
'', UL(['']), UL(['']),
[], 'N1', ['N1'],
[], ['N2'], ['N2'],
[], UL(['N3']), UL(['N3']),
[], '', [],
[], [], [],
[], UL([]), UL([]),
[], [''], [''],
[], UL(['']), UL(['']),
UL([]), 'O1', ['O', '1'],
UL([]), ['O2'], ['O2'],
UL([]), UL(['O3']), UL(['O3']),
UL([]), '', UL([]),
UL([]), [], UL([]),
UL([]), UL([]), UL([]),
UL([]), [''], UL(['']),
UL([]), UL(['']), UL(['']),
[''], 'P1', ['', 'P1'],
[''], ['P2'], ['', 'P2'],
[''], UL(['P3']), UL(['', 'P3']),
[''], '', [''],
[''], [], [''],
[''], UL([]), UL(['']),
[''], [''], ['', ''],
[''], UL(['']), UL(['', '']),
UL(['']), 'Q1', ['', 'Q', '1'],
UL(['']), ['Q2'], ['', 'Q2'],
UL(['']), UL(['Q3']), UL(['', 'Q3']),
UL(['']), '', UL(['']),
UL(['']), [], UL(['']),
UL(['']), UL([]), UL(['']),
UL(['']), [''], UL(['', '']),
UL(['']), UL(['']), UL(['', '']),
]
env = Environment()
failed = 0
while cases:
input, append, expect = cases[:3]
env['XXX'] = copy.copy(input)
try:
env.Append(XXX = append)
except Exception as e:
if failed == 0: print()
print(" %s Append %s exception: %s" % \
(repr(input), repr(append), e))
failed = failed + 1
else:
result = env['XXX']
if result != expect:
if failed == 0: print()
print(" %s Append %s => %s did not match %s" % \
(repr(input), repr(append), repr(result), repr(expect)))
failed = failed + 1
del cases[:3]
assert failed == 0, "%d Append() cases failed" % failed
env['UL'] = UL(['foo'])
env.Append(UL = 'bar')
result = env['UL']
assert isinstance(result, UL), repr(result)
assert result == ['foo', 'b', 'a', 'r'], result
env['CLVar'] = CLVar(['foo'])
env.Append(CLVar = 'bar')
result = env['CLVar']
assert isinstance(result, CLVar), repr(result)
assert result == ['foo', 'bar'], result
class C(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __eq__(self, other):
raise Exception("should not compare")
ccc = C('ccc')
env2 = self.TestEnvironment(CCC1 = ['c1'], CCC2 = ccc)
env2.Append(CCC1 = ccc, CCC2 = ['c2'])
assert env2['CCC1'][0] == 'c1', env2['CCC1']
assert env2['CCC1'][1] is ccc, env2['CCC1']
assert env2['CCC2'][0] is ccc, env2['CCC2']
assert env2['CCC2'][1] == 'c2', env2['CCC2']
env3 = self.TestEnvironment(X = {'x1' : 7})
env3.Append(X = {'x1' : 8, 'x2' : 9}, Y = {'y1' : 10})
assert env3['X'] == {'x1': 8, 'x2': 9}, env3['X']
assert env3['Y'] == {'y1': 10}, env3['Y']
z1 = Builder()
z2 = Builder()
env4 = self.TestEnvironment(BUILDERS = {'z1' : z1})
env4.Append(BUILDERS = {'z2' : z2})
assert env4['BUILDERS'] == {'z1' : z1, 'z2' : z2}, env4['BUILDERS']
assert hasattr(env4, 'z1')
assert hasattr(env4, 'z2')
def test_AppendENVPath(self):
"""Test appending to an ENV path."""
env1 = self.TestEnvironment(ENV = {'PATH': r'C:\dir\num\one;C:\dir\num\two'},
MYENV = {'MYPATH': r'C:\mydir\num\one;C:\mydir\num\two'})
# have to include the pathsep here so that the test will work on UNIX too.
env1.AppendENVPath('PATH',r'C:\dir\num\two', sep = ';')
env1.AppendENVPath('PATH',r'C:\dir\num\three', sep = ';')
env1.AppendENVPath('MYPATH',r'C:\mydir\num\three','MYENV', sep = ';')
env1.AppendENVPath('MYPATH',r'C:\mydir\num\one','MYENV', sep = ';', delete_existing=1)
# this should do nothing since delete_existing is 0
env1.AppendENVPath('MYPATH',r'C:\mydir\num\three','MYENV', sep = ';')
assert(env1['ENV']['PATH'] == r'C:\dir\num\one;C:\dir\num\two;C:\dir\num\three')
assert(env1['MYENV']['MYPATH'] == r'C:\mydir\num\two;C:\mydir\num\three;C:\mydir\num\one')
test = TestCmd.TestCmd(workdir = '')
test.subdir('sub1', 'sub2')
p=env1['ENV']['PATH']
env1.AppendENVPath('PATH','#sub1', sep = ';')
env1.AppendENVPath('PATH',env1.fs.Dir('sub2'), sep = ';')
assert env1['ENV']['PATH'] == p + ';sub1;sub2', env1['ENV']['PATH']
def test_AppendUnique(self):
"""Test appending to unique values to construction variables
This strips values that are already present when lists are
involved."""
env = self.TestEnvironment(AAA1 = 'a1',
AAA2 = 'a2',
AAA3 = 'a3',
AAA4 = 'a4',
AAA5 = 'a5',
BBB1 = ['b1'],
BBB2 = ['b2'],
BBB3 = ['b3'],
BBB4 = ['b4'],
BBB5 = ['b5'],
CCC1 = '',
CCC2 = '',
DDD1 = ['a', 'b', 'c'])
env['LL1'] = [env.Literal('a literal'), env.Literal('b literal')]
env['LL2'] = [env.Literal('c literal'), env.Literal('b literal')]
env.AppendUnique(AAA1 = 'a1',
AAA2 = ['a2'],
AAA3 = ['a3', 'b', 'c', 'c', 'b', 'a3'], # ignore dups
AAA4 = 'a4.new',
AAA5 = ['a5.new'],
BBB1 = 'b1',
BBB2 = ['b2'],
BBB3 = ['b3', 'c', 'd', 'c', 'b3'],
BBB4 = 'b4.new',
BBB5 = ['b5.new'],
CCC1 = 'c1',
CCC2 = ['c2'],
DDD1 = 'b',
LL1 = env.Literal('a literal'),
LL2 = env.Literal('a literal'))
assert env['AAA1'] == 'a1a1', env['AAA1']
assert env['AAA2'] == ['a2'], env['AAA2']
assert env['AAA3'] == ['a3', 'b', 'c'], env['AAA3']
assert env['AAA4'] == 'a4a4.new', env['AAA4']
assert env['AAA5'] == ['a5', 'a5.new'], env['AAA5']
assert env['BBB1'] == ['b1'], env['BBB1']
assert env['BBB2'] == ['b2'], env['BBB2']
assert env['BBB3'] == ['b3', 'c', 'd'], env['BBB3']
assert env['BBB4'] == ['b4', 'b4.new'], env['BBB4']
assert env['BBB5'] == ['b5', 'b5.new'], env['BBB5']
assert env['CCC1'] == 'c1', env['CCC1']
assert env['CCC2'] == ['c2'], env['CCC2']
assert env['DDD1'] == ['a', 'b', 'c'], env['DDD1']
assert env['LL1'] == [env.Literal('a literal'), env.Literal('b literal')], env['LL1']
assert env['LL2'] == [env.Literal('c literal'), env.Literal('b literal'), env.Literal('a literal')], [str(x) for x in env['LL2']]
env.AppendUnique(DDD1 = 'b', delete_existing=1)
assert env['DDD1'] == ['a', 'c', 'b'], env['DDD1'] # b moves to end
env.AppendUnique(DDD1 = ['a','b'], delete_existing=1)
assert env['DDD1'] == ['c', 'a', 'b'], env['DDD1'] # a & b move to end
env.AppendUnique(DDD1 = ['e','f', 'e'], delete_existing=1)
assert env['DDD1'] == ['c', 'a', 'b', 'f', 'e'], env['DDD1'] # add last
env['CLVar'] = CLVar([])
env.AppendUnique(CLVar = 'bar')
result = env['CLVar']
assert isinstance(result, CLVar), repr(result)
assert result == ['bar'], result
env['CLVar'] = CLVar(['abc'])
env.AppendUnique(CLVar = 'bar')
result = env['CLVar']
assert isinstance(result, CLVar), repr(result)
assert result == ['abc', 'bar'], result
env['CLVar'] = CLVar(['bar'])
env.AppendUnique(CLVar = 'bar')
result = env['CLVar']
assert isinstance(result, CLVar), repr(result)
assert result == ['bar'], result
def test_Clone(self):
"""Test construction environment copying
Update the copy independently afterwards and check that
the original remains intact (that is, no dangling
references point to objects in the copied environment).
Clone the original with some construction variable
updates and check that the original remains intact
and the copy has the updated values.
"""
env1 = self.TestEnvironment(XXX = 'x', YYY = 'y')
env2 = env1.Clone()
env1copy = env1.Clone()
assert env1copy == env1copy
assert env2 == env2
env2.Replace(YYY = 'yyy')
assert env2 == env2
assert env1 != env2
assert env1 == env1copy
env3 = env1.Clone(XXX = 'x3', ZZZ = 'z3')
assert env3 == env3
assert env3.Dictionary('XXX') == 'x3'
assert env3.Dictionary('YYY') == 'y'
assert env3.Dictionary('ZZZ') == 'z3'
assert env1 == env1copy
# Ensure that lists and dictionaries are
# deep copied, but not instances.
class TestA(object):
pass
env1 = self.TestEnvironment(XXX=TestA(), YYY = [ 1, 2, 3 ],
ZZZ = { 1:2, 3:4 })
env2=env1.Clone()
env2.Dictionary('YYY').append(4)
env2.Dictionary('ZZZ')[5] = 6
assert env1.Dictionary('XXX') is env2.Dictionary('XXX')
assert 4 in env2.Dictionary('YYY')
assert not 4 in env1.Dictionary('YYY')
assert 5 in env2.Dictionary('ZZZ')
assert 5 not in env1.Dictionary('ZZZ')
#
env1 = self.TestEnvironment(BUILDERS = {'b1' : Builder()})
assert hasattr(env1, 'b1'), "env1.b1 was not set"
assert env1.b1.object == env1, "b1.object doesn't point to env1"
env2 = env1.Clone(BUILDERS = {'b2' : Builder()})
assert env2 is env2
assert env2 == env2
assert hasattr(env1, 'b1'), "b1 was mistakenly cleared from env1"
assert env1.b1.object == env1, "b1.object was changed"
assert not hasattr(env2, 'b1'), "b1 was not cleared from env2"
assert hasattr(env2, 'b2'), "env2.b2 was not set"
assert env2.b2.object == env2, "b2.object doesn't point to env2"
# Ensure that specifying new tools in a copied environment
# works.
def foo(env): env['FOO'] = 1
def bar(env): env['BAR'] = 2
def baz(env): env['BAZ'] = 3
env1 = self.TestEnvironment(tools=[foo])
env2 = env1.Clone()
env3 = env1.Clone(tools=[bar, baz])
assert env1.get('FOO') == 1
assert env1.get('BAR') is None
assert env1.get('BAZ') is None
assert env2.get('FOO') == 1
assert env2.get('BAR') is None
assert env2.get('BAZ') is None
assert env3.get('FOO') == 1
assert env3.get('BAR') == 2
assert env3.get('BAZ') == 3
# Ensure that recursive variable substitution when copying
# environments works properly.
env1 = self.TestEnvironment(CCFLAGS = '-DFOO', XYZ = '-DXYZ')
env2 = env1.Clone(CCFLAGS = '$CCFLAGS -DBAR',
XYZ = ['-DABC', 'x $XYZ y', '-DDEF'])
x = env2.get('CCFLAGS')
assert x == '-DFOO -DBAR', x
x = env2.get('XYZ')
assert x == ['-DABC', 'x -DXYZ y', '-DDEF'], x
# Ensure that special properties of a class don't get
# lost on copying.
env1 = self.TestEnvironment(FLAGS = CLVar('flag1 flag2'))
x = env1.get('FLAGS')
assert x == ['flag1', 'flag2'], x
env2 = env1.Clone()
env2.Append(FLAGS = 'flag3 flag4')
x = env2.get('FLAGS')
assert x == ['flag1', 'flag2', 'flag3', 'flag4'], x
x = env1.get('FLAGS')
assert x == ['flag1', 'flag2'], x
# Ensure that appending directly to a copied CLVar
# doesn't modify the original.
env1 = self.TestEnvironment(FLAGS = CLVar('flag1 flag2'))
x = env1.get('FLAGS')
assert x == ['flag1', 'flag2'], x
env2 = env1.Clone()
env2['FLAGS'] += ['flag3', 'flag4']
x = env2.get('FLAGS')
assert x == ['flag1', 'flag2', 'flag3', 'flag4'], x
x = env1.get('FLAGS')
assert x == ['flag1', 'flag2'], x
# Test that the environment stores the toolpath and
# re-uses it for copies.
test = TestCmd.TestCmd(workdir = '')
test.write('xxx.py', """\
def exists(env):
1
def generate(env):
env['XXX'] = 'one'
""")
test.write('yyy.py', """\
def exists(env):
1
def generate(env):
env['YYY'] = 'two'
""")
env = self.TestEnvironment(tools=['xxx'], toolpath=[test.workpath('')])
assert env['XXX'] == 'one', env['XXX']
env = env.Clone(tools=['yyy'])
assert env['YYY'] == 'two', env['YYY']
        # Test that variables passed to Clone() are visible to the
        # specified tools and still win after those tools have run.
real_value = [4]
def my_tool(env, rv=real_value):
assert env['KEY_THAT_I_WANT'] == rv[0]
env['KEY_THAT_I_WANT'] = rv[0] + 1
env = self.TestEnvironment()
real_value[0] = 5
env = env.Clone(KEY_THAT_I_WANT=5, tools=[my_tool])
assert env['KEY_THAT_I_WANT'] == real_value[0], env['KEY_THAT_I_WANT']
real_value[0] = 6
env = env.Clone(KEY_THAT_I_WANT=6, tools=[my_tool])
assert env['KEY_THAT_I_WANT'] == real_value[0], env['KEY_THAT_I_WANT']
# test for pull request #150
env = self.TestEnvironment()
env._dict.pop('BUILDERS')
assert ('BUILDERS' in env) is False
env2 = env.Clone()
def test_Copy(self):
"""Test copying using the old env.Copy() method"""
env1 = self.TestEnvironment(XXX = 'x', YYY = 'y')
env2 = env1.Copy()
env1copy = env1.Copy()
assert env1copy == env1copy
assert env2 == env2
env2.Replace(YYY = 'yyy')
assert env2 == env2
assert env1 != env2
assert env1 == env1copy
def test_Detect(self):
"""Test Detect()ing tools"""
test = TestCmd.TestCmd(workdir = '')
test.subdir('sub1', 'sub2')
sub1 = test.workpath('sub1')
sub2 = test.workpath('sub2')
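        # On win32 any matching file is detected; on POSIX the candidate must also be executable.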
if sys.platform == 'win32':
test.write(['sub1', 'xxx'], "sub1/xxx\n")
test.write(['sub2', 'xxx'], "sub2/xxx\n")
env = self.TestEnvironment(ENV = { 'PATH' : [sub1, sub2] })
x = env.Detect('xxx.exe')
assert x is None, x
test.write(['sub2', 'xxx.exe'], "sub2/xxx.exe\n")
env = self.TestEnvironment(ENV = { 'PATH' : [sub1, sub2] })
x = env.Detect('xxx.exe')
assert x == 'xxx.exe', x
test.write(['sub1', 'xxx.exe'], "sub1/xxx.exe\n")
x = env.Detect('xxx.exe')
assert x == 'xxx.exe', x
else:
test.write(['sub1', 'xxx.exe'], "sub1/xxx.exe\n")
test.write(['sub2', 'xxx.exe'], "sub2/xxx.exe\n")
env = self.TestEnvironment(ENV = { 'PATH' : [sub1, sub2] })
x = env.Detect('xxx.exe')
assert x is None, x
sub2_xxx_exe = test.workpath('sub2', 'xxx.exe')
os.chmod(sub2_xxx_exe, 0o755)
env = self.TestEnvironment(ENV = { 'PATH' : [sub1, sub2] })
x = env.Detect('xxx.exe')
assert x == 'xxx.exe', x
sub1_xxx_exe = test.workpath('sub1', 'xxx.exe')
os.chmod(sub1_xxx_exe, 0o755)
x = env.Detect('xxx.exe')
assert x == 'xxx.exe', x
env = self.TestEnvironment(ENV = { 'PATH' : [] })
x = env.Detect('xxx.exe')
assert x is None, x
def test_Dictionary(self):
"""Test retrieval of known construction variables
Fetch them from the Dictionary and check for well-known
defaults that get inserted.
"""
env = self.TestEnvironment(XXX = 'x', YYY = 'y', ZZZ = 'z')
assert env.Dictionary('XXX') == 'x'
assert env.Dictionary('YYY') == 'y'
assert env.Dictionary('XXX', 'ZZZ') == ['x', 'z']
xxx, zzz = env.Dictionary('XXX', 'ZZZ')
assert xxx == 'x'
assert zzz == 'z'
assert 'BUILDERS' in env.Dictionary()
assert 'CC' in env.Dictionary()
assert 'CCFLAGS' in env.Dictionary()
assert 'ENV' in env.Dictionary()
assert env['XXX'] == 'x'
env['XXX'] = 'foo'
assert env.Dictionary('XXX') == 'foo'
del env['XXX']
assert 'XXX' not in env.Dictionary()
def test_FindIxes(self):
"Test FindIxes()"
env = self.TestEnvironment(LIBPREFIX='lib',
LIBSUFFIX='.a',
SHLIBPREFIX='lib',
SHLIBSUFFIX='.so',
PREFIX='pre',
SUFFIX='post')
paths = [os.path.join('dir', 'libfoo.a'),
os.path.join('dir', 'libfoo.so')]
assert paths[0] == env.FindIxes(paths, 'LIBPREFIX', 'LIBSUFFIX')
assert paths[1] == env.FindIxes(paths, 'SHLIBPREFIX', 'SHLIBSUFFIX')
assert None is env.FindIxes(paths, 'PREFIX', 'POST')
paths = ['libfoo.a', 'prefoopost']
assert paths[0] == env.FindIxes(paths, 'LIBPREFIX', 'LIBSUFFIX')
assert None is env.FindIxes(paths, 'SHLIBPREFIX', 'SHLIBSUFFIX')
assert paths[1] == env.FindIxes(paths, 'PREFIX', 'SUFFIX')
def test_ParseConfig(self):
"""Test the ParseConfig() method"""
env = self.TestEnvironment(COMMAND='command',
ASFLAGS='assembler',
CCFLAGS=[''],
CPPDEFINES=[],
CPPFLAGS=[''],
CPPPATH='string',
FRAMEWORKPATH=[],
FRAMEWORKS=[],
LIBPATH=['list'],
LIBS='',
LINKFLAGS=[''],
RPATH=[])
orig_backtick = env.backtick
class my_backtick(object):
def __init__(self, save_command, output):
self.save_command = save_command
self.output = output
def __call__(self, command):
self.save_command.append(command)
return self.output
try:
save_command = []
env.backtick = my_backtick(save_command,
"-I/usr/include/fum -I bar -X\n" + \
"-L/usr/fax -L foo -lxxx -l yyy " + \
"-Wa,-as -Wl,-link " + \
"-Wl,-rpath=rpath1 " + \
"-Wl,-R,rpath2 " + \
"-Wl,-Rrpath3 " + \
"-Wp,-cpp abc " + \
"-framework Carbon " + \
"-frameworkdir=fwd1 " + \
"-Ffwd2 " + \
"-F fwd3 " + \
"-pthread " + \
"-mno-cygwin -mwindows " + \
"-arch i386 -isysroot /tmp " + \
"-iquote /usr/include/foo1 " + \
"-isystem /usr/include/foo2 " + \
"-idirafter /usr/include/foo3 " + \
"+DD64 " + \
"-DFOO -DBAR=value")
env.ParseConfig("fake $COMMAND")
assert save_command == ['fake command'], save_command
assert env['ASFLAGS'] == ['assembler', '-as'], env['ASFLAGS']
assert env['CCFLAGS'] == ['', '-X', '-Wa,-as',
'-pthread', '-mno-cygwin',
('-arch', 'i386'), ('-isysroot', '/tmp'),
('-iquote', '/usr/include/foo1'),
('-isystem', '/usr/include/foo2'),
('-idirafter', '/usr/include/foo3'),
'+DD64'], env['CCFLAGS']
assert env['CPPDEFINES'] == ['FOO', ['BAR', 'value']], env['CPPDEFINES']
assert env['CPPFLAGS'] == ['', '-Wp,-cpp'], env['CPPFLAGS']
assert env['CPPPATH'] == ['string', '/usr/include/fum', 'bar'], env['CPPPATH']
assert env['FRAMEWORKPATH'] == ['fwd1', 'fwd2', 'fwd3'], env['FRAMEWORKPATH']
assert env['FRAMEWORKS'] == ['Carbon'], env['FRAMEWORKS']
assert env['LIBPATH'] == ['list', '/usr/fax', 'foo'], env['LIBPATH']
assert env['LIBS'] == ['xxx', 'yyy', env.File('abc')], env['LIBS']
assert env['LINKFLAGS'] == ['', '-Wl,-link', '-pthread',
'-mno-cygwin', '-mwindows',
('-arch', 'i386'),
('-isysroot', '/tmp'),
'+DD64'], env['LINKFLAGS']
assert env['RPATH'] == ['rpath1', 'rpath2', 'rpath3'], env['RPATH']
env.backtick = my_backtick([], "-Ibar")
env.ParseConfig("fake2")
assert env['CPPPATH'] == ['string', '/usr/include/fum', 'bar'], env['CPPPATH']
env.ParseConfig("fake2", unique=0)
assert env['CPPPATH'] == ['string', '/usr/include/fum', 'bar', 'bar'], env['CPPPATH']
finally:
env.backtick = orig_backtick
def test_ParseDepends(self):
"""Test the ParseDepends() method"""
test = TestCmd.TestCmd(workdir = '')
test.write('single', """
#file: dependency
f0: \
d1 \
d2 \
d3 \
""")
test.write('multiple', """
f1: foo
f2 f3: bar
f4: abc def
#file: dependency
f5: \
ghi \
jkl \
mno \
""")
env = self.TestEnvironment(SINGLE = test.workpath('single'))
tlist = []
dlist = []
def my_depends(target, dependency, tlist=tlist, dlist=dlist):
tlist.extend(target)
dlist.extend(dependency)
env.Depends = my_depends
env.ParseDepends(test.workpath('does_not_exist'))
exc_caught = None
try:
env.ParseDepends(test.workpath('does_not_exist'), must_exist=1)
except IOError:
exc_caught = 1
assert exc_caught, "did not catch expected IOError"
del tlist[:]
del dlist[:]
env.ParseDepends('$SINGLE', only_one=1)
t = list(map(str, tlist))
d = list(map(str, dlist))
assert t == ['f0'], t
assert d == ['d1', 'd2', 'd3'], d
del tlist[:]
del dlist[:]
env.ParseDepends(test.workpath('multiple'))
t = list(map(str, tlist))
d = list(map(str, dlist))
assert t == ['f1', 'f2', 'f3', 'f4', 'f5'], t
assert d == ['foo', 'bar', 'abc', 'def', 'ghi', 'jkl', 'mno'], d
exc_caught = None
try:
env.ParseDepends(test.workpath('multiple'), only_one=1)
except SCons.Errors.UserError:
exc_caught = 1
assert exc_caught, "did not catch expected UserError"
def test_Platform(self):
"""Test the Platform() method"""
env = self.TestEnvironment(WIN32='win32', NONE='no-such-platform')
exc_caught = None
try:
env.Platform('does_not_exist')
except SCons.Errors.UserError:
exc_caught = 1
assert exc_caught, "did not catch expected UserError"
exc_caught = None
try:
env.Platform('$NONE')
except SCons.Errors.UserError:
exc_caught = 1
assert exc_caught, "did not catch expected UserError"
env.Platform('posix')
assert env['OBJSUFFIX'] == '.o', env['OBJSUFFIX']
env.Platform('$WIN32')
assert env['OBJSUFFIX'] == '.obj', env['OBJSUFFIX']
def test_Prepend(self):
"""Test prepending to construction variables in an Environment
"""
cases = [
'a1', 'A1', 'A1a1',
'a2', ['A2'], ['A2', 'a2'],
'a3', UL(['A3']), UL(['A3', 'a', '3']),
'a4', '', 'a4',
'a5', [], ['a5'],
'a6', UL([]), UL(['a', '6']),
'a7', [''], ['', 'a7'],
'a8', UL(['']), UL(['', 'a', '8']),
['e1'], 'E1', ['E1', 'e1'],
['e2'], ['E2'], ['E2', 'e2'],
['e3'], UL(['E3']), UL(['E3', 'e3']),
['e4'], '', ['e4'],
['e5'], [], ['e5'],
['e6'], UL([]), UL(['e6']),
['e7'], [''], ['', 'e7'],
['e8'], UL(['']), UL(['', 'e8']),
UL(['i1']), 'I1', UL(['I', '1', 'i1']),
UL(['i2']), ['I2'], UL(['I2', 'i2']),
UL(['i3']), UL(['I3']), UL(['I3', 'i3']),
UL(['i4']), '', UL(['i4']),
UL(['i5']), [], UL(['i5']),
UL(['i6']), UL([]), UL(['i6']),
UL(['i7']), [''], UL(['', 'i7']),
UL(['i8']), UL(['']), UL(['', 'i8']),
{'d1':1}, 'D1', {'d1':1, 'D1':None},
{'d2':1}, ['D2'], {'d2':1, 'D2':None},
{'d3':1}, UL(['D3']), {'d3':1, 'D3':None},
{'d4':1}, {'D4':1}, {'d4':1, 'D4':1},
{'d5':1}, UD({'D5':1}), UD({'d5':1, 'D5':1}),
UD({'u1':1}), 'U1', UD({'u1':1, 'U1':None}),
UD({'u2':1}), ['U2'], UD({'u2':1, 'U2':None}),
UD({'u3':1}), UL(['U3']), UD({'u3':1, 'U3':None}),
UD({'u4':1}), {'U4':1}, UD({'u4':1, 'U4':1}),
UD({'u5':1}), UD({'U5':1}), UD({'u5':1, 'U5':1}),
'', 'M1', 'M1',
'', ['M2'], ['M2'],
'', UL(['M3']), UL(['M3']),
'', '', '',
'', [], [],
'', UL([]), UL([]),
'', [''], [''],
'', UL(['']), UL(['']),
[], 'N1', ['N1'],
[], ['N2'], ['N2'],
[], UL(['N3']), UL(['N3']),
[], '', [],
[], [], [],
[], UL([]), UL([]),
[], [''], [''],
[], UL(['']), UL(['']),
UL([]), 'O1', UL(['O', '1']),
UL([]), ['O2'], UL(['O2']),
UL([]), UL(['O3']), UL(['O3']),
UL([]), '', UL([]),
UL([]), [], UL([]),
UL([]), UL([]), UL([]),
UL([]), [''], UL(['']),
UL([]), UL(['']), UL(['']),
[''], 'P1', ['P1', ''],
[''], ['P2'], ['P2', ''],
[''], UL(['P3']), UL(['P3', '']),
[''], '', [''],
[''], [], [''],
[''], UL([]), UL(['']),
[''], [''], ['', ''],
[''], UL(['']), UL(['', '']),
UL(['']), 'Q1', UL(['Q', '1', '']),
UL(['']), ['Q2'], UL(['Q2', '']),
UL(['']), UL(['Q3']), UL(['Q3', '']),
UL(['']), '', UL(['']),
UL(['']), [], UL(['']),
UL(['']), UL([]), UL(['']),
UL(['']), [''], UL(['', '']),
UL(['']), UL(['']), UL(['', '']),
]
env = Environment()
failed = 0
while cases:
input, prepend, expect = cases[:3]
env['XXX'] = copy.copy(input)
try:
env.Prepend(XXX = prepend)
except Exception as e:
if failed == 0: print()
print(" %s Prepend %s exception: %s" % \
(repr(input), repr(prepend), e))
failed = failed + 1
else:
result = env['XXX']
if result != expect:
if failed == 0: print()
print(" %s Prepend %s => %s did not match %s" % \
(repr(input), repr(prepend), repr(result), repr(expect)))
failed = failed + 1
del cases[:3]
assert failed == 0, "%d Prepend() cases failed" % failed
env['UL'] = UL(['foo'])
env.Prepend(UL = 'bar')
result = env['UL']
assert isinstance(result, UL), repr(result)
assert result == ['b', 'a', 'r', 'foo'], result
env['CLVar'] = CLVar(['foo'])
env.Prepend(CLVar = 'bar')
result = env['CLVar']
assert isinstance(result, CLVar), repr(result)
assert result == ['bar', 'foo'], result
env3 = self.TestEnvironment(X = {'x1' : 7})
env3.Prepend(X = {'x1' : 8, 'x2' : 9}, Y = {'y1' : 10})
assert env3['X'] == {'x1': 8, 'x2' : 9}, env3['X']
assert env3['Y'] == {'y1': 10}, env3['Y']
z1 = Builder()
z2 = Builder()
env4 = self.TestEnvironment(BUILDERS = {'z1' : z1})
env4.Prepend(BUILDERS = {'z2' : z2})
assert env4['BUILDERS'] == {'z1' : z1, 'z2' : z2}, env4['BUILDERS']
assert hasattr(env4, 'z1')
assert hasattr(env4, 'z2')
def test_PrependENVPath(self):
"""Test prepending to an ENV path."""
env1 = self.TestEnvironment(ENV = {'PATH': r'C:\dir\num\one;C:\dir\num\two'},
MYENV = {'MYPATH': r'C:\mydir\num\one;C:\mydir\num\two'})
# have to include the pathsep here so that the test will work on UNIX too.
env1.PrependENVPath('PATH',r'C:\dir\num\two',sep = ';')
env1.PrependENVPath('PATH',r'C:\dir\num\three',sep = ';')
env1.PrependENVPath('MYPATH',r'C:\mydir\num\three','MYENV',sep = ';')
env1.PrependENVPath('MYPATH',r'C:\mydir\num\one','MYENV',sep = ';')
# this should do nothing since delete_existing is 0
env1.PrependENVPath('MYPATH',r'C:\mydir\num\three','MYENV', sep = ';', delete_existing=0)
assert(env1['ENV']['PATH'] == r'C:\dir\num\three;C:\dir\num\two;C:\dir\num\one')
assert(env1['MYENV']['MYPATH'] == r'C:\mydir\num\one;C:\mydir\num\three;C:\mydir\num\two')
test = TestCmd.TestCmd(workdir = '')
test.subdir('sub1', 'sub2')
p=env1['ENV']['PATH']
env1.PrependENVPath('PATH','#sub1', sep = ';')
env1.PrependENVPath('PATH',env1.fs.Dir('sub2'), sep = ';')
assert env1['ENV']['PATH'] == 'sub2;sub1;' + p, env1['ENV']['PATH']
def test_PrependUnique(self):
"""Test prepending unique values to construction variables
This strips values that are already present when lists are
involved."""
env = self.TestEnvironment(AAA1 = 'a1',
AAA2 = 'a2',
AAA3 = 'a3',
AAA4 = 'a4',
AAA5 = 'a5',
BBB1 = ['b1'],
BBB2 = ['b2'],
BBB3 = ['b3'],
BBB4 = ['b4'],
BBB5 = ['b5'],
CCC1 = '',
CCC2 = '',
DDD1 = ['a', 'b', 'c'])
env.PrependUnique(AAA1 = 'a1',
AAA2 = ['a2'],
AAA3 = ['a3', 'b', 'c', 'b', 'a3'], # ignore dups
AAA4 = 'a4.new',
AAA5 = ['a5.new'],
BBB1 = 'b1',
BBB2 = ['b2'],
BBB3 = ['b3', 'b', 'c', 'b3'],
BBB4 = 'b4.new',
BBB5 = ['b5.new'],
CCC1 = 'c1',
CCC2 = ['c2'],
DDD1 = 'b')
assert env['AAA1'] == 'a1a1', env['AAA1']
assert env['AAA2'] == ['a2'], env['AAA2']
assert env['AAA3'] == ['c', 'b', 'a3'], env['AAA3']
assert env['AAA4'] == 'a4.newa4', env['AAA4']
assert env['AAA5'] == ['a5.new', 'a5'], env['AAA5']
assert env['BBB1'] == ['b1'], env['BBB1']
assert env['BBB2'] == ['b2'], env['BBB2']
assert env['BBB3'] == ['b', 'c', 'b3'], env['BBB3']
assert env['BBB4'] == ['b4.new', 'b4'], env['BBB4']
assert env['BBB5'] == ['b5.new', 'b5'], env['BBB5']
assert env['CCC1'] == 'c1', env['CCC1']
assert env['CCC2'] == ['c2'], env['CCC2']
assert env['DDD1'] == ['a', 'b', 'c'], env['DDD1']
env.PrependUnique(DDD1 = 'b', delete_existing=1)
assert env['DDD1'] == ['b', 'a', 'c'], env['DDD1'] # b moves to front
env.PrependUnique(DDD1 = ['a','c'], delete_existing=1)
assert env['DDD1'] == ['a', 'c', 'b'], env['DDD1'] # a & c move to front
env.PrependUnique(DDD1 = ['d','e','d'], delete_existing=1)
assert env['DDD1'] == ['d', 'e', 'a', 'c', 'b'], env['DDD1']
env['CLVar'] = CLVar([])
env.PrependUnique(CLVar = 'bar')
result = env['CLVar']
assert isinstance(result, CLVar), repr(result)
assert result == ['bar'], result
env['CLVar'] = CLVar(['abc'])
env.PrependUnique(CLVar = 'bar')
result = env['CLVar']
assert isinstance(result, CLVar), repr(result)
assert result == ['bar', 'abc'], result
env['CLVar'] = CLVar(['bar'])
env.PrependUnique(CLVar = 'bar')
result = env['CLVar']
assert isinstance(result, CLVar), repr(result)
assert result == ['bar'], result
def test_Replace(self):
"""Test replacing construction variables in an Environment
After creation of the Environment, of course.
"""
env1 = self.TestEnvironment(AAA = 'a', BBB = 'b')
env1.Replace(BBB = 'bbb', CCC = 'ccc')
env2 = self.TestEnvironment(AAA = 'a', BBB = 'bbb', CCC = 'ccc')
assert env1 == env2, diff_env(env1, env2)
b1 = Builder()
b2 = Builder()
env3 = self.TestEnvironment(BUILDERS = {'b1' : b1})
assert hasattr(env3, 'b1'), "b1 was not set"
env3.Replace(BUILDERS = {'b2' : b2})
assert not hasattr(env3, 'b1'), "b1 was not cleared"
assert hasattr(env3, 'b2'), "b2 was not set"
    def test_ReplaceIxes(self):
        """Test the ReplaceIxes() method"""
env = self.TestEnvironment(LIBPREFIX='lib',
LIBSUFFIX='.a',
SHLIBPREFIX='lib',
SHLIBSUFFIX='.so',
PREFIX='pre',
SUFFIX='post')
assert 'libfoo.a' == env.ReplaceIxes('libfoo.so',
'SHLIBPREFIX', 'SHLIBSUFFIX',
'LIBPREFIX', 'LIBSUFFIX')
assert os.path.join('dir', 'libfoo.a') == env.ReplaceIxes(os.path.join('dir', 'libfoo.so'),
'SHLIBPREFIX', 'SHLIBSUFFIX',
'LIBPREFIX', 'LIBSUFFIX')
assert 'libfoo.a' == env.ReplaceIxes('prefoopost',
'PREFIX', 'SUFFIX',
'LIBPREFIX', 'LIBSUFFIX')
def test_SetDefault(self):
"""Test the SetDefault method"""
env = self.TestEnvironment(tools = [])
env.SetDefault(V1 = 1)
env.SetDefault(V1 = 2)
assert env['V1'] == 1
env['V2'] = 2
env.SetDefault(V2 = 1)
assert env['V2'] == 2
def test_Tool(self):
"""Test the Tool() method"""
env = self.TestEnvironment(LINK='link', NONE='no-such-tool')
exc_caught = None
try:
env.Tool('does_not_exist')
except SCons.Errors.SConsEnvironmentError:
exc_caught = 1
assert exc_caught, "did not catch expected SConsEnvironmentError"
exc_caught = None
try:
env.Tool('$NONE')
except SCons.Errors.SConsEnvironmentError:
exc_caught = 1
assert exc_caught, "did not catch expected SConsEnvironmentError"
# Use a non-existent toolpath directory just to make sure we
# can call Tool() with the keyword argument.
env.Tool('cc', toolpath=['/no/such/directory'])
assert env['CC'] == 'cc', env['CC']
env.Tool('$LINK')
assert env['LINK'] == '$SMARTLINK', env['LINK']
# Test that the environment stores the toolpath and
# re-uses it for later calls.
test = TestCmd.TestCmd(workdir = '')
test.write('xxx.py', """\
def exists(env):
1
def generate(env):
env['XXX'] = 'one'
""")
test.write('yyy.py', """\
def exists(env):
1
def generate(env):
env['YYY'] = 'two'
""")
env = self.TestEnvironment(tools=['xxx'], toolpath=[test.workpath('')])
assert env['XXX'] == 'one', env['XXX']
env.Tool('yyy')
assert env['YYY'] == 'two', env['YYY']
def test_WhereIs(self):
"""Test the WhereIs() method"""
test = TestCmd.TestCmd(workdir = '')
sub1_xxx_exe = test.workpath('sub1', 'xxx.exe')
sub2_xxx_exe = test.workpath('sub2', 'xxx.exe')
sub3_xxx_exe = test.workpath('sub3', 'xxx.exe')
sub4_xxx_exe = test.workpath('sub4', 'xxx.exe')
test.subdir('subdir', 'sub1', 'sub2', 'sub3', 'sub4')
if sys.platform != 'win32':
test.write(sub1_xxx_exe, "\n")
os.mkdir(sub2_xxx_exe)
test.write(sub3_xxx_exe, "\n")
os.chmod(sub3_xxx_exe, 0o777)
test.write(sub4_xxx_exe, "\n")
os.chmod(sub4_xxx_exe, 0o777)
env_path = os.environ['PATH']
pathdirs_1234 = [ test.workpath('sub1'),
test.workpath('sub2'),
test.workpath('sub3'),
test.workpath('sub4'),
] + env_path.split(os.pathsep)
pathdirs_1243 = [ test.workpath('sub1'),
test.workpath('sub2'),
test.workpath('sub4'),
test.workpath('sub3'),
] + env_path.split(os.pathsep)
path = os.pathsep.join(pathdirs_1234)
env = self.TestEnvironment(ENV = {'PATH' : path})
wi = env.WhereIs('xxx.exe')
assert wi == test.workpath(sub3_xxx_exe), wi
wi = env.WhereIs('xxx.exe', pathdirs_1243)
assert wi == test.workpath(sub4_xxx_exe), wi
wi = env.WhereIs('xxx.exe', os.pathsep.join(pathdirs_1243))
assert wi == test.workpath(sub4_xxx_exe), wi
wi = env.WhereIs('xxx.exe', reject = sub3_xxx_exe)
assert wi == test.workpath(sub4_xxx_exe), wi
wi = env.WhereIs('xxx.exe', pathdirs_1243, reject = sub3_xxx_exe)
assert wi == test.workpath(sub4_xxx_exe), wi
path = os.pathsep.join(pathdirs_1243)
env = self.TestEnvironment(ENV = {'PATH' : path})
wi = env.WhereIs('xxx.exe')
assert wi == test.workpath(sub4_xxx_exe), wi
wi = env.WhereIs('xxx.exe', pathdirs_1234)
assert wi == test.workpath(sub3_xxx_exe), wi
wi = env.WhereIs('xxx.exe', os.pathsep.join(pathdirs_1234))
assert wi == test.workpath(sub3_xxx_exe), wi
if sys.platform == 'win32':
wi = env.WhereIs('xxx', pathext = '')
assert wi is None, wi
wi = env.WhereIs('xxx', pathext = '.exe')
assert wi == test.workpath(sub4_xxx_exe), wi
wi = env.WhereIs('xxx', path = pathdirs_1234, pathext = '.BAT;.EXE')
assert wi.lower() == test.workpath(sub3_xxx_exe).lower(), wi
# Test that we return a normalized path even when
# the path contains forward slashes.
forward_slash = test.workpath('') + '/sub3'
wi = env.WhereIs('xxx', path = forward_slash, pathext = '.EXE')
assert wi.lower() == test.workpath(sub3_xxx_exe).lower(), wi
def test_Action(self):
"""Test the Action() method"""
import SCons.Action
env = self.TestEnvironment(FOO = 'xyzzy')
a = env.Action('foo')
assert a, a
assert a.__class__ is SCons.Action.CommandAction, a.__class__
a = env.Action('$FOO')
assert a, a
assert a.__class__ is SCons.Action.CommandAction, a.__class__
a = env.Action('$$FOO')
assert a, a
assert a.__class__ is SCons.Action.LazyAction, a.__class__
a = env.Action(['$FOO', 'foo'])
assert a, a
assert a.__class__ is SCons.Action.ListAction, a.__class__
def func(arg):
pass
a = env.Action(func)
assert a, a
assert a.__class__ is SCons.Action.FunctionAction, a.__class__
def test_AddPostAction(self):
"""Test the AddPostAction() method"""
env = self.TestEnvironment(FOO='fff', BAR='bbb')
n = env.AddPostAction('$FOO', lambda x: x)
assert str(n[0]) == 'fff', n[0]
n = env.AddPostAction(['ggg', '$BAR'], lambda x: x)
assert str(n[0]) == 'ggg', n[0]
assert str(n[1]) == 'bbb', n[1]
def test_AddPreAction(self):
"""Test the AddPreAction() method"""
env = self.TestEnvironment(FOO='fff', BAR='bbb')
n = env.AddPreAction('$FOO', lambda x: x)
assert str(n[0]) == 'fff', n[0]
n = env.AddPreAction(['ggg', '$BAR'], lambda x: x)
assert str(n[0]) == 'ggg', n[0]
assert str(n[1]) == 'bbb', n[1]
def test_Alias(self):
"""Test the Alias() method"""
env = self.TestEnvironment(FOO='kkk', BAR='lll', EA='export_alias')
tgt = env.Alias('new_alias')[0]
assert str(tgt) == 'new_alias', tgt
assert tgt.sources == [], tgt.sources
assert not hasattr(tgt, 'builder'), tgt.builder
tgt = env.Alias('None_alias', None)[0]
assert str(tgt) == 'None_alias', tgt
assert tgt.sources == [], tgt.sources
tgt = env.Alias('empty_list', [])[0]
assert str(tgt) == 'empty_list', tgt
assert tgt.sources == [], tgt.sources
tgt = env.Alias('export_alias', [ 'asrc1', '$FOO' ])[0]
assert str(tgt) == 'export_alias', tgt
assert len(tgt.sources) == 2, list(map(str, tgt.sources))
assert str(tgt.sources[0]) == 'asrc1', list(map(str, tgt.sources))
assert str(tgt.sources[1]) == 'kkk', list(map(str, tgt.sources))
n = env.Alias(tgt, source = ['$BAR', 'asrc4'])[0]
assert n is tgt, n
assert len(tgt.sources) == 4, list(map(str, tgt.sources))
assert str(tgt.sources[2]) == 'lll', list(map(str, tgt.sources))
assert str(tgt.sources[3]) == 'asrc4', list(map(str, tgt.sources))
n = env.Alias('$EA', 'asrc5')[0]
assert n is tgt, n
assert len(tgt.sources) == 5, list(map(str, tgt.sources))
assert str(tgt.sources[4]) == 'asrc5', list(map(str, tgt.sources))
t1, t2 = env.Alias(['t1', 't2'], ['asrc6', 'asrc7'])
assert str(t1) == 't1', t1
assert str(t2) == 't2', t2
assert len(t1.sources) == 2, list(map(str, t1.sources))
assert str(t1.sources[0]) == 'asrc6', list(map(str, t1.sources))
assert str(t1.sources[1]) == 'asrc7', list(map(str, t1.sources))
assert len(t2.sources) == 2, list(map(str, t2.sources))
assert str(t2.sources[0]) == 'asrc6', list(map(str, t2.sources))
assert str(t2.sources[1]) == 'asrc7', list(map(str, t2.sources))
tgt = env.Alias('add', 's1')
tgt = env.Alias('add', 's2')[0]
s = list(map(str, tgt.sources))
assert s == ['s1', 's2'], s
tgt = env.Alias(tgt, 's3')[0]
s = list(map(str, tgt.sources))
assert s == ['s1', 's2', 's3'], s
tgt = env.Alias('act', None, "action1")[0]
s = str(tgt.builder.action)
assert s == "action1", s
tgt = env.Alias('act', None, "action2")[0]
s = str(tgt.builder.action)
assert s == "action1\naction2", s
tgt = env.Alias(tgt, None, "action3")[0]
s = str(tgt.builder.action)
assert s == "action1\naction2\naction3", s
def test_AlwaysBuild(self):
"""Test the AlwaysBuild() method"""
env = self.TestEnvironment(FOO='fff', BAR='bbb')
t = env.AlwaysBuild('a', 'b$FOO', ['c', 'd'], '$BAR',
env.fs.Dir('dir'), env.fs.File('file'))
assert t[0].__class__.__name__ == 'Entry'
assert t[0].get_internal_path() == 'a'
assert t[0].always_build
assert t[1].__class__.__name__ == 'Entry'
assert t[1].get_internal_path() == 'bfff'
assert t[1].always_build
assert t[2].__class__.__name__ == 'Entry'
assert t[2].get_internal_path() == 'c'
assert t[2].always_build
assert t[3].__class__.__name__ == 'Entry'
assert t[3].get_internal_path() == 'd'
assert t[3].always_build
assert t[4].__class__.__name__ == 'Entry'
assert t[4].get_internal_path() == 'bbb'
assert t[4].always_build
assert t[5].__class__.__name__ == 'Dir'
assert t[5].get_internal_path() == 'dir'
assert t[5].always_build
assert t[6].__class__.__name__ == 'File'
assert t[6].get_internal_path() == 'file'
assert t[6].always_build
def test_VariantDir(self):
"""Test the VariantDir() method"""
class MyFS(object):
def Dir(self, name):
return name
def VariantDir(self, variant_dir, src_dir, duplicate):
self.variant_dir = variant_dir
self.src_dir = src_dir
self.duplicate = duplicate
env = self.TestEnvironment(FOO = 'fff', BAR = 'bbb')
env.fs = MyFS()
env.VariantDir('build', 'src')
assert env.fs.variant_dir == 'build', env.fs.variant_dir
assert env.fs.src_dir == 'src', env.fs.src_dir
assert env.fs.duplicate == 1, env.fs.duplicate
env.VariantDir('build${FOO}', '${BAR}src', 0)
assert env.fs.variant_dir == 'buildfff', env.fs.variant_dir
assert env.fs.src_dir == 'bbbsrc', env.fs.src_dir
assert env.fs.duplicate == 0, env.fs.duplicate
def test_Builder(self):
"""Test the Builder() method"""
env = self.TestEnvironment(FOO = 'xyzzy')
b = env.Builder(action = 'foo')
assert b is not None, b
b = env.Builder(action = '$FOO')
assert b is not None, b
b = env.Builder(action = ['$FOO', 'foo'])
assert b is not None, b
def func(arg):
pass
b = env.Builder(action = func)
assert b is not None, b
b = env.Builder(generator = func)
assert b is not None, b
def test_CacheDir(self):
"""Test the CacheDir() method"""
env = self.TestEnvironment(CD = 'CacheDir')
env.CacheDir('foo')
assert env._CacheDir_path == 'foo', env._CacheDir_path
env.CacheDir('$CD')
assert env._CacheDir_path == 'CacheDir', env._CacheDir_path
def test_Clean(self):
"""Test the Clean() method"""
env = self.TestEnvironment(FOO = 'fff', BAR = 'bbb')
CT = SCons.Environment.CleanTargets
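        # CleanTargets maps each target node to the list of nodes registered
        # for removal via Clean().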
foo = env.arg2nodes('foo')[0]
fff = env.arg2nodes('fff')[0]
t = env.Clean('foo', 'aaa')
l = list(map(str, CT[foo]))
assert l == ['aaa'], l
t = env.Clean(foo, ['$BAR', 'ccc'])
l = list(map(str, CT[foo]))
assert l == ['aaa', 'bbb', 'ccc'], l
eee = env.arg2nodes('eee')[0]
t = env.Clean('$FOO', 'ddd')
l = list(map(str, CT[fff]))
assert l == ['ddd'], l
t = env.Clean(fff, [eee, 'fff'])
l = list(map(str, CT[fff]))
assert l == ['ddd', 'eee', 'fff'], l
def test_Command(self):
"""Test the Command() method."""
env = Environment()
t = env.Command(target='foo.out', source=['foo1.in', 'foo2.in'],
action='buildfoo $target $source')[0]
assert t.builder is not None
assert t.builder.action.__class__.__name__ == 'CommandAction'
assert t.builder.action.cmd_list == 'buildfoo $target $source'
assert 'foo1.in' in [x.get_internal_path() for x in t.sources]
assert 'foo2.in' in [x.get_internal_path() for x in t.sources]
sub = env.fs.Dir('sub')
t = env.Command(target='bar.out', source='sub',
action='buildbar $target $source')[0]
assert 'sub' in [x.get_internal_path() for x in t.sources]
def testFunc(env, target, source):
assert str(target[0]) == 'foo.out'
assert 'foo1.in' in list(map(str, source)) and 'foo2.in' in list(map(str, source)), list(map(str, source))
return 0
t = env.Command(target='foo.out', source=['foo1.in','foo2.in'],
action=testFunc)[0]
assert t.builder is not None
assert t.builder.action.__class__.__name__ == 'FunctionAction'
t.build()
assert 'foo1.in' in [x.get_internal_path() for x in t.sources]
assert 'foo2.in' in [x.get_internal_path() for x in t.sources]
x = []
def test2(baz, x=x):
x.append(baz)
env = self.TestEnvironment(TEST2 = test2)
t = env.Command(target='baz.out', source='baz.in',
action='${TEST2(XYZ)}',
XYZ='magic word')[0]
assert t.builder is not None
t.build()
assert x[0] == 'magic word', x
t = env.Command(target='${X}.out', source='${X}.in',
action = 'foo',
X = 'xxx')[0]
assert str(t) == 'xxx.out', str(t)
assert 'xxx.in' in [x.get_internal_path() for x in t.sources]
env = self.TestEnvironment(source_scanner = 'should_not_find_this')
t = env.Command(target='file.out', source='file.in',
action = 'foo',
source_scanner = 'fake')[0]
assert t.builder.source_scanner == 'fake', t.builder.source_scanner
def test_Configure(self):
"""Test the Configure() method"""
# Configure() will write to a local temporary file.
test = TestCmd.TestCmd(workdir = '')
save = os.getcwd()
try:
os.chdir(test.workpath())
env = self.TestEnvironment(FOO = 'xyzzy')
def func(arg):
pass
c = env.Configure()
assert c is not None, c
c.Finish()
c = env.Configure(custom_tests = {'foo' : func, '$FOO' : func})
assert c is not None, c
assert hasattr(c, 'foo')
assert hasattr(c, 'xyzzy')
c.Finish()
finally:
os.chdir(save)
def test_Depends(self):
"""Test the explicit Depends method."""
env = self.TestEnvironment(FOO = 'xxx', BAR='yyy')
env.Dir('dir1')
env.Dir('dir2')
env.File('xxx.py')
env.File('yyy.py')
t = env.Depends(target='EnvironmentTest.py',
dependency='Environment.py')[0]
assert t.__class__.__name__ == 'Entry', t.__class__.__name__
assert t.get_internal_path() == 'EnvironmentTest.py'
assert len(t.depends) == 1
d = t.depends[0]
assert d.__class__.__name__ == 'Entry', d.__class__.__name__
assert d.get_internal_path() == 'Environment.py'
t = env.Depends(target='${FOO}.py', dependency='${BAR}.py')[0]
assert t.__class__.__name__ == 'File', t.__class__.__name__
assert t.get_internal_path() == 'xxx.py'
assert len(t.depends) == 1
d = t.depends[0]
assert d.__class__.__name__ == 'File', d.__class__.__name__
assert d.get_internal_path() == 'yyy.py'
t = env.Depends(target='dir1', dependency='dir2')[0]
assert t.__class__.__name__ == 'Dir', t.__class__.__name__
assert t.get_internal_path() == 'dir1'
assert len(t.depends) == 1
d = t.depends[0]
assert d.__class__.__name__ == 'Dir', d.__class__.__name__
assert d.get_internal_path() == 'dir2'
def test_Dir(self):
"""Test the Dir() method"""
class MyFS(object):
def Dir(self, name):
return 'Dir(%s)' % name
env = self.TestEnvironment(FOO = 'foodir', BAR = 'bardir')
env.fs = MyFS()
d = env.Dir('d')
assert d == 'Dir(d)', d
d = env.Dir('$FOO')
assert d == 'Dir(foodir)', d
d = env.Dir('${BAR}_$BAR')
assert d == 'Dir(bardir_bardir)', d
d = env.Dir(['dir1'])
assert d == ['Dir(dir1)'], d
d = env.Dir(['dir1', 'dir2'])
assert d == ['Dir(dir1)', 'Dir(dir2)'], d
def test_NoClean(self):
"""Test the NoClean() method"""
env = self.TestEnvironment(FOO='ggg', BAR='hhh')
env.Dir('p_hhhb')
env.File('p_d')
t = env.NoClean('p_a', 'p_${BAR}b', ['p_c', 'p_d'], 'p_$FOO')
assert t[0].__class__.__name__ == 'Entry', t[0].__class__.__name__
assert t[0].get_internal_path() == 'p_a'
assert t[0].noclean
assert t[1].__class__.__name__ == 'Dir', t[1].__class__.__name__
assert t[1].get_internal_path() == 'p_hhhb'
assert t[1].noclean
assert t[2].__class__.__name__ == 'Entry', t[2].__class__.__name__
assert t[2].get_internal_path() == 'p_c'
assert t[2].noclean
assert t[3].__class__.__name__ == 'File', t[3].__class__.__name__
assert t[3].get_internal_path() == 'p_d'
assert t[3].noclean
assert t[4].__class__.__name__ == 'Entry', t[4].__class__.__name__
assert t[4].get_internal_path() == 'p_ggg'
assert t[4].noclean
def test_Dump(self):
"""Test the Dump() method"""
env = self.TestEnvironment(FOO = 'foo')
assert env.Dump('FOO') == "'foo'", env.Dump('FOO')
assert len(env.Dump()) > 200, env.Dump() # no args version
def test_Environment(self):
"""Test the Environment() method"""
env = self.TestEnvironment(FOO = 'xxx', BAR = 'yyy')
e2 = env.Environment(X = '$FOO', Y = '$BAR')
assert e2['X'] == 'xxx', e2['X']
assert e2['Y'] == 'yyy', e2['Y']
def test_Execute(self):
"""Test the Execute() method"""
class MyAction(object):
def __init__(self, *args, **kw):
self.args = args
def __call__(self, target, source, env):
return "%s executed" % self.args
env = Environment()
env.Action = MyAction
result = env.Execute("foo")
assert result == "foo executed", result
def test_Entry(self):
"""Test the Entry() method"""
class MyFS(object):
def Entry(self, name):
return 'Entry(%s)' % name
env = self.TestEnvironment(FOO = 'fooentry', BAR = 'barentry')
env.fs = MyFS()
e = env.Entry('e')
assert e == 'Entry(e)', e
e = env.Entry('$FOO')
assert e == 'Entry(fooentry)', e
e = env.Entry('${BAR}_$BAR')
assert e == 'Entry(barentry_barentry)', e
e = env.Entry(['entry1'])
assert e == ['Entry(entry1)'], e
e = env.Entry(['entry1', 'entry2'])
assert e == ['Entry(entry1)', 'Entry(entry2)'], e
def test_File(self):
"""Test the File() method"""
class MyFS(object):
def File(self, name):
return 'File(%s)' % name
env = self.TestEnvironment(FOO = 'foofile', BAR = 'barfile')
env.fs = MyFS()
f = env.File('f')
assert f == 'File(f)', f
f = env.File('$FOO')
assert f == 'File(foofile)', f
f = env.File('${BAR}_$BAR')
assert f == 'File(barfile_barfile)', f
f = env.File(['file1'])
assert f == ['File(file1)'], f
f = env.File(['file1', 'file2'])
assert f == ['File(file1)', 'File(file2)'], f
def test_FindFile(self):
"""Test the FindFile() method"""
env = self.TestEnvironment(FOO = 'fff', BAR = 'bbb')
r = env.FindFile('foo', ['no_such_directory'])
assert r is None, r
# XXX
def test_Flatten(self):
"""Test the Flatten() method"""
env = Environment()
l = env.Flatten([1])
assert l == [1]
l = env.Flatten([1, [2, [3, [4]]]])
assert l == [1, 2, 3, 4], l
def test_GetBuildPath(self):
"""Test the GetBuildPath() method."""
env = self.TestEnvironment(MAGIC = 'xyzzy')
p = env.GetBuildPath('foo')
assert p == 'foo', p
p = env.GetBuildPath('$MAGIC')
assert p == 'xyzzy', p
def test_Ignore(self):
"""Test the explicit Ignore method."""
env = self.TestEnvironment(FOO='yyy', BAR='zzz')
env.Dir('dir1')
env.Dir('dir2')
env.File('yyyzzz')
env.File('zzzyyy')
t = env.Ignore(target='targ.py', dependency='dep.py')[0]
assert t.__class__.__name__ == 'Entry', t.__class__.__name__
assert t.get_internal_path() == 'targ.py'
assert len(t.ignore) == 1
i = t.ignore[0]
assert i.__class__.__name__ == 'Entry', i.__class__.__name__
assert i.get_internal_path() == 'dep.py'
t = env.Ignore(target='$FOO$BAR', dependency='$BAR$FOO')[0]
assert t.__class__.__name__ == 'File', t.__class__.__name__
assert t.get_internal_path() == 'yyyzzz'
assert len(t.ignore) == 1
i = t.ignore[0]
assert i.__class__.__name__ == 'File', i.__class__.__name__
assert i.get_internal_path() == 'zzzyyy'
t = env.Ignore(target='dir1', dependency='dir2')[0]
assert t.__class__.__name__ == 'Dir', t.__class__.__name__
assert t.get_internal_path() == 'dir1'
assert len(t.ignore) == 1
i = t.ignore[0]
assert i.__class__.__name__ == 'Dir', i.__class__.__name__
assert i.get_internal_path() == 'dir2'
def test_Literal(self):
"""Test the Literal() method"""
env = self.TestEnvironment(FOO='fff', BAR='bbb')
list = env.subst_list([env.Literal('$FOO'), '$BAR'])[0]
assert list == ['$FOO', 'bbb'], list
list = env.subst_list(['$FOO', env.Literal('$BAR')])[0]
assert list == ['fff', '$BAR'], list
def test_Local(self):
"""Test the Local() method."""
env = self.TestEnvironment(FOO='lll')
l = env.Local(env.fs.File('fff'))
assert str(l[0]) == 'fff', l[0]
l = env.Local('ggg', '$FOO')
assert str(l[0]) == 'ggg', l[0]
assert str(l[1]) == 'lll', l[1]
def test_Precious(self):
"""Test the Precious() method"""
env = self.TestEnvironment(FOO='ggg', BAR='hhh')
env.Dir('p_hhhb')
env.File('p_d')
t = env.Precious('p_a', 'p_${BAR}b', ['p_c', 'p_d'], 'p_$FOO')
assert t[0].__class__.__name__ == 'Entry', t[0].__class__.__name__
assert t[0].get_internal_path() == 'p_a'
assert t[0].precious
assert t[1].__class__.__name__ == 'Dir', t[1].__class__.__name__
assert t[1].get_internal_path() == 'p_hhhb'
assert t[1].precious
assert t[2].__class__.__name__ == 'Entry', t[2].__class__.__name__
assert t[2].get_internal_path() == 'p_c'
assert t[2].precious
assert t[3].__class__.__name__ == 'File', t[3].__class__.__name__
assert t[3].get_internal_path() == 'p_d'
assert t[3].precious
assert t[4].__class__.__name__ == 'Entry', t[4].__class__.__name__
assert t[4].get_internal_path() == 'p_ggg'
assert t[4].precious
def test_Pseudo(self):
"""Test the Pseudo() method"""
env = self.TestEnvironment(FOO='ggg', BAR='hhh')
env.Dir('p_hhhb')
env.File('p_d')
t = env.Pseudo('p_a', 'p_${BAR}b', ['p_c', 'p_d'], 'p_$FOO')
assert t[0].__class__.__name__ == 'Entry', t[0].__class__.__name__
assert t[0].get_internal_path() == 'p_a'
assert t[0].pseudo
assert t[1].__class__.__name__ == 'Dir', t[1].__class__.__name__
assert t[1].get_internal_path() == 'p_hhhb'
assert t[1].pseudo
assert t[2].__class__.__name__ == 'Entry', t[2].__class__.__name__
assert t[2].get_internal_path() == 'p_c'
assert t[2].pseudo
assert t[3].__class__.__name__ == 'File', t[3].__class__.__name__
assert t[3].get_internal_path() == 'p_d'
assert t[3].pseudo
assert t[4].__class__.__name__ == 'Entry', t[4].__class__.__name__
assert t[4].get_internal_path() == 'p_ggg'
assert t[4].pseudo
def test_Repository(self):
"""Test the Repository() method."""
class MyFS(object):
def __init__(self):
self.list = []
def Repository(self, *dirs):
self.list.extend(list(dirs))
def Dir(self, name):
return name
env = self.TestEnvironment(FOO='rrr', BAR='sss')
env.fs = MyFS()
env.Repository('/tmp/foo')
env.Repository('/tmp/$FOO', '/tmp/$BAR/foo')
expect = ['/tmp/foo', '/tmp/rrr', '/tmp/sss/foo']
assert env.fs.list == expect, env.fs.list
def test_Scanner(self):
"""Test the Scanner() method"""
def scan(node, env, target, arg):
pass
env = self.TestEnvironment(FOO = scan)
s = env.Scanner('foo')
assert s is not None, s
s = env.Scanner(function = 'foo')
assert s is not None, s
if 0:
s = env.Scanner('$FOO')
assert s is not None, s
s = env.Scanner(function = '$FOO')
assert s is not None, s
def test_SConsignFile(self):
"""Test the SConsignFile() method"""
import SCons.SConsign
class MyFS(object):
SConstruct_dir = os.sep + 'dir'
env = self.TestEnvironment(FOO = 'SConsign',
BAR = os.path.join(os.sep, 'File'))
env.fs = MyFS()
env.Execute = lambda action: None
try:
fnames = []
dbms = []
def capture(name, dbm_module, fnames=fnames, dbms=dbms):
fnames.append(name)
dbms.append(dbm_module)
save_SConsign_File = SCons.SConsign.File
SCons.SConsign.File = capture
env.SConsignFile('foo')
assert fnames[-1] == os.path.join(os.sep, 'dir', 'foo'), fnames
assert dbms[-1] is None, dbms
env.SConsignFile('$FOO')
assert fnames[-1] == os.path.join(os.sep, 'dir', 'SConsign'), fnames
assert dbms[-1] is None, dbms
env.SConsignFile('/$FOO')
assert fnames[-1] == os.sep + 'SConsign', fnames
assert dbms[-1] is None, dbms
env.SConsignFile(os.sep + '$FOO')
assert fnames[-1] == os.sep + 'SConsign', fnames
assert dbms[-1] is None, dbms
env.SConsignFile('$BAR', 'x')
assert fnames[-1] == os.path.join(os.sep, 'File'), fnames
assert dbms[-1] == 'x', dbms
env.SConsignFile('__$BAR', 7)
assert fnames[-1] == os.path.join(os.sep, 'dir', '__', 'File'), fnames
assert dbms[-1] == 7, dbms
env.SConsignFile()
assert fnames[-1] == os.path.join(os.sep, 'dir', '.sconsign'), fnames
assert dbms[-1] is None, dbms
env.SConsignFile(None)
assert fnames[-1] is None, fnames
assert dbms[-1] is None, dbms
finally:
SCons.SConsign.File = save_SConsign_File
def test_SideEffect(self):
"""Test the SideEffect() method"""
env = self.TestEnvironment(LIB='lll', FOO='fff', BAR='bbb')
env.File('mylll.pdb')
env.Dir('mymmm.pdb')
foo = env.Object('foo.obj', 'foo.cpp')[0]
bar = env.Object('bar.obj', 'bar.cpp')[0]
s = env.SideEffect('mylib.pdb', ['foo.obj', 'bar.obj'])[0]
assert s.__class__.__name__ == 'Entry', s.__class__.__name__
assert s.get_internal_path() == 'mylib.pdb'
assert s.side_effect
assert foo.side_effects == [s]
assert bar.side_effects == [s]
fff = env.Object('fff.obj', 'fff.cpp')[0]
bbb = env.Object('bbb.obj', 'bbb.cpp')[0]
s = env.SideEffect('my${LIB}.pdb', ['${FOO}.obj', '${BAR}.obj'])[0]
assert s.__class__.__name__ == 'File', s.__class__.__name__
assert s.get_internal_path() == 'mylll.pdb'
assert s.side_effect
assert fff.side_effects == [s], fff.side_effects
assert bbb.side_effects == [s], bbb.side_effects
ggg = env.Object('ggg.obj', 'ggg.cpp')[0]
ccc = env.Object('ccc.obj', 'ccc.cpp')[0]
s = env.SideEffect('mymmm.pdb', ['ggg.obj', 'ccc.obj'])[0]
assert s.__class__.__name__ == 'Dir', s.__class__.__name__
assert s.get_internal_path() == 'mymmm.pdb'
assert s.side_effect
assert ggg.side_effects == [s], ggg.side_effects
assert ccc.side_effects == [s], ccc.side_effects
def test_SourceCode(self):
"""Test the SourceCode() method."""
env = self.TestEnvironment(FOO='mmm', BAR='nnn')
e = env.SourceCode('foo', None)[0]
assert e.get_internal_path() == 'foo'
s = e.src_builder()
assert s is None, s
b = Builder()
e = env.SourceCode(e, b)[0]
assert e.get_internal_path() == 'foo'
s = e.src_builder()
assert s is b, s
e = env.SourceCode('$BAR$FOO', None)[0]
assert e.get_internal_path() == 'nnnmmm'
s = e.src_builder()
assert s is None, s
def test_SourceSignatures(self):
"""Test the SourceSignatures() method"""
import SCons.Errors
env = self.TestEnvironment(M = 'MD5', T = 'timestamp')
exc_caught = None
try:
env.SourceSignatures('invalid_type')
except SCons.Errors.UserError:
exc_caught = 1
assert exc_caught, "did not catch expected UserError"
env.SourceSignatures('MD5')
assert env.src_sig_type == 'MD5', env.src_sig_type
env.SourceSignatures('$M')
assert env.src_sig_type == 'MD5', env.src_sig_type
env.SourceSignatures('timestamp')
assert env.src_sig_type == 'timestamp', env.src_sig_type
env.SourceSignatures('$T')
assert env.src_sig_type == 'timestamp', env.src_sig_type
try:
import SCons.Util
save_md5 = SCons.Util.md5
SCons.Util.md5 = None
try:
env.SourceSignatures('MD5')
except SCons.Errors.UserError:
pass
else:
self.fail('Did not catch expected UserError')
finally:
SCons.Util.md5 = save_md5
def test_Split(self):
"""Test the Split() method"""
env = self.TestEnvironment(FOO = 'fff', BAR = 'bbb')
s = env.Split("foo bar")
assert s == ["foo", "bar"], s
s = env.Split("$FOO bar")
assert s == ["fff", "bar"], s
s = env.Split(["foo", "bar"])
assert s == ["foo", "bar"], s
s = env.Split(["foo", "${BAR}-bbb"])
assert s == ["foo", "bbb-bbb"], s
s = env.Split("foo")
assert s == ["foo"], s
s = env.Split("$FOO$BAR")
assert s == ["fffbbb"], s
def test_TargetSignatures(self):
"""Test the TargetSignatures() method"""
import SCons.Errors
env = self.TestEnvironment(B='build', C='content')
exc_caught = None
try:
env.TargetSignatures('invalid_type')
except SCons.Errors.UserError:
exc_caught = 1
assert exc_caught, "did not catch expected UserError"
assert not hasattr(env, '_build_signature')
env.TargetSignatures('build')
assert env.tgt_sig_type == 'build', env.tgt_sig_type
env.TargetSignatures('$B')
assert env.tgt_sig_type == 'build', env.tgt_sig_type
env.TargetSignatures('content')
assert env.tgt_sig_type == 'content', env.tgt_sig_type
env.TargetSignatures('$C')
assert env.tgt_sig_type == 'content', env.tgt_sig_type
env.TargetSignatures('MD5')
assert env.tgt_sig_type == 'MD5', env.tgt_sig_type
env.TargetSignatures('timestamp')
assert env.tgt_sig_type == 'timestamp', env.tgt_sig_type
try:
import SCons.Util
save_md5 = SCons.Util.md5
SCons.Util.md5 = None
try:
env.TargetSignatures('MD5')
except SCons.Errors.UserError:
pass
else:
self.fail('Did not catch expected UserError')
try:
env.TargetSignatures('content')
except SCons.Errors.UserError:
pass
else:
self.fail('Did not catch expected UserError')
finally:
SCons.Util.md5 = save_md5
def test_Value(self):
"""Test creating a Value() object
"""
env = Environment()
v1 = env.Value('a')
assert v1.value == 'a', v1.value
value2 = 'a'
v2 = env.Value(value2)
assert v2.value == value2, v2.value
assert v2.value is value2, v2.value
assert not v1 is v2
assert v1.value == v2.value
v3 = env.Value('c', 'build-c')
assert v3.value == 'c', v3.value
def test_Environment_global_variable(self):
"""Test setting Environment variable to an Environment.Base subclass"""
class MyEnv(SCons.Environment.Base):
def xxx(self, string):
return self.subst(string)
SCons.Environment.Environment = MyEnv
env = SCons.Environment.Environment(FOO = 'foo')
f = env.subst('$FOO')
assert f == 'foo', f
f = env.xxx('$FOO')
assert f == 'foo', f
def test_bad_keywords(self):
"""Test trying to use reserved keywords in an Environment"""
added = []
env = self.TestEnvironment(TARGETS = 'targets',
SOURCES = 'sources',
SOURCE = 'source',
TARGET = 'target',
CHANGED_SOURCES = 'changed_sources',
CHANGED_TARGETS = 'changed_targets',
UNCHANGED_SOURCES = 'unchanged_sources',
UNCHANGED_TARGETS = 'unchanged_targets',
INIT = 'init')
bad_msg = '%s is not reserved, but got omitted; see Environment.construction_var_name_ok'
added.append('INIT')
for x in self.reserved_variables:
assert x not in env, env[x]
for x in added:
assert x in env, bad_msg % x
env.Append(TARGETS = 'targets',
SOURCES = 'sources',
SOURCE = 'source',
TARGET = 'target',
CHANGED_SOURCES = 'changed_sources',
CHANGED_TARGETS = 'changed_targets',
UNCHANGED_SOURCES = 'unchanged_sources',
UNCHANGED_TARGETS = 'unchanged_targets',
APPEND = 'append')
added.append('APPEND')
for x in self.reserved_variables:
assert x not in env, env[x]
for x in added:
assert x in env, bad_msg % x
env.AppendUnique(TARGETS = 'targets',
SOURCES = 'sources',
SOURCE = 'source',
TARGET = 'target',
CHANGED_SOURCES = 'changed_sources',
CHANGED_TARGETS = 'changed_targets',
UNCHANGED_SOURCES = 'unchanged_sources',
UNCHANGED_TARGETS = 'unchanged_targets',
APPENDUNIQUE = 'appendunique')
added.append('APPENDUNIQUE')
for x in self.reserved_variables:
assert x not in env, env[x]
for x in added:
assert x in env, bad_msg % x
env.Prepend(TARGETS = 'targets',
SOURCES = 'sources',
SOURCE = 'source',
TARGET = 'target',
CHANGED_SOURCES = 'changed_sources',
CHANGED_TARGETS = 'changed_targets',
UNCHANGED_SOURCES = 'unchanged_sources',
UNCHANGED_TARGETS = 'unchanged_targets',
PREPEND = 'prepend')
added.append('PREPEND')
for x in self.reserved_variables:
assert x not in env, env[x]
for x in added:
assert x in env, bad_msg % x
env.Prepend(TARGETS = 'targets',
SOURCES = 'sources',
SOURCE = 'source',
TARGET = 'target',
CHANGED_SOURCES = 'changed_sources',
CHANGED_TARGETS = 'changed_targets',
UNCHANGED_SOURCES = 'unchanged_sources',
UNCHANGED_TARGETS = 'unchanged_targets',
PREPENDUNIQUE = 'prependunique')
added.append('PREPENDUNIQUE')
for x in self.reserved_variables:
assert x not in env, env[x]
for x in added:
assert x in env, bad_msg % x
env.Replace(TARGETS = 'targets',
SOURCES = 'sources',
SOURCE = 'source',
TARGET = 'target',
CHANGED_SOURCES = 'changed_sources',
CHANGED_TARGETS = 'changed_targets',
UNCHANGED_SOURCES = 'unchanged_sources',
UNCHANGED_TARGETS = 'unchanged_targets',
REPLACE = 'replace')
added.append('REPLACE')
for x in self.reserved_variables:
assert x not in env, env[x]
for x in added:
assert x in env, bad_msg % x
copy = env.Clone(TARGETS = 'targets',
SOURCES = 'sources',
SOURCE = 'source',
TARGET = 'target',
CHANGED_SOURCES = 'changed_sources',
CHANGED_TARGETS = 'changed_targets',
UNCHANGED_SOURCES = 'unchanged_sources',
UNCHANGED_TARGETS = 'unchanged_targets',
COPY = 'copy')
for x in self.reserved_variables:
assert x not in copy, env[x]
for x in added + ['COPY']:
assert x in copy, bad_msg % x
over = env.Override({'TARGETS' : 'targets',
'SOURCES' : 'sources',
'SOURCE' : 'source',
'TARGET' : 'target',
'CHANGED_SOURCES' : 'changed_sources',
'CHANGED_TARGETS' : 'changed_targets',
'UNCHANGED_SOURCES' : 'unchanged_sources',
'UNCHANGED_TARGETS' : 'unchanged_targets',
'OVERRIDE' : 'override'})
for x in self.reserved_variables:
assert x not in over, over[x]
for x in added + ['OVERRIDE']:
assert x in over, bad_msg % x
def test_parse_flags(self):
        """Test the Base class parse_flags argument"""
# all we have to show is that it gets to MergeFlags internally
env = Environment(tools=[], parse_flags = '-X')
assert env['CCFLAGS'] == ['-X'], env['CCFLAGS']
env = Environment(tools=[], CCFLAGS=None, parse_flags = '-Y')
assert env['CCFLAGS'] == ['-Y'], env['CCFLAGS']
env = Environment(tools=[], CPPDEFINES = 'FOO', parse_flags = '-std=c99 -X -DBAR')
assert env['CFLAGS'] == ['-std=c99'], env['CFLAGS']
assert env['CCFLAGS'] == ['-X'], env['CCFLAGS']
assert env['CPPDEFINES'] == ['FOO', 'BAR'], env['CPPDEFINES']
def test_clone_parse_flags(self):
        """Test the env.Clone() parse_flags argument"""
# all we have to show is that it gets to MergeFlags internally
env = Environment(tools = [])
env2 = env.Clone(parse_flags = '-X')
assert 'CCFLAGS' not in env
assert env2['CCFLAGS'] == ['-X'], env2['CCFLAGS']
env = Environment(tools = [], CCFLAGS=None)
env2 = env.Clone(parse_flags = '-Y')
assert env['CCFLAGS'] is None, env['CCFLAGS']
assert env2['CCFLAGS'] == ['-Y'], env2['CCFLAGS']
env = Environment(tools = [], CPPDEFINES = 'FOO')
env2 = env.Clone(parse_flags = '-std=c99 -X -DBAR')
assert 'CFLAGS' not in env
assert env2['CFLAGS'] == ['-std=c99'], env2['CFLAGS']
assert 'CCFLAGS' not in env
assert env2['CCFLAGS'] == ['-X'], env2['CCFLAGS']
assert env['CPPDEFINES'] == 'FOO', env['CPPDEFINES']
assert env2['CPPDEFINES'] == ['FOO','BAR'], env2['CPPDEFINES']
class OverrideEnvironmentTestCase(unittest.TestCase, TestEnvironmentFixture):
def setUp(self):
env = Environment()
env._dict = {'XXX' : 'x', 'YYY' : 'y'}
env2 = OverrideEnvironment(env, {'XXX' : 'x2'})
env3 = OverrideEnvironment(env2, {'XXX' : 'x3', 'YYY' : 'y3', 'ZZZ' : 'z3'})
self.envs = [ env, env2, env3 ]
def checkpath(self, node, expect):
return str(node) == os.path.normpath(expect)
def test___init__(self):
"""Test OverrideEnvironment initialization"""
env, env2, env3 = self.envs
assert env['XXX'] == 'x', env['XXX']
assert env2['XXX'] == 'x2', env2['XXX']
assert env3['XXX'] == 'x3', env3['XXX']
assert env['YYY'] == 'y', env['YYY']
assert env2['YYY'] == 'y', env2['YYY']
assert env3['YYY'] == 'y3', env3['YYY']
def test___delitem__(self):
"""Test deleting variables from an OverrideEnvironment"""
env, env2, env3 = self.envs
del env3['XXX']
assert 'XXX' not in env, "env has XXX?"
assert 'XXX' not in env2, "env2 has XXX?"
assert 'XXX' not in env3, "env3 has XXX?"
del env3['YYY']
assert 'YYY' not in env, "env has YYY?"
assert 'YYY' not in env2, "env2 has YYY?"
assert 'YYY' not in env3, "env3 has YYY?"
del env3['ZZZ']
assert 'ZZZ' not in env, "env has ZZZ?"
assert 'ZZZ' not in env2, "env2 has ZZZ?"
assert 'ZZZ' not in env3, "env3 has ZZZ?"
def test_get(self):
"""Test the OverrideEnvironment get() method"""
env, env2, env3 = self.envs
assert env.get('XXX') == 'x', env.get('XXX')
assert env2.get('XXX') == 'x2', env2.get('XXX')
assert env3.get('XXX') == 'x3', env3.get('XXX')
assert env.get('YYY') == 'y', env.get('YYY')
assert env2.get('YYY') == 'y', env2.get('YYY')
assert env3.get('YYY') == 'y3', env3.get('YYY')
assert env.get('ZZZ') is None, env.get('ZZZ')
assert env2.get('ZZZ') is None, env2.get('ZZZ')
assert env3.get('ZZZ') == 'z3', env3.get('ZZZ')
def test_has_key(self):
"""Test the OverrideEnvironment has_key() method"""
env, env2, env3 = self.envs
assert 'XXX' in env, 'XXX' in env
assert 'XXX' in env2, 'XXX' in env2
assert 'XXX' in env3, 'XXX' in env3
assert 'YYY' in env, 'YYY' in env
assert 'YYY' in env2, 'YYY' in env2
assert 'YYY' in env3, 'YYY' in env3
assert 'ZZZ' not in env, 'ZZZ' in env
assert 'ZZZ' not in env2, 'ZZZ' in env2
assert 'ZZZ' in env3, 'ZZZ' in env3
def test_contains(self):
"""Test the OverrideEnvironment __contains__() method"""
env, env2, env3 = self.envs
assert 'XXX' in env
assert 'XXX' in env2
assert 'XXX' in env3
assert 'YYY' in env
assert 'YYY' in env2
assert 'YYY' in env3
assert not 'ZZZ' in env
assert not 'ZZZ' in env2
assert 'ZZZ' in env3
    def test_Dictionary(self):
        """Test the OverrideEnvironment Dictionary() method"""
env, env2, env3 = self.envs
items = env.Dictionary()
assert items == {'XXX' : 'x', 'YYY' : 'y'}, items
items = env2.Dictionary()
assert items == {'XXX' : 'x2', 'YYY' : 'y'}, items
items = env3.Dictionary()
assert items == {'XXX' : 'x3', 'YYY' : 'y3', 'ZZZ' : 'z3'}, items
def test_items(self):
"""Test the OverrideEnvironment items() method"""
env, env2, env3 = self.envs
items = sorted(env.items())
assert items == [('XXX', 'x'), ('YYY', 'y')], items
items = sorted(env2.items())
assert items == [('XXX', 'x2'), ('YYY', 'y')], items
items = sorted(env3.items())
assert items == [('XXX', 'x3'), ('YYY', 'y3'), ('ZZZ', 'z3')], items
def test_gvars(self):
"""Test the OverrideEnvironment gvars() method"""
env, env2, env3 = self.envs
gvars = env.gvars()
assert gvars == {'XXX' : 'x', 'YYY' : 'y'}, gvars
gvars = env2.gvars()
assert gvars == {'XXX' : 'x', 'YYY' : 'y'}, gvars
gvars = env3.gvars()
assert gvars == {'XXX' : 'x', 'YYY' : 'y'}, gvars
def test_lvars(self):
"""Test the OverrideEnvironment lvars() method"""
env, env2, env3 = self.envs
lvars = env.lvars()
assert lvars == {}, lvars
lvars = env2.lvars()
assert lvars == {'XXX' : 'x2'}, lvars
lvars = env3.lvars()
assert lvars == {'XXX' : 'x3', 'YYY' : 'y3', 'ZZZ' : 'z3'}, lvars
def test_Replace(self):
"""Test the OverrideEnvironment Replace() method"""
env, env2, env3 = self.envs
assert env['XXX'] == 'x', env['XXX']
assert env2['XXX'] == 'x2', env2['XXX']
assert env3['XXX'] == 'x3', env3['XXX']
assert env['YYY'] == 'y', env['YYY']
assert env2['YYY'] == 'y', env2['YYY']
assert env3['YYY'] == 'y3', env3['YYY']
env.Replace(YYY = 'y4')
assert env['XXX'] == 'x', env['XXX']
assert env2['XXX'] == 'x2', env2['XXX']
assert env3['XXX'] == 'x3', env3['XXX']
assert env['YYY'] == 'y4', env['YYY']
assert env2['YYY'] == 'y4', env2['YYY']
assert env3['YYY'] == 'y3', env3['YYY']
# Tests a number of Base methods through an OverrideEnvironment to
    # make sure they handle overridden construction variables properly.
#
# The following Base methods also call self.subst(), and so could
# theoretically be subject to problems with evaluating overridden
# variables, but they're never really called that way in the rest
# of our code, so we won't worry about them (at least for now):
#
# ParseConfig()
# ParseDepends()
# Platform()
# Tool()
#
# Action()
# Alias()
# Builder()
# CacheDir()
# Configure()
# Environment()
# FindFile()
# Scanner()
# SourceSignatures()
# TargetSignatures()
# It's unlikely Clone() will ever be called this way, so let the
    # other methods verify that overridden values are handled correctly.
#def test_Clone(self):
# """Test the OverrideEnvironment Clone() method"""
# pass
def test_FindIxes(self):
"""Test the OverrideEnvironment FindIxes() method"""
env, env2, env3 = self.envs
x = env.FindIxes(['xaaay'], 'XXX', 'YYY')
assert x == 'xaaay', x
x = env2.FindIxes(['x2aaay'], 'XXX', 'YYY')
assert x == 'x2aaay', x
x = env3.FindIxes(['x3aaay3'], 'XXX', 'YYY')
assert x == 'x3aaay3', x
def test_ReplaceIxes(self):
"""Test the OverrideEnvironment ReplaceIxes() method"""
env, env2, env3 = self.envs
x = env.ReplaceIxes('xaaay', 'XXX', 'YYY', 'YYY', 'XXX')
assert x == 'yaaax', x
x = env2.ReplaceIxes('x2aaay', 'XXX', 'YYY', 'YYY', 'XXX')
assert x == 'yaaax2', x
x = env3.ReplaceIxes('x3aaay3', 'XXX', 'YYY', 'YYY', 'XXX')
assert x == 'y3aaax3', x
# It's unlikely WhereIs() will ever be called this way, so let the
    # other methods verify that overridden values are handled correctly.
#def test_WhereIs(self):
# """Test the OverrideEnvironment WhereIs() method"""
# pass
def test_Dir(self):
"""Test the OverrideEnvironment Dir() method"""
env, env2, env3 = self.envs
x = env.Dir('ddir/$XXX')
assert self.checkpath(x, 'ddir/x'), str(x)
x = env2.Dir('ddir/$XXX')
assert self.checkpath(x, 'ddir/x2'), str(x)
x = env3.Dir('ddir/$XXX')
assert self.checkpath(x, 'ddir/x3'), str(x)
def test_Entry(self):
"""Test the OverrideEnvironment Entry() method"""
env, env2, env3 = self.envs
x = env.Entry('edir/$XXX')
assert self.checkpath(x, 'edir/x'), str(x)
x = env2.Entry('edir/$XXX')
assert self.checkpath(x, 'edir/x2'), str(x)
x = env3.Entry('edir/$XXX')
assert self.checkpath(x, 'edir/x3'), str(x)
def test_File(self):
"""Test the OverrideEnvironment File() method"""
env, env2, env3 = self.envs
x = env.File('fdir/$XXX')
assert self.checkpath(x, 'fdir/x'), str(x)
x = env2.File('fdir/$XXX')
assert self.checkpath(x, 'fdir/x2'), str(x)
x = env3.File('fdir/$XXX')
assert self.checkpath(x, 'fdir/x3'), str(x)
def test_Split(self):
"""Test the OverrideEnvironment Split() method"""
env, env2, env3 = self.envs
env['AAA'] = '$XXX $YYY $ZZZ'
x = env.Split('$AAA')
assert x == ['x', 'y'], x
x = env2.Split('$AAA')
assert x == ['x2', 'y'], x
x = env3.Split('$AAA')
assert x == ['x3', 'y3', 'z3'], x
def test_parse_flags(self):
        """Test the OverrideEnvironment parse_flags argument"""
# all we have to show is that it gets to MergeFlags internally
env = SubstitutionEnvironment()
env2 = env.Override({'parse_flags' : '-X'})
assert 'CCFLAGS' not in env
assert env2['CCFLAGS'] == ['-X'], env2['CCFLAGS']
env = SubstitutionEnvironment(CCFLAGS=None)
env2 = env.Override({'parse_flags' : '-Y'})
assert env['CCFLAGS'] is None, env['CCFLAGS']
assert env2['CCFLAGS'] == ['-Y'], env2['CCFLAGS']
env = SubstitutionEnvironment(CPPDEFINES = 'FOO')
env2 = env.Override({'parse_flags' : '-std=c99 -X -DBAR'})
assert 'CFLAGS' not in env
assert env2['CFLAGS'] == ['-std=c99'], env2['CFLAGS']
assert 'CCFLAGS' not in env
assert env2['CCFLAGS'] == ['-X'], env2['CCFLAGS']
assert env['CPPDEFINES'] == 'FOO', env['CPPDEFINES']
assert env2['CPPDEFINES'] == ['FOO','BAR'], env2['CPPDEFINES']
class NoSubstitutionProxyTestCase(unittest.TestCase, TestEnvironmentFixture):
def test___init__(self):
"""Test NoSubstitutionProxy initialization"""
env = self.TestEnvironment(XXX = 'x', YYY = 'y')
assert env['XXX'] == 'x', env['XXX']
assert env['YYY'] == 'y', env['YYY']
proxy = NoSubstitutionProxy(env)
assert proxy['XXX'] == 'x', proxy['XXX']
assert proxy['YYY'] == 'y', proxy['YYY']
def test_attributes(self):
"""Test getting and setting NoSubstitutionProxy attributes"""
env = Environment()
setattr(env, 'env_attr', 'value1')
proxy = NoSubstitutionProxy(env)
setattr(proxy, 'proxy_attr', 'value2')
x = getattr(env, 'env_attr')
assert x == 'value1', x
x = getattr(proxy, 'env_attr')
assert x == 'value1', x
x = getattr(env, 'proxy_attr')
assert x == 'value2', x
x = getattr(proxy, 'proxy_attr')
assert x == 'value2', x
def test_subst(self):
"""Test the NoSubstitutionProxy.subst() method"""
env = self.TestEnvironment(XXX = 'x', YYY = 'y')
assert env['XXX'] == 'x', env['XXX']
assert env['YYY'] == 'y', env['YYY']
proxy = NoSubstitutionProxy(env)
assert proxy['XXX'] == 'x', proxy['XXX']
assert proxy['YYY'] == 'y', proxy['YYY']
x = env.subst('$XXX')
assert x == 'x', x
x = proxy.subst('$XXX')
assert x == '$XXX', x
x = proxy.subst('$YYY', raw=7, target=None, source=None,
conv=None,
extra_meaningless_keyword_argument=None)
assert x == '$YYY', x
def test_subst_kw(self):
"""Test the NoSubstitutionProxy.subst_kw() method"""
env = self.TestEnvironment(XXX = 'x', YYY = 'y')
assert env['XXX'] == 'x', env['XXX']
assert env['YYY'] == 'y', env['YYY']
proxy = NoSubstitutionProxy(env)
assert proxy['XXX'] == 'x', proxy['XXX']
assert proxy['YYY'] == 'y', proxy['YYY']
x = env.subst_kw({'$XXX':'$YYY'})
assert x == {'x':'y'}, x
x = proxy.subst_kw({'$XXX':'$YYY'})
assert x == {'$XXX':'$YYY'}, x
def test_subst_list(self):
"""Test the NoSubstitutionProxy.subst_list() method"""
env = self.TestEnvironment(XXX = 'x', YYY = 'y')
assert env['XXX'] == 'x', env['XXX']
assert env['YYY'] == 'y', env['YYY']
proxy = NoSubstitutionProxy(env)
assert proxy['XXX'] == 'x', proxy['XXX']
assert proxy['YYY'] == 'y', proxy['YYY']
x = env.subst_list('$XXX')
assert x == [['x']], x
x = proxy.subst_list('$XXX')
assert x == [[]], x
x = proxy.subst_list('$YYY', raw=0, target=None, source=None, conv=None)
assert x == [[]], x
def test_subst_target_source(self):
"""Test the NoSubstitutionProxy.subst_target_source() method"""
env = self.TestEnvironment(XXX = 'x', YYY = 'y')
assert env['XXX'] == 'x', env['XXX']
assert env['YYY'] == 'y', env['YYY']
proxy = NoSubstitutionProxy(env)
assert proxy['XXX'] == 'x', proxy['XXX']
assert proxy['YYY'] == 'y', proxy['YYY']
args = ('$XXX $TARGET $SOURCE $YYY',)
kw = {'target' : DummyNode('ttt'), 'source' : DummyNode('sss')}
x = env.subst_target_source(*args, **kw)
assert x == 'x ttt sss y', x
x = proxy.subst_target_source(*args, **kw)
assert x == ' ttt sss ', x
class EnvironmentVariableTestCase(unittest.TestCase):
def test_is_valid_construction_var(self):
"""Testing is_valid_construction_var()"""
r = is_valid_construction_var("_a")
assert r is not None, r
r = is_valid_construction_var("z_")
assert r is not None, r
r = is_valid_construction_var("X_")
assert r is not None, r
r = is_valid_construction_var("2a")
assert r is None, r
r = is_valid_construction_var("a2_")
assert r is not None, r
r = is_valid_construction_var("/")
assert r is None, r
r = is_valid_construction_var("_/")
assert r is None, r
r = is_valid_construction_var("a/")
assert r is None, r
r = is_valid_construction_var(".b")
assert r is None, r
r = is_valid_construction_var("_.b")
assert r is None, r
r = is_valid_construction_var("b1._")
assert r is None, r
r = is_valid_construction_var("-b")
assert r is None, r
r = is_valid_construction_var("_-b")
assert r is None, r
r = is_valid_construction_var("b1-_")
assert r is None, r
if __name__ == "__main__":
suite = unittest.TestSuite()
tclasses = [ SubstitutionTestCase,
BaseTestCase,
OverrideEnvironmentTestCase,
NoSubstitutionProxyTestCase,
EnvironmentVariableTestCase ]
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests(list(map(tclass, names)))
TestUnit.run(suite)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
the-stack_0_26221
import factory
from factory.fuzzy import FuzzyChoice
from coordinator.api.models.release import Release
from coordinator.api.factories.study import StudyFactory
class ReleaseFactory(factory.django.DjangoModelFactory):
class Meta:
model = Release
name = factory.Faker("bs")
author = factory.Faker("name")
description = factory.Faker("bs")
tasks = factory.RelatedFactory(
"coordinator.api.factories.task.TaskFactory", "release"
)
state = FuzzyChoice(
["waiting", "failed", "canceled", "staged", "rejected", "published"]
)
@factory.post_generation
def studies(self, create, extracted, **kwargs):
if not create:
self.studies.add(StudyFactory())
return
if extracted:
for study in extracted:
self.studies.add(study)
else:
study = FuzzyChoice(
[
StudyFactory(kf_id="SD_ME0WME0W"),
StudyFactory(kf_id="SD_W2PQV9FJ"),
StudyFactory(kf_id="SD_QQXC6C3V"),
StudyFactory(kf_id="SD_ODWXI1TE"),
]
).fuzz()
self.studies.add(study)
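# Illustrative usage (an added sketch, not part of the original module; assumes a
# configured Django test database). Explicit studies can be passed through the
# post-generation hook, e.g.:
#   release = ReleaseFactory(studies=[StudyFactory(kf_id="SD_ME0WME0W")])
# while ReleaseFactory() falls back to the fuzzy default study selection above.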
|
the-stack_0_26227
|
"""Module for creating Cloudnet liquid water content file using scaled-adiabatic method."""
from typing import Optional, Tuple
import numpy as np
from numpy import ma
from cloudnetpy import output, utils
from cloudnetpy.categorize import atmos
from cloudnetpy.datasource import DataSource
from cloudnetpy.metadata import MetaData
from cloudnetpy.products import product_tools as p_tools
from cloudnetpy.products.product_tools import CategorizeBits, get_is_rain
G_TO_KG = 0.001
def generate_lwc(categorize_file: str, output_file: str, uuid: Optional[str] = None) -> str:
"""Generates Cloudnet liquid water content product.
This function calculates cloud liquid water content using the so-called
adiabatic-scaled method. In this method, liquid water content measured by
microwave radiometer is used to constrain the theoretical liquid water
content of observed liquid clouds. The results are written in a netCDF file.
Args:
categorize_file: Categorize file name.
output_file: Output file name.
uuid: Set specific UUID for the file.
Returns:
str: UUID of the generated file.
Examples:
>>> from cloudnetpy.products import generate_lwc
>>> generate_lwc('categorize.nc', 'lwc.nc')
References:
Illingworth, A.J., R.J. Hogan, E. O'Connor, D. Bouniol, M.E. Brooks,
J. Delanoé, D.P. Donovan, J.D. Eastment, N. Gaussiat, J.W. Goddard,
M. Haeffelin, H.K. Baltink, O.A. Krasnov, J. Pelon, J. Piriou, A. Protat,
H.W. Russchenberg, A. Seifert, A.M. Tompkins, G. van Zadelhoff, F. Vinit,
U. Willén, D.R. Wilson, and C.L. Wrench, 2007: Cloudnet.
Bull. Amer. Meteor. Soc., 88, 883–898, https://doi.org/10.1175/BAMS-88-6-883
"""
with LwcSource(categorize_file) as lwc_source:
lwc = Lwc(lwc_source)
clouds = CloudAdjustor(lwc_source, lwc)
lwc_error = LwcError(lwc_source, lwc)
lwc_source.append_results(lwc.lwc, clouds.status, lwc_error.error)
date = lwc_source.get_date()
attributes = output.add_time_attribute(LWC_ATTRIBUTES, date)
output.update_attributes(lwc_source.data, attributes)
uuid = output.save_product_file(
"lwc",
lwc_source,
output_file,
uuid,
copy_from_cat=(
"lwp",
"lwp_error",
),
)
return uuid
class LwcSource(DataSource):
"""Data container for liquid water content calculations. Child of DataSource.
This class reads input data from a categorize file and provides data
structures and methods for holding the results.
Args:
categorize_file: Categorize file name.
Attributes:
lwp (ndarray): 1D liquid water path.
lwp_error (ndarray): 1D error of liquid water path.
is_rain (ndarray): 1D array denoting presence of rain.
dheight (float): Median difference in height vector.
        atmosphere (tuple): Interpolated model fields `temperature` and `pressure`.
categorize_bits (CategorizeBits): The :class:`CategorizeBits` instance.
"""
def __init__(self, categorize_file: str):
super().__init__(categorize_file)
self.lwp = self.getvar("lwp")
self.lwp[self.lwp < 0] = 0
self.lwp_error = self.getvar("lwp_error")
self.is_rain = get_is_rain(categorize_file)
self.dheight = utils.mdiff(self.getvar("height"))
self.atmosphere = self._get_atmosphere(categorize_file)
self.categorize_bits = CategorizeBits(categorize_file)
def append_results(self, lwc: np.ndarray, status: np.ndarray, error: np.ndarray) -> None:
self.append_data(lwc * G_TO_KG, "lwc", units="kg m-3")
self.append_data(status, "lwc_retrieval_status")
self.append_data(error, "lwc_error", units="dB")
@staticmethod
def _get_atmosphere(categorize_file: str) -> Tuple[np.ndarray, np.ndarray]:
fields = ["temperature", "pressure"]
atmosphere = p_tools.interpolate_model(categorize_file, fields)
return atmosphere["temperature"], atmosphere["pressure"]
class Lwc:
"""Class handling the actual LWC calculations.
Args:
lwc_source: The :class:`LwcSource` instance.
Attributes:
lwc_source (LwcSource): The :class:`LwcSource` instance.
dheight (float): Median difference in height vector.
is_liquid (ndarray): 2D array denoting liquid.
lwc_adiabatic (ndarray): 2D array storing adiabatic lwc.
lwc (ndarray): 2D array of liquid water content (scaled with lwp).
"""
def __init__(self, lwc_source: LwcSource):
self.lwc_source = lwc_source
self.dheight = self.lwc_source.dheight
self.is_liquid = self._get_liquid()
self.lwc_adiabatic = self._init_lwc_adiabatic()
self.lwc = self._adiabatic_lwc_to_lwc()
self._mask_rain()
def _get_liquid(self) -> np.ndarray:
category_bits = self.lwc_source.categorize_bits.category_bits
return category_bits["droplet"]
def _init_lwc_adiabatic(self) -> np.ndarray:
"""Returns theoretical adiabatic lwc in liquid clouds (g/m3)."""
lwc_dz = atmos.fill_clouds_with_lwc_dz(self.lwc_source.atmosphere, self.is_liquid)
return atmos.calc_adiabatic_lwc(lwc_dz, self.dheight)
def _adiabatic_lwc_to_lwc(self) -> np.ndarray:
"""Initialises liquid water content (g/m3).
Calculates LWC for ALL profiles (rain, lwp > theoretical, etc.),
"""
lwc_scaled = atmos.distribute_lwp_to_liquid_clouds(self.lwc_adiabatic, self.lwc_source.lwp)
return lwc_scaled / self.dheight
def _mask_rain(self) -> None:
is_rain = self.lwc_source.is_rain.astype(bool)
self.lwc[is_rain, :] = ma.masked
class CloudAdjustor:
"""Adjusts clouds (where possible) so that theoretical and measured LWP agree.
Args:
lwc_source: The :class:`LwcSource` instance.
lwc: The :class:`Lwc` instance.
Attributes:
lwc_source (LwcSource): The :class:`LwcSource` instance.
lwc (ndarray): Liquid water content data.
is_liquid (ndarray): 2D array denoting liquid.
lwc_adiabatic (ndarray): 2D array storing adiabatic lwc.
echo (dict): Dictionary storing radar and lidar echos
status (ndarray): 2D array storing lwc status classification
"""
def __init__(self, lwc_source: LwcSource, lwc: Lwc):
self.lwc_source = lwc_source
self.lwc = lwc.lwc
self.is_liquid = lwc.is_liquid
self.lwc_adiabatic = lwc.lwc_adiabatic
self.echo = self._get_echo()
self.status = self._init_status()
self._adjust_cloud_tops(self._find_adjustable_clouds())
self._mask_rain()
self._mask_missing()
def _get_echo(self) -> dict:
quality_bits = self.lwc_source.categorize_bits.quality_bits
return {key: quality_bits[key] for key in ("radar", "lidar")}
def _init_status(self) -> ma.MaskedArray:
status = ma.zeros(self.is_liquid.shape, dtype=int)
status[self.is_liquid] = 1
return status
def _adjust_cloud_tops(self, adjustable_clouds: np.ndarray) -> None:
"""Adjusts cloud top index so that measured lwc corresponds to theoretical value."""
for time_index in np.unique(np.where(adjustable_clouds)[0]):
base_index = np.where(adjustable_clouds[time_index, :])[0][0]
self._update_status(time_index)
self._adjust_lwc(time_index, base_index)
def _update_status(self, time_ind: int) -> None:
alt_indices = np.where(self.is_liquid[time_ind, :])[0]
self.status[time_ind, alt_indices] = 2
def _adjust_lwc(self, time_ind: int, base_ind: int) -> None:
lwc_base = self.lwc_adiabatic[time_ind, base_ind]
distance_from_base = 1
while True:
top_ind = base_ind + distance_from_base
lwc_top = lwc_base * (distance_from_base + 1)
self.lwc_adiabatic[time_ind, top_ind] = lwc_top
if not self.status[time_ind, top_ind]:
self.status[time_ind, top_ind] = 3
if self._has_converged(time_ind) or self._out_of_bound(top_ind):
break
distance_from_base += 1
def _has_converged(self, ind: int) -> bool:
lwc_sum = ma.sum(self.lwc_adiabatic[ind, :])
if lwc_sum * self.lwc_source.dheight > self.lwc_source.lwp[ind]:
return True
return False
def _out_of_bound(self, ind: int) -> bool:
return ind >= self.lwc.shape[1] - 1
def _find_adjustable_clouds(self) -> np.ndarray:
top_clouds = self._find_topmost_clouds()
detection_type = self._find_echo_combinations_in_liquid()
detection_type[~top_clouds] = 0
lidar_only_clouds = self._find_lidar_only_clouds(detection_type)
top_clouds[~lidar_only_clouds, :] = 0
top_clouds = self._remove_good_profiles(top_clouds)
return top_clouds
def _find_topmost_clouds(self) -> np.ndarray:
top_clouds = np.copy(self.is_liquid)
cloud_edges = top_clouds[:, :-1][:, ::-1] < top_clouds[:, 1:][:, ::-1]
topmost_bases = self.is_liquid.shape[1] - 1 - np.argmax(cloud_edges, axis=1)
for n, base in enumerate(topmost_bases):
top_clouds[n, :base] = 0
return top_clouds
def _find_echo_combinations_in_liquid(self) -> np.ndarray:
"""Classifies liquid clouds by detection type: 1=lidar, 2=radar, 3=both."""
lidar_detected = (self.is_liquid & self.echo["lidar"]).astype(int)
radar_detected = (self.is_liquid & self.echo["radar"]).astype(int) * 2
return lidar_detected + radar_detected
@staticmethod
def _find_lidar_only_clouds(detection: np.ndarray) -> np.ndarray:
"""Finds top clouds that contain only lidar-detected pixels.
Args:
detection: Array of integers where 1=lidar, 2=radar, 3=both.
Returns:
Boolean array containing top-clouds that are detected only by lidar.
"""
sum_of_cloud_pixels = ma.sum(detection > 0, axis=1)
sum_of_detection_type = ma.sum(detection, axis=1)
return sum_of_cloud_pixels / sum_of_detection_type == 1
def _remove_good_profiles(self, top_clouds: np.ndarray) -> np.ndarray:
no_rain = ~self.lwc_source.is_rain.astype(bool)
lwp_difference = self._find_lwp_difference()
dubious_profiles = (lwp_difference < 0) & no_rain
top_clouds[~dubious_profiles, :] = 0
return top_clouds
def _find_lwp_difference(self) -> np.ndarray:
"""Returns difference of theoretical LWP and measured LWP (g/m2).
In theory, this difference should be always positive. Negative values
indicate missing (or too narrow) liquid clouds.
"""
lwc_sum = ma.sum(self.lwc_adiabatic, axis=1) * self.lwc_source.dheight
return lwc_sum - self.lwc_source.lwp
def _mask_rain(self) -> None:
is_rain = self.lwc_source.is_rain.astype(bool)
self.status[is_rain, :] = 4
def _mask_missing(self) -> None:
is_missing = np.where(self.lwc_source.lwp == ma.masked)
self.status[is_missing, :] = 4
class LwcError:
"""Calculates liquid water content error.
Args:
lwc_source: The :class:`LwcSource` instance.
lwc: The :class:`Lwc` instance.
Attributes:
lwc_source (LwcSource): The :class:`LwcSource` instance.
lwc (ndarray): Liquid water content data.
error (ndarray): 2D array storing lwc_error.
"""
def __init__(self, lwc_source: LwcSource, lwc: Lwc):
self.lwc = lwc.lwc
self.lwc_source = lwc_source
self.error = self._calculate_lwc_error()
self._mask_rain()
def _calculate_lwc_error(self) -> np.ndarray:
lwc_relative_error = self._calc_lwc_relative_error()
lwp_relative_error = self._calc_lwp_relative_error()
combined_error = self._calc_combined_error(lwc_relative_error, lwp_relative_error)
return self._fill_error_array(combined_error)
def _calc_lwc_relative_error(self) -> np.ndarray:
lwc_gradient = self._calc_lwc_gradient()
error = lwc_gradient / self.lwc / 2
return self._limit_error(error, 5)
def _calc_lwc_gradient(self) -> np.ndarray:
assert isinstance(self.lwc, ma.MaskedArray)
gradient_elements = np.gradient(self.lwc.filled(0))
return utils.l2norm(*gradient_elements)
def _calc_lwp_relative_error(self) -> np.ndarray:
err = self.lwc_source.lwp_error
value = self.lwc_source.lwp
error = np.divide(err, value, out=np.zeros_like(err), where=value != 0)
return self._limit_error(error, 10)
@staticmethod
def _limit_error(error: np.ndarray, max_value: float) -> np.ndarray:
error[error > max_value] = max_value
return error
@staticmethod
def _calc_combined_error(error_2d: np.ndarray, error_1d: np.ndarray) -> np.ndarray:
error_1d_transposed = utils.transpose(error_1d)
return utils.l2norm(error_2d, error_1d_transposed)
def _fill_error_array(self, error_in: np.ndarray) -> np.ndarray:
lwc_error = ma.masked_all(self.lwc.shape)
ind = ma.where(self.lwc)
lwc_error[ind] = error_in[ind]
return lwc_error
def _mask_rain(self) -> None:
is_rain = self.lwc_source.is_rain.astype(bool)
self.error[is_rain, :] = ma.masked
COMMENTS = {
"lwc": (
"This variable was calculated for the profiles where the categorization data has\n"
"diagnosed that liquid water is present and liquid water path is available from\n"
"a coincident microwave radiometer. The model temperature and pressure were used\n"
"to estimate the theoretical adiabatic liquid water content gradient for each\n"
"cloud base and the adiabatic liquid water content is then scaled that its\n"
"integral matches the radiometer measurement so that the liquid water content\n"
"now follows a quasi-adiabatic profile. If the liquid layer is detected by the\n"
"lidar only, there is the potential for cloud top height to be underestimated\n"
"and so if the adiabatic integrated liquid water content is less than that\n"
"measured by the microwave radiometer, the cloud top is extended until the\n"
"adiabatic integrated liquid water content agrees with the value measured by the\n"
"microwave radiometer. Missing values indicate that either\n"
"1) a liquid water layer was diagnosed but no microwave radiometer data was\n"
" available,\n"
"2) a liquid water layer was diagnosed but the microwave radiometer data was\n"
" unreliable; this may be because a melting layer was present in the profile,\n"
" or because the retrieved lwp was unphysical (values of zero are not uncommon\n"
" for thin supercooled liquid layers)\n"
"3) that rain is present in the profile and therefore, the vertical extent of\n"
" liquid layers is difficult to ascertain."
),
"lwc_error": (
"This variable is an estimate of the random error in liquid water content\n"
"due to the uncertainty in the microwave radiometer liquid water path\n"
"retrieval and the uncertainty in cloud base and/or cloud top height."
),
"lwc_retrieval_status": (
"This variable describes whether a retrieval was performed for each pixel, and\n"
"its associated quality, in the form of 6 different classes. The classes are\n"
"defined in the definition attribute. The most reliable retrieval is that when\n"
"both radar and lidar detect the liquid layer, and microwave radiometer data is\n"
"present, indicated by the value 1. The next most reliable is when microwave\n"
"radiometer data is used to adjust the cloud depth when the radar does not\n"
"detect the liquid layer, indicated by the value 2, with a value of 3 indicating\n"
"the cloud pixels that have been added at cloud top to avoid the profile\n"
"becoming superadiabatic. A value of 4 indicates that microwave radiometer data\n"
"were not available or not reliable (melting level present or unphysical values)\n"
"but the liquid layers were well defined. If cloud top was not well defined\n"
"then this is indicated by a value of 5. The full retrieval of liquid water\n"
"content, which requires reliable liquid water path from the microwave\n"
"radiometer, was only performed for classes 1-3. No attempt is made to retrieve\n"
"liquid water content when rain is present; this is indicated by the value 6."
),
}
DEFINITIONS = {
"lwc_retrieval_status": (
"\n"
"Value 0: No liquid water detected.\n"
"Value 1: Reliable retrieval.\n"
"Value 2: Adiabatic retrieval where cloud top has been adjusted to match liquid\n"
" water path from microwave radiometer because layer is not detected by radar.\n"
"Value 3: Adiabatic retrieval: new cloud pixels where cloud top has been\n"
" adjusted to match liquid water path from microwave radiometer because\n"
" layer is not detected by radar.\n"
"Value 4: No retrieval: either no liquid water path is available or liquid water\n"
" path is uncertain.\n"
"Value 5: No retrieval: liquid water layer detected only by the lidar and liquid\n"
" water path is unavailable or uncertain: cloud top may be higher than\n"
" diagnosed cloud top since lidar signal has been attenuated.\n"
"Value 6: Rain present: cloud extent is difficult to ascertain and liquid water\n"
" path also uncertain."
)
}
LWC_ATTRIBUTES = {
"lwc": MetaData(
long_name="Liquid water content", comment=COMMENTS["lwc"], ancillary_variables="lwc_error"
),
"lwc_error": MetaData(
long_name="Random error in liquid water content, one standard deviation",
comment=COMMENTS["lwc_error"],
units="dB",
),
"lwc_retrieval_status": MetaData(
long_name="Liquid water content retrieval status",
comment=COMMENTS["lwc_retrieval_status"],
definition=DEFINITIONS["lwc_retrieval_status"],
units="1",
),
}
|
the-stack_0_26228
|
from .config import Config
from .logger import PosLogger
from .web import Web
class App(object):
APP_INITIAL_MSG = "POS web"
def __init__(self, version, params):
self.version = version
print("{} {}".format(self.APP_INITIAL_MSG, self.version))
self.config = Config(params = params)
self.logger = PosLogger(self.config, version)
self.logger.info(self.APP_INITIAL_MSG + " started")
def run(self, application):
self.application = application
self.application.run(self.config, self.logger)
def finished(self):
self.logger.info(self.APP_INITIAL_MSG + " finished")
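# Illustrative wiring (an added sketch, not part of the original module; assumes `params`
# is whatever Config expects, e.g. parsed CLI options, and that Web() takes no arguments):
#   app = App("0.1.0", params={})
#   app.run(Web())
#   app.finished()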
|
the-stack_0_26231
|
#!/usr/bin/python3
# Mostly copied from https://picamera.readthedocs.io/en/release-1.13/recipes2.html
# Run this script, then point a web browser at http://<this-ip-address>:8000
# Note: needs simplejpeg to be installed (pip3 install simplejpeg).
from picamera2.picamera2 import *
from picamera2.encoders.jpeg_encoder import *
import io
import logging
import socketserver
from threading import Condition, Thread
from http import server
PAGE = """\
<html>
<head>
<title>picamera2 MJPEG streaming demo</title>
</head>
<body>
<h1>Picamera2 MJPEG Streaming Demo</h1>
<img src="stream.mjpg" width="640" height="480" />
</body>
</html>
"""
class StreamingOutput(io.BufferedIOBase):
def __init__(self):
self.frame = None
self.condition = Condition()
def write(self, buf):
with self.condition:
self.frame = buf
self.condition.notify_all()
class StreamingHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
if self.path == '/':
self.send_response(301)
self.send_header('Location', '/index.html')
self.end_headers()
elif self.path == '/index.html':
content = PAGE.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(content))
self.end_headers()
self.wfile.write(content)
elif self.path == '/stream.mjpg':
self.send_response(200)
self.send_header('Age', 0)
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
self.end_headers()
try:
while True:
with output.condition:
output.condition.wait()
frame = output.frame
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', len(frame))
self.end_headers()
self.wfile.write(frame)
self.wfile.write(b'\r\n')
except Exception as e:
logging.warning(
'Removed streaming client %s: %s',
self.client_address, str(e))
else:
self.send_error(404)
self.end_headers()
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
allow_reuse_address = True
daemon_threads = True
picam2 = Picamera2()
picam2.start_preview()
picam2.configure(picam2.video_configuration(main={"size": (640, 480)}))
output = StreamingOutput()
picam2.start_recording(JpegEncoder(), output)
try:
address = ('', 8000)
server = StreamingServer(address, StreamingHandler)
server.serve_forever()
finally:
picam2.stop_recording()
|
the-stack_0_26232
|
import asyncio
from gear import Database
async def async_main():
db = Database()
await db.async_init()
    await db.just_execute("INSERT INTO hello2 (greeting) VALUES ('hello, hello!');")
loop = asyncio.get_event_loop()
loop.run_until_complete(async_main())
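# Note (added): on Python 3.7+ the same one-shot execution can be written as
# asyncio.run(async_main()), which creates and closes the event loop automatically.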
|
the-stack_0_26234
|
# coding: utf8
from __future__ import unicode_literals
_exc = {
# Slang
'прив': 'привет',
'дарова': 'привет',
'дак': 'так',
'дык': 'так',
'здарова': 'привет',
'пакедава': 'пока',
'пакедаво': 'пока',
'ща': 'сейчас',
'спс': 'спасибо',
'пжлст': 'пожалуйста',
'плиз': 'пожалуйста',
'ладненько': 'ладно',
'лады': 'ладно',
'лан': 'ладно',
'ясн': 'ясно',
'всм': 'всмысле',
'хош': 'хочешь',
'хаюшки': 'привет',
'оч': 'очень',
'че': 'что',
'чо': 'что',
'шо': 'что'
}
NORM_EXCEPTIONS = {}
for string, norm in _exc.items():
NORM_EXCEPTIONS[string] = norm
NORM_EXCEPTIONS[string.title()] = norm
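# Illustrative wiring (an added sketch following the spaCy v2 language-defaults pattern;
# the names below are assumptions, not part of this file): the exceptions are typically
# merged into the NORM lexical attribute getter of the language defaults, e.g.
#   lex_attr_getters[NORM] = add_lookups(
#       Language.Defaults.lex_attr_getters[NORM], BASE_NORMS, NORM_EXCEPTIONS)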
|
the-stack_0_26236
|
import numpy as np
import os
import tensorflow as tf
import pathlib
import glob
import fnmatch
# this is important to ensure that your script is using the local "utils" folder
os.chdir(os.getcwd())
from PIL import Image
# export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim # Don't forget to run this on the research folder!!!
from utils import ops as utils_ops
from utils import label_map_util
from utils import visualization_utils as vis_util
# patch tf1 into `utils.ops`
utils_ops.tf = tf.compat.v1
# Patch the location of gfile
tf.gfile = tf.io.gfile
# If you don't want to use colab, change the path here!
PATH_TO_LABELS = '/content/tf-models/research/object_detection/label_map/label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
PATH_TO_UNLABELED_IMAGES_DIR = pathlib.Path('/content/tf-models/research/object_detection/unlabeled_data')
UNLABELED_IMAGE_PATHS = sorted(list(PATH_TO_UNLABELED_IMAGES_DIR.glob("*.jpg")))
def load_model(mode_dir):
model_dir = pathlib.Path(mode_dir)
model = tf.compat.v2.saved_model.load(str(model_dir), None)
model = model.signatures['serving_default']
return model
def run_inference_for_single_image(model, image):
image = np.asarray(image)
# The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
input_tensor = tf.convert_to_tensor(image)
# The model expects a batch of images, so add an axis with `tf.newaxis`.
input_tensor = input_tensor[tf.newaxis,...]
# Run inference
output_dict = model(input_tensor)
    # All outputs are batch tensors.
# Convert to numpy arrays, and take index [0] to remove the batch dimension.
# We're only interested in the first num_detections.
num_detections = int(output_dict.pop('num_detections'))
output_dict = {key:value[0, :num_detections].numpy()
for key,value in output_dict.items()}
output_dict['num_detections'] = num_detections
# detection_classes should be ints.
output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
# Handle models with masks:
if 'detection_masks' in output_dict:
        # Reframe the bbox mask to the image size.
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
output_dict['detection_masks'], output_dict['detection_boxes'],
image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
tf.uint8)
output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
return output_dict
detection_model = load_model('/content/tf-models/research/fine_tuned_model/saved_model')
def show_inference(model, image_path):
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
file_name = image_path.stem
image_np = np.array(Image.open(image_path))
# Actual detection.
output_dict = run_inference_for_single_image(model, image_np)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
xml_file_name=file_name,
instance_masks=output_dict.get('detection_masks_reframed', None),
use_normalized_coordinates=True,
line_thickness=8)
# image = Image.fromarray(image_np)
# image.show()
for image_path in UNLABELED_IMAGE_PATHS:
show_inference(detection_model, image_path)
def partition_data(test_ratio):
    # list the newly labeled JPG files and count the images already in the train/test folders
new_labeled_files_count = fnmatch.filter(os.listdir('/content/tf-models/research/object_detection/labeled_data/'), '*.jpg')
train_images_count = len(fnmatch.filter(os.listdir('/content/tf-models/research/object_detection/train_images'), '*.jpg'))
test_images_count = len(fnmatch.filter(os.listdir('/content/tf-models/research/object_detection/test_images'), '*.jpg'))
if len(new_labeled_files_count) > 0:
        # calculate how many images should go to the test folder (all the rest go to the train folder)
test_quantity = int((test_ratio*(
len(new_labeled_files_count) + train_images_count + test_images_count)) / 100)
for image in new_labeled_files_count:
if test_quantity > 0:
new_img_path = '/content/tf-models/research/object_detection/test_images/' + image # set path to test folder
new_xml_path = '/content/tf-models/research/object_detection/test_images/' + image.replace('jpg','xml')
else:
new_img_path = '/content/tf-models/research/object_detection/train_images/' + image # set path to train folder
new_xml_path = '/content/tf-models/research/object_detection/train_images/' + image.replace('jpg','xml')
old_img_path = '/content/tf-models/research/object_detection/labeled_data/' + image
old_xml_path = '/content/tf-models/research/object_detection/labeled_data/' + image.replace('jpg','xml')
os.rename(old_img_path, new_img_path) # move JPG
os.rename(old_xml_path, new_xml_path) # move XML
            test_quantity -= 1  # decrement the test quota; once it reaches 0, remaining files go to train
print('IMAGES TRANSPORTED')
partition_data(20) # pass an int between 0 and 100
|
the-stack_0_26237
|
import types
from flask import Blueprint
from flask.ext.restful import Api, reqparse
def api_route(self, *args, **kwargs):
def wrapper(cls):
self.add_resource(cls, *args, **kwargs)
return cls
return wrapper
default_per_page = 5
parser = reqparse.RequestParser()
parser.add_argument('per_page', type=int, location='args')
parser.add_argument('page', type=int, location='args')
api_bp = Blueprint('api', __name__, url_prefix='/api')
api = Api(api_bp)
api.route = types.MethodType(api_route, api)
import user, book, comment, log, tag
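# Illustrative usage of the monkey-patched decorator (an added sketch; UserAPI and its URL
# are hypothetical, and Resource is assumed to come from the same flask-restful package):
#   @api.route('/users/<int:user_id>')
#   class UserAPI(Resource):
#       def get(self, user_id):
#           ...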
|
the-stack_0_26238
|
#!/usr/bin/env python
""""
Simple implementation of http://arxiv.org/pdf/1502.04623v2.pdf in TensorFlow
Example Usage:
python draw.py --data_dir=/tmp/draw --read_attn=True --write_attn=True
Author: Eric Jang
"""
import tensorflow as tf
from tensorflow.examples.tutorials import mnist
import numpy as np
import os
import time
import sys
import load_trace
tf.flags.DEFINE_string("data_dir", "", "")
tf.flags.DEFINE_boolean("read_attn", True, "enable attention for reader")
tf.flags.DEFINE_boolean("write_attn",True, "enable attention for writer")
FLAGS = tf.flags.FLAGS
## MODEL PARAMETERS ##
A,B = 100,100 # image width,height
img_size = B*A # the canvas size
enc_size = 256 # number of hidden units / output size in LSTM
dec_size = 256
read_n = 10#12 # read glimpse grid width/height
write_n = 5#12 # write glimpse grid width/height
read_size = 2*read_n*read_n if FLAGS.read_attn else 2*img_size
write_size = write_n*write_n if FLAGS.write_attn else img_size
z_size = 9#10#2 # QSampler output size
T = 5#100 # MNIST generation sequence length
batch_size = 1#00 # training minibatch size
train_iters = 500000
learning_rate = 1e-3 # learning rate for optimizer
eps = 1e-8 # epsilon for numerical stability
## BUILD MODEL ##
DO_SHARE=None # workaround for variable_scope(reuse=True)
x = tf.placeholder(tf.float32,shape=(batch_size,img_size)) # input (batch_size * img_size)
onehot_labels = tf.placeholder(tf.float32, shape=(batch_size, z_size))
lstm_enc = tf.contrib.rnn.LSTMCell(enc_size, state_is_tuple=True) # encoder Op
lstm_dec = tf.contrib.rnn.LSTMCell(dec_size, state_is_tuple=True) # decoder Op
def linear(x,output_dim):
"""
affine transformation Wx+b
assumes x.shape = (batch_size, num_features)
"""
w=tf.get_variable("w", [x.get_shape()[1], output_dim])
b=tf.get_variable("b", [output_dim], initializer=tf.constant_initializer(0.0))
return tf.matmul(x,w)+b
def filterbank(gx, gy, sigma2,delta, N):
grid_i = tf.reshape(tf.cast(tf.range(N), tf.float32), [1, -1])
mu_x = gx + (grid_i - N / 2 - 0.5) * delta # eq 19
mu_y = gy + (grid_i - N / 2 - 0.5) * delta # eq 20
a = tf.reshape(tf.cast(tf.range(A), tf.float32), [1, 1, -1])
b = tf.reshape(tf.cast(tf.range(B), tf.float32), [1, 1, -1])
mu_x = tf.reshape(mu_x, [-1, N, 1])
mu_y = tf.reshape(mu_y, [-1, N, 1])
sigma2 = tf.reshape(sigma2, [-1, 1, 1])
Fx = tf.exp(-tf.square((a - mu_x) / (2*sigma2))) # 2*sigma2?
Fy = tf.exp(-tf.square((b - mu_y) / (2*sigma2))) # batch x N x B
# normalize, sum over A and B dims
Fx=Fx/tf.maximum(tf.reduce_sum(Fx,2,keep_dims=True),eps)
Fy=Fy/tf.maximum(tf.reduce_sum(Fy,2,keep_dims=True),eps)
return Fx,Fy
def attn_window(scope,h_dec,N):
with tf.variable_scope(scope,reuse=DO_SHARE):
params=linear(h_dec,5)
gx_,gy_,log_sigma2,log_delta,log_gamma=tf.split(params,5,1)
gx1=(A+1)/2*(gx_+1)
gy1=(B+1)/2*(gy_+1)
gx = gx1
gy = gy1
# gx = tf.where(tf.less(gx1, tf.zeros_like(gx1) + A), gx1, tf.zeros_like(gx1) + A)
# gx = tf.where(tf.greater(gx1, tf.zeros_like(gx1)), gx1, tf.zeros_like(gx1))
# gy = tf.where(tf.less(gy1, tf.zeros_like(gy1) + B), gy1, tf.zeros_like(gy1) + B)
# gy = tf.where(tf.greater(gy1, tf.zeros_like(gy1)), gy1, tf.zeros_like(gy1))
sigma2=tf.exp(log_sigma2)
d = (max(A,B)-1)/(N-1)*tf.exp(log_delta) # batch x N
delta = d
# delta = tf.where(tf.less(d, tf.zeros_like(d) + A / read_n), d, tf.zeros_like(d) + A / read_n)
Fx, Fy = filterbank(gx,gy,sigma2,delta,N)
gamma = tf.exp(log_gamma)
return Fx, Fy, gamma, gx, gy, delta
## READ ##
def read(x,h_dec_prev):
Fx,Fy,gamma, gx, gy, delta=attn_window("read",h_dec_prev,read_n)
stats = Fx, Fy, gamma
new_stats = gx, gy, delta
def filter_img(img,Fx,Fy,gamma,N):
Fxt=tf.transpose(Fx,perm=[0,2,1])
img=tf.reshape(img,[-1,B,A])
glimpse=tf.matmul(Fy,tf.matmul(img,Fxt))
glimpse=tf.reshape(glimpse,[-1,N*N])
return glimpse*tf.reshape(gamma,[-1,1])
x=filter_img(x,Fx,Fy,gamma,read_n) # batch x (read_n*read_n)
return x, new_stats
## ENCODE ##
def encode(input, state):
"""
run LSTM
state = previous encoder state
input = cat(read,h_dec_prev)
returns: (output, new_state)
"""
with tf.variable_scope("encoder/LSTMCell",reuse=DO_SHARE):
return lstm_enc(input,state)
## DECODER ##
def decode(input, state):
with tf.variable_scope("decoder/LSTMCell",reuse=DO_SHARE):
return lstm_dec(input, state)
## STATE VARIABLES ##
# initial states
h_dec_prev=tf.zeros((batch_size,dec_size))
enc_state=lstm_enc.zero_state(batch_size, tf.float32)
dec_state=lstm_dec.zero_state(batch_size, tf.float32)
## DRAW MODEL ##
viz_data = list()
pqs = list()
# construct the unrolled computational graph
for t in range(T):
r, stats = read(x, h_dec_prev)
h_enc, enc_state = encode(tf.concat([r, h_dec_prev], 1), enc_state)
with tf.variable_scope("z",reuse=DO_SHARE):
z = linear(h_enc, z_size)
h_dec, dec_state = decode(z, dec_state)
h_dec_prev = h_dec
with tf.variable_scope("hidden1",reuse=DO_SHARE):
hidden = tf.nn.relu(linear(h_dec_prev, 256))
with tf.variable_scope("output",reuse=DO_SHARE):
classification = tf.nn.softmax(linear(hidden, z_size))
viz_data.append({
"classification": classification,
"r": r,
"h_dec": h_dec,
"stats": stats,
})
DO_SHARE=True # from now on, share variables
pq = tf.log(classification + 1e-5) * onehot_labels
pq = tf.reduce_mean(pq, 0)
pqs.append(pq)
predquality = tf.reduce_mean(pqs)
correct = tf.arg_max(onehot_labels, 1)
prediction = tf.arg_max(classification, 1)
R = tf.cast(tf.equal(correct, prediction), tf.float32)
reward = tf.reduce_mean(R)
## LOSS FUNCTION ##
def binary_crossentropy(t,o):
return -(t*tf.log(o+eps) + (1.0-t)*tf.log(1.0-o+eps))
def evaluate():
data = load_trace.TraceData()
data.get_test(1)
batches_in_epoch = len(data.images) // batch_size
accuracy = 0
for i in range(batches_in_epoch):
nextX, nextY = data.next_batch(batch_size)
feed_dict = {x: nextX, onehot_labels:nextY}
r = sess.run(reward, feed_dict=feed_dict)
accuracy += r
accuracy /= batches_in_epoch
print("ACCURACY: " + str(accuracy))
return accuracy
predcost = -predquality
## OPTIMIZER ##
optimizer=tf.train.AdamOptimizer(learning_rate, epsilon=1)
grads=optimizer.compute_gradients(predcost)
for i,(g,v) in enumerate(grads):
if g is not None:
grads[i]=(tf.clip_by_norm(g,5),v) # clip gradients
train_op=optimizer.apply_gradients(grads)
## RUN TRAINING ##
#data_directory = os.path.join(FLAGS.data_dir, "mnist")
#if not os.path.exists(data_directory):
# os.makedirs(data_directory)
#train_data = mnist.input_data.read_data_sets(data_directory, one_hot=True).train # binarized (0-1) mnist data
train_data = load_trace.TraceData()
train_data.get_train()
fetches=[]
fetches.extend([reward, train_op])
if __name__ == '__main__':
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
sess=tf.InteractiveSession()
saver = tf.train.Saver() # saves variables learned during training
tf.global_variables_initializer().run()
## CHANGE THE MODEL SETTINGS HERE #########################
model_directory = "model_runs/blob_classification_5_5_0_9"
if not os.path.exists(model_directory):
os.makedirs(model_directory)
start_ckpt = 0
#saver.restore(sess, model_directory + "/drawmodel.ckpt") # to restore from model, uncomment this line
#saver.restore(sess, model_directory + "/drawmodel_" + str(start_ckpt) + ".ckpt") # to restore from model, uncomment this line, may need to change filename!!!
start_time = time.clock()
extra_time = 0
for i in range(start_ckpt, train_iters):
xtrain, ytrain = train_data.next_batch(batch_size) # xtrain is (batch_size x img_size)
feed_dict={x:xtrain, onehot_labels: ytrain}
results=sess.run(fetches,feed_dict)
reward_fetched, _ = results
if i%100 == 0:
print("iter=%d : Reward: %f" % (i, reward_fetched))
sys.stdout.flush()
if i%1000==0:
train_data = load_trace.TraceData()
train_data.get_train()
if i %10000==0:
## SAVE TRAINING CHECKPOINT ##
start_evaluate = time.clock()
test_accuracy = evaluate()
saver = tf.train.Saver(tf.global_variables())
extra_time = extra_time + time.clock() - start_evaluate
print("--- %s CPU seconds ---" % (time.clock() - start_time - extra_time))
ckpt_file=os.path.join(FLAGS.data_dir, model_directory + "/drawmodel_" + str(i) + ".ckpt")
print("Model saved in file: %s" % saver.save(sess,ckpt_file))
sess.close()
|
the-stack_0_26240
|
import torch
import torch.nn as nn
from graph_model.dynamic_gnn_with_mtgat_prune import DynamicMTGATPruneModel
from consts import GlobalConsts as gc
class NetMTGATAverageUnalignedConcatMHA(nn.Module):
def __init__(self, num_gat_layers, use_transformer=False, use_prune=False, use_pe=False):
super(NetMTGATAverageUnalignedConcatMHA, self).__init__()
if use_transformer:
raise NotImplementedError
# if not use_prune:
# self.dgnn = DynamicGNNModelWithTransformerPadding(gc.config, concat=True, num_gat_layers=num_gat_layers)
# else:
# self.dgnn = DynamicGNNModelWithTransformerPaddingPrune(gc.config, concat=True,
# num_gat_layers=num_gat_layers)
else:
if not use_prune:
raise NotImplementedError('Only pruned version is implemented now.')
else:
self.dgnn = DynamicMTGATPruneModel(gc.config, concat=True, num_gat_layers=num_gat_layers, use_pe=use_pe)
label_dim = 1
if gc.dataset == "mosei":
label_dim = 7
elif gc.dataset in ['iemocap', 'iemocap_unaligned']:
            label_dim = 8  # 2 x 4 categories
self.finalW = nn.Sequential(
nn.Linear(gc.config['graph_conv_out_dim'], gc.config['graph_conv_out_dim'] // 4),
nn.ReLU(),
# nn.Linear(gc.config['graph_conv_out_dim'] // 4, label_dim),
nn.Linear(gc.config['graph_conv_out_dim'] // 4, gc.config['graph_conv_out_dim'] // 4),
nn.ReLU(),
nn.Linear(gc.config['graph_conv_out_dim'] // 4, label_dim),
)
def forward(self, **kwargs):
state = self.dgnn(**kwargs)
state = torch.stack([torch.mean(state_i, dim=0) for state_i in state], 0)
return self.finalW(state).squeeze()
def inference_return_layer_outputs(self, **kwargs):
state, batch, nodes_rec, edge_indices_rec, edge_weights_rec, edge_types_rec = self.dgnn(**kwargs)
state = torch.stack([torch.mean(state_i, dim=0) for state_i in state], 0)
return self.finalW(state).squeeze(), batch, nodes_rec, edge_indices_rec, edge_weights_rec, edge_types_rec
|
the-stack_0_26241
|
"""Tests for httplib2 when the socket module is missing.
This helps ensure compatibility with environments such as AppEngine.
"""
import os
import sys
import unittest
import httplib2
class MissingSocketTest(unittest.TestCase):
def setUp(self):
self._oldsocks = httplib2.socks
httplib2.socks = None
def tearDown(self):
httplib2.socks = self._oldsocks
def testProxyDisabled(self):
proxy_info = httplib2.ProxyInfo('blah',
'localhost', 0)
client = httplib2.Http(proxy_info=proxy_info)
self.assertRaises(httplib2.ProxiesUnavailableError,
client.request, 'http://localhost:-1/')
|
the-stack_0_26242
|
import asyncio
class EchoServerClientProtocol(asyncio.Protocol):
def connection_made(self, transport):
peername = transport.get_extra_info('peername')
print('Connection from {}'.format(peername))
self.transport = transport
def data_received(self, data):
message = data.decode()
print('Data received: {!r}'.format(message))
reply = input()
print('Send: {!r}'.format(reply))
self.transport.write(reply.encode())
#print('Close the client socket')
#self.transport.close()
loop = asyncio.get_event_loop()
# Each client connection will create a new protocol instance
coro = loop.create_server(EchoServerClientProtocol, '127.0.0.1', 8888)
server = loop.run_until_complete(coro)
# Serve requests until CTRL+c is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
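# A matching client for manual testing (an added sketch, not part of the original script;
# the server operator must type a reply on stdin for each received message):
#   import asyncio
#   async def client(msg):
#       reader, writer = await asyncio.open_connection('127.0.0.1', 8888)
#       writer.write(msg.encode())
#       print('Reply:', (await reader.read(100)).decode())
#       writer.close()
#   asyncio.run(client('hello'))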
|
the-stack_0_26244
|
# -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-16 14:39:58
# @Last Modified by: YangZhou
# @Last Modified time: 2018-01-11 15:21:38
import aces.tools as tl
from aces.graph import fig, setLegend, pl
import numpy as np
from aces.f import binmeanx
from aces.io.shengbte import get_w_final, get_qpoints, get_omega, get_tau, get_v
text_style = dict(
horizontalalignment='left',
verticalalignment='center',
fontsize=12,
fontdict={'family': 'serif'})
vs = '2l1,2lh,3l1,4l1,5l1,6l1'.split(',')
with fig('gv.eps'):
fi, axes = pl.subplots(2, 3, sharex=True, sharey=True, figsize=(8, 5))
for i, v in enumerate(vs):
print(v)
ax = axes[i // 3, i % 3]
if v == "2lh":
v = "2lhex"
if v == "2lr":
v = "2lrt3"
dir = "%s/0/SHENG" % v
# file = v + "/0/secondorder/groupv/mesh.yaml"
# data = tl.parseyaml(file)
# freqs = []
# gvs = []
# for phonon in data['phonon']:
# qp = phonon['q-position']
# for band in phonon['band']:
# frequency = band['frequency']
# gv = np.array(band['group_velocity'])
# freqs.append(frequency)
# gvs.append(gv)
omega = get_omega(dir)
cv = get_v(dir)
freqs = omega.flatten()
gvs = np.c_[cv[:, :, 0].flatten(), cv[:, :, 1].flatten()]
freqs = np.array(freqs)
gvs = np.array(gvs)
gvs = np.abs(gvs)
N = 40
df = 20.0 / N
v1 = v
if v1 == '2lhex':
v1 = '2l3'
x, y = binmeanx(np.c_[freqs, gvs[:, 0]], [0, 20], df)
ax.plot(x, y, color="k", lw=3, label=v1 + "z")
x, y = binmeanx(np.c_[freqs, gvs[:, 1]], [0, 20], df)
ax.plot(
x,
y,
color="r",
ls="--",
lw=3,
label=(
v1 +
"a").replace(
'l',
'L'))
# ax.text(.02,.8,"("+v+")",transform=ax.transAxes,**text_style)
setLegend(ax, ncol=1, fontsize=12)
# ax.set_yticks([])
ax.set_xlim([0.05, 19.9])
ax.set_ylim([0.05, None])
fi.text(0.5, 0.00, 'Phonon Frequency (THz)', ha='center')
fi.text(
0.07,
0.5,
'Phonon Group Velocity (A/ps)',
va='center',
rotation='vertical')
fi.subplots_adjust(
left=None, bottom=None, right=None, top=None, wspace=0, hspace=0)
|
the-stack_0_26245
|
#!/usr/bin/python
import sys
def mixupstep(conffile,savepath):
conf=open(conffile);
for line in conf.readlines():
save=open('%s/mixupstep%d.hed'%(savepath,int(line)),'w');
# if int(line)==1:
# save.write('MU %d {w.state[2].mix}\n'%(int(line)*2));
# else :
# save.write('MU %d {*.state[2].mix}\n'%(int(line)));
# save.write('MU %d {w.state[2].mix}\n'%(int(line)*2));
save.write('MU %d {*.state[2].mix}\n'%(int(line)*2));
save.close();
conf.close();
if __name__=='__main__':
mixupstep(sys.argv[1],sys.argv[2]);
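# Example invocation (an added sketch; the file names are hypothetical): given a config
# file whose lines are mixture counts, e.g. "2" and "4", running
#   python mixupstep.py mixconf.txt ./hed_out
# writes hed_out/mixupstep2.hed containing "MU 4 {*.state[2].mix}" and
# hed_out/mixupstep4.hed containing "MU 8 {*.state[2].mix}".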
|
the-stack_0_26247
|
# -*- coding:utf-8 -*-
import numpy as np
import collections
import math
import os
import random
import tensorflow as tf
import zipfile
os.environ["CUDA_VISIBLE_DEVICES"] = "4"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
session = tf.Session(config = config)
def read_data(filename):
with zipfile.ZipFile(filename) as f:
fread = f.namelist()[0]
content = f.read(fread)
data = tf.compat.as_str(content).split()
return data
filename = "text8.zip"
words = read_data(filename)
print("Data Size: %d" % len(words))
vocabulary_size = 30000
def build_dataset(words):
count = [["UNK", -1]]
wordscounts = collections.Counter(words)
words_common = wordscounts.most_common(vocabulary_size - 1)
count.extend(words_common)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
print("Most common words (including UNK):", count[:10])
print("Sample data:", data[:10])
del words
data_index = 0
def generate_batch(batch_size, skip_window):
global data_index
span = 2 * skip_window + 1
batch = np.ndarray(shape = (batch_size, span - 1), dtype = np.int32)
labels = np.ndarray(shape = (batch_size, 1), dtype = np.int32)
buffer = collections.deque(maxlen = span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size):
target = skip_window
targets_to_avoid = [skip_window]
col_idx = 0
for j in range(span):
if j == span // 2:
continue
batch[i, col_idx] = buffer[j]
col_idx += 1
labels[i, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
assert batch.shape[0] == batch_size and batch.shape[1] == span - 1
return batch, labels
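# Worked example (added note): with skip_window=1 each batch row holds the two context
# word ids around a center word, e.g. batch[0] == [data[0], data[2]] with
# labels[0, 0] == data[1], and batch[1] == [data[1], data[3]] with labels[1, 0] == data[2].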
num_steps = 100001
if __name__ == '__main__':
batch_size = 128
embedding_size = 128
skip_window = 1
valid_size = 16
valid_window = 100
valid_examples = np.array(random.sample(range(valid_window), valid_size // 2))
valid_examples = np.append(valid_examples,
random.sample(range(1000, 1000 + valid_window), valid_size // 2))
num_sampled = 64
graph = tf.Graph()
with graph.as_default():
train_dataset = tf.placeholder(tf.int32, shape = [batch_size, 2 * skip_window])
train_labels = tf.placeholder(tf.int32, shape = [batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype = tf.int32)
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev = 1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
embeds = None
for i in range(2 * skip_window):
embedding_i = tf.nn.embedding_lookup(embeddings, train_dataset[:, i])
print("embedding %d shape: %s" % (i, embedding_i.get_shape().as_list()))
emb_x, emb_y = embedding_i.get_shape().as_list()
if embeds is None:
embeds = tf.reshape(embedding_i, [emb_x, emb_y, 1])
else:
embeds = tf.concat([embeds, tf.reshape(embedding_i, [emb_x, emb_y, 1])], axis = 2)
# assert embeds.get_shape().as_list()[2] == 2 * skip_window
print("Concat embedding size: %s" % embeds.get_shape().as_list())
avg_embed = tf.reduce_mean(embeds, 2, keep_dims = False)
print("Average embedding size: %s" % avg_embed.get_shape().as_list())
loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(
weights = softmax_weights,
biases = softmax_biases,
inputs = avg_embed,
labels = train_labels,
num_sampled = num_sampled,
num_classes = vocabulary_size))
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims = True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
with tf.Session(graph = graph) as session:
tf.global_variables_initializer().run()
print("Initialized!")
average_loss = 0
for step in range(num_steps):
batch_data, batch_labels = generate_batch(
batch_size = batch_size,
skip_window = skip_window)
feed_dict = {train_dataset: batch_data, train_labels: batch_labels}
_, lo = session.run([optimizer, loss], feed_dict = feed_dict)
average_loss += lo
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
print("Average loss at step %d: %f " % (step, average_loss))
average_loss = 0
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8
nearest = (-sim[i, :]).argsort()[1: top_k + 1]
log = "Nearest to %s: " % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log = log + " " + close_word + ","
print(log)
final_embeddings = normalized_embeddings.eval()
|
the-stack_0_26248
|
import os
import dj_database_url
from .base import *
DEBUG = False
USE_X_FORWARDED_HOST = True
ALLOWED_HOSTS = ['*'] # proxied
#FORCE_SCRIPT_NAME = '/tock'
STATIC_ROOT = '/app/tock/tock/static/'
STATIC_URL = '/tock/static/'
DATABASES = {}
DATABASES['default'] = dj_database_url.config()
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
},
},
}
try:
from .local_settings import *
except ImportError:
pass
|
the-stack_0_26249
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from Commands.PythonCommandBase import PythonCommand, ImageProcPythonCommand
from Commands.Keys import KeyPress, Button, Direction, Stick
class InfinityFeather(PythonCommand):
NAME = '無限羽回収'
def __init__(self):
super().__init__()
def do(self):
        # For timing checks; add "import time" at the top before using these lines.
# start = time.time()
        # i = 0  # counter
print('Start collecting feathers')
while True:
self.wait(0.75)
# i += 1
# print('Map')
self.press(Button.X, wait=1.5) # open up a map
self.press(Button.A, wait=3.0)
self.press(Direction(Stick.LEFT, 45), duration=0.05) # Select a Pokémon Day Care
self.press(Button.A, wait=1)
self.press(Button.A, wait=4.0)
# print('pick feather')
self.press(Direction.DOWN_RIGHT, duration=0.15)
self.press(Direction.RIGHT, duration=3)
self.press(Button.A, wait=0.3)
self.press(Button.A, wait=0.3)
self.press(Button.A, wait=0.3)
self.press(Button.A, wait=0.3)
# print('Time leap')
self.timeLeap()
# tm = round(time.time() - start, 2)
# print('Loop : {} in {} sec. Average: {} sec/loop'.format(i, tm, round(tm / i, 2)))
|
the-stack_0_26250
|
import tensorflow as tf
import numpy as np
tf.set_random_seed(5)
n_inputs = 28
n_neurons = 150
n_layers = 3
n_steps = 28
n_outputs = 10
learning_rate = 0.001
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
multi_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons) for _ in range(3)])
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
top_layer_h_state = states[-1][1]
logits = tf.layers.dense(top_layer_h_state, n_outputs, name='softmax')
x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(x_entropy, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
states
top_layer_h_state
n_epochs = 25
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for k in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print("Epoch", epoch, 'Train acc: ', acc_train, "Test acc: ", acc_test)
|
the-stack_0_26251
|
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops import misc as misc_nn_ops
from allennlp.nn import Activation
class StdConv(nn.Module):
def __init__(self,
nin: int,
nout: int,
kernel_size: int = 3,
activation: Activation = nn.ReLU(),
stride: int = 1,
padding: int = 1,
dilation: int = 1,
dropout: float = 0.1):
super(StdConv, self).__init__()
self.conv = misc_nn_ops.Conv2d(nin, nout, kernel_size, stride=stride, padding=padding, dilation=dilation)
self.bn = misc_nn_ops.BatchNorm2d(nout)
self.drop = nn.Dropout(dropout)
self.activation = activation
def forward(self, x):
return self.drop(self.bn(self.activation(self.conv(x))))
class Upsample(nn.Module):
def __init__(self, in_channels: int, out_channels: int, num_stages: int, scale_factor: int = 2):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.convs = [StdConv(in_channels, out_channels, padding=1)] + \
[StdConv(out_channels, out_channels, padding=1) for _ in range(num_stages - 1)]
self.convs = nn.ModuleList(self.convs)
self.num_stages = num_stages
def forward(self, inputs: torch.Tensor):
out = inputs
for conv in self.convs:
out = conv.forward(out)
if self.num_stages != 0:
out = F.interpolate(out, scale_factor=self.scale_factor)
return out
class RPNHead(nn.Module):
def __init__(self, in_channels, num_anchors):
super(RPNHead, self).__init__()
self.conv = nn.Conv2d(
in_channels, in_channels, kernel_size=3, stride=1, padding=1
)
self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
self.bbox_pred = nn.Conv2d(
in_channels, num_anchors * 4, kernel_size=1, stride=1
)
def forward(self,
features: List[torch.Tensor]) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
objectness: List[torch.Tensor] = []
rpn_box_regression: List[torch.Tensor] = []
for feature in features:
t = F.relu(self.conv(feature))
objectness.append(self.cls_logits(t))
rpn_box_regression.append(self.bbox_pred(t))
return objectness, rpn_box_regression
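# Illustrative smoke test (an added sketch, not part of the original module): an RPN head
# with 9 anchors per location applied to two feature-map scales; per-level outputs are
# (N, num_anchors, H, W) objectness logits and (N, 4*num_anchors, H, W) box deltas.
if __name__ == "__main__":
    head = RPNHead(in_channels=256, num_anchors=9)
    feats = [torch.randn(1, 256, 64, 64), torch.randn(1, 256, 32, 32)]
    logits, deltas = head(feats)
    assert logits[0].shape == (1, 9, 64, 64) and deltas[0].shape == (1, 36, 64, 64)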
|
the-stack_0_26254
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the 'License'). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #1 $
def empty():
return {}
def ok_deployment_stack_empty(*args):
return {
'StackStatus': 'CREATE_COMPLETE',
'StackResources': {
'EmptyDeployment': {
'ResourceType': 'Custom::EmptyDeployment'
}
}
}
def ok_project_stack(permissions = {}):
result = {
'StackStatus': 'UPDATE_COMPLETE',
'StackResources': {
'AccessControl': {
'ResourceType': 'Custom::AccessControl'
},
'CloudGemPortal': {
'ResourceType': 'AWS::S3::Bucket'
},
'CloudGemPortalAdministratorRole': {
'ResourceType': 'AWS::IAM::Role'
},
'CloudGemPortalBucketPolicy': {
'ResourceType': 'AWS::S3::BucketPolicy'
},
'CloudGemPortalUserAccess': {
'ResourceType': 'AWS::IAM::ManagedPolicy'
},
'CloudGemPortalUserRole': {
'ResourceType': 'AWS::IAM::Role'
},
'Configuration': {
'ResourceType': 'AWS::S3::Bucket'
},
'ConfigurationBucketPolicy': {
'ResourceType': 'AWS::S3::BucketPolicy'
},
'CoreResourceTypes': {
'ResourceType': 'Custom::ResourceTypes'
},
'Logs': {
'ResourceType': 'AWS::S3::Bucket'
},
'Helper': {
'ResourceType': 'Custom::Helper'
},
'PlayerAccessTokenExchange': {
'ResourceType': 'AWS::Lambda::Function'
},
'PlayerAccessTokenExchangeExecution': {
'ResourceType': 'AWS::IAM::Role'
},
'ProjectAccess': {
'ResourceType': 'AWS::IAM::ManagedPolicy'
},
'ProjectAdmin': {
'ResourceType': 'AWS::IAM::Role'
},
'ProjectAdminRestrictions': {
'ResourceType': 'AWS::IAM::ManagedPolicy'
},
'ProjectOwner': {
'ResourceType': 'AWS::IAM::Role'
},
'ProjectOwnerAccess': {
'ResourceType': 'AWS::IAM::ManagedPolicy'
},
'ProjectIdentityPool': {
'ResourceType': 'Custom::CognitoIdentityPool'
},
'ProjectIdentityPoolAuthenticatedRole': {
'ResourceType': 'AWS::IAM::Role'
},
'ProjectIdentityPoolUnauthenticatedRole': {
'ResourceType': 'AWS::IAM::Role'
},
'ProjectResourceHandler': {
'ResourceType': 'AWS::Lambda::Function'
},
'ProjectResourceHandlerExecution': {
'ResourceType': 'AWS::IAM::Role'
},
'ProjectUserPool': {
'ResourceType': 'Custom::CognitoUserPool'
},
'ServiceApi': {
'ResourceType': 'Custom::ServiceApi'
},
'ServiceLambda': {
'ResourceType': 'AWS::Lambda::Function'
},
'ServiceLambdaConfiguration': {
'ResourceType': 'Custom::LambdaConfiguration'
},
'ServiceLambdaExecution': {
'ResourceType': 'AWS::IAM::ManagedPolicy'
}
}
}
for k,v in permissions.iteritems():
        result['StackResources'][k]['Permissions'] = v
return result
def ok_deployment_access_stack(permissions = {}):
result = {
'StackStatus': 'CREATE_COMPLETE',
'StackResources': {
'Player': {
'ResourceType': 'AWS::IAM::Role'
},
'Server': {
'ResourceType': 'AWS::IAM::Role'
},
'AccessControl': {
'ResourceType': 'Custom::AccessControl'
},
'PlayerAccessIdentityPool': {
'ResourceType': 'Custom::CognitoIdentityPool'
},
'PlayerLoginIdentityPool': {
'ResourceType': 'Custom::CognitoIdentityPool'
},
'PlayerLoginRole': {
'ResourceType': 'AWS::IAM::Role'
},
'DeploymentAdmin': {
'ResourceType': 'AWS::IAM::Role'
},
'DeploymentOwner': {
'ResourceType': 'AWS::IAM::Role'
},
'DeploymentAccess': {
'ResourceType': 'AWS::IAM::ManagedPolicy'
},
'DeploymentOwnerAccess': {
'ResourceType': 'AWS::IAM::ManagedPolicy'
},
'DeploymentAdminRestrictions': {
'ResourceType': 'AWS::IAM::ManagedPolicy'
},
'Helper': {
'ResourceType': 'Custom::Helper'
}
}
}
for k,v in permissions.iteritems():
        result['StackResources'][k]['Permissions'] = v
return result
|
the-stack_0_26255
|
from django.test import TestCase
from gw_cwfollowup.jwt_tools import jwt_get_user_by_payload
class TestJWTTools(TestCase):
def test_jwt_get_user_by_payload(self):
"""
Check that jwt_get_user_by_payload works as expected
"""
# Create a test payload
payload = {
'username': 'billy',
'userId': 43
}
# Get the user from the payload and verify that the returned GWCloudUser object is valid
user = jwt_get_user_by_payload(payload)
# Assert that the fields of the gwcloud user are accurate
self.assertEqual(user.username, payload['username'])
self.assertEqual(user.user_id, payload['userId'])
# The user object should indicate that the user is active
self.assertEqual(user.is_active, True)
# The user should also be authenticated by default
self.assertEqual(user.is_authenticated, True)
# The user should not be anonymous
self.assertEqual(user.is_anonymous, False)
|
the-stack_0_26256
|
#!/usr/bin/env python3.6
import sys
import os
import unittest
from pathlib import Path
sys.path.append(os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, os.pardir)))
sys.path.append(os.path.realpath(os.path.join(__file__, os.pardir, os.pardir)))
sys.path.append(os.path.realpath(os.path.join(__file__, os.pardir)))
import utils # do not remove, prevents cyclic import problems
import aYaml
from configVar import config_vars
from configVar import ConfigVarYamlReader
def normalize_yaml_lines(yaml_file):
retVal = list()
with open(yaml_file, "r") as rfd:
for line in rfd.readlines():
striped_line = line.strip()
if striped_line:
retVal.append(striped_line)
return retVal
class TestConfigVar(unittest.TestCase):
def setUp(self):
config_vars.clear()
def tearDown(self):
pass
def test_list_issues(self):
name_1 = ["Lili", "Marlen"]
name_2 = ["Lili", "Allen"]
config_vars["ALL_NAMES"] = "$(NAME_ONE)", "$(NAME_TWO)", "$(NAME_THREE)"
config_vars["NAME_ONE"] = name_1
config_vars["NAME_TWO"] = "shraga"
config_vars["NAME_THREE"] = name_2
# list() on the to configVar should return the lists of resolved values if a value is
# and only is a configVar reference $(...)
all_names = list(config_vars["ALL_NAMES"])
self.assertListEqual(name_1+["shraga"]+name_2, all_names)
def test_format(self):
config_vars["Number"] = "434"
config_vars["Ricki"] = ["Joe", "Merlin", "1938"]
str1 = f"""{config_vars["Ricki"]}{config_vars["Number"].int()}{config_vars["Ricki"].list()}"""
self.assertEqual("JoeMerlin1938434['Joe', 'Merlin', '1938']", str1)
def test_defaults(self):
empty_list = config_vars.get("MAMBO_JUMBO", []).list()
self.assertEqual([], empty_list)
full_list = config_vars.get("MAMBO_JUMBO", ["mambo", "jumbo"]).list()
self.assertEqual(["mambo", "jumbo"], full_list)
empty_str = config_vars.get("MAMBO_JUMBO", "").str()
self.assertEqual("", empty_str)
full_str = config_vars.get("MAMBO_JUMBO", "mambo jumbo").str()
self.assertEqual("mambo jumbo", full_str)
def test_bool(self):
        # a non-existing ConfigVar should resolve to False
self.assertFalse(config_vars.get("BEN_SHAPIRO"))
def test_var_in_var_simple(self):
config_vars["A"] = "$(B)"
config_vars["B"] = "$(C)"
config_vars["C"] = "ali baba"
self.assertEqual("ali baba", config_vars["A"].str())
self.assertEqual("ali baba", config_vars.resolve_str("$(A)"))
def test_array(self):
config_vars["PUSHKIN"] ="1", "2", "3"
self.assertEqual("123", config_vars["PUSHKIN"].str())
self.assertEqual("123", config_vars.resolve_str("$(PUSHKIN)"))
self.assertEqual("1", config_vars.resolve_str("$(PUSHKIN[0])"))
self.assertEqual("2", config_vars.resolve_str("$(PUSHKIN[1])"))
self.assertEqual("3", config_vars.resolve_str("$(PUSHKIN[2])"))
self.assertEqual("321", config_vars.resolve_str("$(PUSHKIN[2])$(PUSHKIN[1])$(PUSHKIN[0])"))
def test_readFile(self):
input_file_path = Path(__file__).parent.joinpath("test_input.yaml")
out_file_path = Path(__file__).parent.joinpath("test_out.yaml")
expected_file_path = Path(__file__).parent.joinpath("expected_output.yaml")
reader = ConfigVarYamlReader()
reader.read_yaml_file(input_file_path)
variables_as_yaml = config_vars.repr_for_yaml()
yaml_doc = aYaml.YamlDumpDocWrap(variables_as_yaml, '!define', "",
explicit_start=True, sort_mappings=True)
with open(out_file_path, "w") as wfd:
aYaml.writeAsYaml(yaml_doc, wfd)
out_lines = normalize_yaml_lines(out_file_path)
expected_lines = normalize_yaml_lines(expected_file_path)
self.assertEqual(out_lines, expected_lines)
def test_resolve_time(self):
config_vars["PRINT_STATISTICS"] = "True"
config_vars["MANDOLIN"] = "a$(A)b$(B)c$(C)d", "a$(A)b$(B)c$(C)d", "a$(A)b$(B)c$(C)d"
config_vars["A"] = "$(B)$(B<>)$(C)"
config_vars["B"] = "$(C)$(C<>)$(H)"
config_vars["C"] = "bub"
for i in range(10000):
a = config_vars["MANDOLIN"].str()
config_vars.print_statistics()
print(str(config_vars["MANDOLIN"]))
def test_Plist_for_native_instruments(self):
config_vars["Plist_for_native_instruments"] = r'''ShellCommand('"$(LOCAL_REPO_SYNC_DIR)/Mac/Utilities/plist/plist_creator.sh" $(__Plist_for_native_instruments_1__) $(__Plist_for_native_instruments_2__)', ignore_all_errors=True)'''
o = config_vars.resolve_str('$(Plist_for_native_instruments<"Aphex Vintage Exciter", "/Applications/Waves/Data/NKS FX/">)')
print(o)
|
the-stack_0_26259
|
import csv
import datetime
import h5py
import mir_eval
import numpy as np
import os
import pandas as pd
import peakutils
import sklearn.metrics
import sys
import time
import localmodule
# Read command-line arguments.
args = sys.argv[1:]
aug_kind_str = args[0]
test_unit_str = args[1]
predict_unit_str = args[2]
trial_id = int(args[3])
# Define constants.
data_dir = localmodule.get_data_dir()
dataset_name = localmodule.get_dataset_name()
folds = localmodule.fold_units()
models_dir = localmodule.get_models_dir()
units = localmodule.get_units()
model_name = "icassp-add-convnet"
if not aug_kind_str == "none":
model_name = "_".join([model_name, "aug-" + aug_kind_str])
model_dir = os.path.join(models_dir, model_name)
tolerance = 0.5 # in seconds
min_dist = 3 # 150 ms
# Define thresholds.
icassp_thresholds = 1.0 - np.concatenate((
np.logspace(-9, -2, 141), np.delete(np.logspace(-2, 0, 81), 0)
))
n_thresholds = len(icassp_thresholds)
# Print header.
start_time = int(time.time())
print(str(datetime.datetime.now()) + " Start.")
print("Thresholding adaptive threshold convnet for detection in " +
dataset_name + " full audio. ")
print("Augmentation kind: " + aug_kind_str)
print("Test unit: " + test_unit_str)
print("Trial ID: {}".format(trial_id))
print("Prediction unit: " + predict_unit_str)
print("")
print('h5py version: {:s}'.format(h5py.__version__))
print('numpy version: {:s}'.format(np.__version__))
print('pandas version: {:s}'.format(pd.__version__))
print('scikit-learn version: {:s}'.format(sklearn.__version__))
print("")
# Define directory for test unit.
unit_dir = os.path.join(model_dir, test_unit_str)
# Define directory for trial.
trial_str = "trial-" + str(trial_id)
trial_dir = os.path.join(unit_dir, trial_str)
# Load ODF.
prediction_name = "_".join([
dataset_name,
model_name,
"test-" + test_unit_str,
trial_str,
"predict-" + predict_unit_str,
"full-predictions.csv"])
prediction_path = os.path.join(trial_dir, prediction_name)
prediction_df = pd.read_csv(prediction_path)
odf = np.array(prediction_df["Predicted probability"])
timestamps = np.array(prediction_df["Timestamp"])
# Load annotation.
annotations_name = "_".join([dataset_name, "annotations"])
annotations_dir = os.path.join(data_dir, annotations_name)
annotation_path = os.path.join(annotations_dir, predict_unit_str + ".txt")
annotation = pd.read_csv(annotation_path, sep="\t")
begin_times = np.array(annotation["Begin Time (s)"])
end_times = np.array(annotation["End Time (s)"])
relevant = 0.5 * (begin_times + end_times)
relevant = np.sort(relevant)
n_relevant = len(relevant)
# Create CSV file for metrics.
metrics_name = "_".join([
dataset_name,
model_name,
"test-" + test_unit_str,
trial_str,
"predict-" + predict_unit_str,
"full-audio-metrics",
])
metrics_path = os.path.join(trial_dir, metrics_name + ".csv")
# Write CSV header.
csv_header = [
"Dataset",
"Augmentation kind",
"Test unit",
"Trial",
"Prediction unit",
"Tolerance",
"Threshold",
"Relevant",
"Selected",
"True positives",
"False positives",
"False negatives",
"Precision (%)",
"Recall (%)",
"F1 Score (%)"]
# Write row.
csv_file = open(metrics_path, 'w')
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(csv_header)
csv_file.close()
# Loop over thresholds.
for threshold_id in range(n_thresholds):
threshold = icassp_thresholds[threshold_id]
# Pick peaks.
peak_locations = peakutils.indexes(
odf, thres=threshold, min_dist=min_dist)
peak_times = timestamps[peak_locations]
peak_values = odf[peak_locations]
selected = peak_times[peak_values > threshold]
# Match events.
selected_relevant = mir_eval.util.match_events(
relevant, selected, tolerance)
# Count TP, FP, and FN.
true_positives = len(selected_relevant)
n_selected = len(selected)
false_positives = n_selected - true_positives
false_negatives = n_relevant - true_positives
# Compute precision, recall, and F1 score.
if n_selected == 0 or true_positives == 0:
precision = 0.0
recall = 0.0
f1_score = 0.0
else:
precision = 100 * true_positives / n_selected
recall = 100 * true_positives / n_relevant
f1_score = 2*precision*recall / (precision+recall)
# Write row.
row = [
dataset_name,
aug_kind_str,
test_unit_str,
str(trial_id),
predict_unit_str,
str(int(np.round(1000*tolerance))).rjust(4),
format(threshold, ".10f"),
str(n_relevant).rjust(5),
str(n_selected).rjust(6),
str(true_positives).rjust(5),
str(false_positives).rjust(6),
str(false_negatives).rjust(5),
format(precision, ".6f").rjust(10),
format(recall, ".6f").rjust(10),
format(f1_score, ".6f").rjust(10)
]
# Write row.
csv_file = open(metrics_path, 'a')
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(row)
csv_file.close()
# Print elapsed time.
print(str(datetime.datetime.now()) + " Finish.")
elapsed_time = time.time() - int(start_time)
elapsed_hours = int(elapsed_time / (60 * 60))
elapsed_minutes = int((elapsed_time % (60 * 60)) / 60)
elapsed_seconds = elapsed_time % 60.
elapsed_str = "{:>02}:{:>02}:{:>05.2f}".format(elapsed_hours,
elapsed_minutes,
elapsed_seconds)
print("Total elapsed time: " + elapsed_str + ".")
|
the-stack_0_26263
|
#!/usr/bin/env python
import os
import glob
import shutil
from conf import bib_dir, template_dir, html_dir, static_dir, pdf_dir
from options import get_config, mkdir_p
from build_template import bib_from_tmpl, html_from_tmpl, from_template
config = get_config()
mkdir_p(bib_dir)
for file in glob.glob(os.path.join(static_dir,'*.css')):
shutil.copy(file, html_dir)
html_pdfs = os.path.join(html_dir, 'pdfs')
mkdir_p(html_pdfs)
for file in glob.glob(os.path.join(pdf_dir,'*.pdf')):
shutil.copy(file, html_pdfs)
citation_key = config['proceedings']['citation_key'] # e.g. proc-scipy-2010
bib_from_tmpl('proceedings', config, citation_key)
proc_dict = dict(config.items() +
{'pdf': 'pdfs/proceedings.pdf'}.items() +
{'bibtex': 'bib/' + citation_key}.items())
for dest_fn in ['index', 'organization']:
html_from_tmpl(dest_fn+'.html', proc_dict, dest_fn)
for article in config['toc']:
art_dict = dict(config.items() +
{'article': article}.items() +
{'pdf': 'pdfs/'+article['paper_id']+'.pdf'}.items() +
{'bibtex': 'bib/'+article['paper_id']+'.bib'}.items())
bib_from_tmpl('article', art_dict, article['paper_id'])
html_from_tmpl('article.html',art_dict, article['paper_id'])
|
the-stack_0_26265
|
"""
This file contains code based on
https://github.com/automl/nasbench301/
Authors: Julien Siems, Lucas Zimmer, Arber Zela, Jovita Lukasik, Margret Keuper, Frank Hutter
"""
import glob
import itertools
import json
import os
import re
from math import isclose
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
import seaborn as sns
from ConfigSpace.read_and_write import json as config_space_json_r_w
from scipy.stats import norm, spearmanr, kendalltau
from sklearn.metrics import mean_squared_error, r2_score
from tqdm import tqdm
from pathlib import Path
from nas_bench_x11.models.lgboost import LGBModel, LGBModelTime
from nas_bench_x11.models.xgboost import XGBModel, XGBModelTime
from nas_bench_x11.models.svd_lgb import SVDLGBModel
from nas_bench_x11.models.svd_xgb import SVDXGBModel
from nas_bench_x11.models.svd_nn import SVDNNModel
from nas_bench_x11.models.vae_nn import VAENNModel
from nas_bench_x11.models.vae_lgb import VAELGBModel
from nas_bench_x11.models.vae_xgb import VAEXGBModel
sns.set_style('whitegrid')
model_dict = {
# NOTE: RUNTIME MODELS SHOULD END WITH "_time"
'xgb': XGBModel,
'svd_lgb': SVDLGBModel,
'svd_xgb': SVDXGBModel,
'svd_nn': SVDNNModel,
'vae_lgb': VAELGBModel,
'vae_xgb': VAEXGBModel,
'vae_nn': VAENNModel,
'xgb_time': XGBModelTime,
'lgb': LGBModel,
'lgb_time': LGBModelTime,
}
def get_project_root() -> Path:
"""
Returns the root path of the project.
"""
return Path(__file__).parent.parent
def evaluate_learning_curve_metrics(y_true, y_pred, prediction_is_first_arg, reduction='mean'):
"""
Create a dict with all evaluation metrics
"""
y_pred = np.array(y_pred)
y_true = np.array(y_true)
reduce_fn = {'mean': np.mean}[reduction]
if prediction_is_first_arg:
y_true, y_pred = y_pred, y_true
def kendall_tau_lc(y_true, y_pred, decimals=None):
corrs, p_vals = [], []
for yt, yp in zip(y_true.T, y_pred.T):
if decimals is None:
corr, p_val = kendalltau(yt, yp)
else:
corr, p_val = kendalltau(yt, np.round(np.array(yp), decimals=decimals))
corrs.append(corr)
p_vals.append(p_val)
return corrs
metrics_dict = dict()
metrics_dict["mse"] = reduce_fn(mean_squared_error(y_true, y_pred, multioutput='raw_values'))
metrics_dict["rmse"] = reduce_fn(mean_squared_error(y_true, y_pred, multioutput='raw_values', squared=False))
metrics_dict["r2"] = reduce_fn(r2_score(y_true, y_pred, multioutput='raw_values'))
metrics_dict["kendall_tau"] = reduce_fn(kendall_tau_lc(y_true, y_pred))
metrics_dict["kendall_tau_2_dec"] = reduce_fn(kendall_tau_lc(y_true, y_pred, decimals=2))
metrics_dict["kendall_tau_1_dec"] = reduce_fn(kendall_tau_lc(y_true, y_pred, decimals=1))
metrics_dict["spearmanr"] = reduce_fn([spearmanr(yt, yp).correlation for yt, yp in zip(y_true.T, y_pred.T)])
# Last epoch metrics
metrics_dict["last_epoch_mse"] = mean_squared_error(y_true[:, -1], y_pred[:, -1])
metrics_dict["last_epoch_rmse"] = np.sqrt(metrics_dict["last_epoch_mse"])
metrics_dict["last_epoch_r2"] = r2_score(y_true[:, -1], y_pred[:, -1])
metrics_dict["last_epoch_kendall_tau"], p_val = kendalltau(y_true[:, -1], y_pred[:, -1])
metrics_dict["last_epoch_kendall_tau_2_dec"], p_val = kendalltau(y_true[:, -1], np.round(np.array(y_pred[:, -1]), decimals=2))
metrics_dict["last_epoch_kendall_tau_1_dec"], p_val = kendalltau(y_true[:, -1], np.round(np.array(y_pred[:, -1]), decimals=1))
metrics_dict["last_epoch_spearmanr"] = spearmanr(y_true[:, -1], y_pred[:, -1]).correlation
return metrics_dict
def evaluate_metrics(y_true, y_pred, prediction_is_first_arg):
"""
Create a dict with all evaluation metrics
"""
if prediction_is_first_arg:
y_true, y_pred = y_pred, y_true
metrics_dict = dict()
metrics_dict["mse"] = mean_squared_error(y_true, y_pred)
metrics_dict["rmse"] = np.sqrt(metrics_dict["mse"])
metrics_dict["r2"] = r2_score(y_true, y_pred)
metrics_dict["kendall_tau"], p_val = kendalltau(y_true, y_pred)
metrics_dict["kendall_tau_2_dec"], p_val = kendalltau(y_true, np.round(np.array(y_pred), decimals=2))
metrics_dict["kendall_tau_1_dec"], p_val = kendalltau(y_true, np.round(np.array(y_pred), decimals=1))
metrics_dict["spearmanr"] = spearmanr(y_true, y_pred).correlation
return metrics_dict
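# Illustrative note (not from the original source): with perfect predictions the error
# metrics collapse to zero and the rank correlations to one, e.g.
#   evaluate_metrics([1.0, 2.0, 3.0], [1.0, 2.0, 3.0], prediction_is_first_arg=False)
# yields mse == rmse == 0.0, r2 == 1.0 and kendall_tau == spearmanr == 1.0.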
def get_model_configspace(model):
"""
Retrieve the model_config
:param model: Name of the model for which you want the default config
:return:
"""
# Find matching config for the model name
model_config_regex = re.compile(".*{}_configspace.json".format(model))
root = get_project_root()
matched_model_config_paths = list(
filter(model_config_regex.match, glob.glob(str(root) + '/configs/model_configs/*')))
print(matched_model_config_paths)
# Make sure we only matched exactly one config
assert len(matched_model_config_paths) == 1, 'Multiple or no configs matched with the requested model.'
model_config_path = matched_model_config_paths[0]
# Load the configspace object
model_configspace = config_space_json_r_w.read(open(model_config_path, 'r').read())
return model_configspace
def convert_array_to_list(a):
"""Converts a numpy array to list"""
if isinstance(a, np.ndarray):
return a.tolist()
else:
return a
class ConfigLoader:
def __init__(self, config_space_path):
self.config_space = self.load_config_space(config_space_path)
# The exponent to scale the fidelity with.
# Used to move architectures across the fidelity budgets
# Default at None, hence the fidelity values are not changed
self.fidelity_exponent = None
# The number of skip connections to have in the cell
        # If this is set to None (default), no skip connections will be added to the cell
# Maximum is the maximum number of operations.
self.parameter_free_op_increase_type = None
self.ratio_parameter_free_op_in_cell = None
# Manually adjust a certain set of hyperparameters
self.parameter_change_dict = None
# Save predefined fidelity multiplier
self.fidelity_multiplier = {
'SimpleLearningrateSchedulerSelector:cosine_annealing:T_max': 1.762734383267615,
'NetworkSelectorDatasetInfo:darts:init_channels': 1.3572088082974532,
'NetworkSelectorDatasetInfo:darts:layers': 1.2599210498948732
}
self.fidelity_starts = {
'SimpleLearningrateSchedulerSelector:cosine_annealing:T_max': 50,
'NetworkSelectorDatasetInfo:darts:init_channels': 8,
'NetworkSelectorDatasetInfo:darts:layers': 5
}
def __getitem__(self, path, return_full_lc=True):
"""
Load the results from results.json
:param path: Path to results.json
:return:
"""
json_file = json.load(open(path, 'r'))
config_dict = json_file['optimized_hyperparamater_config']
config_space_instance = self.query_config_dict(config_dict)
val_accuracy = json_file['info'][0]['val_accuracy']
test_accuracy = json_file['test_accuracy']
if return_full_lc:
full_lc = json_file['learning_curves']['Train/val_accuracy']
return config_space_instance, val_accuracy, test_accuracy, json_file, full_lc
else:
# this is the original nasbench301 return
return config_space_instance, val_accuracy, test_accuracy, json_file
def get_runtime(self, path):
"""
Load the runtime from results.json
:param path: Path to results.json
return:
"""
json_file = json.load(open(path, 'r'))
config_dict = json_file['optimized_hyperparamater_config']
config_space_instance = self.query_config_dict(config_dict)
runtime = json_file['runtime']
return config_space_instance, runtime
def query_config_dict(self, config_dict):
# Evaluation methods
# Scale the hyperparameters if needed
if self.fidelity_exponent is not None:
config_dict = self.scale_fidelity(config_dict)
# Add selected parameter free op
if self.ratio_parameter_free_op_in_cell is not None:
config_dict = self.add_selected_parameter_free_op(config_dict)
# Change a selection of parameters
if self.parameter_change_dict is not None:
config_dict = self.change_parameter(config_dict)
# Create the config space instance based on the config space
config_space_instance = \
self.convert_config_dict_to_configspace_instance(self.config_space, config_dict=config_dict)
return config_space_instance
def add_selected_parameter_free_op(self, config_dict):
"""
Add selected parameter free operation to the config dict
:param config_dict:
:return:
"""
assert self.parameter_free_op_increase_type in ['max_pool_3x3',
'avg_pool_3x3',
'skip_connect'], 'Unknown parameter-free op was selected.'
# Dictionary containing operations
cell_op_dict_sel_param_free = {'normal': {}, 'reduce': {}}
cell_op_dict_non_sel_param_free = {'normal': {}, 'reduce': {}}
for cell_type in ['normal']:
for edge in range(0, 14):
key = 'NetworkSelectorDatasetInfo:darts:edge_{}_{}'.format(cell_type, edge)
op = config_dict.get(key, None)
if op is not None:
if op == self.parameter_free_op_increase_type:
cell_op_dict_sel_param_free[cell_type][key] = op
else:
cell_op_dict_non_sel_param_free[cell_type][key] = op
# Select random subset of operations which to turn to selected parameter-free op
for cell_type in ['normal', 'reduce']:
num_sel_param_free_ops = len(cell_op_dict_sel_param_free[cell_type].values())
num_non_sel_param_free_ops = len(cell_op_dict_non_sel_param_free[cell_type].values())
num_ops = num_sel_param_free_ops + num_non_sel_param_free_ops
            desired_num_sel_param_free_ops = np.round(num_ops * self.ratio_parameter_free_op_in_cell).astype(int)
remaining_num_sel_param_free_op = desired_num_sel_param_free_ops - num_sel_param_free_ops
if remaining_num_sel_param_free_op > 0:
# There are still more selected parameter free operations to add to satisfy the ratio of
# sel param free op. Therefore override some of the other operations to be parameter free op.
sel_param_free_idx = np.random.choice(num_non_sel_param_free_ops, remaining_num_sel_param_free_op,
replace=False)
for idx, (key, value) in enumerate(cell_op_dict_non_sel_param_free[cell_type].items()):
if idx in sel_param_free_idx:
config_dict[key] = self.parameter_free_op_increase_type
return config_dict
def scale_fidelity(self, config_dict):
"""
Scale the fidelity of the current sample
:param config_dict:
:return:
"""
for name, value in self.fidelity_multiplier.items():
config_dict[name] = int(config_dict[name] * value ** self.fidelity_exponent)
return config_dict
def change_parameter(self, config_dict):
for name, value in self.parameter_change_dict.items():
config_dict[name] = value
return config_dict
def convert_config_dict_to_configspace_instance(self, config_space, config_dict):
"""
Convert a config dictionary to configspace instace
:param config_space:
:param config_dict:
:return:
"""
def _replace_str_bool_with_python_bool(input_dict):
for key, value in input_dict.items():
if value == 'True':
input_dict[key] = True
elif value == 'False':
input_dict[key] = False
else:
pass
return input_dict
# Replace the str true with python boolean type
config_dict = _replace_str_bool_with_python_bool(config_dict)
config_instance = CS.Configuration(config_space, values=config_dict)
return config_instance
@staticmethod
def load_config_space(path):
"""
Load ConfigSpace object
        As certain hyperparameters are not denoted as optimizable but overridden later,
        they are manually overridden here too.
:param path:
:return:
"""
with open(os.path.join(path), 'r') as fh:
json_string = fh.read()
config_space = config_space_json_r_w.read(json_string)
        # Override the constant hyperparameters for layers, T_max, init_channels and eta_min
config_space._hyperparameters.pop('NetworkSelectorDatasetInfo:darts:layers', None)
num_layers = CSH.UniformIntegerHyperparameter(name='NetworkSelectorDatasetInfo:darts:layers', lower=1,
upper=10000)
config_space._hyperparameters.pop('SimpleLearningrateSchedulerSelector:cosine_annealing:T_max', None)
t_max = CSH.UniformIntegerHyperparameter(name='SimpleLearningrateSchedulerSelector:cosine_annealing:T_max',
lower=1, upper=10000)
config_space._hyperparameters.pop('NetworkSelectorDatasetInfo:darts:init_channels', None)
init_channels = CSH.UniformIntegerHyperparameter(name='NetworkSelectorDatasetInfo:darts:init_channels', lower=1,
upper=10000)
config_space._hyperparameters.pop('SimpleLearningrateSchedulerSelector:cosine_annealing:eta_min', None)
eta_min_cosine = CSH.UniformFloatHyperparameter(
name='SimpleLearningrateSchedulerSelector:cosine_annealing:eta_min', lower=0, upper=10000)
config_space.add_hyperparameters([num_layers, t_max, init_channels, eta_min_cosine])
return config_space
def get_config_without_architecture(self, config_instance):
"""
Remove the architecture parameters from the config.
Currently this function retrieves the 5 parameters which are actually changed throughout the results:
num_epochs, num_layers, num_init_channels (3 fidelities) + learning_rate, weight_decay
:param config_instance:
:return:
"""
non_arch_hyperparameters_list = [
config_instance._values['SimpleLearningrateSchedulerSelector:cosine_annealing:T_max'],
config_instance._values['NetworkSelectorDatasetInfo:darts:init_channels'],
config_instance._values['NetworkSelectorDatasetInfo:darts:layers'],
config_instance._values['OptimizerSelector:sgd:learning_rate'],
config_instance._values['OptimizerSelector:sgd:weight_decay']]
return non_arch_hyperparameters_list
class ResultLoader:
def __init__(self, root, filepath_regex, train_val_test_split, seed):
self.root = root
self.filepath_regex = filepath_regex
self.train_val_test_split = train_val_test_split
np.random.seed(seed)
def return_train_val_test(self):
"""
Get the result train/val/test split.
:return:
"""
if self.train_val_test_split['type'] == 'all_result_paths':
paths_split = self.all_result_paths()
elif self.train_val_test_split['type'] == 'filtered_result_paths':
paths_split = self.filtered_result_paths()
elif self.train_val_test_split['type'] == 'per_budget_equal_result_paths':
paths_split = self.per_budget_equal_result_paths()
elif self.train_val_test_split['type'] == 'per_subfolder_equal_ratio':
paths_split = self.per_subfolder_equal_ratio()
elif self.train_val_test_split['type'] == 'no_data':
paths_split = [], [], []
else:
raise ValueError('Unknown train/val/test split.')
train_paths, val_paths, test_paths = paths_split
return train_paths, val_paths, test_paths
def filter_duplicate_dirs(self, paths_to_json):
"""
        Checks the configurations in the results.json files and returns paths such that none contains
duplicate configurations.
:param paths_to_json: List of dir/results.json
:return: unique list of dir/results.json w.r.t. configuration
"""
config_hashes = []
for path_to_json in paths_to_json:
with open(path_to_json, "r") as f:
results = json.load(f)
config_hash = hash(results["optimized_hyperparamater_config"].__repr__())
config_hashes.append(config_hash)
_, unique_indices = np.unique(config_hashes, return_index=True)
return list(np.array(paths_to_json)[unique_indices])
def get_splits(self, paths, ratios=None):
"""
Divide the paths into train/val/test splits.
:param paths:
:param ratios:
:return:
"""
if ratios is None:
train_ratio, val_ratio, test_ratio = self.train_val_test_split['train'], self.train_val_test_split['val'], \
self.train_val_test_split['test']
else:
train_ratio, val_ratio, test_ratio = ratios
assert isclose(train_ratio + val_ratio + test_ratio, 1.0,
abs_tol=1e-8), 'The train/val/test split should add up to 1.'
# Randomly shuffle the list
rng = np.random.RandomState(6)
rng.shuffle(paths)
# Extract the train/val/test splits
train_upper_idx = int(train_ratio * len(paths))
val_upper_idx = int((train_ratio + val_ratio) * len(paths))
train_paths = paths[:train_upper_idx]
val_paths = paths[train_upper_idx:val_upper_idx]
test_paths = paths[val_upper_idx:-1]
return train_paths, val_paths, test_paths
def all_result_paths(self, verbose=False):
"""
Return the paths of all results
:return: result paths
"""
all_results_paths = glob.glob(os.path.join(self.root, self.filepath_regex))
if verbose:
print("==> Found %i results paths. Filtering duplicates..." % len(all_results_paths))
all_results_paths.sort()
all_results_paths_filtered = self.filter_duplicate_dirs(all_results_paths)
if verbose:
print("==> Finished filtering. Found %i unique architectures, %i duplicates" % (len(all_results_paths_filtered),
len(all_results_paths) - len(
all_results_paths_filtered)))
train_paths, val_paths, test_paths = self.get_splits(all_results_paths_filtered)
return train_paths, val_paths, test_paths
def per_subfolder_equal_ratio(self):
"""
:return:
"""
train_paths, val_paths, test_paths = [], [], []
for subdir in os.listdir(os.path.join(self.root, self.filepath_regex)):
subdir_path = os.path.join(self.root, self.filepath_regex, subdir)
# For each subdir split according to the train_val_test_ratios
files_in_subdir = glob.glob(os.path.join(subdir_path, '*'))
files_in_subdir.sort()
train, val, test = self.get_splits(files_in_subdir)
# Add the train paths
train_paths.extend(train)
val_paths.extend(val)
test_paths.extend(test)
return train_paths, val_paths, test_paths
def filtered_result_paths(self):
"""
Return only the paths of the results that match the filter
:return: result paths
"""
# Check result filters have been specified
assert self.train_val_test_split.get('filters', None) is not None, 'Can\'t filter without a result filter.'
# Train/val and test split should not be the same filter
assert self.train_val_test_split['filters']['train_val_filter'] != self.train_val_test_split['filters'][
'test_filter'], 'Train/Val filter should not be the same as the test filter.'
all_results_paths = glob.glob(os.path.join(self.root, 'run_*/results_fidelity_*/results_*.json'))
all_results_paths.sort()
results_per_filter = {result_filter: [] for result_filter in self.train_val_test_split.get('filters').keys()}
for result_path in tqdm(all_results_paths, desc='Filtering results'):
result_json = json.load(open(result_path, 'r'))
# Go through all elements to be filtered
for result_filter_name, result_filter_path in self.train_val_test_split.get('filters').items():
result_filter = json.load(open(result_filter_path, 'r'))
results = []
for filter_key, filter_details in result_filter.items():
# Retrieve the element to be checked
filtered_value = list(find_key_value(filter_key, result_json))
if len(filtered_value):
if filter_details['type'] == "interval":
# Check if the configuration matches the filter interval
lower_filter_val, high_filter_val = filter_details['data']
if lower_filter_val <= filtered_value[0] <= high_filter_val:
results.append(result_path)
else:
continue
elif filter_details['type'] == "list":
# Check whether the value is in a list of pre-specified values
if filtered_value[0] in filter_details['data']:
results.append(result_path)
else:
continue
else:
pass
if len(results) == len(result_filter.keys()):
results_per_filter[result_filter_name].append(results[0])
# Split the train/val split
new_train_ratio = self.train_val_test_split['train'] / (
self.train_val_test_split['train'] + self.train_val_test_split['val'])
new_val_ratio = self.train_val_test_split['val'] / (
self.train_val_test_split['train'] + self.train_val_test_split['val'])
train_paths, val_paths, _ = self.get_splits(results_per_filter['train_val_filter'],
(new_train_ratio, new_val_ratio, 0.0))
test_paths = results_per_filter['test_filter']
assert len(set(results_per_filter['train_val_filter']).intersection(
set(test_paths))) == 0, 'Train/val and test set are not disjoint.'
return train_paths, val_paths, test_paths
def per_budget_equal_result_paths(self):
"""
Here train/val/test split is performed such that *per fidelity* the ratio of train/val/test is consistent.
:return: result_paths
"""
train_paths_dict, val_paths_dict, test_paths_dict = self.per_budget_data()
def flat_list_from_list_of_lists(list_of_list): return list(itertools.chain.from_iterable(list_of_list))
train_paths, val_paths, test_paths = [flat_list_from_list_of_lists(dict.values()) for dict in
[train_paths_dict, val_paths_dict, test_paths_dict]]
rng = np.random.RandomState(6)
rng.shuffle(train_paths)
rng.shuffle(val_paths)
        rng.shuffle(test_paths)
return train_paths, val_paths, test_paths
def per_budget_data(self):
"""
Extract the train/val/test split for each budget
:return: Dictionaries containing the data for each fidelity
"""
train_paths_dict, val_paths_dict, test_paths_dict = {}, {}, {}
for fidelity_num in range(7):
results_in_fidelity = glob.glob(
os.path.join(self.root, 'run_*/results_fidelity_{}/results_*.json').format(fidelity_num))
results_in_fidelity.sort()
# Split the fidelity based on the train/val/test portions
train_paths_in_fidelity, val_paths_in_fidelity, test_paths_in_fidelity = self.get_splits(
results_in_fidelity)
train_paths_dict[fidelity_num] = train_paths_in_fidelity
val_paths_dict[fidelity_num] = val_paths_in_fidelity
test_paths_dict[fidelity_num] = test_paths_in_fidelity
return train_paths_dict, val_paths_dict, test_paths_dict
def find_key_value(key, dictionary):
"""
Check if key is contained in dictionary in a nested way
Source: https://gist.github.com/douglasmiranda/5127251#file-gistfile1-py-L2
:param key:
:param dictionary:
:return:
"""
for k, v in dictionary.items():
if k == key:
yield v
elif isinstance(v, dict):
for result in find_key_value(key, v):
yield result
elif isinstance(v, list):
for d in v:
for result in find_key_value(key, d):
yield result
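# Example (illustrative): the generator yields every occurrence of the key, however
# deeply it is nested inside dicts or lists:
#   list(find_key_value('a', {'x': {'a': 1}, 'y': [{'a': 2}]}))  ->  [1, 2]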
class AvgrageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
|
the-stack_0_26267
|
import os
def parse_mount(volume_spec):
# Docker volumes may be "/src:dest:ro" or simply "/src"
components = volume_spec.split(':')
perm = 'w' # assume write perm if not specified
src_path = components[0]
# check if ro specified
if components[-1] == 'ro':
perm = 'r'
return (src_path, perm)
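# A small usage sketch (the paths are made up): read-only vs. default read-write mounts.
if __name__ == '__main__':
    print(parse_mount('/src:/dest:ro'))  # -> ('/src', 'r')
    print(parse_mount('/src'))           # -> ('/src', 'w')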
|
the-stack_0_26269
|
from flask import Flask, redirect, url_for, render_template, request, make_response, session, jsonify, abort
from flask_dance.contrib.twitter import make_twitter_blueprint, twitter
from bson.objectid import ObjectId
from urllib.parse import quote
import ssl
import pymongo
import datetime
import time
import copy
import functools
import os
app = Flask(__name__)
app.secret_key = "supersekrit"
f = open('tw_request/.env', 'r')
keyDict = dict()
for s in f.readlines():
k, v = s.split('=', 1)
k = k.strip()
v = v.strip()
keyDict[k] = v
api_key = keyDict['TWITTER_API_KEY']
api_secret = keyDict['TWITTER_API_SECRET']
blueprint = make_twitter_blueprint(api_key=api_key, api_secret=api_secret)
app.register_blueprint(blueprint, url_prefix="/login")
client = pymongo.MongoClient(keyDict['MONGODB_URL'],
connect=False,
ssl_cert_reqs=ssl.CERT_NONE)
db = client.main
cache = client.cache
f.close()
def authorization_required(fun):
    # functools.wraps keeps the original view name, so Flask registers one endpoint
    # per view; *args/**kwargs forward URL parameters such as <username> to the view.
    @functools.wraps(fun)
    def decorated(*args, **kwargs):
        if not twitter.authorized:
            abort(401)
        return fun(*args, **kwargs)
    return decorated
def getinfo_id(id) -> dict:
ret = cache['twd'].find_one({"id_str": str(id)})
isCreated = bool(ret)
if isCreated:
_id = ret['_id']
if not isCreated or time.time() - ret['_timestamp'] > 86400:
ret = twitter.get(
f"users/show.json?user_id={id}").json()
ret['profile_image_url_https'] = ret['profile_image_url_https'].replace(
'_normal', '')
ret['_timestamp'] = time.time()
if isCreated:
if '_id' in ret:
ret.pop('_id')
cache['twd'].update({'_id': _id}, {'$set': ret})
else:
cache['twd'].insert_one(ret)
return ret
def getinfo_name(name) -> dict:
ret = cache['twd'].find_one({"screen_name": name})
isCreated = bool(ret)
if isCreated:
_id = ret['_id']
if not isCreated or time.time() - ret['_timestamp'] > 86400:
ret = twitter.get(
f"users/show.json?screen_name={name}").json()
ret['profile_image_url_https'] = ret['profile_image_url_https'].replace(
'_normal', '')
ret['_timestamp'] = time.time()
if isCreated:
if '_id' in ret:
ret.pop('_id')
cache['twd'].update({'_id': _id}, {'$set': ret})
else:
cache['twd'].insert_one(ret)
return ret
@authorization_required
def getinfo() -> dict:
try:
userinfo = dict()
if 'userid' in session:
userid = session['userid']
userinfo = getinfo_id(userid)
else:
account = twitter.get("account/verify_credentials.json")
assert account.ok
session['userid'] = account.json()['id']
userinfo = getinfo_id(account.json()['id'])
assert userinfo
return userinfo
except AssertionError:
abort(500)
@app.route("/")
def intro():
if twitter.authorized:
userinfo = getinfo()
return redirect(f"/user/{userinfo['screen_name']}")
else:
return render_template('index.html')
@app.route("/login")
def login():
return redirect(url_for("twitter.login"))
@app.route("/logout")
@authorization_required
def logout():
    invalidate_url = f"https://api.twitter.com/oauth/invalidate_token?access_token={api_key}&access_token_secret={api_secret}"
    twitter.post(invalidate_url)
del app.blueprints["twitter"].token
session.clear()
return redirect(url_for("intro"))
@app.route("/user/<username>")
@authorization_required
def reveal_user(username):
try:
viewUser = getinfo()
showUser = getinfo_name(username)
if viewUser['id'] == showUser['id']:
return redirect("/me")
return render_template("user.html",
show_scname=showUser['screen_name'],
show_name=showUser['name'],
description=showUser['description'],
photoURL=showUser['profile_image_url_https'],
view_scname=viewUser['screen_name'],
view_name=viewUser['name'])
except: # If user not exists
abort(404)
@app.route("/user_ajax/<username>", methods=["GET"])
@authorization_required
def user_ajax(username):
showUser = getinfo_name(username)
pendingRequests = list(db[showUser['id_str']].find({"status": "Pending"}))
pendingRequests.sort(key=lambda x: x["timestamp"], reverse=True)
for req in pendingRequests:
authorUser = getinfo_id(req['author_id'])
req['author_scname'] = authorUser['screen_name']
req['author_name'] = authorUser['name']
completeRequests = list(
db[showUser['id_str']].find({"status": "Complete"}))
completeRequests.sort(key=lambda x: x["timestamp"], reverse=True)
for req in completeRequests:
authorUser = getinfo_id(req['author_id'])
req['author_scname'] = authorUser['screen_name']
req['author_name'] = authorUser['name']
return jsonify({"html": render_template("user_ajax.html",
pendingRequests=pendingRequests,
completeRequests=completeRequests)})
@app.route("/user_ajax/<username>", methods=["POST"])
@authorization_required
def user_ajax_post(username):
viewUser = getinfo()
showUser = getinfo_name(username)
message = str(request.form.get('message'))
isSecret = bool(request.form.get('isSecret'))
isAnonymous = bool(request.form.get('isAnonymous'))
db[showUser['id_str']].insert_one({"author_id": viewUser["id_str"],
"message": message,
"isSecret": isSecret,
"isAnonymous": isAnonymous,
"timestamp": str(datetime.datetime.now()),
"status": "Pending"})
return user_ajax(username)
@app.route("/me")
@authorization_required
def me():
viewUser = getinfo()
return render_template("me.html",
scname=viewUser['screen_name'],
name=viewUser['name'],
photoURL=viewUser['profile_image_url_https'],
description=viewUser['description'])
@app.route("/me_ajax", methods=["GET"])
@authorization_required
def me_ajax():
viewUser = getinfo()
pendingRequests = list(db[viewUser['id_str']].find({"status": "Pending"}))
pendingRequests.sort(key=lambda x: x["timestamp"], reverse=True)
for req in pendingRequests:
authorUser = getinfo_id(req['author_id'])
req['author_scname'] = authorUser['screen_name']
req['author_name'] = authorUser['name']
pendingRequests_disp = []
for req in pendingRequests:
rq = copy.deepcopy(req)
if len(req["message"]) > 30:
rq["message"] = rq["message"][:27]+'...'
pendingRequests_disp.append(rq)
completeRequests = list(
db[viewUser['id_str']].find({"status": "Complete"}))
completeRequests.sort(key=lambda x: x["timestamp"], reverse=True)
for req in completeRequests:
authorUser = getinfo_id(req['author_id'])
req['author_scname'] = authorUser['screen_name']
req['author_name'] = authorUser['name']
return jsonify({"html": render_template("me_ajax.html",
pendingRequests=pendingRequests,
pendingRequests_disp=pendingRequests_disp,
completeRequests=completeRequests)})
@app.route("/me_ajax", methods=["POST"])
@authorization_required
def me_ajax_post():
viewUser = getinfo()
targetId = str(request.form.get('requests'))
if targetId == 'None':
return me_ajax()
else: # If target exists
target_req = db[viewUser['id_str']].find_one(
{"_id": ObjectId(targetId)})
# isSecret = target_req["isSecret"]
isAnonymous = target_req["isAnonymous"]
author_id = target_req["author_id"]
timestamp = target_req["timestamp"]
action = str(request.form.get('request_action'))
isntSharing = bool(request.form.get('isSharing'))
if action == "accept":
db[viewUser['id_str']].update(
{"_id": ObjectId(targetId)},
{"$set":
{
'status': 'Complete'
}
}
)
else: # action == "discard"
            # Remove the discarded request outright; "$unset"-ing the full document
            # (which includes _id) would be rejected by MongoDB.
            db[viewUser['id_str']].delete_one({"_id": ObjectId(targetId)})
        # Tweet text (Korean), roughly: "The request made at <timestamp> by
        # <someone | @author> has been completed!! / has been deleted..."
        post_text = timestamp.split('.')[0] + "에 신청된 " + \
            ("누군가" if isAnonymous else (f"@{getinfo_id(author_id)['screen_name']} ")) + \
            "의 리퀘스트가 "+("완료되었어요!!" if action == "accept" else "삭제되었어요...")
if not isntSharing: # If user wants to share
twitter.post(
"statuses/update.json?status={text}".format(text=quote(post_text, safe='')))
return me_ajax()
@app.route("/elements")
def elements():
return render_template('elements.html')
@app.errorhandler(404)
def error404(e):
return render_template("404.html"), 404
|
the-stack_0_26271
|
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
import heapq
class SummaryRanges(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self._data=[]
def addNum(self, val):
"""
:type val: int
:rtype: void
"""
if not self._data:
self._data.append(Interval(val,val))
return
        self._data.sort(key=lambda x: x.start)
# search lower bound
nlen=len(self._data)
left,right=0,nlen
while left<right:
            mid = (left + right) // 2
if self._data[mid].start>=val:
right=mid
else:
left=mid+1
# if lower is 0 or nlen, sidx==eidx
        # otherwise they point to two consecutive positions
sidx=0 if left==0 else left-1
eidx=nlen-1 if left==nlen else left
st=self._data[sidx]
et=self._data[eidx]
# 1.val already in data
if st.start<=val and st.end>=val:
return
if et.start<=val and et.end>=val:
return
# 2.merge the interval,(1,1)(3,3), val==2
if st.end+1==val and et.start-1==val:
st.end=et.end
self._data.pop(eidx)
return
# 3. extend to start
if st.end+1==val:
st.end=val
return
        # 4. extend the following interval's start backward to val
if et.start-1==val:
et.start=val
return
# 5.insert to the pos
self._data.insert(left,Interval(val,val))
def getIntervals(self):
"""
:rtype: List[Interval]
"""
return self._data
# Your SummaryRanges object will be instantiated and called as such:
# obj = SummaryRanges()
# obj.addNum(val)
# param_2 = obj.getIntervals()
|
the-stack_0_26272
|
# -*- coding: utf-8 -*-
'''
Execute a command and read the output as JSON. The JSON data is then directly overlaid onto the minion's Pillar data.
'''
from __future__ import absolute_import
# Don't "fix" the above docstring to put it on two lines, as the sphinx
# autosummary pulls only the first line for its description.
# Import python libs
import logging
import json
# Set up logging
log = logging.getLogger(__name__)
def ext_pillar(minion_id, # pylint: disable=W0613
pillar, # pylint: disable=W0613
command):
'''
Execute a command and read the output as JSON
'''
try:
command = command.replace('%s', minion_id)
return json.loads(__salt__['cmd.run'](command))
except Exception:
log.critical(
'JSON data from {0} failed to parse'.format(command)
)
return {}
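# Illustrative master configuration (the path is a placeholder): each listed command is
# run with '%s' replaced by the minion id and its stdout parsed as JSON, e.g.
#
#   ext_pillar:
#     - cmd_json: 'cat /srv/pillar-data/%s.json'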
|
the-stack_0_26274
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
class SyntheticsTestMonitorStatus(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("value",): {
"UNTRIGGERED": 0,
"TRIGGERED": 1,
"NO_DATA": 2,
},
}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"value": (int,),
}
discriminator = None
attribute_map = {}
_composed_schemas = None
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""SyntheticsTestMonitorStatus - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
            args[0] (int): The status of your Synthetic monitor. * `0` for not triggered * `1` for triggered * `2` for no data., must be one of [0, 1, 2, ] # noqa: E501
        Keyword Args:
            value (int): The status of your Synthetic monitor. * `0` for not triggered * `1` for triggered * `2` for no data., must be one of [0, 1, 2, ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
super().__init__(kwargs)
if "value" in kwargs:
value = kwargs.pop("value")
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=self._path_to_item,
valid_classes=(self.__class__,),
)
self._check_pos_args(args)
self.value = value
self._check_kw_args(kwargs)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""Helper creating a new instance from a response."""
return cls(*args, **kwargs)
|
the-stack_0_26276
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import List, Optional
import numpy as np
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.mx.model.estimator import GluonEstimator
from gluonts.model.forecast import Quantile
from gluonts.model.forecast_generator import QuantileForecastGenerator
from gluonts.model.predictor import Predictor
from gluonts.mx.block.decoder import Seq2SeqDecoder
from gluonts.mx.block.enc2dec import FutureFeatIntegratorEnc2Dec
from gluonts.mx.block.encoder import Seq2SeqEncoder
from gluonts.mx.block.quantile_output import QuantileOutput
from gluonts.mx.distribution import DistributionOutput
from gluonts.mx.model.forecast_generator import DistributionForecastGenerator
from gluonts.mx.model.predictor import RepresentableBlockPredictor
from gluonts.mx.trainer import Trainer
from gluonts.mx.util import copy_parameters
from gluonts.time_feature import time_features_from_frequency_str
from gluonts.transform import (
AddAgeFeature,
AddConstFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
Chain,
RemoveFields,
RenameFields,
SetField,
TestSplitSampler,
Transformation,
VstackFeatures,
)
from ._forking_network import (
ForkingSeq2SeqDistributionPredictionNetwork,
ForkingSeq2SeqNetworkBase,
ForkingSeq2SeqPredictionNetwork,
ForkingSeq2SeqTrainingNetwork,
)
from ._transform import ForkingSequenceSplitter
class ForkingSeq2SeqEstimator(GluonEstimator):
r"""
Sequence-to-Sequence (seq2seq) structure with the so-called
"Forking Sequence" proposed in [WTN+17]_.
The basic idea is that, given a sequence :math:`x_1, x_2, \cdots, x_T`,
with a decoding length :math:`\tau`, we learn a NN that solves the
following series of seq2seq problems:
.. math::
:nowrap:
\begin{eqnarray}
x_1 & \mapsto & x_{2:2+\tau}\\
x_1, x_2 & \mapsto & x_{3:3+\tau}\\
x_1, x_2, x_3 & \mapsto & x_{4:4+\tau}\\
& \ldots & \\
x_1, \ldots, x_{T-\tau} & \mapsto & x_{T-\tau+1:T}
\end{eqnarray}
Essentially, this means instead of having one cut in the standard seq2seq,
one has multiple cuts that progress linearly.
Parameters
----------
encoder
seq2seq encoder
decoder
seq2seq decoder
quantile_output
quantile output
distr_output
distribution output
freq
frequency of the time series.
prediction_length
length of the decoding sequence.
context_length
length of the encoding sequence. (default: 4 * prediction_length)
use_past_feat_dynamic_real
Whether to use the ``past_feat_dynamic_real`` field from the data. (default: False)
use_feat_dynamic_real
Whether to use the ``feat_dynamic_real`` field from the data. (default: False)
use_feat_static_cat:
Whether to use the ``feat_static_cat`` field from the data. (default: False)
cardinality: List[int] = None,
Number of values of each categorical feature.
This must be set if ``use_feat_static_cat == True``. (default: None)
embedding_dimension: List[int] = None,
Dimension of the embeddings for categorical features.
(default: [min(50, (cat+1)//2) for cat in cardinality])
add_time_feature
Adds a set of time features. (default: True)
add_age_feature
Adds an age feature. (default: False)
The age feature starts with a small value at the start of the time series and grows over time.
enable_encoder_dynamic_feature
Whether the encoder should also be provided with the dynamic features (``age``, ``time``
and ``feat_dynamic_real`` if enabled respectively). (default: True)
enable_decoder_dynamic_feature
Whether the decoder should also be provided with the dynamic features (``age``, ``time``
and ``feat_dynamic_real`` if enabled respectively). (default: True)
It makes sense to disable this, if you don't have ``feat_dynamic_real`` for the prediction range.
trainer
trainer (default: Trainer())
scaling
Whether to automatically scale the target values. (default: False if quantile_output is used, True otherwise)
scaling_decoder_dynamic_feature
Whether to automatically scale the dynamic features for the decoder. (default: False)
dtype
(default: np.float32)
num_forking
Decides how much forking to do in the decoder. 1 reduces to seq2seq and enc_len reduces to MQ-C(R)NN.
max_ts_len
Returns the length of the longest time series in the dataset to be used in bounding context_length.
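    Examples
    --------
    A minimal usage sketch (hedged: parameter values are placeholders, ``training_data``
    stands in for any GluonTS dataset, and the concrete ``MQCNNEstimator`` subclass is
    shown because it wires up an encoder/decoder pair for you):

    >>> from gluonts.model.seq2seq import MQCNNEstimator  # doctest: +SKIP
    >>> estimator = MQCNNEstimator(freq="H", prediction_length=24)  # doctest: +SKIP
    >>> predictor = estimator.train(training_data)  # doctest: +SKIP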
"""
@validated()
def __init__(
self,
encoder: Seq2SeqEncoder,
decoder: Seq2SeqDecoder,
freq: str,
prediction_length: int,
quantile_output: Optional[QuantileOutput] = None,
distr_output: Optional[DistributionOutput] = None,
context_length: Optional[int] = None,
use_past_feat_dynamic_real: bool = False,
use_feat_dynamic_real: bool = False,
use_feat_static_cat: bool = False,
cardinality: List[int] = None,
embedding_dimension: List[int] = None,
add_time_feature: bool = True,
add_age_feature: bool = False,
enable_encoder_dynamic_feature: bool = True,
enable_decoder_dynamic_feature: bool = True,
trainer: Trainer = Trainer(),
scaling: Optional[bool] = None,
scaling_decoder_dynamic_feature: bool = False,
dtype: DType = np.float32,
num_forking: Optional[int] = None,
max_ts_len: Optional[int] = None,
) -> None:
super().__init__(trainer=trainer)
assert (distr_output is None) != (quantile_output is None)
assert (
context_length is None or context_length > 0
), "The value of `context_length` should be > 0"
assert (
prediction_length > 0
), "The value of `prediction_length` should be > 0"
assert (
use_feat_static_cat or not cardinality
), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
assert cardinality is None or all(
c > 0 for c in cardinality
), "Elements of `cardinality` should be > 0"
assert embedding_dimension is None or all(
e > 0 for e in embedding_dimension
), "Elements of `embedding_dimension` should be > 0"
self.encoder = encoder
self.decoder = decoder
self.freq = freq
self.prediction_length = prediction_length
self.quantile_output = quantile_output
self.distr_output = distr_output
self.context_length = (
context_length
if context_length is not None
else 4 * self.prediction_length
)
if max_ts_len is not None:
max_pad_len = max(max_ts_len - self.prediction_length, 0)
# Don't allow context_length to be longer than the max pad length
self.context_length = (
min(max_pad_len, self.context_length)
if max_pad_len > 0
else self.context_length
)
self.num_forking = (
min(num_forking, self.context_length)
if num_forking is not None
else self.context_length
)
self.use_past_feat_dynamic_real = use_past_feat_dynamic_real
self.use_feat_dynamic_real = use_feat_dynamic_real
self.use_feat_static_cat = use_feat_static_cat
self.cardinality = (
cardinality if cardinality and use_feat_static_cat else [1]
)
self.embedding_dimension = (
embedding_dimension
if embedding_dimension is not None
else [min(50, (cat + 1) // 2) for cat in self.cardinality]
)
self.add_time_feature = add_time_feature
self.add_age_feature = add_age_feature
self.use_dynamic_feat = (
use_feat_dynamic_real or add_age_feature or add_time_feature
)
self.enable_encoder_dynamic_feature = enable_encoder_dynamic_feature
self.enable_decoder_dynamic_feature = enable_decoder_dynamic_feature
self.scaling = (
scaling if scaling is not None else (quantile_output is None)
)
self.scaling_decoder_dynamic_feature = scaling_decoder_dynamic_feature
self.dtype = dtype
def create_transformation(self) -> Transformation:
chain = []
dynamic_feat_fields = []
remove_field_names = [
FieldName.FEAT_DYNAMIC_CAT,
FieldName.FEAT_STATIC_REAL,
]
# --- GENERAL TRANSFORMATION CHAIN ---
# determine unused input
if not self.use_past_feat_dynamic_real:
remove_field_names.append(FieldName.PAST_FEAT_DYNAMIC_REAL)
if not self.use_feat_dynamic_real:
remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
if not self.use_feat_static_cat:
remove_field_names.append(FieldName.FEAT_STATIC_CAT)
chain.extend(
[
RemoveFields(field_names=remove_field_names),
AddObservedValuesIndicator(
target_field=FieldName.TARGET,
output_field=FieldName.OBSERVED_VALUES,
dtype=self.dtype,
),
]
)
# --- TRANSFORMATION CHAIN FOR DYNAMIC FEATURES ---
if self.add_time_feature:
chain.append(
AddTimeFeatures(
start_field=FieldName.START,
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_TIME,
time_features=time_features_from_frequency_str(self.freq),
pred_length=self.prediction_length,
dtype=self.dtype,
)
)
dynamic_feat_fields.append(FieldName.FEAT_TIME)
if self.add_age_feature:
chain.append(
AddAgeFeature(
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_AGE,
pred_length=self.prediction_length,
dtype=self.dtype,
)
)
dynamic_feat_fields.append(FieldName.FEAT_AGE)
if self.use_feat_dynamic_real:
# Backwards compatibility:
chain.append(
RenameFields({"dynamic_feat": FieldName.FEAT_DYNAMIC_REAL})
)
dynamic_feat_fields.append(FieldName.FEAT_DYNAMIC_REAL)
# we need to make sure that there is always some dynamic input
# we will however disregard it in the hybrid forward.
# the time feature is empty for yearly freq so also adding a dummy feature
# in the case that the time feature is the only one on
if len(dynamic_feat_fields) == 0 or (
not self.add_age_feature
and not self.use_feat_dynamic_real
and self.freq == "Y"
):
chain.append(
AddConstFeature(
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_CONST,
pred_length=self.prediction_length,
const=0.0, # For consistency in case with no dynamic features
dtype=self.dtype,
)
)
dynamic_feat_fields.append(FieldName.FEAT_CONST)
# now we map all the dynamic input of length context_length + prediction_length onto FieldName.FEAT_DYNAMIC
# we exclude past_feat_dynamic_real since its length is only context_length
if len(dynamic_feat_fields) > 1:
chain.append(
VstackFeatures(
output_field=FieldName.FEAT_DYNAMIC,
input_fields=dynamic_feat_fields,
)
)
elif len(dynamic_feat_fields) == 1:
chain.append(
RenameFields({dynamic_feat_fields[0]: FieldName.FEAT_DYNAMIC})
)
# --- TRANSFORMATION CHAIN FOR STATIC FEATURES ---
if not self.use_feat_static_cat:
chain.append(
SetField(
output_field=FieldName.FEAT_STATIC_CAT,
value=np.array([0], dtype=np.int32),
)
)
# --- SAMPLE AND CUT THE TIME-SERIES ---
chain.append(
# because of how the forking decoder works, every time step
# in context is used for splitting, which is why we use the TestSplitSampler
ForkingSequenceSplitter(
train_sampler=TestSplitSampler(),
enc_len=self.context_length,
dec_len=self.prediction_length,
num_forking=self.num_forking,
encoder_series_fields=[
FieldName.OBSERVED_VALUES,
# RTS with past and future values which is never empty because added dummy constant variable
FieldName.FEAT_DYNAMIC,
]
+ (
# RTS with only past values are only used by the encoder
[FieldName.PAST_FEAT_DYNAMIC_REAL]
if self.use_past_feat_dynamic_real
else []
),
encoder_disabled_fields=(
[FieldName.FEAT_DYNAMIC]
if not self.enable_encoder_dynamic_feature
else []
)
+ (
[FieldName.PAST_FEAT_DYNAMIC_REAL]
if not self.enable_encoder_dynamic_feature
and self.use_past_feat_dynamic_real
else []
),
decoder_series_fields=[
FieldName.OBSERVED_VALUES,
# Decoder will use all fields under FEAT_DYNAMIC which are the RTS with past and future values
FieldName.FEAT_DYNAMIC,
],
decoder_disabled_fields=(
[FieldName.FEAT_DYNAMIC]
if not self.enable_decoder_dynamic_feature
else []
),
prediction_time_decoder_exclude=[FieldName.OBSERVED_VALUES],
)
)
# past_feat_dynamic features generated above in ForkingSequenceSplitter from those under feat_dynamic - we need
# to stack with the other short related time series from the system labeled as past_past_feat_dynamic_real.
# The system labels them as past_feat_dynamic_real and the additional past_ is added to the string
# in the ForkingSequenceSplitter
if self.use_past_feat_dynamic_real:
# Stack features from ForkingSequenceSplitter horizontally since they were transposed
# so shape is now (enc_len, num_past_feature_dynamic)
chain.append(
VstackFeatures(
output_field=FieldName.PAST_FEAT_DYNAMIC,
input_fields=[
"past_" + FieldName.PAST_FEAT_DYNAMIC_REAL,
FieldName.PAST_FEAT_DYNAMIC,
],
h_stack=True,
)
)
return Chain(chain)
def create_training_network(self) -> ForkingSeq2SeqNetworkBase:
return ForkingSeq2SeqTrainingNetwork(
encoder=self.encoder,
enc2dec=FutureFeatIntegratorEnc2Dec(),
decoder=self.decoder,
quantile_output=self.quantile_output,
distr_output=self.distr_output,
context_length=self.context_length,
num_forking=self.num_forking,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
scaling=self.scaling,
scaling_decoder_dynamic_feature=self.scaling_decoder_dynamic_feature,
dtype=self.dtype,
)
def create_predictor(
self,
transformation: Transformation,
trained_network: ForkingSeq2SeqNetworkBase,
) -> Predictor:
quantile_strs = (
[
Quantile.from_float(quantile).name
for quantile in self.quantile_output.quantiles
]
if self.quantile_output is not None
else None
)
prediction_network_class = (
ForkingSeq2SeqPredictionNetwork
if self.quantile_output is not None
else ForkingSeq2SeqDistributionPredictionNetwork
)
prediction_network = prediction_network_class(
encoder=trained_network.encoder,
enc2dec=trained_network.enc2dec,
decoder=trained_network.decoder,
quantile_output=trained_network.quantile_output,
distr_output=trained_network.distr_output,
context_length=self.context_length,
num_forking=self.num_forking,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
scaling=self.scaling,
scaling_decoder_dynamic_feature=self.scaling_decoder_dynamic_feature,
dtype=self.dtype,
)
copy_parameters(trained_network, prediction_network)
return RepresentableBlockPredictor(
input_transform=transformation,
prediction_net=prediction_network,
batch_size=self.trainer.batch_size,
freq=self.freq,
prediction_length=self.prediction_length,
ctx=self.trainer.ctx,
forecast_generator=(
QuantileForecastGenerator(quantile_strs)
if quantile_strs is not None
else DistributionForecastGenerator(self.distr_output)
),
)
|
the-stack_0_26278
|
import game_defs
import glvars
import katagames_sdk.katagames_engine as kengi
pygame = kengi.pygame
EventReceiver = kengi.event.EventReceiver
EngineEvTypes = kengi.event.EngineEvTypes
# Nota Bene.
# there will be no model for the collection, as the avatar already contains it!
# (in glvars.the_avatar.artifacts)
BASE_Y = 55
TAQUET_X = 50
class ShowCollectionView(EventReceiver):
"""
dummy view,
    displays a gray square (= slot) no matter what,
    plus a steelblue circle inside this slot if the artifact is owned
"""
def __init__(self):
super().__init__()
self.reagent_names_lbl = list()
ft = pygame.font.Font(None, 25)
for ac in game_defs.ArtifactCodes.all_codes:
self.reagent_names_lbl.append(
ft.render(game_defs.ArtifactNames[ac][0], True, (87, 11, 128))
)
self.rituals_title_lbl = ft.render('Rituals: ', True, (87, 11, 128))
self.ritual_spr = pygame.sprite.Sprite()
self.ritual_spr.image = pygame.image.load('assets/ritual.png')
self.ritual_spr.rect = self.ritual_spr.image.get_rect()
self.ritual_spr.rect.center = (180, 434)
self.chalice_spr = pygame.sprite.Sprite()
self.chalice_spr.image = pygame.transform.scale(pygame.image.load('assets/chalice.png'), (150, 150))
self.chalice_spr.image.set_colorkey((255, 0, 255))
self.chalice_spr.rect = self.chalice_spr.image.get_rect()
self.chalice_spr.rect.center = (385, 434)
def proc_event(self, ev, source):
if ev.type == EngineEvTypes.PAINT:
self._do_paint(ev.screen)
def _do_paint(self, scr):
scr.fill(game_defs.BG_COLOR)
ft = pygame.font.Font(None, 27)
av = glvars.the_avatar
# - draw labels
given_y = BASE_Y
for lbl in self.reagent_names_lbl:
scr.blit(lbl, (TAQUET_X, given_y-32))
given_y += 88
# - draw slots
given_y = BASE_Y
circle_offset = [40, 25]
rad = 21
for art_code in game_defs.ArtifactCodes.all_codes:
tmp = max(game_defs.ArtifactNames[art_code].keys())
for num_piece in range(1, tmp+1): # draw smth for each artifact element
tmpx = 50+(num_piece-1)*125
pygame.draw.rect(scr, 'darkgray', (tmpx, given_y, 80, 50))
if av.has_artifact(art_code, num_piece):
tpos = (tmpx+circle_offset[0], given_y+circle_offset[1])
pygame.draw.circle(scr, 'steelblue', tpos, rad)
# also display the quantity...
tmp = ft.render(str(av.artifact_quant(art_code, num_piece)), False, 'orange')
scr.blit(tmp, (tpos[0], tpos[1]+16))
given_y += 88
        # part 2 title + illustrations
scr.blit(self.rituals_title_lbl, (TAQUET_X, 33 + scr.get_size()[1]//2))
scr.blit(self.ritual_spr.image, self.ritual_spr.rect.topleft)
scr.blit(self.chalice_spr.image, self.chalice_spr.rect.topleft)
class ShowCollectionCtrl(EventReceiver):
def proc_event(self, ev, source):
if ev.type == pygame.KEYDOWN and ev.key == pygame.K_ESCAPE:
self.pev(EngineEvTypes.POPSTATE)
class MageryState(kengi.BaseGameState):
def __init__(self, gs_id, name):
super().__init__(gs_id, name)
self.m = self.v = self.c = None
def enter(self):
self.v = ShowCollectionView()
self.v.turn_on()
self.c = ShowCollectionCtrl()
self.c.turn_on()
def release(self):
self.c.turn_off()
self.v.turn_off()
self.c = self.v = None
|
the-stack_0_26279
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Jorge Niedbalski R. <[email protected]>'
import os
from setuptools import setup, find_packages
dependencies = ["jujuclient", "PyYaml"]
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="juju-suspend",
version="0.1.0",
author="Jorge Niedbalski R.",
include_package_data=True,
author_email="[email protected]",
description="",
install_requires=dependencies,
packages=find_packages(),
test_suite='nose.collector',
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
],
entry_points="""
[console_scripts]
juju-suspend = juju_suspend.cli:main
"""
)
|
the-stack_0_26282
|
"""
Codemonk link: https://www.hackerearth.com/problem/algorithm/monk-and-nice-strings-3-e5800d05/
Monk's best friend Micro's birthday is coming up. Micro likes Nice Strings very much, so Monk decided to gift him one.
Monk has N nice strings, so he'll choose one of them. But before he selects one, he needs to know the Niceness
value of each. The strings are arranged in an array A, and the Niceness value of the string at position i is defined
as the number of strings at positions less than i which are lexicographically smaller than A[i]. Since Monk is very
busy with the Codemonk series these days, he asked for your help. The array's index starts from 1.
Input - Output:
First line consists of a single integer denoting N.
N lines follow each containing a string made of lower case English alphabets.
Print N lines, each containing an integer, where the integer in the ith line denotes
the Niceness value of string A[i].
Sample input:
4
a
c
d
b
Sample Output:
0
1
2
1
"""
"""
The problem is straightforward. For each new string, compare it against all the previous ones and count how many are
lexicographically smaller. Final complexity: O(N^2) comparisons.
"""
inp_len = int(input())
first_char = input()
characters = [first_char]
print("0")
for i in range(1, inp_len):
new_char = input()
count = 0
for j in range(len(characters)-1, -1, -1):
if new_char > characters[j]:
count += 1
print(count)
characters.append(new_char)
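# A possible refinement, not part of the original O(N^2) solution above: keep the
# previously seen strings in a sorted list so each query becomes a binary search.
# bisect_left returns exactly the number of stored strings that are
# lexicographically smaller; insertion stays linear, so this mainly saves comparisons.
import bisect
def niceness_values(strings):
    seen = []
    result = []
    for s in strings:
        result.append(bisect.bisect_left(seen, s))  # strings seen so far that are smaller than s
        bisect.insort(seen, s)  # keep the history sorted
    return result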
|
the-stack_0_26285
|
import cv2
import mediapipe as mp
mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils
# For webcam input:
cap = cv2.VideoCapture(0)
with mp_face_detection.FaceDetection(
min_detection_confidence=0.5) as face_detection:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = face_detection.process(image)
# Draw the face detection annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.detections:
for detection in results.detections:
mp_drawing.draw_detection(image, detection)
cv2.imshow('MediaPipe Face Detection', image)
    # press the Esc key (key code 27) in the window to close it
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release()
|
the-stack_0_26286
|
import logging
import fifo_parser.messageexecutor
# create logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter
formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s",
"%Y-%m-%d %H:%M:%S")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
me = fifo_parser.messageexecutor.MessageExecutor()
me.run()
|
the-stack_0_26289
|
# -*- coding: utf-8 -*-
"""Includes functions for copying the PyNX template files."""
import datetime
import os
from distutils.dir_util import copy_tree
from nxstart.utils.files import get_full_path, replace_in_file
def create_pynx_project(folder_path, name, author):
"""
    Copies the files from templates/pynx to folder_path and modifies main.py
    to include the project name, author name and current date.
:param folder_path: Path to copy the files to
:param name: Name of the project
:param author: Name of the author
"""
template_folder = get_full_path(os.path.join("templates", "pynx"))
copy_tree(template_folder, folder_path)
main_cpp_file = os.path.join(folder_path, "main.py")
main_cpp_replacements = {
"APP_AUTHOR_PLACEHOLDER": author,
"APP_NAME_PLACEHOLDER": name,
"DATE_PLACEHOLDER": datetime.datetime.now().strftime("%Y-%m-%d"),
}
replace_in_file(main_cpp_file, main_cpp_replacements)
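# Rough usage sketch; the target folder and the name/author values below are
# made up for illustration, not shipped defaults.
if __name__ == "__main__":
    create_pynx_project(
        folder_path=os.path.join(os.getcwd(), "ExampleHomebrewApp"),
        name="ExampleHomebrewApp",
        author="Jane Doe",
    )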
|
the-stack_0_26290
|
import argparse
from utilities import utils
import config
def get_argument_parser(model_name):
'''
Argument parser which returns the options which the user inputted.
Args:
None
Returns:
argparse.ArgumentParser().parse_args()
'''
weights_path = config.PRETRAINED_WEIGHT_PATH
image_path = f'./images/{model_name}.png'
plot_path = f'./images/{model_name}_plot.png'
parser = argparse.ArgumentParser()
parser.add_argument('--epochs',
help = 'How many epochs you need to run (default: 10)',
type = int, default = config.EPOCH)
parser.add_argument('--batch_size',
help = 'The number of images in a batch (default: 64)',
type = int, default = config.BATCH_SIZE)
parser.add_argument('--path_for_weights',
help = f'The path from where the weights will be saved or loaded \
(default: {weights_path})',
type = str, default = weights_path)
parser.add_argument('--path_for_image',
help = f'The path from where the model image will be saved \
(default: {image_path})',
type = str, default = image_path)
parser.add_argument('--path_for_plot',
help = f'The path from where the training progress will be plotted \
(default: {plot_path})',
type = str, default = plot_path)
parser.add_argument('--data_augmentation',
                        help = '0: No, 1: Yes (default: 0)',
type = int, default = 0)
parser.add_argument('--load_weights',
help = '0: No, 1: Yes (default: 0)',
type = int, default = 0)
parser.add_argument('--plot_training_progress',
help = '0: No, 1: Yes (default: 1)',
type = int, default = 1)
parser.add_argument('--save_model_to_image',
help = '0: No, 1: Yes (default: 1)',
type = int, default = 1)
args = parser.parse_args()
return args
def train(model, model_name):
# load all arguments
args = get_argument_parser(model_name)
training_data, validation_data, test_data = utils.load_mnist()
print(f'[data loaded]')
# build and compile the model
model.compile()
print('[model built]')
# save the model architecture to an image file
#if args.save_model_to_image:
#model.save_model_as_image(args.path_for_image)
#print(f'[model image saved as {args.path_for_image}]')
# load pretrained weights
if config.FINE_TUNE:
model.load_weights(args.path_for_weights)
print(f'[weights loaded from {args.path_for_weights}]')
# train the model
hist = None
if config.DATA_AUGMENTATION:
hist = model.fit_generator(training_data, validation_data,
epochs = args.epochs, batch_size = args.batch_size)
print('[trained with augmented images]')
else:
hist = model.fit(training_data, validation_data,
epochs = args.epochs, batch_size = args.batch_size)
print('[trained without augmented images]')
# save the training progress to an image file
#if args.plot_training_progress:
#utils.plot(history = hist, path = args.path_for_plot, title = model_name)
#print(f'[training progress saved as {args.path_for_plot}]')
# save the model and trained weights in the configured path
if config.SAVE_MODEL:
model.save(config.SAVE_MODEL_PATH)
print(f'[Model and trained weights saved in {config.SAVE_MODEL_PATH}]')
# evaluate the model with the test dataset
loss_and_metrics = model.evaluate(test_data, batch_size = args.batch_size)
print('[Evaluation on the test dataset]\n', loss_and_metrics, '\n')
|
the-stack_0_26291
|
# MIT License
#
# Copyright (c) 2021 ESCAPE
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pyspark.sql import SparkSession
import sys
import json
def main():
spark = SparkSession.Builder().getOrCreate()
# Set spark log level to WARN
spark.sparkContext.setLogLevel("WARN")
msg = """
\nSpark configuration\n-------------------
"""
print(msg)
print(spark.version)
print(spark.sparkContext.getConf().getAll())
print('Reading some data...')
df = spark.read.format('parquet').load('data/clusters.parquet')
df.count()
print('Configuration OK!')
msg = """
\nPython configuration\n--------------------
"""
print(msg)
print(sys.version)
msg = """
\nJupyter RISE add-ons\n--------------------
"""
print(msg)
with open('../.jupyter/nbconfig/rise.json') as f:
rise_info = json.load(f)
print(rise_info)
if __name__ == '__main__':
main()
|
the-stack_0_26292
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""TXYZ topology parser
====================
Tinker_ topology parser: reads information from .txyz and .arc files.
Atom types are read from column 6, while bond connectivity is read from column 7
onwards.
.. _Tinker: https://dasher.wustl.edu/tinker/
See Also
--------
MDAnalysis.coordinates.TXYZ : further documentation on the Tinker format
Classes
-------
.. autoclass:: TXYZParser
:members:
:inherited-members:
"""
from __future__ import absolute_import
import itertools
import numpy as np
from six.moves import zip
from . import guessers
from ..lib.util import openany
from .base import TopologyReaderBase
from ..core.topology import Topology
from ..core.topologyattrs import (
Atomnames,
Atomids,
Atomtypes,
Bonds,
Masses,
Resids,
Resnums,
Segids,
)
class TXYZParser(TopologyReaderBase):
"""Parse a list of atoms from a Tinker XYZ file.
Creates the following attributes:
- Atomnames
- Atomtypes
.. versionadded:: 0.17.0
"""
format = ['TXYZ', 'ARC']
def parse(self, **kwargs):
"""Read the file and return the structure.
Returns
-------
MDAnalysis Topology object
"""
with openany(self.filename) as inf:
#header
natoms = int(inf.readline().split()[0])
atomids = np.zeros(natoms, dtype=np.int)
names = np.zeros(natoms, dtype=object)
types = np.zeros(natoms, dtype=object)
bonds = []
# Find first atom line, maybe there's box information
fline = inf.readline()
try:
# If a box second value will be a float
# If an atom, second value will be a string
float(fline.split()[1])
except ValueError:
# If float conversion failed, we have first atom line
pass
else:
# If previous try succeeded it was a box
# so read another line to find the first atom line
fline = inf.readline()
# Can't infinitely read as XYZ files can be multiframe
for i, line in zip(range(natoms), itertools.chain([fline], inf)):
line = line.split()
atomids[i]= line[0]
names[i] = line[1]
types[i] = line[5]
bonded_atoms = line[6:]
for other_atom in bonded_atoms:
other_atom = int(other_atom) - 1
if i < other_atom:
bonds.append((i, other_atom))
# Guessing time
masses = guessers.guess_masses(names)
attrs = [Atomnames(names),
Atomids(atomids),
Atomtypes(types),
Bonds(tuple(bonds)),
Masses(masses, guessed=True),
Resids(np.array([1])),
Resnums(np.array([1])),
Segids(np.array(['SYSTEM'], dtype=object)),
]
top = Topology(natoms, 1, 1,
attrs=attrs)
return top
|
the-stack_0_26293
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"GcsSource",
"GcsDestination",
"BigQuerySource",
"BigQueryDestination",
"ContainerRegistryDestination",
},
)
class GcsSource(proto.Message):
r"""The Google Cloud Storage location for the input content.
Attributes:
uris (Sequence[str]):
Required. Google Cloud Storage URI(-s) to the
input file(s). May contain wildcards. For more
information on wildcards, see
https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
"""
uris = proto.RepeatedField(proto.STRING, number=1)
class GcsDestination(proto.Message):
r"""The Google Cloud Storage location where the output is to be
written to.
Attributes:
output_uri_prefix (str):
Required. Google Cloud Storage URI to output
directory. If the uri doesn't end with '/', a
'/' will be automatically appended. The
directory is created if it doesn't exist.
"""
output_uri_prefix = proto.Field(proto.STRING, number=1)
class BigQuerySource(proto.Message):
r"""The BigQuery location for the input content.
Attributes:
input_uri (str):
Required. BigQuery URI to a table, up to 2000 characters
long. Accepted forms:
- BigQuery path. For example:
``bq://projectId.bqDatasetId.bqTableId``.
"""
input_uri = proto.Field(proto.STRING, number=1)
class BigQueryDestination(proto.Message):
r"""The BigQuery location for the output content.
Attributes:
output_uri (str):
Required. BigQuery URI to a project or table, up to 2000
characters long.
When only the project is specified, the Dataset and Table
are created. When the full table reference is specified, the
Dataset must exist and table must not exist.
Accepted forms:
- BigQuery path. For example: ``bq://projectId`` or
``bq://projectId.bqDatasetId.bqTableId``.
"""
output_uri = proto.Field(proto.STRING, number=1)
class ContainerRegistryDestination(proto.Message):
r"""The Container Registry location for the container image.
Attributes:
output_uri (str):
Required. Container Registry URI of a container image. Only
Google Container Registry and Artifact Registry are
supported now. Accepted forms:
- Google Container Registry path. For example:
``gcr.io/projectId/imageName:tag``.
- Artifact Registry path. For example:
``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``.
If a tag is not specified, "latest" will be used as the
default tag.
"""
output_uri = proto.Field(proto.STRING, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
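# Quick illustration, not part of the generated definitions above: proto-plus
# messages accept keyword arguments for their fields. The bucket and project
# names are placeholders.
if __name__ == "__main__":
    example_source = GcsSource(uris=["gs://example-bucket/data/*.jsonl"])
    example_destination = BigQueryDestination(output_uri="bq://example-project")
    print(list(example_source.uris), example_destination.output_uri)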
|
the-stack_0_26294
|
from knowledge_model import Base, Knowledge
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///knowledge.db')
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()
def add_article(topic , link, rating):
article_object = Knowledge(
topic = topic,
link = link,
rating = rating)
session.add(article_object)
session.commit()
add_article("music","https://docs.google.com/presentation/d/1oQnW76Wd-bEMCi_r1EAzgTVlL2IwgorDrYjiQeyu35E/edit#slide=id.g3df5820309_0_239 ", 7 )
add_article("https://docs.google.com/presentation/d/1oQnW76Wd-bEMCi_r1EAzgTVlL2IwgorDrYjiQeyu35E/edit#slide=id.g3df5820309_0_239 ", 7 , "music")
add_article(7,"https://docs.google.com/presentation/d/1oQnW76Wd-bEMCi_r1EAzgTVlL2IwgorDrYjiQeyu35E/edit#slide=id.g3df5820309_0_239 ", "music" )
def query_all_articles():
article= session.query(
Knowledge).all()
return article
def query_article_by_topic(topic):
by_topic = session.query(
Knowledge).filter_by(
topic=topic).first()
return by_topic
# print(query_article_by_topic("music"))
def delete_article_by_topic(topic):
session.query(Knowledge).filter_by(
topic= topic).delete()
session.commit()
# delete_article_by_topic("music")
def delete_all_articles():
session.query(Knowledge).delete()
session.commit()
def edit_article_rating(update_rating, article_title):
    article_object = session.query(
        Knowledge).filter_by(
        topic = article_title).first()
    print(article_object)
    article_object.rating = update_rating
    session.commit()
edit_article_rating(9, "music")
#print(query_article_by_topic("music"))
# print(query_all_articles())
# g = Knowledge(topic = "music", link = "https://github.com/mor19-meet/y2s18-databases/tree/master/exercises" , rating = 4)
# session.add(g)
# session.commit()
# articls = query_all_articles()
# for i in articls:
# print(i.topic)
|
the-stack_0_26295
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import Dict, Sequence
import requests
import snappy
from opentelemetry.exporter.prometheus_remote_write.gen.remote_pb2 import (
WriteRequest,
)
from opentelemetry.exporter.prometheus_remote_write.gen.types_pb2 import (
Label,
Sample,
TimeSeries,
)
from opentelemetry.sdk.metrics.export import (
ExportRecord,
MetricsExporter,
MetricsExportResult,
)
from opentelemetry.sdk.metrics.export.aggregate import (
HistogramAggregator,
LastValueAggregator,
MinMaxSumCountAggregator,
SumAggregator,
ValueObserverAggregator,
)
logger = logging.getLogger(__name__)
class PrometheusRemoteWriteMetricsExporter(MetricsExporter):
"""
Prometheus remote write metric exporter for OpenTelemetry.
Args:
endpoint: url where data will be sent (Required)
basic_auth: username and password for authentication (Optional)
headers: additional headers for remote write request (Optional)
timeout: timeout for remote write requests in seconds, defaults to 30 (Optional)
proxies: dict mapping request proxy protocols to proxy urls (Optional)
tls_config: configuration for remote write TLS settings (Optional)
"""
def __init__(
self,
endpoint: str,
basic_auth: Dict = None,
headers: Dict = None,
timeout: int = 30,
tls_config: Dict = None,
proxies: Dict = None,
):
self.endpoint = endpoint
self.basic_auth = basic_auth
self.headers = headers
self.timeout = timeout
self.tls_config = tls_config
self.proxies = proxies
self.converter_map = {
MinMaxSumCountAggregator: self._convert_from_min_max_sum_count,
SumAggregator: self._convert_from_sum,
HistogramAggregator: self._convert_from_histogram,
LastValueAggregator: self._convert_from_last_value,
ValueObserverAggregator: self._convert_from_value_observer,
}
@property
def endpoint(self):
return self._endpoint
@endpoint.setter
def endpoint(self, endpoint: str):
if endpoint == "":
raise ValueError("endpoint required")
self._endpoint = endpoint
@property
def basic_auth(self):
return self._basic_auth
@basic_auth.setter
def basic_auth(self, basic_auth: Dict):
if basic_auth:
if "username" not in basic_auth:
raise ValueError("username required in basic_auth")
if "password_file" in basic_auth:
if "password" in basic_auth:
raise ValueError(
"basic_auth cannot contain password and password_file"
)
with open(basic_auth["password_file"]) as file:
basic_auth["password"] = file.readline().strip()
elif "password" not in basic_auth:
raise ValueError("password required in basic_auth")
self._basic_auth = basic_auth
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, timeout: int):
if timeout <= 0:
raise ValueError("timeout must be greater than 0")
self._timeout = timeout
@property
def tls_config(self):
return self._tls_config
@tls_config.setter
def tls_config(self, tls_config: Dict):
if tls_config:
new_config = {}
if "ca_file" in tls_config:
new_config["ca_file"] = tls_config["ca_file"]
if "cert_file" in tls_config and "key_file" in tls_config:
new_config["cert_file"] = tls_config["cert_file"]
new_config["key_file"] = tls_config["key_file"]
elif "cert_file" in tls_config or "key_file" in tls_config:
raise ValueError(
"tls_config requires both cert_file and key_file"
)
if "insecure_skip_verify" in tls_config:
new_config["insecure_skip_verify"] = tls_config[
"insecure_skip_verify"
]
self._tls_config = tls_config
@property
def proxies(self):
return self._proxies
@proxies.setter
def proxies(self, proxies: Dict):
self._proxies = proxies
@property
def headers(self):
return self._headers
@headers.setter
def headers(self, headers: Dict):
self._headers = headers
def export(
self, export_records: Sequence[ExportRecord]
) -> MetricsExportResult:
if not export_records:
return MetricsExportResult.SUCCESS
timeseries = self._convert_to_timeseries(export_records)
if not timeseries:
logger.error(
"All records contain unsupported aggregators, export aborted"
)
return MetricsExportResult.FAILURE
message = self._build_message(timeseries)
headers = self._build_headers()
return self._send_message(message, headers)
def shutdown(self) -> None:
pass
def _convert_to_timeseries(
self, export_records: Sequence[ExportRecord]
) -> Sequence[TimeSeries]:
timeseries = []
for export_record in export_records:
aggregator_type = type(export_record.aggregator)
converter = self.converter_map.get(aggregator_type)
if converter:
timeseries.extend(converter(export_record))
else:
logger.warning(
"%s aggregator is not supported, record dropped",
aggregator_type,
)
return timeseries
def _convert_from_sum(
self, sum_record: ExportRecord
) -> Sequence[TimeSeries]:
return [
self._create_timeseries(
sum_record,
sum_record.instrument.name + "_sum",
sum_record.aggregator.checkpoint,
)
]
def _convert_from_min_max_sum_count(
self, min_max_sum_count_record: ExportRecord
) -> Sequence[TimeSeries]:
timeseries = []
for agg_type in ["min", "max", "sum", "count"]:
name = min_max_sum_count_record.instrument.name + "_" + agg_type
value = getattr(
min_max_sum_count_record.aggregator.checkpoint, agg_type
)
timeseries.append(
self._create_timeseries(min_max_sum_count_record, name, value)
)
return timeseries
def _convert_from_histogram(
self, histogram_record: ExportRecord
) -> Sequence[TimeSeries]:
timeseries = []
for bound in histogram_record.aggregator.checkpoint.keys():
bound_str = "+Inf" if bound == float("inf") else str(bound)
value = histogram_record.aggregator.checkpoint[bound]
timeseries.append(
self._create_timeseries(
histogram_record,
histogram_record.instrument.name + "_histogram",
value,
extra_label=("le", bound_str),
)
)
return timeseries
def _convert_from_last_value(
self, last_value_record: ExportRecord
) -> Sequence[TimeSeries]:
return [
self._create_timeseries(
last_value_record,
last_value_record.instrument.name + "_last",
last_value_record.aggregator.checkpoint,
)
]
def _convert_from_value_observer(
self, value_observer_record: ExportRecord
) -> Sequence[TimeSeries]:
timeseries = []
for agg_type in ["min", "max", "sum", "count", "last"]:
timeseries.append(
self._create_timeseries(
value_observer_record,
value_observer_record.instrument.name + "_" + agg_type,
getattr(
value_observer_record.aggregator.checkpoint, agg_type
),
)
)
return timeseries
# TODO: Implement convert from quantile once supported by SDK for Prometheus Summaries
def _convert_from_quantile(
self, summary_record: ExportRecord
) -> Sequence[TimeSeries]:
raise NotImplementedError()
# pylint: disable=no-member,no-self-use
def _create_timeseries(
self,
export_record: ExportRecord,
name: str,
value: float,
extra_label: (str, str) = None,
) -> TimeSeries:
timeseries = TimeSeries()
seen = set()
def add_label(label_name: str, label_value: str):
# Label name must contain only alphanumeric characters and underscores
label_name = re.sub("[^\\w_]", "_", label_name)
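            # e.g. "service.name" or "host-id" become "service_name" / "host_id"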
if label_name not in seen:
label = Label()
label.name = label_name
label.value = label_value
timeseries.labels.append(label)
seen.add(label_name)
else:
logger.warning(
"Duplicate label with name %s and value %s",
label_name,
label_value,
)
# The __name__ label is required by PromQL as its value appears as the metric_name
add_label("__name__", name)
if extra_label:
add_label(extra_label[0], extra_label[1])
if export_record.resource.attributes:
for (
label_name,
label_value,
) in export_record.resource.attributes.items():
add_label(label_name, str(label_value))
if export_record.labels:
for [label_name, label_value] in export_record.labels:
add_label(label_name, label_value)
sample = Sample()
sample.timestamp = int(
export_record.aggregator.last_update_timestamp / 1000000
)
sample.value = value
timeseries.samples.append(sample)
return timeseries
# pylint: disable=no-member,no-self-use
def _build_message(self, timeseries: Sequence[TimeSeries]) -> bytes:
write_request = WriteRequest()
write_request.timeseries.extend(timeseries)
serialized_message = write_request.SerializeToString()
return snappy.compress(serialized_message)
def _build_headers(self) -> Dict:
headers = {
"Content-Encoding": "snappy",
"Content-Type": "application/x-protobuf",
"X-Prometheus-Remote-Write-Version": "0.1.0",
}
if self.headers:
for header_name, header_value in self.headers.items():
headers[header_name] = header_value
return headers
def _send_message(
self, message: bytes, headers: Dict
) -> MetricsExportResult:
auth = None
if self.basic_auth:
auth = (self.basic_auth["username"], self.basic_auth["password"])
cert = None
verify = True
if self.tls_config:
if "ca_file" in self.tls_config:
verify = self.tls_config["ca_file"]
elif "insecure_skip_verify" in self.tls_config:
verify = self.tls_config["insecure_skip_verify"]
if (
"cert_file" in self.tls_config
and "key_file" in self.tls_config
):
cert = (
self.tls_config["cert_file"],
self.tls_config["key_file"],
)
try:
response = requests.post(
self.endpoint,
data=message,
headers=headers,
auth=auth,
timeout=self.timeout,
proxies=self.proxies,
cert=cert,
verify=verify,
)
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as err:
logger.error("Export POST request failed with reason: %s", err)
return MetricsExportResult.FAILURE
return MetricsExportResult.SUCCESS
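# Minimal construction sketch; the endpoint, credentials and extra header below
# are placeholders rather than a real remote-write backend.
if __name__ == "__main__":
    example_exporter = PrometheusRemoteWriteMetricsExporter(
        endpoint="http://localhost:9009/api/prom/push",
        basic_auth={"username": "user", "password": "pass"},
        headers={"X-Scope-OrgID": "demo"},
        timeout=10,
    )
    print(example_exporter.endpoint)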
|
the-stack_0_26296
|
from __future__ import absolute_import, division, unicode_literals
import base64
import json
import numpy as np
from panel.pane import DataFrame, JSON, HTML, Markdown, PaneBase, Pane, Str
from panel.tests.util import pd_available, streamz_available
def test_get_markdown_pane_type():
assert PaneBase.get_pane_type("**Markdown**") is Markdown
@pd_available
def test_get_dataframe_pane_type():
import pandas as pd
df = pd.util.testing.makeDataFrame()
assert PaneBase.get_pane_type(df) is DataFrame
@pd_available
def test_get_series_pane_type():
import pandas as pd
df = pd.util.testing.makeDataFrame()
assert PaneBase.get_pane_type(df.iloc[:, 0]) is DataFrame
@streamz_available
def test_get_streamz_dataframe_pane_type():
from streamz.dataframe import Random
sdf = Random(interval='200ms', freq='50ms')
assert PaneBase.get_pane_type(sdf) is DataFrame
@streamz_available
def test_get_streamz_dataframes_pane_type():
from streamz.dataframe import Random
sdf = Random(interval='200ms', freq='50ms').groupby('y').sum()
assert PaneBase.get_pane_type(sdf) is DataFrame
@streamz_available
def test_get_streamz_series_pane_type():
from streamz.dataframe import Random
sdf = Random(interval='200ms', freq='50ms')
assert PaneBase.get_pane_type(sdf.x) is DataFrame
@streamz_available
def test_get_streamz_seriess_pane_type():
from streamz.dataframe import Random
sdf = Random(interval='200ms', freq='50ms').groupby('y').sum()
assert PaneBase.get_pane_type(sdf.x) is DataFrame
def test_markdown_pane(document, comm):
pane = Pane("**Markdown**")
# Create pane
model = pane.get_root(document, comm=comm)
assert pane._models[model.ref['id']][0] is model
assert model.text.endswith("<p><strong>Markdown</strong></p>")
# Replace Pane.object
pane.object = "*Markdown*"
assert pane._models[model.ref['id']][0] is model
assert model.text.endswith("<p><em>Markdown</em></p>")
# Cleanup
pane._cleanup(model)
assert pane._models == {}
def test_markdown_pane_dedent(document, comm):
pane = Pane(" ABC")
# Create pane
model = pane.get_root(document, comm=comm)
assert pane._models[model.ref['id']][0] is model
assert model.text.endswith("<p>ABC</p>")
pane.dedent = False
assert model.text.startswith('<div class="codehilite')
def test_markdown_pane_extensions(document, comm):
pane = Pane("""
```python
None
```
""")
# Create pane
model = pane.get_root(document, comm=comm)
assert pane._models[model.ref['id']][0] is model
assert model.text.startswith('<div class="codehilite')
pane.extensions = ["extra", "smarty"]
assert model.text.startswith('<pre><code class="python')
def test_html_pane(document, comm):
pane = HTML("<h1>Test</h1>")
# Create pane
model = pane.get_root(document, comm=comm)
assert pane._models[model.ref['id']][0] is model
assert model.text == "<h1>Test</h1>"
# Replace Pane.object
pane.object = "<h2>Test</h2>"
assert pane._models[model.ref['id']][0] is model
assert model.text == "<h2>Test</h2>"
# Cleanup
pane._cleanup(model)
assert pane._models == {}
@pd_available
def test_dataframe_pane_pandas(document, comm):
import pandas as pd
pane = DataFrame(pd.util.testing.makeDataFrame())
# Create pane
model = pane.get_root(document, comm=comm)
assert pane._models[model.ref['id']][0] is model
assert model.text.startswith('<table')
orig_text = model.text
# Replace Pane.object
pane.object = pd.util.testing.makeMixedDataFrame()
assert pane._models[model.ref['id']][0] is model
assert model.text.startswith('<table')
assert model.text != orig_text
# Cleanup
pane._cleanup(model)
assert pane._models == {}
@streamz_available
def test_dataframe_pane_streamz(document, comm):
from streamz.dataframe import Random
sdf = Random(interval='200ms', freq='50ms')
pane = DataFrame(sdf)
assert pane._stream is None
# Create pane
model = pane.get_root(document, comm=comm)
assert pane._stream is not None
assert pane._models[model.ref['id']][0] is model
assert model.text == ''
# Replace Pane.object
pane.object = sdf.x
assert pane._models[model.ref['id']][0] is model
assert model.text == ''
# Cleanup
pane._cleanup(model)
assert pane._stream is None
assert pane._models == {}
def test_string_pane(document, comm):
pane = Str("<h1>Test</h1>")
# Create pane
model = pane.get_root(document, comm=comm)
assert pane._models[model.ref['id']][0] is model
assert model.text == "<pre><h1>Test</h1></pre>"
# Replace Pane.object
pane.object = "<h2>Test</h2>"
assert pane._models[model.ref['id']][0] is model
assert model.text == "<pre><h2>Test</h2></pre>"
# Cleanup
pane._cleanup(model)
assert pane._models == {}
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
data_b64 = base64.b64encode(obj.data).decode('utf-8')
return dict(__ndarray__=data_b64,
dtype=str(obj.dtype),
shape=obj.shape)
return json.JSONEncoder.default(self, obj)
def test_json_applies():
assert JSON.applies({1: 2})
assert JSON.applies([1, 2, 3])
assert JSON.applies('{"a": 1}') == 0
assert not JSON.applies({'array': np.array([1, 2, 3])})
assert JSON.applies({'array': np.array([1, 2, 3])}, encoder=NumpyEncoder)
def test_json_pane(document, comm):
pane = JSON({'a': 2})
model = pane.get_root(document, comm=comm)
assert model.text == '{"a": 2}'
assert pane._models[model.ref['id']][0] is model
pane.object = '{"b": 3}'
assert model.text == '{"b": 3}'
assert pane._models[model.ref['id']][0] is model
# Cleanup
pane._cleanup(model)
assert pane._models == {}
|
the-stack_0_26297
|
import argparse
import pathlib
from .interface import write_blocks_hash, execute_blocks
from .tools import install, uninstall, make, clean
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(required=True, dest='command')
# Install script
install_parser = subparsers.add_parser("install")
install_parser.add_argument("-d", "--directory", default=pathlib.Path.cwd(), type=pathlib.Path)
install_parser.set_defaults(handler=install)
# Uninstall script
uninstall_parser = subparsers.add_parser("uninstall")
uninstall_parser.add_argument("-d", "--directory", default=pathlib.Path.cwd(), type=pathlib.Path)
uninstall_parser.set_defaults(handler=uninstall)
# Cleaner script
clean_parser = subparsers.add_parser("clean")
clean_parser.add_argument("-f", "--full", action="store_true")
clean_parser.set_defaults(handler=clean, requires_sys_args=True)
# Make script
make_parser = subparsers.add_parser("make")
make_parser.set_defaults(handler=make, requires_sys_args=True)
# Hash script
hash_parser = subparsers.add_parser("hash")
hash_parser.add_argument("-d", "--directory", default=pathlib.Path.cwd(), type=pathlib.Path)
hash_parser.set_defaults(handler=write_blocks_hash)
# Execute script
execute_parser = subparsers.add_parser("execute")
execute_parser.add_argument("hash_file_path", type=pathlib.Path)
execute_parser.set_defaults(handler=execute_blocks)
args, unknown_args = parser.parse_known_args()
kwargs = vars(args)
command = kwargs.pop("command")
requires_sys_args = kwargs.pop("requires_sys_args", False)
# Some args might require any uncaptured arguments
if requires_sys_args:
kwargs['sys_args'] = unknown_args
elif unknown_args:
parser.error("unrecognized arguments: {}".format(' '.join(unknown_args)))
handler = kwargs.pop("handler")
handler(**kwargs)
if __name__ == "__main__":
main()
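# Example invocations, assuming the package is exposed as a module named
# blocks_tool (a stand-in name) and that ./my_project and blocks.hash exist:
#
#   python -m blocks_tool install -d ./my_project
#   python -m blocks_tool hash -d ./my_project
#   python -m blocks_tool execute ./my_project/blocks.hash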
|
the-stack_0_26299
|
import typing
import discord
from discord.ext import commands
# Defines a custom Select containing colour options
# That the user can choose. The callback function
# Of this class is called when the user changes their choice
class Dropdown(discord.ui.Select):
def __init__(self, bot):
self.bot = (
bot # For example, you can use self.bot to retrieve a user or perform other functions in the callback.
)
# Alternatively you can use Interaction.client, so you don't need to pass the bot instance.
# Set the options that will be presented inside the dropdown
options = [
discord.SelectOption(label="Red", description="Your favourite colour is red", emoji="🟥"),
discord.SelectOption(label="Green", description="Your favourite colour is green", emoji="🟩"),
discord.SelectOption(label="Blue", description="Your favourite colour is blue", emoji="🟦"),
]
# The placeholder is what will be shown when no option is chosen
# The min and max values indicate we can only pick one of the three options
# The options parameter defines the dropdown options. We defined this above
super().__init__(
placeholder="Choose your favourite colour...",
min_values=1,
max_values=1,
options=options,
)
async def callback(self, interaction: discord.Interaction):
# Use the interaction object to send a response message containing
# The user's favourite colour or choice. The self object refers to the
# Select object, and the values attribute gets a list of the user's
# selected options. We only want the first one.
await interaction.response.send_message(f"Your favourite colour is {self.values[0]}")
class DropdownView(discord.ui.View):
def __init__(self, bot):
self.bot = bot
super().__init__()
# Adds the dropdown to our view object.
self.add_item(Dropdown(self.bot))
class Bot(commands.Bot):
def __init__(self):
super().__init__(command_prefix=commands.when_mentioned_or("$"))
async def on_ready(self):
print(f"Logged in as {self.user} (ID: {self.user.id})")
print("------")
bot = Bot()
@bot.command()
async def colour(ctx):
"""Sends a message with our dropdown containing colours"""
# Create the view containing our dropdown
view = DropdownView(bot)
# Sending a message containing our view
await ctx.send("Pick your favourite colour:", view=view)
bot.run("token")
|
the-stack_0_26303
|
class Activity(object):
actor = None
object = None
target = None
verb = None
time = None
generator = None
icon_url = None
service_provider = None
links = None
def __init__(self, actor=None, object=None, target=None, verb=None, time=None, generator=None, icon_url=None, service_provider=None, links=None):
self.actor = actor
self.object = object
self.target = target
self.verb = verb
self.time = time
self.service_provider = service_provider
self.generator = generator
self.icon_url = icon_url
if links is not None:
self.links = links
else:
self.links = []
class Object(object):
id = None
name = None
url = None
object_type = None
summary = None
image = None
in_reply_to_object = None
attached_objects = None
reply_objects = None
reaction_activities = None
action_links = None
upstream_duplicate_ids = None
downstream_duplicate_ids = None
links = None
def __init__(self, id=None, name=None, url=None, object_type=None, summary=None, image=None, in_reply_to_object=None, attached_objects=None, reply_objects=None, reaction_activities=None, action_links=None, upstream_duplicate_ids=None, downstream_duplicate_ids=None, links=None):
self.id = id
self.name = name
self.url = url
self.object_type = object_type
self.summary = summary
self.image = image
self.in_reply_to_object = in_reply_to_object
if attached_objects is not None:
self.attached_objects = attached_objects
else:
self.attached_objects = []
if reply_objects is not None:
self.reply_objects = reply_objects
else:
self.reply_objects = []
if reaction_activities is not None:
self.reaction_activities = reaction_activities
else:
self.reaction_activities = []
if action_links is not None:
self.action_links = action_links
else:
self.action_links = []
if upstream_duplicate_ids is not None:
self.upstream_duplicate_ids = upstream_duplicate_ids
else:
self.upstream_duplicate_ids = []
if downstream_duplicate_ids is not None:
self.downstream_duplicate_ids = downstream_duplicate_ids
else:
self.downstream_duplicate_ids = []
if links is not None:
self.links = links
else:
self.links = []
class MediaLink(object):
url = None
media_type = None
width = None
height = None
duration = None
def __init__(self, url=None, media_type=None, width=None, height=None, duration=None):
self.url = url
self.media_type = media_type
self.width = width
self.height = height
self.duration = duration
class ActionLink(object):
url = None
caption = None
def __init__(self, url=None, caption=None):
self.url = url
self.caption = caption
class Link(object):
url = None
media_type = None
rel = None
def __init__(self, url=None, media_type=None, rel=None):
self.url = url
self.media_type = media_type
self.rel = rel
|
the-stack_0_26305
|
"""Provide the WGT info to other services."""
import logging
from datetime import timedelta
from enum import Enum, EnumMeta
from typing import Any, Dict, List, Union
from aiohttp import web
from aiohttp.web_request import Request
from aiohttp.web_response import Response
from wgt import WGT, __version__
from wgt.types import Unit
routes = web.RouteTableDef()
STATUS_URL = "/status/"
INFO_URL = "/info/"
def put_endpoints() -> Dict[str, Any]:
"""Return setter endpoints."""
endpoints = {}
for endpoint in WGT.properties_set():
type_ = WGT.property_type(endpoint)
endpoints[endpoint] = type_
return endpoints
def get_endpoints() -> Dict[str, Any]:
"""Return getter endpoints."""
endpoints = {}
for endpoint in WGT.properties_get():
type_ = WGT.property_type(endpoint)
endpoints[endpoint] = type_
return endpoints
def validate_endpoint_get(request: Request) -> str:
"""Ensure that the given endpoint is valid. If not raise a 404."""
endpoint = str(request.match_info["endpoint"]).lower()
if endpoint not in request.app["get_endpoints"]:
logging.info("Failed to get %s", endpoint)
raise web.HTTPNotFound
return endpoint
def validate_endpoint_put(request: Request) -> str:
"""Ensure that the given endpoint is valid. If not raise a 405."""
endpoint = validate_endpoint_get(request)
if endpoint not in request.app["put_endpoints"]:
logging.info("Failed to put %s", endpoint)
        raise web.HTTPMethodNotAllowed(method="PUT", allowed_methods=["GET"])
return endpoint
def value_to_enum(value: str, enum_class: EnumMeta) -> Any:
"""Translate a value to an enum.
Returns Enum on success, otherwise a HTTPUnprocessableEntity error
is raised.
"""
try:
value_int = int(value)
except ValueError as int_conversion_error:
raise web.HTTPUnprocessableEntity(
reason="Couldnt transform value to int as required for enum."
) from int_conversion_error
try:
value_typed = enum_class(value_int)
except ValueError as enum_conversion_error:
raise web.HTTPUnprocessableEntity(
reason=f"Invalid value for {enum_class}."
) from enum_conversion_error
return value_typed
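# For instance, with an enum class Mode whose members are OFF = 0 and ON = 1
# (a stand-in; the real enums come from wgt.types), value_to_enum("1", Mode)
# returns Mode.ON, while "abc" or an out-of-range "5" raise HTTPUnprocessableEntity.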
@routes.get("/")
async def meta(request: Request) -> Response:
"""Return version of the WGT module."""
# Provide types for all endpoints
data: Dict[str, Union[List, str]] = {}
data["version"] = __version__
data["status_url"] = STATUS_URL
data["info_url"] = INFO_URL
data["get_endpoints"] = list(request.app["get_endpoints"].keys())
data["put_endpoints"] = list(request.app["put_endpoints"].keys())
return web.json_response(data)
@routes.get(INFO_URL + "{endpoint}")
async def info(request: Request) -> Response:
"""Add type information for all endpoints."""
endpoint = validate_endpoint_get(request)
# Prepare return value
data: Dict[str, Any] = {}
# Get data
type_ = request.app["get_endpoints"][endpoint]
type_str = str(type_)
if issubclass(type_, Enum):
enum_values = []
for val in type_:
enum_values.append((val.name, val.value))
data[type_str] = enum_values
elif issubclass(type_, Unit):
data[type_str] = "float"
elif issubclass(type_, timedelta):
data[type_str] = "Minutes or Days"
else:
raise web.HTTPNotImplemented(reason=type_str)
return web.json_response(data)
@routes.put(STATUS_URL + "{endpoint}")
async def put_status(request: Request) -> Response:
"""Set a status of the wgt."""
# Validate that this is an actual endpoint
endpoint = validate_endpoint_put(request=request)
    # Ensure that the data was sent with the application/json content type
    if not request.content_type == "application/json":
        raise web.HTTPUnsupportedMediaType(reason="Only application/json is supported.")
# Try to get the data and translate it to a json
data = {}
try:
data = await request.json()
except ValueError as data_conversion_error:
raise web.HTTPUnsupportedMediaType(
reason="Couldn't convert data to json."
) from data_conversion_error
logging.debug("Received %s", data)
    # Ensure that we have the expected keywords
value = data.get("value", None)
if value is None:
raise web.HTTPUnprocessableEntity(reason="Need 'value' in request.")
# Convert received input to expected format
type_ = request.app["set_endpoints"][endpoint]
value_typed = None
if issubclass(type_, Enum):
value_typed = value_to_enum(value, type_)
elif issubclass(type_, Unit):
value_typed = type_(float(value))
else:
raise web.HTTPNotImplemented
# Check if we have a sound value
if value_typed is None:
raise web.HTTPInternalServerError
# Set the typed value
with WGT(ip=request.app["wgt_ip"], version=request.app["wgt_version"]) as wgt:
try:
setattr(wgt, endpoint, value_typed)
except ValueError as err:
raise web.HTTPUnprocessableEntity(reason=str(err))
raise web.HTTPOk
@routes.get(STATUS_URL + "{endpoint}")
async def get_status(request: Request) -> Response:
"""Return status."""
# Check if the attribute is a valid endpoint
endpoint = validate_endpoint_get(request)
data: Dict[str, Any] = {}
# Connect to wgt and read attribute
with WGT(ip=request.app["wgt_ip"], version=request.app["wgt_version"]) as wgt:
status = getattr(wgt, endpoint)
# Convert status to a dict
if isinstance(status, (Enum, Unit)):
data[endpoint] = {"name": status.name, "value": status.value}
elif isinstance(status, timedelta):
if status.days > 0:
data[endpoint] = {"name": f"{status.days} Tage", "value": status.days}
else:
minutes = status.seconds / 60
data[endpoint] = {"name": f"{minutes} Minuten", "value": minutes}
elif status is None:
data["error"] = f"Endpoint '{endpoint}' is not available in your WGT version.'"
else:
logging.error("Couldn't parse status of %s", endpoint)
raise web.HTTPInternalServerError
return web.json_response(data)
def main(port: int = 8080) -> None:
"""Start the server."""
app = web.Application()
app["wgt_ip"] = "10.1.1.29"
app["wgt_version"] = "1.06"
app["get_endpoints"] = get_endpoints()
app["put_endpoints"] = put_endpoints()
app.add_routes(routes)
web.run_app(app, port=port)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
|
the-stack_0_26306
|
import os
from os import path
from typing import Callable
from PyQt5.QtWidgets import QMainWindow, QFileDialog, QDialog, QMessageBox, QLineEdit, QProgressDialog
from PyQt5.QtGui import QIcon
from importer.alipay import get_transactions_from_alipay_csv
from importer.wechat import get_transactions_from_wechat_csv
from config import app_config
from data_model.transaction import Transaction
from data_model.transaction_item_model import TransactionItemModel
from gui.ui_main_window import Ui_MainWindow
from gui.transaction_view_delegate import TransactionViewDelegate
from beancount_account import get_operating_currencies, generate_account_hierarchy
from fmt import format_transaction
from data_model.tree import Node
from trainer.payee_to_account_trainer import PayeeToAccountTrainer
from .select_account_dialog import SelectAccountDialog
from .account_map_dialog import AccountMapDialog
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.ui = Ui_MainWindow()
self.alipay_csv = ''
self.transaction_item_model = TransactionItemModel([])
self.transaction_view_delegate = TransactionViewDelegate(self)
self._account_map_dialog = AccountMapDialog(self)
def setupUi(self):
self.ui.setupUi(self)
# setup file menu action trigger
self.ui.openBeancountAccountAction.triggered.connect(self.select_beancount_file)
self.ui.openAlipayCsvAction.triggered.connect(self.open_alipay_csv)
self.ui.openWechatCsvAction.triggered.connect(self.open_wechat_csv)
# setup account map menu action
self.ui.payeeToAccountAction.triggered.connect(self.edit_payee_to_account)
self.ui.billAccountToFromAccountAction.triggered.connect(self.edit_bill_account_to_from_account)
self.ui.trainPayeeToAccountMapAction.triggered.connect(self.train_payee_to_account)
# setup default value button and line edit
self.ui.selectPaymentAccountBtn.clicked.connect(self.select_default_payment_account)
self.ui.selectExpensesAccountBtn.clicked.connect(self.select_default_expenses_account)
self.ui.defaultCurrencyComboBox.currentTextChanged.connect(self.set_default_currency)
self.ui.defaultPaymentAccountLE.textChanged.connect(self.set_default_payment_account)
self.ui.defaultExpensesAccountLE.textChanged.connect(self.set_default_expenses_account)
# setup default value
self.ui.defaultPaymentAccountLE.setText(app_config.default_payment_account)
self.ui.defaultExpensesAccountLE.setText(app_config.default_expenses_account)
self.ui.defaultCurrencyComboBox.setCurrentText(app_config.default_currency)
# setup transaction table view
self.ui.transactionTableView.setModel(self.transaction_item_model)
self.ui.transactionTableView.setItemDelegate(self.transaction_view_delegate)
# setup import component
open_file_icon = QIcon('./resources/icon/folder-open-line.svg')
self.ui.importToPathLE.setText(app_config.import_to_file)
importToPathLE_action = self.ui.importToPathLE.addAction(open_file_icon, QLineEdit.ActionPosition.TrailingPosition)
importToPathLE_action.triggered.connect(self.select_import_file)
self.ui.importBtn.clicked.connect(self.import_transaction)
def setup_beancount_option(self, beancount_file: str):
try:
accounts = generate_account_hierarchy(beancount_file)
app_config.beancount_currency = get_operating_currencies(beancount_file)
except Exception as e:
accounts = Node('root')
app_config.beancount_currency = []
QMessageBox.critical(self, self.tr('Error'), self.tr('Failed to load beancount file: {0}').format(e))
self.select_account_dialog = SelectAccountDialog(accounts, parent=self)
self.select_account_dialog.setupUi()
self.transaction_view_delegate.select_account_dialog = self.select_account_dialog
self.ui.defaultCurrencyComboBox.clear()
for currency in app_config.beancount_currency:
self.ui.defaultCurrencyComboBox.addItem(currency)
def select_beancount_file(self):
recent_beancount_path = path.dirname(app_config.recent_beancount_file)
app_config.recent_beancount_file = QFileDialog.getOpenFileName(self, self.tr('Open beancount file'), recent_beancount_path,
'beancount (*.beancount *.bc *.txt)')[0]
if not os.path.isfile(app_config.recent_beancount_file):
return
self.setup_beancount_option(app_config.recent_beancount_file)
def set_default_payment_account(self, account):
self.ui.defaultPaymentAccountLE.setText(account)
app_config.default_payment_account = account
self.transaction_item_model.update_transactions_data(
self._gen_func_set_transaction_account_with_default_value())
def select_default_payment_account(self):
if self.select_account_dialog.exec() == QDialog.Accepted:
account = self.select_account_dialog.get_selected_account()
self.set_default_payment_account(account)
def set_default_expenses_account(self, account):
self.ui.defaultExpensesAccountLE.setText(account)
app_config.default_expenses_account = account
self.transaction_item_model.update_transactions_data(
self._gen_func_set_transaction_account_with_default_value())
def select_default_expenses_account(self):
if self.select_account_dialog.exec() == QDialog.Accepted:
account = self.select_account_dialog.get_selected_account()
self.set_default_expenses_account(account)
def set_default_currency(self, currency):
app_config.default_currency = currency
self.transaction_item_model.update_transactions_data(
self._gen_func_set_transaction_currency_with_default_value())
def open_alipay_csv(self):
recent_alipay_path = path.dirname(app_config.recent_alipay_file)
self.alipay_csv = QFileDialog.getOpenFileName(
self, self.tr('Open Alipay CSV file'), recent_alipay_path, 'CSV (*.csv *.txt)')[0]
if not os.path.isfile(self.alipay_csv):
return
app_config.recent_alipay_file = self.alipay_csv
try:
transactions = get_transactions_from_alipay_csv(self.alipay_csv)
self.transaction_item_model.set_transactions_data(transactions)
self.transaction_item_model.update_transactions_data(
self._gen_func_set_transaction_account_with_default_value())
self.transaction_item_model.update_transactions_data(
self._gen_func_set_transaction_currency_with_default_value())
except Exception as e:
QMessageBox.critical(self, self.tr('Failed to open Alipay csv'), self.tr('Failed to open Alipay csv: ') + str(e))
def open_wechat_csv(self):
recent_wechat_path = path.dirname(app_config.recent_wechat_file)
self.wechat_csv = QFileDialog.getOpenFileName(
self, self.tr('Open Wechat CSV file'), recent_wechat_path, 'CSV (*.csv *.txt)')[0]
if not os.path.isfile(self.wechat_csv):
return
app_config.recent_wechat_file = self.wechat_csv
try:
transactions = get_transactions_from_wechat_csv(self.wechat_csv)
self.transaction_item_model.set_transactions_data(transactions)
self.transaction_item_model.update_transactions_data(
self._gen_func_set_transaction_account_with_default_value())
self.transaction_item_model.update_transactions_data(
self._gen_func_set_transaction_currency_with_default_value())
except Exception as e:
QMessageBox.critical(self, self.tr('Failed to open Wechat csv'), self.tr('Failed to open Wechat csv: ') + str(e))
def select_import_file(self):
import_file = QFileDialog.getSaveFileName(self, self.tr('Import to'), '/', 'beancount (*.beancount *.txt)')
if import_file[0]:
self.ui.importToPathLE.setText(import_file[0])
app_config.import_to_file = import_file[0]
def _gen_func_set_transaction_account_with_default_value(self) -> Callable[[Transaction], None]:
default_payment_account = self.ui.defaultPaymentAccountLE.text()
default_expenses_account = self.ui.defaultExpensesAccountLE.text()
def set_account(transaction):
transaction.from_account = app_config.bill_account_to_from_account.get(transaction.bill_payment_account, default_payment_account)
transaction.to_account = app_config.payee_account_map.get(transaction.payee, default_expenses_account)
return set_account
def _gen_func_set_transaction_currency_with_default_value(self) -> Callable[[Transaction], None]:
default_currency = self.ui.defaultCurrencyComboBox.currentText()
def set_currency(transaction):
transaction.currency = default_currency
return set_currency
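    # A minimal sketch of how these generated closures are meant to be used
    # (illustrative only; update_transactions_data() is assumed to invoke the
    # returned function once per transaction):
    #
    #   set_currency = self._gen_func_set_transaction_currency_with_default_value()
    #   for transaction in transactions:
    #       set_currency(transaction)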
def import_transaction(self):
import_file = self.ui.importToPathLE.text()
if not import_file:
QMessageBox.warning(self, self.tr('Import path is not set!'), self.tr('Import path is not set! Please set an import path first.'))
return
transaction_text_lines = [format_transaction(tx) for tx in self.transaction_item_model.transactions if tx.will_import]
transactions_text = '\n'.join(transaction_text_lines)
try:
with open(import_file, 'a', encoding='utf-8') as import_file_fs:
import_file_fs.write(transactions_text)
QMessageBox.information(self, self.tr('Imported'), self.tr('Imported {0} transactions').format(len(transaction_text_lines)))
except IOError as e:
QMessageBox.critical(self, self.tr('Cannot import transactions'), self.tr('Cannot import transactions: ') + str(e))
def set_payee_to_account(self, value):
app_config.payee_account_map = value
def set_bill_account_to_from_account(self, value):
app_config.bill_account_to_from_account = value
def edit_payee_to_account(self):
self._account_map_dialog.set_account_map(app_config.payee_account_map, self.tr('Payee to account'), [self.tr('Payee'), self.tr('Account')])
        try:
            self._account_map_dialog.finishEdit.disconnect()
        except Exception:
            # nothing was connected yet; safe to ignore
            pass
self._account_map_dialog.finishEdit.connect(self.set_payee_to_account)
self._account_map_dialog.open()
def edit_bill_account_to_from_account(self):
self._account_map_dialog.set_account_map(app_config.bill_account_to_from_account, self.tr('Bill account to From account'), [self.tr('Bill account'), self.tr('From account')])
        try:
            self._account_map_dialog.finishEdit.disconnect()
        except Exception:
            # nothing was connected yet; safe to ignore
            pass
self._account_map_dialog.finishEdit.connect(self.set_bill_account_to_from_account)
self._account_map_dialog.open()
def train_payee_to_account(self):
if not os.path.isfile(app_config.recent_beancount_file):
            QMessageBox.critical(self, self.tr('Error'), self.tr('No beancount file is open for training.'))
return
dialog = QProgressDialog(self.tr('Training...'), self.tr('Abort training'), 0, 100, self)
trainer = PayeeToAccountTrainer(self)
trainer.trainProgress.connect(dialog.setValue)
dialog.canceled.connect(trainer.cancel)
dialog.open()
try:
result = trainer.train(app_config.recent_beancount_file)
for (payee, account) in result.items():
if payee in app_config.payee_account_map and app_config.payee_account_map[payee] != account:
                    overwrite = QMessageBox.question(self,
                                                     self.tr('Overwrite?'),
                                                     self.tr('{0} already exists on the payee to account map with value {1}. Do you want to overwrite it with {2}?').format(payee, app_config.payee_account_map[payee], account))
if overwrite == QMessageBox.Yes:
app_config.payee_account_map[payee] = account
else:
app_config.payee_account_map[payee] = account
except Exception as e:
QMessageBox.critical(self, self.tr('Error'), self.tr('An error occurred while training: {0}.').format(e))
dialog.close()
|
the-stack_0_26308
|
#!/usr/bin/python3
import sys
import json
import textfsm
# Read the tfsm file from args
with open(sys.argv[1], 'r') as f:
raw_input = f.read()
# Pull out the comment lines (all at the start of the template) and supply them as the test input
lines = raw_input.splitlines()
data = []
test_input = '\n'
for line in lines:
if line.startswith('#'):
data.append(line[1:])
if data:
test_input = '\n'.join(data)
with open(sys.argv[1], 'r') as f:
re_table = textfsm.TextFSM(f)
parsed_out = re_table.ParseText(test_input)
records = []
for entry in parsed_out:
rentry = dict(zip(re_table.header, entry))
records.append(rentry)
print(json.dumps(records))
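# Hypothetical invocation sketch (the template path and output below are
# illustrative assumptions, not real files):
#
#   $ ./test_template.py templates/example.tfsm
#   [{"FIELD_A": "value1", "FIELD_B": "value2"}]
#
# The template's own leading '#' comment lines double as the sample input text
# that the TextFSM state machine is exercised against.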
|
the-stack_0_26310
|
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Baseline experiment on centralized Stack Overflow LR data."""
from typing import Any, Mapping, Optional
import tensorflow as tf
from utils import centralized_training_loop
from utils.datasets import stackoverflow_tag_prediction
from utils.models import stackoverflow_lr_models
def run_centralized(optimizer: tf.keras.optimizers.Optimizer,
experiment_name: str,
root_output_dir: str,
num_epochs: int,
batch_size: int,
decay_epochs: Optional[int] = None,
lr_decay: Optional[float] = None,
hparams_dict: Optional[Mapping[str, Any]] = None,
vocab_tokens_size: Optional[int] = 10000,
vocab_tags_size: Optional[int] = 500,
num_validation_examples: Optional[int] = 10000,
max_batches: Optional[int] = None):
"""Trains an RNN on the Stack Overflow next word prediction task.
Args:
optimizer: A `tf.keras.optimizers.Optimizer` used to perform training.
experiment_name: The name of the experiment. Part of the output directory.
root_output_dir: The top-level output directory for experiment runs. The
`experiment_name` argument will be appended, and the directory will
contain tensorboard logs, metrics written as CSVs, and a CSV of
hyperparameter choices (if `hparams_dict` is used).
num_epochs: The number of training epochs.
batch_size: The batch size, used for train, validation, and test.
decay_epochs: The number of epochs of training before decaying the learning
rate. If None, no decay occurs.
lr_decay: The amount to decay the learning rate by after `decay_epochs`
training epochs have occurred.
hparams_dict: A mapping with string keys representing the hyperparameters
and their values. If not None, this is written to CSV.
vocab_tokens_size: Integer dictating the number of most frequent words to
use in the vocabulary.
vocab_tags_size: Integer dictating the number of most frequent tags to use
in the label creation.
num_validation_examples: The number of test examples to use for validation.
max_batches: If set to a positive integer, datasets are capped to at most
that many batches. If set to None or a nonpositive integer, the full
datasets are used.
"""
train_dataset, validation_dataset, test_dataset = stackoverflow_tag_prediction.get_centralized_datasets(
train_batch_size=batch_size,
word_vocab_size=vocab_tokens_size,
tag_vocab_size=vocab_tags_size,
num_validation_examples=num_validation_examples)
if max_batches and max_batches >= 1:
train_dataset = train_dataset.take(max_batches)
validation_dataset = validation_dataset.take(max_batches)
test_dataset = test_dataset.take(max_batches)
model = stackoverflow_lr_models.create_logistic_model(
vocab_tokens_size=vocab_tokens_size, vocab_tags_size=vocab_tags_size)
model.compile(
loss=tf.keras.losses.BinaryCrossentropy(
from_logits=False, reduction=tf.keras.losses.Reduction.SUM),
optimizer=optimizer,
metrics=[tf.keras.metrics.Precision(),
tf.keras.metrics.Recall(top_k=5)])
centralized_training_loop.run(
keras_model=model,
train_dataset=train_dataset,
validation_dataset=validation_dataset,
test_dataset=test_dataset,
experiment_name=experiment_name,
root_output_dir=root_output_dir,
num_epochs=num_epochs,
hparams_dict=hparams_dict,
decay_epochs=decay_epochs,
lr_decay=lr_decay)
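# A minimal sketch of how run_centralized might be called (the optimizer and
# all values below are illustrative assumptions, not recommended settings):
#
#   run_centralized(
#       optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1),
#       experiment_name='stackoverflow_lr_centralized',
#       root_output_dir='/tmp/centralized_opt',
#       num_epochs=10,
#       batch_size=100)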
|
the-stack_0_26313
|
import os
from flask import Blueprint, Flask, Response, jsonify, request
import db
import queries
import service
# nice way to handle URL prefixes and allow for composition
brand = Blueprint("brand", __name__, url_prefix="/brand")
user = Blueprint("user", __name__, url_prefix="/user")
# TODO move to its own module
messages = {
"brand_not_found": (404, "Brand not found"),
"code_not_available": (403, "The code is not available for this brand"),
"code_already_received": (403, "User has already received a code"),
}
@brand.route("/<int:brand_id>/policy", methods=["POST"])
def generate_codes(brand_id):
payload = request.get_json()
# some data is best validated in the controller before passing it to service layer
# TODO refactor out into a validation layer
    if not 1 <= (amount := payload["amount"]) <= 100:
return (
jsonify(
{
"result": "error",
"msg_id": "amount_invalid_range",
"msg": "Amount must be in range 1-100",
}
),
400,
)
if (count := payload["count"]) < 1:
return (
jsonify(
{
"result": "error",
"msg_id": "count_invalid_range",
"msg": "Count must be a positive integer",
}
),
400,
)
success, result = service.generate_codes(brand_id, amount, count)
# TODO refactor out common parts to make the code prettier and easier to read
if success:
return jsonify({"result": "success"}), 201
elif result in messages:
return (
jsonify({"result": "error", "msg_id": result, "msg": messages[result][1]}),
messages[result][0],
)
return jsonify({"result": "error", "msg_id": result, "msg": "Internal error"}), 500
@user.route("/codes", methods=["POST"])
def fetch_code():
payload = request.get_json()
# user_id must come from JWT of the user
# for the purpose of this PoC it's hardcoded by default
# but can be provided as an undocumented "userId" parameter
user_id = payload.get("userId") or 1
brand_id = payload["brandId"]
    # We avoid using exceptions here because they are comparatively slow.
    # That is not the only way of handling this: the Rust language has a nice
    # pattern with its Result type, which helps in such situations, especially
    # when combined with Python's newly added pattern matching capabilities.
success, result = service.get_code(user_id, brand_id)
if success:
return jsonify({"result": "success", "code": result}), 201
elif result in messages:
return (
jsonify({"result": "error", "msg_id": result, "msg": messages[result][1]}),
messages[result][0],
)
return jsonify({"result": "error", "msg_id": result, "msg": "Internal error"}), 500
def create_app(config_mapping=None):
"""App factory provides simple capabilites for testing"""
app = Flask(__name__)
default_config_mapping = {
"SECRET_KEY": "dev",
"DATABASE": os.path.join(app.instance_path, "discount_service.sqlite"),
}
if config_mapping is not None:
default_config_mapping.update(config_mapping)
app.config.from_mapping(**default_config_mapping)
app.register_blueprint(brand)
app.register_blueprint(user)
db.init_app(app)
return app
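# A minimal sketch of using the factory locally (values are illustrative; in
# tests, config_mapping is typically used to point DATABASE at a temp file):
#
#   app = create_app({"DATABASE": "/tmp/discount_service_test.sqlite"})
#   app.run(host="127.0.0.1", port=5000, debug=True)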
|
the-stack_0_26314
|
import torch
from model import MultiStageModel
from train import Trainer
from predict import *
from batch_gen import BatchGenerator
import os
import argparse
import random
import time
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
seed = 1538574472
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
parser = argparse.ArgumentParser()
# architecture
parser.add_argument('--num_stages', default=4, type=int, help='stage number')
parser.add_argument('--num_layers', default=10, type=int, help='layer number in each stage')
parser.add_argument('--num_f_maps', default=64, type=int, help='embedded feat. dim.')
parser.add_argument('--features_dim', default=2048, type=int, help='input feat. dim.')
parser.add_argument('--DA_adv', default='none', type=str, help='adversarial loss (none | rev_grad)')
parser.add_argument('--DA_adv_video', default='none', type=str, help='video-level adversarial loss (none | rev_grad | rev_grad_ssl | rev_grad_ssl_2)')
parser.add_argument('--pair_ssl', default='all', type=str, help='pair-feature methods for SSL-DA (all | adjacent)')
parser.add_argument('--num_seg', default=10, type=int, help='segment number for each video')
parser.add_argument('--place_adv', default=['N', 'Y', 'Y', 'N'], type=str, nargs="+",
metavar='N', help='len(place_adv) == num_stages')
parser.add_argument('--multi_adv', default=['N', 'N'], type=str, nargs="+",
metavar='N', help='separate weights for domain discriminators')
parser.add_argument('--weighted_domain_loss', default='Y', type=str, help='weighted domain loss for class-wise domain discriminators')
parser.add_argument('--ps_lb', default='soft', type=str, help='pseudo-label type (soft | hard)')
parser.add_argument('--source_lb_weight', default='pseudo', type=str, help='label type for source data weighting (real | pseudo)')
parser.add_argument('--method_centroid', default='none', type=str, help='method to get centroids (none | prob_hard)')
parser.add_argument('--DA_sem', default='mse', type=str, help='metric for semantic loss (none | mse)')
parser.add_argument('--place_sem', default=['N', 'Y', 'Y', 'N'], type=str, nargs="+",
metavar='N', help='len(place_sem) == num_stages')
parser.add_argument('--ratio_ma', default=0.7, type=float, help='ratio for moving average centroid method')
parser.add_argument('--DA_ent', default='none', type=str, help='entropy-related loss (none | target | attn)')
parser.add_argument('--place_ent', default=['N', 'Y', 'Y', 'N'], type=str, nargs="+",
metavar='N', help='len(place_ent) == num_stages')
parser.add_argument('--use_attn', type=str, default='none', choices=['none', 'domain_attn'], help='attention mechanism')
parser.add_argument('--DA_dis', type=str, default='none', choices=['none', 'JAN'], help='discrepancy method for DA')
parser.add_argument('--place_dis', default=['N', 'Y', 'Y', 'N'], type=str, nargs="+",
metavar='N', help='len(place_dis) == num_stages')
parser.add_argument('--DA_ens', type=str, default='none', choices=['none', 'MCD', 'SWD'], help='ensemble method for DA')
parser.add_argument('--place_ens', default=['N', 'Y', 'Y', 'N'], type=str, nargs="+",
metavar='N', help='len(place_ens) == num_stages')
parser.add_argument('--SS_video', type=str, default='none', choices=['none', 'VCOP'], help='video-based self-supervised learning method')
parser.add_argument('--place_ss', default=['N', 'Y', 'Y', 'N'], type=str, nargs="+",
metavar='N', help='len(place_ss) == num_stages')
# config & setting
parser.add_argument('--path_data', default='data/')
parser.add_argument('--path_model', default='models/')
parser.add_argument('--path_result', default='results/')
parser.add_argument('--action', default='train')
parser.add_argument('--use_target', default='none', choices=['none', 'uSv'])
parser.add_argument('--split_target', default='0', help='split for target data (0: no additional split for target)')
parser.add_argument('--ratio_source', default=1, type=float, help='percentage of total length to use for source data')
parser.add_argument('--ratio_label_source', default=1, type=float, help='percentage of labels to use for source data (after previous processing)')
parser.add_argument('--dataset', default="gtea")
parser.add_argument('--split', default='1')
# hyper-parameters
parser.add_argument('--lr', default=0.0005, type=float, help='learning rate')
parser.add_argument('--bS', default=1, type=int, help='batch size')
parser.add_argument('--alpha', default=0.15, type=float, help='weighting for smoothing loss')
parser.add_argument('--tau', default=4, type=float, help='threshold to truncate smoothing loss')
parser.add_argument('--beta', default=[-2, -2], type=float, nargs="+", metavar='M', help='weighting for adversarial loss & ensemble loss ([frame-beta, video-beta])')
parser.add_argument('--iter_max_beta', default=[1000, 1000], type=float, nargs="+", metavar='M', help='for adaptive beta ([frame-beta, video-beta])')
parser.add_argument('--gamma', default=-2, type=float, help='weighting for semantic loss')
parser.add_argument('--iter_max_gamma', default=1000, type=float, help='for adaptive gamma')
parser.add_argument('--mu', default=1, type=float, help='weighting for entropy loss')
parser.add_argument('--nu', default=-2, type=float, help='weighting for the discrepancy loss')
parser.add_argument('--eta', default=1, type=float, help='weighting for the self-supervised loss')
parser.add_argument('--iter_max_nu', default=1000, type=float, metavar='M', help='for adaptive nu')
parser.add_argument('--dim_proj', default=128, type=int, help='projection dimension for SWD')
# runtime
parser.add_argument('--num_epochs', default=50, type=int)
parser.add_argument('--verbose', default=False, action="store_true")
parser.add_argument('--use_best_model', type=str, default='none', choices=['none', 'source', 'target'], help='save best model')
parser.add_argument('--multi_gpu', default=False, action="store_true")
parser.add_argument('--resume_epoch', default=0, type=int)
# tensorboard
parser.add_argument('--use_tensorboard', default=False, action='store_true')
parser.add_argument('--epoch_embedding', default=50, type=int, help='select epoch # to save embedding (-1: all epochs)')
parser.add_argument('--stage_embedding', default=-1, type=int, help='select stage # to save embedding (-1: last stage)')
parser.add_argument('--num_frame_video_embedding', default=50, type=int, help='number of sample frames per video to store embedding')
args = parser.parse_args()
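# Example command line (illustrative only; flag values must match the dataset
# layout under --path_data and the domain-adaptation setup you want):
#
#   python <this script> --action train --dataset gtea --split 1 \
#       --use_target uSv --DA_adv rev_grad --num_epochs 50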
# check whether place_adv & place_sem are valid
if len(args.place_adv) != args.num_stages:
    raise ValueError('len(place_adv) should be equal to num_stages')
if len(args.place_sem) != args.num_stages:
raise ValueError('len(place_sem) should be equal to num_stages')
if len(args.place_ent) != args.num_stages:
raise ValueError('len(place_ent) should be equal to num_stages')
if len(args.place_dis) != args.num_stages:
raise ValueError('len(place_dis) should be equal to num_stages')
if len(args.place_ens) != args.num_stages:
raise ValueError('len(place_ens) should be equal to num_stages')
if len(args.place_ss) != args.num_stages:
raise ValueError('len(place_ss) should be equal to num_stages')
if args.use_target == 'none':
args.DA_adv = 'none'
args.DA_sem = 'none'
args.DA_ent = 'none'
args.DA_dis = 'none'
args.DA_ens = 'none'
args.SS_video = 'none' # focus on cross-domain setting
# use the full temporal resolution @ 15fps
sample_rate = 1
# sample input features @ 15fps instead of 30 fps
# for 50salads, and up-sample the output to 30 fps
if args.dataset == "50salads":
sample_rate = 2
# ====== Load files ====== #
vid_list_file = args.path_data+args.dataset+"/splits/train.split"+args.split+".bundle"
vid_list_file_target = args.path_data+args.dataset+"/splits/test.split"+args.split+".bundle"
vid_list_file_test = vid_list_file_target
if args.split_target != '0':
vid_list_file_target = args.path_data + args.dataset + "/splits/test_train_" + args.split_target + ".split" + args.split + ".bundle"
vid_list_file_test = args.path_data + args.dataset + "/splits/test_test_" + args.split_target + ".split" + args.split + ".bundle"
features_path = args.path_data+args.dataset+"/features/"
gt_path = args.path_data+args.dataset+"/groundTruth/"
mapping_file = args.path_data+args.dataset+"/mapping.txt" # mapping between classes & indices
model_dir = args.path_model+args.dataset+"/split_"+args.split
results_dir = args.path_result+args.dataset+"/split_"+args.split
if not os.path.exists(model_dir):
os.makedirs(model_dir)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
file_ptr = open(mapping_file, 'r')
actions = file_ptr.read().split('\n')[:-1] # list of classes
file_ptr.close()
actions_dict = dict()
for a in actions:
actions_dict[a.split()[1]] = int(a.split()[0])
num_classes = len(actions_dict)
# initialize model & trainer
model = MultiStageModel(args, num_classes)
trainer = Trainer(num_classes)
# ====== Main Program ====== #
start_time = time.time()
if args.action == "train":
batch_gen_source = BatchGenerator(num_classes, actions_dict, gt_path, features_path, sample_rate)
batch_gen_target = BatchGenerator(num_classes, actions_dict, gt_path, features_path, sample_rate)
batch_gen_source.read_data(vid_list_file) # read & shuffle the source training list
batch_gen_target.read_data(vid_list_file_target) # read & shuffle the target training list
trainer.train(model, model_dir, results_dir, batch_gen_source, batch_gen_target, device, args)
if args.action == "predict":
predict(model, model_dir, results_dir, features_path, vid_list_file_test, args.num_epochs, actions_dict,
device, sample_rate, args)
end_time = time.time()
if args.verbose:
print('')
print('total running time:', end_time - start_time)
|
the-stack_0_26316
|
import json
import logging
import os
from dateutil.parser import parse
from flask import Flask, request
APP = Flask(__name__)
logger = logging.getLogger(__name__)
DUMPS_DIR = "dumps"
os.makedirs(DUMPS_DIR, exist_ok=True)
@APP.route("/api/v1/lineage", methods=["POST"])
def dump():
"""Endpoint to dump lineage event to a local file system directory."""
event_name = "default"
try:
js = json.loads(request.data)
content = json.dumps(js, sort_keys=True, indent=4)
date = parse(js["eventTime"]).date()
job_name = js["job"]["name"]
event_name = f"{date}-{job_name}"
except Exception:
content = str(request.data, "UTF-8")
file_path = f"{DUMPS_DIR}/{event_name}.json"
logger.info("Written event %s to file %s", event_name, file_path)
with open(file_path, "a") as f:
f.write(content)
return "", 200
|
the-stack_0_26317
|
from msrest.serialization import Model
class Pools(Model):
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'value': {'key': 'value', 'type': '[PoolDetails]'},
}
def __init__(self, count=None, value=None):
self.count = count
self.value = value
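# Minimal usage sketch: constructing an empty result page by hand (in practice
# the msrest client deserializes the REST response into PoolDetails objects).
if __name__ == '__main__':
    empty_page = Pools(count=0, value=[])
    print(empty_page.count, empty_page.value)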
|
the-stack_0_26319
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡Fast, Lightweight, Programmable, TLS interception capable
proxy server for Application debugging, testing and development.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import argparse
import asyncio
import sys
import time
from typing import List, Tuple
from proxy.common.constants import __homepage__, DEFAULT_BUFFER_SIZE
from proxy.common.utils import build_http_request
from proxy.http.methods import httpMethods
from proxy.http.parser import httpParserStates, httpParserTypes, HttpParser
DEFAULT_N = 1
def init_parser() -> argparse.ArgumentParser:
"""Initializes and returns argument parser."""
parser = argparse.ArgumentParser(
description='Benchmark opens N concurrent connections '
'to proxy.py web server. Currently, HTTP/1.1 '
'keep-alive connections are opened. Over each opened '
'connection multiple pipelined request / response '
'packets are exchanged with proxy.py web server.',
epilog='Proxy.py not working? Report at: %s/issues/new' % __homepage__
)
parser.add_argument(
'--n', '-n',
type=int,
default=DEFAULT_N,
help='Default: ' + str(DEFAULT_N) +
'. See description above for meaning of N.'
)
return parser
class Benchmark:
def __init__(self, n: int = DEFAULT_N) -> None:
self.n = n
self.clients: List[Tuple[asyncio.StreamReader,
asyncio.StreamWriter]] = []
async def open_connections(self) -> None:
for _ in range(self.n):
self.clients.append(await asyncio.open_connection('::', 8899))
print('Opened ' + str(self.n) + ' connections')
@staticmethod
async def send(writer: asyncio.StreamWriter) -> None:
try:
while True:
writer.write(build_http_request(
httpMethods.GET, b'/'
))
await asyncio.sleep(0.01)
except KeyboardInterrupt:
pass
@staticmethod
def parse_pipeline_response(response: HttpParser, raw: bytes, counter: int = 0) -> \
Tuple[HttpParser, int]:
response.parse(raw)
if response.state != httpParserStates.COMPLETE:
# Need more data
return response, counter
if response.buffer == b'':
# No more buffer left to parse
return response, counter + 1
# For pipelined requests we may have pending buffer, try parse them as
# responses
pipelined_response = HttpParser(httpParserTypes.RESPONSE_PARSER)
return Benchmark.parse_pipeline_response(
pipelined_response, response.buffer, counter + 1)
@staticmethod
async def recv(idd: int, reader: asyncio.StreamReader) -> None:
print_every = 1000
last_print = time.time()
num_completed_requests: int = 0
response = HttpParser(httpParserTypes.RESPONSE_PARSER)
try:
while True:
raw = await reader.read(DEFAULT_BUFFER_SIZE)
response, total_parsed = Benchmark.parse_pipeline_response(
response, raw)
if response.state == httpParserStates.COMPLETE:
response = HttpParser(httpParserTypes.RESPONSE_PARSER)
if total_parsed > 0:
num_completed_requests += total_parsed
# print('total parsed %d' % total_parsed)
if num_completed_requests % print_every == 0:
now = time.time()
print('[%d] Completed last %d requests in %.2f secs' %
(idd, print_every, now - last_print))
last_print = now
except KeyboardInterrupt:
pass
async def close_connections(self) -> None:
for reader, writer in self.clients:
writer.close()
await writer.wait_closed()
print('Closed ' + str(self.n) + ' connections')
async def run(self) -> None:
try:
await self.open_connections()
print('Exchanging request / response packets')
readers = []
writers = []
idd = 0
for reader, writer in self.clients:
readers.append(
asyncio.create_task(
self.recv(idd, reader)
)
)
writers.append(
asyncio.create_task(
self.send(writer)
)
)
idd += 1
await asyncio.gather(*(readers + writers))
finally:
try:
await self.close_connections()
except RuntimeError:
pass
def main(input_args: List[str]) -> None:
args = init_parser().parse_args(input_args)
benchmark = Benchmark(n=args.n)
try:
asyncio.run(benchmark.run())
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main(sys.argv[1:]) # pragma: no cover
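# Example run (assumes a proxy.py web server is already listening on [::]:8899,
# which is the address open_connections() dials; the file name is illustrative):
#
#   python benchmark.py -n 10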
|
the-stack_0_26320
|
# Copyright 2021 EMQ Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from . import reg
from .connection import SourceChannel
from .symbol import parse_context, SymbolRuntime
from ..source import Source
class SourceRuntime(SymbolRuntime):
def __init__(self, ctrl: dict, s: Source):
ctx = parse_context(ctrl)
ds = ""
config = {}
if 'datasource' in ctrl:
ds = ctrl['datasource']
if 'config' in ctrl:
config = ctrl['config']
s.configure(ds, config)
ch = SourceChannel(ctrl['meta'])
ctx.set_emitter(ch)
key = f"{ctrl['meta']['ruleId']}_{ctrl['meta']['opId']}" \
f"_{ctrl['meta']['instanceId']}_{ctrl['symbolName']}"
self.s = s
self.ctx = ctx
self.ch = ch
self.running = False
self.key = key
def run(self):
logging.info('start running source')
self.running = True
reg.setr(self.key, self)
# noinspection PyBroadException
try:
self.s.open(self.ctx)
except Exception:
"""two occasions: normal stop will close socket to raise an error OR\
stopped by unexpected error"""
if self.running:
logging.error(traceback.format_exc())
finally:
self.stop()
def stop(self):
self.running = False
# noinspection PyBroadException
try:
self.s.close(self.ctx)
self.ch.close()
reg.delete(self.key)
except Exception:
logging.error(traceback.format_exc())
def is_running(self) -> bool:
return self.running
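# A minimal sketch of how a SourceRuntime is typically driven (the ctrl dict
# layout mirrors the keys read in __init__; MyCustomSource is hypothetical):
#
#   runtime = SourceRuntime(ctrl, MyCustomSource())
#   threading.Thread(target=runtime.run, daemon=True).start()
#   ...
#   runtime.stop()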
|
the-stack_0_26322
|
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
def create_dust_qs(serviceKey, station_name):
qs = dict()
qs['serviceKey'] = unquote(serviceKey)
qs['stationName'] = station_name
qs['dataTerm'] = 'DAILY'
qs['ver']='1.3'
return qs
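# A minimal usage sketch; the service key and station name below are
# placeholders, not real credentials.
if __name__ == '__main__':
    example_qs = create_dust_qs('ENCODED%2FSERVICE%2FKEY', 'Jongno-gu')
    print(example_qs)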
|
the-stack_0_26323
|
#!/usr/bin/env python
import rospy
import numpy as np
import potential_fields as pf
from unball.msg import MeasurementSystemMessage
from communication.msg import target_positions_msg
from subprocess import call
# This script is made to test the tangential potential around the ball.
# If you need to change the behavior of this potential field you can change this constant:
# * TANGENCIAL_BALL_MAGNITUDE - Indicates the magnitude of the field
#
# You can see the potential_fields.py to more informations
TANGENCIAL_BALL_MAGNITUDE = 0.3
number_of_robots = 3
def callback(data):
msg = target_positions_msg()
    # Creating a tangential field around the ball
tangencial_ball = pf.TangencialPotentialField(
np.array([data.ball_x, data.ball_y]),
TANGENCIAL_BALL_MAGNITUDE)
for robot in range(number_of_robots):
resultant_vector = tangencial_ball.calculate_force(
np.array([data.x[robot], data.y[robot]]))
msg.x[robot] = resultant_vector[0]
msg.y[robot] = resultant_vector[1]
pub.publish(msg)
def start():
global pub
pub = rospy.Publisher('target_positions_topic', target_positions_msg, queue_size=10)
rospy.init_node('strategy')
rospy.Subscriber('measurement_system_topic', MeasurementSystemMessage, callback)
rospy.spin()
if __name__ == '__main__':
start()
|
the-stack_0_26324
|
"""
dj-stripe Card Model Tests.
"""
from copy import deepcopy
from unittest.mock import ANY, patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from stripe.error import InvalidRequestError
from djstripe.exceptions import StripeObjectManipulationException
from djstripe.models import Card
from . import (
FAKE_CARD,
FAKE_CARD_III,
FAKE_CARD_V,
FAKE_CUSTOMER,
AssertStripeFksMixin,
default_account,
)
class CardTest(AssertStripeFksMixin, TestCase):
def setUp(self):
self.account = default_account()
self.user = get_user_model().objects.create_user(
username="testuser", email="[email protected]"
)
fake_empty_customer = deepcopy(FAKE_CUSTOMER)
fake_empty_customer["default_source"] = None
fake_empty_customer["sources"] = []
self.customer = fake_empty_customer.create_for_user(self.user)
def test_attach_objects_hook_without_customer(self):
card = Card.sync_from_stripe_data(deepcopy(FAKE_CARD_III))
self.assertEqual(card.customer, None)
def test_create_card_finds_customer(self):
card = Card.sync_from_stripe_data(deepcopy(FAKE_CARD))
self.assertEqual(self.customer, card.customer)
self.assertEqual(
card.get_stripe_dashboard_url(), self.customer.get_stripe_dashboard_url()
)
self.assert_fks(
card,
expected_blank_fks={
"djstripe.BankAccount.account",
"djstripe.Customer.coupon",
"djstripe.Customer.default_source",
},
)
def test_str(self):
fake_card = deepcopy(FAKE_CARD)
card = Card.sync_from_stripe_data(fake_card)
self.assertEqual(
"<brand={brand}, last4={last4}, exp_month={exp_month}, "
"exp_year={exp_year}, id={id}>".format(
brand=fake_card["brand"],
last4=fake_card["last4"],
exp_month=fake_card["exp_month"],
exp_year=fake_card["exp_year"],
id=fake_card["id"],
),
str(card),
)
self.assert_fks(
card,
expected_blank_fks={
"djstripe.Customer.coupon",
"djstripe.Customer.default_source",
},
)
@patch("stripe.Token.create", autospec=True)
def test_card_create_token(self, token_create_mock):
card = {"number": "4242", "exp_month": 5, "exp_year": 2012, "cvc": 445}
Card.create_token(**card)
token_create_mock.assert_called_with(api_key=ANY, card=card)
def test_api_call_no_customer(self):
exception_message = (
"Cards must be manipulated through a Customer. "
"Pass a Customer object into this call."
)
with self.assertRaisesMessage(
StripeObjectManipulationException, exception_message
):
Card._api_create()
with self.assertRaisesMessage(
StripeObjectManipulationException, exception_message
):
Card.api_list()
def test_api_call_bad_customer(self):
exception_message = (
"Cards must be manipulated through a Customer. "
"Pass a Customer object into this call."
)
with self.assertRaisesMessage(
StripeObjectManipulationException, exception_message
):
Card._api_create(customer="fish")
with self.assertRaisesMessage(
StripeObjectManipulationException, exception_message
):
Card.api_list(customer="fish")
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_api_create(self, customer_retrieve_mock):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
self.assertEqual(FAKE_CARD, stripe_card)
@patch("tests.CardDict.delete", autospec=True)
@patch("stripe.Card.retrieve", return_value=deepcopy(FAKE_CARD), autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_remove(self, customer_retrieve_mock, card_retrieve_mock, card_delete_mock):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
Card.sync_from_stripe_data(stripe_card)
self.assertEqual(1, self.customer.legacy_cards.count())
card = self.customer.legacy_cards.all()[0]
card.remove()
self.assertEqual(0, self.customer.legacy_cards.count())
self.assertTrue(card_delete_mock.called)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_remove_already_deleted_card(self, customer_retrieve_mock):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
Card.sync_from_stripe_data(stripe_card)
self.assertEqual(self.customer.legacy_cards.count(), 1)
card_object = self.customer.legacy_cards.first()
Card.objects.filter(id=stripe_card["id"]).delete()
self.assertEqual(self.customer.legacy_cards.count(), 0)
card_object.remove()
self.assertEqual(self.customer.legacy_cards.count(), 0)
@patch("djstripe.models.Card._api_delete", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_remove_no_such_source(self, customer_retrieve_mock, card_delete_mock):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
Card.sync_from_stripe_data(stripe_card)
card_delete_mock.side_effect = InvalidRequestError("No such source:", "blah")
self.assertEqual(1, self.customer.legacy_cards.count())
card = self.customer.legacy_cards.all()[0]
card.remove()
self.assertEqual(0, self.customer.legacy_cards.count())
self.assertTrue(card_delete_mock.called)
@patch("djstripe.models.Card._api_delete", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_remove_no_such_customer(self, customer_retrieve_mock, card_delete_mock):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
Card.sync_from_stripe_data(stripe_card)
card_delete_mock.side_effect = InvalidRequestError("No such customer:", "blah")
self.assertEqual(1, self.customer.legacy_cards.count())
card = self.customer.legacy_cards.all()[0]
card.remove()
self.assertEqual(0, self.customer.legacy_cards.count())
self.assertTrue(card_delete_mock.called)
@patch("djstripe.models.Card._api_delete", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_remove_unexpected_exception(
self, customer_retrieve_mock, card_delete_mock
):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
Card.sync_from_stripe_data(stripe_card)
card_delete_mock.side_effect = InvalidRequestError(
"Unexpected Exception", "blah"
)
self.assertEqual(1, self.customer.legacy_cards.count())
card = self.customer.legacy_cards.all()[0]
with self.assertRaisesMessage(InvalidRequestError, "Unexpected Exception"):
card.remove()
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_api_list(self, customer_retrieve_mock):
card_list = Card.api_list(customer=self.customer)
self.assertEqual([FAKE_CARD, FAKE_CARD_V], card_list)
|
the-stack_0_26326
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
return_val = getattr(self.auth_service_proxy_instance, name)
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
# If proxy getattr returned an unwrapped value, do the same here.
return return_val
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
self._log_call()
return return_val
def _log_call(self):
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
self._log_call()
return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `adelphia-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
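# Minimal usage sketch (node is assumed to be an AuthServiceProxy connected to
# a running daemon; the directory is illustrative):
#
#   wrapped = AuthServiceProxyWrapper(node, get_filename("/tmp/coverage", 0))
#   wrapped.getblockcount()   # appends "getblockcount" to the coverage file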
|
the-stack_0_26327
|
""" Given an array of integers, find the sum of its elements. """
def simpleArraySum(ar):
size = len(ar)
result = 0
for idx in range(size):
result = result + ar[idx]
return result
if __name__ == "__main__":
res = simpleArraySum([1,2,3,4,10,11])
print(res)
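    # Sanity check (illustrative): the hand-rolled loop agrees with the built-in sum().
    assert res == sum([1, 2, 3, 4, 10, 11])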
|
the-stack_0_26328
|
from .helper import timestamp_to_datetime
import six
class ApiModel(object):
@classmethod
def object_from_dictionary(cls, entry):
# make dict keys all strings
if entry is None:
return ""
entry_str_dict = dict([(str(key), value) for key, value in entry.items()])
return cls(**entry_str_dict)
def __repr__(self):
return str(self)
# if six.PY2:
# return six.text_type(self).encode('utf8')
# else:
# return self.encode('utf8')
def __str__(self):
if six.PY3:
return self.__unicode__()
else:
return unicode(self).encode('utf-8')
class Image(ApiModel):
def __init__(self, url, width, height):
self.url = url
self.height = height
self.width = width
def __unicode__(self):
return "Image: %s" % self.url
class Video(Image):
def __unicode__(self):
return "Video: %s" % self.url
class Media(ApiModel):
def __init__(self, id=None, **kwargs):
self.id = id
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
def get_standard_resolution_url(self):
if self.type == 'image':
return self.images['standard_resolution'].url
else:
return self.videos['standard_resolution'].url
def get_low_resolution_url(self):
if self.type == 'image':
return self.images['low_resolution'].url
else:
return self.videos['low_resolution'].url
def get_thumbnail_url(self):
return self.images['thumbnail'].url
def __unicode__(self):
return "Media: %s" % self.id
@classmethod
def object_from_dictionary(cls, entry):
new_media = Media(id=entry['id'])
new_media.type = entry['type']
new_media.user = User.object_from_dictionary(entry['user'])
new_media.images = {}
for version, version_info in six.iteritems(entry['images']):
new_media.images[version] = Image.object_from_dictionary(version_info)
if new_media.type == 'video':
new_media.videos = {}
for version, version_info in six.iteritems(entry['videos']):
new_media.videos[version] = Video.object_from_dictionary(version_info)
if 'user_has_liked' in entry:
new_media.user_has_liked = entry['user_has_liked']
new_media.like_count = entry['likes']['count']
new_media.likes = []
if 'data' in entry['likes']:
for like in entry['likes']['data']:
new_media.likes.append(User.object_from_dictionary(like))
new_media.comment_count = entry['comments']['count']
new_media.comments = []
for comment in entry['comments']['data']:
new_media.comments.append(Comment.object_from_dictionary(comment))
new_media.users_in_photo = []
if entry.get('users_in_photo'):
for user_in_photo in entry['users_in_photo']:
new_media.users_in_photo.append(UserInPhoto.object_from_dictionary(user_in_photo))
new_media.created_time = timestamp_to_datetime(entry['created_time'])
if entry['location'] and 'id' in entry:
new_media.location = Location.object_from_dictionary(entry['location'])
new_media.caption = None
if entry['caption']:
new_media.caption = Comment.object_from_dictionary(entry['caption'])
new_media.tags = []
if entry['tags']:
for tag in entry['tags']:
new_media.tags.append(Tag.object_from_dictionary({'name': tag}))
new_media.link = entry['link']
new_media.filter = entry.get('filter')
return new_media
class MediaShortcode(Media):
def __init__(self, shortcode=None, **kwargs):
self.shortcode = shortcode
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
class Tag(ApiModel):
def __init__(self, name, **kwargs):
self.name = name
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
def __unicode__(self):
return "Tag: %s" % self.name
class Comment(ApiModel):
def __init__(self, *args, **kwargs):
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classmethod
def object_from_dictionary(cls, entry):
user = User.object_from_dictionary(entry['from'])
text = entry['text']
created_at = timestamp_to_datetime(entry['created_time'])
id = entry['id']
return Comment(id=id, user=user, text=text, created_at=created_at)
def __unicode__(self):
return "Comment: %s said \"%s\"" % (self.user.username, self.text)
class Point(ApiModel):
def __init__(self, latitude, longitude):
self.latitude = latitude
self.longitude = longitude
def __unicode__(self):
return "Point: (%s, %s)" % (self.latitude, self.longitude)
class Location(ApiModel):
def __init__(self, id, *args, **kwargs):
self.id = str(id)
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classmethod
def object_from_dictionary(cls, entry):
point = None
if 'latitude' in entry:
point = Point(entry.get('latitude'),
entry.get('longitude'))
location = Location(entry.get('id', 0),
point=point,
name=entry.get('name', ''))
return location
def __unicode__(self):
return "Location: %s (%s)" % (self.id, self.point)
class User(ApiModel):
def __init__(self, id, *args, **kwargs):
self.id = id
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
def __unicode__(self):
return "User: %s" % self.username
def getName(self):
        return self.username
class Relationship(ApiModel):
def __init__(self, incoming_status="none", outgoing_status="none", target_user_is_private=False):
self.incoming_status = incoming_status
self.outgoing_status = outgoing_status
self.target_user_is_private = target_user_is_private
def __unicode__(self):
follows = False if self.outgoing_status == 'none' else True
followed = False if self.incoming_status == 'none' else True
return "Relationship: (Follows: %s, Followed by: %s)" % (follows, followed)
class Position(ApiModel):
def __init__(self, x, y):
self.x = x
self.y = y
def __unicode__(self):
return "Position: (%s, %s)" % (self.x, self.y)
@classmethod
def object_from_dictionary(cls, entry):
if 'x' in entry:
return Position(entry['x'], entry['y'])
class UserInPhoto(ApiModel):
def __init__(self, user, position):
self.position = position
self.user = user
def __unicode__(self):
return "UserInPhoto: (%s, %s)" % (self.user, self.position)
@classmethod
def object_from_dictionary(cls, entry):
        user = None
        # Default position to None so the constructor call below cannot fail
        # with a NameError when the entry has no 'position' key.
        position = None
        if 'user' in entry:
            user = User.object_from_dictionary(entry['user'])
        if 'position' in entry:
            position = Position(entry['position']['x'], entry['position']['y'])
        return UserInPhoto(user, position)
|
the-stack_0_26330
|
#
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
contains all serializers related to Shop APIs
"""
from rest_framework import serializers
from crapi.shop.models import Order, Product, Coupon
from user.serializers import UserSerializer
class ProductSerializer(serializers.ModelSerializer):
"""
Serializer for Product model
"""
class Meta:
"""
Meta class for ProductSerializer
"""
model = Product
fields = ('id', 'name', 'price', 'image_url')
class OrderSerializer(serializers.ModelSerializer):
"""
Serializer for Order model
"""
user = UserSerializer()
product = ProductSerializer()
class Meta:
"""
Meta class for OrderSerializer
"""
model = Order
fields = ('id', 'user', 'product', 'quantity', 'status', 'created_on')
class CouponSerializer(serializers.Serializer):
"""
Serializer for Coupon model
"""
coupon_code = serializers.CharField()
amount = serializers.IntegerField()
class Meta:
"""
Meta class for CouponSerializer
"""
model = Coupon
class ProductQuantitySerializer(serializers.Serializer):
"""
Serializer for Product order API
"""
product_id = serializers.IntegerField()
quantity = serializers.IntegerField()
|
the-stack_0_26331
|
import logging
from copy import deepcopy
from hyperopt import fmin, tpe, STATUS_OK, Trials
from naslib.optimizers.discrete import Searcher as BaseSearcher
from naslib.utils.utils import AttrDict
class Searcher(BaseSearcher):
def __init__(self, graph, parser, arch_optimizer, *args, **kwargs):
super(Searcher, self).__init__(graph, parser, arch_optimizer, *args, **kwargs)
self.trials = Trials()
def run(self, n_evaluations, *args, **kwargs):
best = fmin(self.objective, space=self.arch_optimizer.space,
algo=tpe.suggest, max_evals=n_evaluations,
trials=self.trials)
def objective(self, x):
config = deepcopy(x)
print('CONFIG: ', config)
self.arch_optimizer.set_to_zero()
for arch_key, arch_weight in self.arch_optimizer.architectural_weights.items():
idx = config[arch_key]
arch_weight.data[idx] = 1
arch_info = self.query()
y = -arch_info['cifar10-valid']['valid_accuracy']
c = arch_info['cifar10-valid']['latency (ms)']
return {
'config': config,
'loss': y,
'cost': c,
'status': STATUS_OK}
#NOTE: this works only for nasbench201 for now
def query(self):
if hasattr(self.graph, 'query_architecture'):
# Record anytime performance
arch_info = self.graph.query_architecture(self.arch_optimizer.architectural_weights)
logging.info('arch {}'.format(arch_info))
if 'arch_eval' not in self.errors_dict:
self.errors_dict['arch_eval'] = []
self.errors_dict['arch_eval'].append(arch_info)
self.log_to_json(self.parser.config.save)
return arch_info
|
the-stack_0_26333
|
#!/usr/bin/env python
"""
Unit tests for Generalized One-Pass Sweep-line Algorithm
- test_regionsweep_simple
- test_regionsweep_random
"""
from typing import List
from unittest import TestCase
from sources.algorithms import \
RegionSweep, RegionSweepDebug, RegionSweepOverlaps
from sources.core import \
Region, RegionPair, RegionSet
class TestRegionSweep(TestCase):
def _evaluate_regionsweep(self, regions: RegionSet, i: int) -> List[RegionPair]:
subscribers = [] #[RegionSweepDebug()]
return RegionSweepOverlaps.prepare(regions, *subscribers)(i)
def test_regionsweep_simple(self):
regionset = RegionSet(dimension=2)
regionset.add(Region([0, 0], [3, 5]))
regionset.add(Region([3, 1], [5, 5]))
regionset.add(Region([2, 4], [6, 6]))
for i in range(regionset.dimension):
expect = regionset.overlaps(i)
actual = self._evaluate_regionsweep(regionset, i)
#for pair in expect: print(f'Expect {i}:\t{pair[0]}\n\t{pair[1]}')
#for pair in actual: print(f'Actual {i}:\t{pair[0]}\n\t{pair[1]}')
for pair in expect:
#passed = "Passed" if pair in actual else "Failed"
#print(f'{passed} {i}: {pair[0]} {pair[1]}')
self.assertTrue(pair in actual)
self.assertEqual(len(expect), len(actual))
def test_regionsweep_random(self):
regionset = RegionSet.from_random(30, Region([0]*3, [100]*3), sizepc=Region([0]*3, [0.5]*3), precision=0)
actuals = []
#for region in regionset: print(f'{region}')
for i in range(regionset.dimension):
#print(f'Dimension: {i}')
expect = regionset.overlaps(i)
actual = self._evaluate_regionsweep(regionset, i)
#for pair in expect: print(f'Expect {i}: {pair[0].id} {pair[1].id}')
#for pair in actual: print(f'Actual {i}: {pair[0].id} {pair[1].id}')
for pair in expect:
#passed = "Passed" if pair in actual else "Failed"
#print(f'{passed} {i}: {pair[0].id} {pair[1].id}')
self.assertTrue(pair in actual)
self.assertEqual(len(expect), len(actual))
actuals.append(actual)
self.assertTrue(all([len(actual) for actual in actuals]))
for pair in actuals[0]:
for d in range(1, regionset.dimension):
self.assertTrue(pair in actuals[d] or (pair[1], pair[0]) in actuals[d])
|
the-stack_0_26334
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class FrHit(Package):
"""An efficient algorithm for fragment recruitment for next generation
sequences against microbial reference genomes."""
homepage = "http://weizhong-lab.ucsd.edu/frhit"
url = "http://weizhong-lab.ucsd.edu/frhit/fr-hit-v0.7.1-2013-02-20.tar.gz"
version('0.7.1-2013-02-20', sha256='44dcfeb73106529fcefb02c017ec7b95b04b6523a2a57683b2bc905c142e62eb')
depends_on('perl')
depends_on('[email protected]:')
# The patch adds the python interpreter to the beginning of the script
# allowing it to be run directly without passing the entire path to the
# script to python.
patch('binning.patch')
def install(self, spec, prefix):
make()
filter_file(
r'#!/bin/env perl',
'#!/usr/bin/env perl',
'frhit2pairend.pl'
)
filter_file(
r'#!/bin/env perl',
'#!/usr/bin/env perl',
'psl2sam.pl'
)
mkdirp(prefix.bin)
install('fr-hit', prefix.bin)
install('frhit2pairend.pl', prefix.bin)
install('psl2sam.pl', prefix.bin)
install('binning-1.1.1/bacteria_gitax.pkl', prefix.bin)
install('binning-1.1.1/binning.py', prefix.bin)
install('binning-1.1.1/tax.pkl', prefix.bin)
|
the-stack_0_26335
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
from atss_core.config import cfg
from predictor import COCODemo
import time
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
parser.add_argument(
"--config-file",
default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.7,
help="Minimum score for the prediction to be shown",
)
parser.add_argument(
"--min-image-size",
type=int,
default=224,
help="Smallest size of the image to feed to the model. "
"Model was trained with 800, which gives best results",
)
parser.add_argument(
"--show-mask-heatmaps",
dest="show_mask_heatmaps",
help="Show a heatmap probability for the top masks-per-dim masks",
action="store_true",
)
parser.add_argument(
"--masks-per-dim",
type=int,
default=2,
help="Number of heatmaps per dimension to show",
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# load config from file and command-line arguments
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=args.confidence_threshold,
show_mask_heatmaps=args.show_mask_heatmaps,
masks_per_dim=args.masks_per_dim,
min_image_size=args.min_image_size,
)
cam = cv2.VideoCapture(0)
while True:
start_time = time.time()
ret_val, img = cam.read()
composite = coco_demo.run_on_opencv_image(img)
print("Time: {:.2f} s / img".format(time.time() - start_time))
cv2.imshow("COCO detections", composite)
if cv2.waitKey(1) == 27:
break # esc to quit
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
the-stack_0_26336
|
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Revenue by Traffic Lab
# MAGIC Get the 3 traffic sources generating the highest total revenue.
# MAGIC 1. Aggregate revenue by traffic source
# MAGIC 2. Get top 3 traffic sources by total revenue
# MAGIC 3. Clean revenue columns to have two decimal places
# MAGIC
# MAGIC ##### Methods
# MAGIC - <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.html" target="_blank">DataFrame</a>: **`groupBy`**, **`sort`**, **`limit`**
# MAGIC - <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.Column.html?highlight=column#pyspark.sql.Column" target="_blank">Column</a>: **`alias`**, **`desc`**, **`cast`**, **`operators`**
# MAGIC - <a href="https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql.html?#functions" target="_blank">Built-in Functions</a>: **`avg`**, **`sum`**
# COMMAND ----------
# MAGIC %run ../../Includes/Classroom-Setup
# COMMAND ----------
# MAGIC %md
# MAGIC ### Setup
# MAGIC Run the cell below to create the starting DataFrame **`df`**.
# COMMAND ----------
from pyspark.sql.functions import col
# Purchase events logged on the BedBricks website
df = (spark.read.format("delta").load(events_path)
.withColumn("revenue", col("ecommerce.purchase_revenue_in_usd"))
.filter(col("revenue").isNotNull())
.drop("event_name")
)
display(df)
# COMMAND ----------
# MAGIC %md ### 1. Aggregate revenue by traffic source
# MAGIC - Group by **`traffic_source`**
# MAGIC - Get sum of **`revenue`** as **`total_rev`**
# MAGIC - Get average of **`revenue`** as **`avg_rev`**
# MAGIC
# MAGIC Remember to import any necessary built-in functions.
# COMMAND ----------
# ANSWER
from pyspark.sql.functions import avg, col, sum
traffic_df = (df
.groupBy("traffic_source")
.agg(sum(col("revenue")).alias("total_rev"),
avg(col("revenue")).alias("avg_rev"))
)
display(traffic_df)
# COMMAND ----------
# MAGIC %md **1.1: CHECK YOUR WORK**
# COMMAND ----------
from pyspark.sql.functions import round
expected1 = [(12704560.0, 1083.175), (78800000.3, 983.2915), (24797837.0, 1076.6221), (47218429.0, 1086.8303), (16177893.0, 1083.4378), (8044326.0, 1087.218)]
test_df = traffic_df.sort("traffic_source").select(round("total_rev", 4).alias("total_rev"), round("avg_rev", 4).alias("avg_rev"))
result1 = [(row.total_rev, row.avg_rev) for row in test_df.collect()]
assert(expected1 == result1)
# COMMAND ----------
# MAGIC %md ### 2. Get top three traffic sources by total revenue
# MAGIC - Sort by **`total_rev`** in descending order
# MAGIC - Limit to first three rows
# COMMAND ----------
# ANSWER
top_traffic_df = traffic_df.sort(col("total_rev").desc()).limit(3)
display(top_traffic_df)
# COMMAND ----------
# MAGIC %md **2.1: CHECK YOUR WORK**
# COMMAND ----------
expected2 = [(78800000.3, 983.2915), (47218429.0, 1086.8303), (24797837.0, 1076.6221)]
test_df = top_traffic_df.select(round("total_rev", 4).alias("total_rev"), round("avg_rev", 4).alias("avg_rev"))
result2 = [(row.total_rev, row.avg_rev) for row in test_df.collect()]
assert(expected2 == result2)
# COMMAND ----------
# MAGIC %md ### 3. Limit revenue columns to two decimal places
# MAGIC - Modify columns **`avg_rev`** and **`total_rev`** to contain numbers with two decimal places
# MAGIC - Use **`withColumn()`** with the same names to replace these columns
# MAGIC - To limit to two decimal places, multiply each column by 100, cast to long, and then divide by 100
# COMMAND ----------
# ANSWER
final_df = (top_traffic_df
.withColumn("avg_rev", (col("avg_rev") * 100).cast("long") / 100)
.withColumn("total_rev", (col("total_rev") * 100).cast("long") / 100)
)
display(final_df)
# COMMAND ----------
# MAGIC %md **3.1: CHECK YOUR WORK**
# COMMAND ----------
expected3 = [(78800000.29, 983.29), (47218429.0, 1086.83), (24797837.0, 1076.62)]
result3 = [(row.total_rev, row.avg_rev) for row in final_df.collect()]
assert(expected3 == result3)
# COMMAND ----------
# MAGIC %md
# MAGIC ### 4. Bonus: Rewrite using a built-in math function
# MAGIC Find a built-in math function that rounds to a specified number of decimal places
# COMMAND ----------
# ANSWER
from pyspark.sql.functions import round
bonus_df = (top_traffic_df
.withColumn("avg_rev", round("avg_rev", 2))
.withColumn("total_rev", round("total_rev", 2))
)
display(bonus_df)
# COMMAND ----------
# MAGIC %md **4.1: CHECK YOUR WORK**
# COMMAND ----------
expected4 = [(78800000.3, 983.29), (47218429.0, 1086.83), (24797837.0, 1076.62)]
result4 = [(row.total_rev, row.avg_rev) for row in bonus_df.collect()]
assert(expected4 == result4)
# COMMAND ----------
# MAGIC %md ### 5. Chain all the steps above
# COMMAND ----------
# ANSWER
chain_df = (df
.groupBy("traffic_source")
.agg(sum(col("revenue")).alias("total_rev"),
avg(col("revenue")).alias("avg_rev"))
.sort(col("total_rev").desc())
.limit(3)
.withColumn("avg_rev", round("avg_rev", 2))
.withColumn("total_rev", round("total_rev", 2))
)
display(chain_df)
# COMMAND ----------
# MAGIC %md **5.1: CHECK YOUR WORK**
# COMMAND ----------
expected5 = [(78800000.3, 983.29), (47218429.0, 1086.83), (24797837.0, 1076.62)]
result5 = [(row.total_rev, row.avg_rev) for row in chain_df.collect()]
assert(expected5 == result5)
# COMMAND ----------
# MAGIC %md ### Clean up classroom
# COMMAND ----------
classroom_cleanup()
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
|
the-stack_0_26337
|
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# -*- coding: utf-8 -*-
from io import BytesIO
from time import sleep
from test_framework.messages import CTransaction, CTxIn, CTxOut, COIN, COutPoint
from test_framework.mininode import network_thread_start
from test_framework.islamic_digital_coin_node import islamic_digital_coinTestNode
from test_framework.script import CScript, OP_CHECKSIG
from test_framework.test_framework import islamic_digital_coinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
p2p_port,
bytes_to_hex_str,
set_node_times,
)
from decimal import Decimal
# filter utxos based on first 5 bytes of scriptPubKey
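# (the 10 hex characters below are expected to decode to the start of a P2CS script:
#  OP_DUP OP_HASH160 OP_ROT OP_IF OP_CHECKCOLDSTAKEVERIFY, i.e. opcodes 76 a9 7b 63 d1)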
def getDelegatedUtxos(utxos):
return [x for x in utxos if x["scriptPubKey"][:10] == '76a97b63d1']
class ISLAMIC_DIGITAL_COIN_ColdStakingTest(islamic_digital_coinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [['-nuparams=v5_shield:201']] * self.num_nodes
self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')
def setup_chain(self):
# Start with PoW cache: 200 blocks
self.log.info("Initializing test directory " + self.options.tmpdir)
self._initialize_chain()
self.enable_mocktime()
def init_test(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description)
self.DEFAULT_FEE = 0.05
# Setup the p2p connections and start up the network thread.
self.test_nodes = []
for i in range(self.num_nodes):
self.test_nodes.append(islamic_digital_coinTestNode())
self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))
network_thread_start() # Start up network handling in another thread
# Let the test nodes get in sync
for i in range(self.num_nodes):
self.test_nodes[i].wait_for_verack()
def setColdStakingEnforcement(self, fEnable=True):
sporkName = "SPORK_19_COLDSTAKING_MAINTENANCE"
# update spork 19 with node[0]
if fEnable:
self.log.info("Enabling cold staking with SPORK 19...")
res = self.deactivate_spork(0, sporkName)
else:
self.log.info("Disabling cold staking with SPORK 19...")
res = self.activate_spork(0, sporkName)
assert_equal(res, "success")
sleep(1)
# check that node[1] receives it
assert_equal(fEnable, not self.is_spork_active(1, sporkName))
self.log.info("done")
def isColdStakingEnforced(self):
# verify from node[1]
return not self.is_spork_active(1, "SPORK_19_COLDSTAKING_MAINTENANCE")
def run_test(self):
self.description = "Performs tests on the Cold Staking P2CS implementation"
self.init_test()
NUM_OF_INPUTS = 20
INPUT_VALUE = 249
# nodes[0] - coin-owner
# nodes[1] - cold-staker
# First put cold-staking in maintenance mode
self.setColdStakingEnforcement(False)
# double check
assert (not self.isColdStakingEnforced())
# 1) nodes[0] and nodes[2] mine 25 blocks each
# --------------------------------------------
print("*** 1 ***")
self.log.info("Mining 50 Blocks...")
for peer in [0, 2]:
for j in range(25):
self.mocktime = self.generate_pow(peer, self.mocktime)
self.sync_blocks()
# 2) node[1] sends his entire balance (50 mature rewards) to node[2]
# - node[2] stakes a block - node[1] locks the change
# - node[0] shields 250 coins (to be delegated later)
print("*** 2 ***")
self.log.info("Emptying node1 balance")
assert_equal(self.nodes[1].getbalance(), 50 * 250)
txid = self.nodes[1].sendtoaddress(self.nodes[2].getnewaddress(), (50 * 250 - 0.01))
assert (txid is not None)
self.sync_mempools()
self.mocktime = self.generate_pos(2, self.mocktime)
self.sync_blocks()
# lock the change output (so it's not used as stake input in generate_pos)
for x in self.nodes[1].listunspent():
assert (self.nodes[1].lockunspent(False, [{"txid": x['txid'], "vout": x['vout']}]))
# check that it cannot stake
sleep(1)
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
# create shielded balance for node 0
self.log.info("Shielding some coins for node0...")
self.nodes[0].shieldsendmany("from_transparent", [{"address": self.nodes[0].getnewshieldaddress(),
"amount": Decimal('250.00')}], 1)
self.sync_all()
for i in range(6):
self.mocktime = self.generate_pow(0, self.mocktime)
self.sync_blocks()
assert_equal(self.nodes[0].getshieldbalance(), 250)
        # 3) nodes[0] generates an owner address
# nodes[1] generates a cold-staking address.
# ---------------------------------------------
print("*** 3 ***")
owner_address = self.nodes[0].getnewaddress()
self.log.info("Owner Address: %s" % owner_address)
staker_address = self.nodes[1].getnewstakingaddress()
staker_privkey = self.nodes[1].dumpprivkey(staker_address)
self.log.info("Staking Address: %s" % staker_address)
# 4) Check enforcement.
# ---------------------
print("*** 4 ***")
        # Check that cold staking is not enforced yet (the maintenance SPORK 19 is active)
assert (not self.isColdStakingEnforced())
self.log.info("Creating a stake-delegation tx before cold staking enforcement...")
assert_raises_rpc_error(-4, "Failed to accept tx in the memory pool (reason: cold-stake-inactive (code 16))\nTransaction canceled.",
self.nodes[0].delegatestake, staker_address, INPUT_VALUE, owner_address,
False, False, False, True)
self.log.info("Good. Cold Staking NOT ACTIVE yet.")
# Enable via SPORK
self.setColdStakingEnforcement()
# double check
assert (self.isColdStakingEnforced())
# 5) nodes[0] delegates a number of inputs for nodes[1] to stake em.
# ------------------------------------------------------------------
print("*** 5 ***")
self.log.info("First check warning when using external addresses...")
assert_raises_rpc_error(-5, "Only the owner of the key to owneraddress will be allowed to spend these coins",
self.nodes[0].delegatestake, staker_address, INPUT_VALUE, "yCgCXC8N5VThhfiaVuKaNLkNnrWduzVnoT")
self.log.info("Good. Warning triggered.")
self.log.info("Now force the use of external address creating (but not sending) the delegation...")
res = self.nodes[0].rawdelegatestake(staker_address, INPUT_VALUE, "yCgCXC8N5VThhfiaVuKaNLkNnrWduzVnoT", True)
assert(res is not None and res != "")
self.log.info("Good. Warning NOT triggered.")
self.log.info("Now delegate with internal owner address..")
self.log.info("Try first with a value (0.99) below the threshold")
assert_raises_rpc_error(-8, "Invalid amount",
self.nodes[0].delegatestake, staker_address, 0.99, owner_address)
self.log.info("Nice. it was not possible.")
self.log.info("Then try (creating but not sending) with the threshold value (1.00)")
res = self.nodes[0].rawdelegatestake(staker_address, 1.00, owner_address)
assert(res is not None and res != "")
self.log.info("Good. Warning NOT triggered.")
self.log.info("Now creating %d real stake-delegation txes..." % NUM_OF_INPUTS)
for i in range(NUM_OF_INPUTS-1):
res = self.nodes[0].delegatestake(staker_address, INPUT_VALUE, owner_address)
assert(res != None and res["txid"] != None and res["txid"] != "")
assert_equal(res["owner_address"], owner_address)
assert_equal(res["staker_address"], staker_address)
# delegate the shielded balance
res = self.nodes[0].delegatestake(staker_address, INPUT_VALUE, owner_address, False, False, True)
assert (res != None and res["txid"] != None and res["txid"] != "")
assert_equal(res["owner_address"], owner_address)
assert_equal(res["staker_address"], staker_address)
fee = self.nodes[0].viewshieldtransaction(res["txid"])['fee']
# sync and mine 2 blocks
self.sync_mempools()
self.mocktime = self.generate_pos(2, self.mocktime)
self.sync_blocks()
self.log.info("%d Txes created." % NUM_OF_INPUTS)
# check balances:
self.expected_balance = NUM_OF_INPUTS * INPUT_VALUE
self.expected_immature_balance = 0
self.checkBalances()
# also shielded balance of node 0 (250 - 249 - fee)
assert_equal(self.nodes[0].getshieldbalance(), round(Decimal(1)-Decimal(fee), 8))
# 6) check that the owner (nodes[0]) can spend the coins.
# -------------------------------------------------------
print("*** 6 ***")
self.log.info("Spending back one of the delegated UTXOs...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
assert_equal(NUM_OF_INPUTS, len(delegated_utxos))
assert_equal(len(delegated_utxos), len(self.nodes[0].listcoldutxos()))
u = delegated_utxos[0]
txhash = self.spendUTXOwithNode(u, 0)
assert(txhash != None)
self.log.info("Good. Owner was able to spend - tx: %s" % str(txhash))
self.sync_mempools()
self.mocktime = self.generate_pos(2, self.mocktime)
self.sync_blocks()
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
# check balances after spend.
self.expected_balance -= float(u["amount"])
self.checkBalances()
self.log.info("Balances check out after spend")
assert_equal(NUM_OF_INPUTS-1, len(self.nodes[0].listcoldutxos()))
# 7) check that the staker CANNOT use the coins to stake yet.
# He needs to whitelist the owner first.
# -----------------------------------------------------------
print("*** 7 ***")
self.log.info("Trying to generate a cold-stake block before whitelisting the owner...")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
self.log.info("Nice. Cold staker was NOT able to create the block yet.")
self.log.info("Whitelisting the owner...")
ret = self.nodes[1].delegatoradd(owner_address)
assert(ret)
self.log.info("Delegator address %s whitelisted" % owner_address)
# 8) check that the staker CANNOT spend the coins.
# ------------------------------------------------
print("*** 8 ***")
self.log.info("Trying to spend one of the delegated UTXOs with the cold-staking key...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
assert_greater_than(len(delegated_utxos), 0)
u = delegated_utxos[0]
assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed (Script failed an OP_CHECKCOLDSTAKEVERIFY operation",
self.spendUTXOwithNode, u, 1)
self.log.info("Good. Cold staker was NOT able to spend (failed OP_CHECKCOLDSTAKEVERIFY)")
self.mocktime = self.generate_pos(2, self.mocktime)
self.sync_blocks()
# 9) check that the staker can use the coins to stake a block with internal miner.
# --------------------------------------------------------------------------------
print("*** 9 ***")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], NUM_OF_INPUTS-1)
self.log.info("Generating one valid cold-stake block...")
self.mocktime = self.generate_pos(1, self.mocktime)
self.log.info("New block created by cold-staking. Trying to submit...")
newblockhash = self.nodes[1].getbestblockhash()
self.log.info("Block %s submitted" % newblockhash)
# Verify that nodes[0] accepts it
self.sync_blocks()
assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())
assert_equal(newblockhash, self.nodes[0].getbestblockhash())
self.log.info("Great. Cold-staked block was accepted!")
# check balances after staked block.
self.expected_balance -= INPUT_VALUE
self.expected_immature_balance += (INPUT_VALUE + 250)
self.checkBalances()
self.log.info("Balances check out after staked block")
# 10) check that the staker can use the coins to stake a block with a rawtransaction.
# ----------------------------------------------------------------------------------
print("*** 10 ***")
self.log.info("Generating another valid cold-stake block...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, staker_privkey)
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
assert (ret is None)
self.log.info("Block %s submitted." % new_block.hash)
assert_equal(new_block.hash, self.nodes[1].getbestblockhash())
# Verify that nodes[0] accepts it
self.sync_blocks()
assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())
assert_equal(new_block.hash, self.nodes[0].getbestblockhash())
self.log.info("Great. Cold-staked block was accepted!")
self.mocktime += 60
set_node_times(self.nodes, self.mocktime)
# check balances after staked block.
self.expected_balance -= INPUT_VALUE
self.expected_immature_balance += (INPUT_VALUE + 250)
self.checkBalances()
self.log.info("Balances check out after staked block")
# 11) check that the staker cannot stake a block changing the coinstake scriptPubkey.
# ----------------------------------------------------------------------------------
print("*** 11 ***")
self.log.info("Generating one invalid cold-stake block (changing first coinstake output)...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block (with dummy key)
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, "")
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
self.log.info("Block %s submitted." % new_block.hash)
assert("rejected" in ret)
# Verify that nodes[0] rejects it
self.sync_blocks()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, new_block.hash)
self.log.info("Great. Malicious cold-staked block was NOT accepted!")
self.checkBalances()
self.log.info("Balances check out after (non) staked block")
# 12) neither adding different outputs to the coinstake.
# ------------------------------------------------------
print("*** 12 ***")
self.log.info("Generating another invalid cold-stake block (adding coinstake output)...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, staker_privkey)
# Add output (dummy key address) to coinstake (taking 100 IDC from the pot)
self.add_output_to_coinstake(new_block, 100)
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
self.log.info("Block %s submitted." % new_block.hash)
assert_equal(ret, "bad-p2cs-outs")
# Verify that nodes[0] rejects it
self.sync_blocks()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, new_block.hash)
self.log.info("Great. Malicious cold-staked block was NOT accepted!")
self.checkBalances()
self.log.info("Balances check out after (non) staked block")
# 13) Now node[0] gets mad and spends all the delegated coins, voiding the P2CS contracts.
# ----------------------------------------------------------------------------------------
self.log.info("Let's void the contracts.")
self.mocktime = self.generate_pos(2, self.mocktime)
self.sync_blocks()
print("*** 13 ***")
self.log.info("Cancel the stake delegation spending the delegated utxos...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
# remove one utxo to spend later
final_spend = delegated_utxos.pop()
txhash = self.spendUTXOsWithNode(delegated_utxos, 0)
assert(txhash != None)
self.log.info("Good. Owner was able to void the stake delegations - tx: %s" % str(txhash))
self.sync_blocks()
self.mocktime = self.generate_pos(2, self.mocktime)
self.sync_blocks()
        # disable cold staking again (activate maintenance SPORK 19) and check that the owner can still spend the last utxo
self.setColdStakingEnforcement(False)
assert (not self.isColdStakingEnforced())
txhash = self.spendUTXOsWithNode([final_spend], 0)
assert(txhash != None)
self.log.info("Good. Owner was able to void a stake delegation (with SPORK 17 disabled) - tx: %s" % str(txhash))
self.sync_mempools()
self.mocktime = self.generate_pos(2, self.mocktime)
self.sync_blocks()
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
# check balances after big spend.
self.expected_balance = 0
self.checkBalances()
self.log.info("Balances check out after the delegations have been voided.")
        # re-enable cold staking (deactivate maintenance SPORK 19)
self.setColdStakingEnforcement()
assert (self.isColdStakingEnforced())
# 14) check that coinstaker is empty and can no longer stake.
# -----------------------------------------------------------
print("*** 14 ***")
self.log.info("Trying to generate one cold-stake block again...")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
self.log.info("Cigar. Cold staker was NOT able to create any more blocks.")
# 15) check balances when mature.
# -----------------------------------------------------------
print("*** 15 ***")
self.log.info("Staking 100 blocks to mature the cold stakes...")
for i in range(2):
for peer in [0, 2]:
for j in range(25):
self.mocktime = self.generate_pos(peer, self.mocktime)
self.sync_blocks()
self.expected_balance = self.expected_immature_balance
self.expected_immature_balance = 0
self.checkBalances()
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
txhash = self.spendUTXOsWithNode(delegated_utxos, 0)
assert (txhash != None)
self.log.info("Good. Owner was able to spend the cold staked coins - tx: %s" % str(txhash))
self.sync_mempools()
self.mocktime = self.generate_pos(2, self.mocktime)
self.sync_blocks()
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
self.expected_balance = 0
self.checkBalances()
def checkBalances(self):
w_info = self.nodes[0].getwalletinfo()
assert_equal(self.nodes[0].getblockcount(), w_info['last_processed_block'])
self.log.info("OWNER - Delegated %f / Cold %f [%f / %f]" % (
float(w_info["delegated_balance"]), w_info["cold_staking_balance"],
float(w_info["immature_delegated_balance"]), w_info["immature_cold_staking_balance"]))
assert_equal(float(w_info["delegated_balance"]), self.expected_balance)
assert_equal(float(w_info["immature_delegated_balance"]), self.expected_immature_balance)
assert_equal(float(w_info["cold_staking_balance"]), 0)
w_info = self.nodes[1].getwalletinfo()
assert_equal(self.nodes[1].getblockcount(), w_info['last_processed_block'])
self.log.info("STAKER - Delegated %f / Cold %f [%f / %f]" % (
float(w_info["delegated_balance"]), w_info["cold_staking_balance"],
float(w_info["immature_delegated_balance"]), w_info["immature_cold_staking_balance"]))
assert_equal(float(w_info["delegated_balance"]), 0)
assert_equal(float(w_info["cold_staking_balance"]), self.expected_balance)
assert_equal(float(w_info["immature_cold_staking_balance"]), self.expected_immature_balance)
def spendUTXOwithNode(self, utxo, node_n):
new_addy = self.nodes[node_n].getnewaddress()
inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
out_amount = (float(utxo["amount"]) - self.DEFAULT_FEE)
outputs = {}
outputs[new_addy] = out_amount
spendingTx = self.nodes[node_n].createrawtransaction(inputs, outputs)
spendingTx_signed = self.nodes[node_n].signrawtransaction(spendingTx)
return self.nodes[node_n].sendrawtransaction(spendingTx_signed["hex"])
def spendUTXOsWithNode(self, utxos, node_n):
new_addy = self.nodes[node_n].getnewaddress()
inputs = []
outputs = {}
outputs[new_addy] = 0
for utxo in utxos:
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
outputs[new_addy] += float(utxo["amount"])
outputs[new_addy] -= self.DEFAULT_FEE
spendingTx = self.nodes[node_n].createrawtransaction(inputs, outputs)
spendingTx_signed = self.nodes[node_n].signrawtransaction(spendingTx)
return self.nodes[node_n].sendrawtransaction(spendingTx_signed["hex"])
def add_output_to_coinstake(self, block, value, peer=1):
coinstake = block.vtx[1]
if not hasattr(self, 'DUMMY_KEY'):
self.init_dummy_key()
coinstake.vout.append(
CTxOut(value * COIN, CScript([self.DUMMY_KEY.get_pubkey(), OP_CHECKSIG])))
coinstake.vout[1].nValue -= value * COIN
# re-sign coinstake
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(block.prevoutStake))
coinstake.vin[0] = CTxIn(prevout)
stake_tx_signed_raw_hex = self.nodes[peer].signrawtransaction(
bytes_to_hex_str(coinstake.serialize()))['hex']
block.vtx[1] = CTransaction()
block.vtx[1].from_hex(stake_tx_signed_raw_hex)
# re-sign block
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.re_sign_block()
if __name__ == '__main__':
ISLAMIC_DIGITAL_COIN_ColdStakingTest().main()
|
the-stack_0_26338
|
import argparse
import os
import SimpleITK as sitk
image = sitk.ReadImage('Data_folder/images/train/image0.nii')
image_spacing = image.GetSpacing()
class Options():
"""This class defines options used during both training and test time."""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
# basic parameters
parser.add_argument('--images_folder', type=str, default='./Data_folder/images')
parser.add_argument('--labels_folder', type=str, default='./Data_folder/labels')
parser.add_argument('--increase_factor_data', default=4, help='Increase data number per epoch')
parser.add_argument('--preload', type=str, default=None)
        parser.add_argument('--gpu_ids', type=str, default='2,3', help='gpu ids, e.g. "0", "0,1,2" or "0,2"; use -1 for CPU')
parser.add_argument('--workers', default=8, type=int, help='number of data loading workers')
# dataset parameters
parser.add_argument('--patch_size', default=(128, 128, 64), help='Size of the patches extracted from the image')
parser.add_argument('--spacing', default=image_spacing, help='Original Resolution')
parser.add_argument('--resolution', default=None, help='New Resolution, if you want to resample the data')
parser.add_argument('--batch_size', type=int, default=6, help='batch size')
parser.add_argument('--in_channels', default=1, type=int, help='Channels of the input')
parser.add_argument('--out_channels', default=1, type=int, help='Channels of the output')
# training parameters
parser.add_argument('--epochs', default=200, help='Number of epochs')
parser.add_argument('--lr', default=0.001, help='Learning rate')
# Inference
        # This is just a trick to make the predict script work
parser.add_argument('--result', default=None, help='Keep this empty and go to predict_single_image script')
parser.add_argument('--weights', default=None, help='Keep this empty and go to predict_single_image script')
self.initialized = True
return parser
def parse(self):
if not self.initialized:
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
opt = parser.parse_args()
# set gpu ids
if opt.gpu_ids != '-1':
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_ids
return opt
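# Usage sketch (assuming a typical training entry point; attribute names match the arguments above):
# opt = Options().parse()
# print(opt.patch_size, opt.batch_size, opt.gpu_ids)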
|
the-stack_0_26339
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to export object detection inference graph."""
import logging
import os
import tempfile
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import saver as saver_lib
from object_detection.builders import model_builder
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
slim = tf.contrib.slim
# TODO: Replace with freeze_graph.freeze_graph_with_def_protos when
# newer version of Tensorflow becomes more common.
def freeze_graph_with_def_protos(
input_graph_def,
input_saver_def,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
clear_devices,
initializer_nodes,
variable_names_blacklist=''):
"""Converts all variables in a graph and checkpoint into constants."""
del restore_op_name, filename_tensor_name # Unused by updated loading code.
# 'input_checkpoint' may be a prefix if we're using Saver V2 format
if not saver_lib.checkpoint_exists(input_checkpoint):
raise ValueError(
'Input checkpoint "' + input_checkpoint + '" does not exist!')
if not output_node_names:
raise ValueError(
'You must supply the name of a node to --output_node_names.')
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
if clear_devices:
for node in input_graph_def.node:
node.device = ''
with tf.Graph().as_default():
tf.import_graph_def(input_graph_def, name='')
config = tf.ConfigProto(graph_options=tf.GraphOptions())
with session.Session(config=config) as sess:
if input_saver_def:
saver = saver_lib.Saver(saver_def=input_saver_def)
saver.restore(sess, input_checkpoint)
else:
var_list = {}
reader = pywrap_tensorflow.NewCheckpointReader(
input_checkpoint)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
try:
tensor = sess.graph.get_tensor_by_name(key + ':0')
except KeyError:
# This tensor doesn't exist in the graph (for example it's
# 'global_step' or a similar housekeeping element) so skip it.
continue
var_list[key] = tensor
saver = saver_lib.Saver(var_list=var_list)
saver.restore(sess, input_checkpoint)
if initializer_nodes:
sess.run(initializer_nodes)
variable_names_blacklist = (variable_names_blacklist.split(',') if
variable_names_blacklist else None)
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_graph_def,
output_node_names.split(','),
variable_names_blacklist=variable_names_blacklist)
return output_graph_def
def replace_variable_values_with_moving_averages(graph,
current_checkpoint_file,
new_checkpoint_file):
"""Replaces variable values in the checkpoint with their moving averages.
If the current checkpoint has shadow variables maintaining moving averages of
the variables defined in the graph, this function generates a new checkpoint
where the variables contain the values of their moving averages.
Args:
graph: a tf.Graph object.
current_checkpoint_file: a checkpoint containing both original variables and
their moving averages.
new_checkpoint_file: file path to write a new checkpoint.
"""
with graph.as_default():
variable_averages = tf.train.ExponentialMovingAverage(0.0)
ema_variables_to_restore = variable_averages.variables_to_restore()
with tf.Session() as sess:
read_saver = tf.train.Saver(ema_variables_to_restore)
read_saver.restore(sess, current_checkpoint_file)
write_saver = tf.train.Saver()
write_saver.save(sess, new_checkpoint_file)
def _image_tensor_input_placeholder(input_shape=None):
"""Returns input placeholder and a 4-D uint8 image tensor."""
if input_shape is None:
input_shape = (None, None, None, 3)
input_tensor = tf.placeholder(
dtype=tf.uint8, shape=input_shape, name='image_tensor')
return input_tensor, input_tensor
def _tf_example_input_placeholder():
"""Returns input that accepts a batch of strings with tf examples.
Returns:
a tuple of input placeholder and the output decoded images.
"""
batch_tf_example_placeholder = tf.placeholder(
tf.string, shape=[None], name='tf_example')
def decode(tf_example_string_tensor):
tensor_dict = tf_example_decoder.TfExampleDecoder().decode(
tf_example_string_tensor)
image_tensor = tensor_dict[fields.InputDataFields.image]
return image_tensor
return (batch_tf_example_placeholder,
tf.map_fn(decode,
elems=batch_tf_example_placeholder,
dtype=tf.uint8,
parallel_iterations=32,
back_prop=False))
def _encoded_image_string_tensor_input_placeholder():
"""Returns input that accepts a batch of PNG or JPEG strings.
Returns:
a tuple of input placeholder and the output decoded images.
"""
batch_image_str_placeholder = tf.placeholder(
dtype=tf.string,
shape=[None],
name='encoded_image_string_tensor')
def decode(encoded_image_string_tensor):
image_tensor = tf.image.decode_image(encoded_image_string_tensor,
channels=3)
image_tensor.set_shape((None, None, 3))
return image_tensor
return (batch_image_str_placeholder,
tf.map_fn(
decode,
elems=batch_image_str_placeholder,
dtype=tf.uint8,
parallel_iterations=32,
back_prop=False))
input_placeholder_fn_map = {
'image_tensor': _image_tensor_input_placeholder,
'encoded_image_string_tensor':
_encoded_image_string_tensor_input_placeholder,
'tf_example': _tf_example_input_placeholder,
}
def _add_output_tensor_nodes(postprocessed_tensors,
output_collection_name='inference_op'):
"""Adds output nodes for detection boxes and scores.
Adds the following nodes for output tensors -
* num_detections: float32 tensor of shape [batch_size].
* detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4]
containing detected boxes.
* detection_scores: float32 tensor of shape [batch_size, num_boxes]
containing scores for the detected boxes.
* detection_classes: float32 tensor of shape [batch_size, num_boxes]
containing class predictions for the detected boxes.
* detection_masks: (Optional) float32 tensor of shape
[batch_size, num_boxes, mask_height, mask_width] containing masks for each
detection box.
Args:
postprocessed_tensors: a dictionary containing the following fields
'detection_boxes': [batch, max_detections, 4]
'detection_scores': [batch, max_detections]
'detection_classes': [batch, max_detections]
'detection_masks': [batch, max_detections, mask_height, mask_width]
(optional).
'num_detections': [batch]
output_collection_name: Name of collection to add output tensors to.
Returns:
A tensor dict containing the added output tensor nodes.
"""
detection_fields = fields.DetectionResultFields
label_id_offset = 1
boxes = postprocessed_tensors.get(detection_fields.detection_boxes)
scores = postprocessed_tensors.get(detection_fields.detection_scores)
classes = postprocessed_tensors.get(
detection_fields.detection_classes) + label_id_offset
masks = postprocessed_tensors.get(detection_fields.detection_masks)
num_detections = postprocessed_tensors.get(detection_fields.num_detections)
outputs = {}
outputs[detection_fields.detection_boxes] = tf.identity(
boxes, name=detection_fields.detection_boxes)
outputs[detection_fields.detection_scores] = tf.identity(
scores, name=detection_fields.detection_scores)
outputs[detection_fields.detection_classes] = tf.identity(
classes, name=detection_fields.detection_classes)
outputs[detection_fields.num_detections] = tf.identity(
num_detections, name=detection_fields.num_detections)
if masks is not None:
outputs[detection_fields.detection_masks] = tf.identity(
masks, name=detection_fields.detection_masks)
for output_key in outputs:
tf.add_to_collection(output_collection_name, outputs[output_key])
if masks is not None:
tf.add_to_collection(output_collection_name,
outputs[detection_fields.detection_masks])
return outputs
def _write_frozen_graph(frozen_graph_path, frozen_graph_def):
"""Writes frozen graph to disk.
Args:
frozen_graph_path: Path to write inference graph.
frozen_graph_def: tf.GraphDef holding frozen graph.
"""
with gfile.GFile(frozen_graph_path, 'wb') as f:
f.write(frozen_graph_def.SerializeToString())
logging.info('%d ops in the final graph.', len(frozen_graph_def.node))
def _write_saved_model(saved_model_path,
frozen_graph_def,
inputs,
outputs):
"""Writes SavedModel to disk.
If checkpoint_path is not None bakes the weights into the graph thereby
eliminating the need of checkpoint files during inference. If the model
was trained with moving averages, setting use_moving_averages to true
restores the moving averages, otherwise the original set of variables
is restored.
Args:
saved_model_path: Path to write SavedModel.
frozen_graph_def: tf.GraphDef holding frozen graph.
inputs: The input image tensor to use for detection.
outputs: A tensor dictionary containing the outputs of a DetectionModel.
"""
with tf.Graph().as_default():
with session.Session() as sess:
tf.import_graph_def(frozen_graph_def, name='')
builder = tf.saved_model.builder.SavedModelBuilder(
saved_model_path)
tensor_info_inputs = {
'inputs': tf.saved_model.utils.build_tensor_info(inputs)}
tensor_info_outputs = {}
for k, v in outputs.items():
tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(
v)
detection_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs=tensor_info_inputs,
outputs=tensor_info_outputs,
method_name=signature_constants.PREDICT_METHOD_NAME))
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
detection_signature,
},
)
builder.save()
def _write_graph_and_checkpoint(inference_graph_def,
model_path,
input_saver_def,
trained_checkpoint_prefix):
for node in inference_graph_def.node:
node.device = ''
with tf.Graph().as_default():
tf.import_graph_def(inference_graph_def, name='')
with session.Session() as sess:
saver = saver_lib.Saver(saver_def=input_saver_def,
save_relative_paths=True)
saver.restore(sess, trained_checkpoint_prefix)
saver.save(sess, model_path)
def _export_inference_graph(input_type,
detection_model,
use_moving_averages,
trained_checkpoint_prefix,
output_directory,
additional_output_tensor_names=None,
input_shape=None,
output_collection_name='inference_op',
graph_hook_fn=None):
"""Export helper."""
tf.gfile.MakeDirs(output_directory)
frozen_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
saved_model_path = os.path.join(output_directory, 'saved_model')
model_path = os.path.join(output_directory, 'model.ckpt')
if input_type not in input_placeholder_fn_map:
raise ValueError('Unknown input type: {}'.format(input_type))
placeholder_args = {}
if input_shape is not None:
if input_type != 'image_tensor':
raise ValueError('Can only specify input shape for `image_tensor` '
'inputs.')
placeholder_args['input_shape'] = input_shape
placeholder_tensor, input_tensors = input_placeholder_fn_map[input_type](
**placeholder_args)
inputs = tf.to_float(input_tensors)
preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs)
output_tensors = detection_model.predict(
preprocessed_inputs, true_image_shapes)
postprocessed_tensors = detection_model.postprocess(
output_tensors, true_image_shapes)
outputs = _add_output_tensor_nodes(postprocessed_tensors,
output_collection_name)
# Add global step to the graph.
slim.get_or_create_global_step()
if graph_hook_fn:
graph_hook_fn()
saver_kwargs = {}
if use_moving_averages:
# This check is to be compatible with both version of SaverDef.
if os.path.isfile(trained_checkpoint_prefix):
saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name
else:
temp_checkpoint_prefix = tempfile.mkdtemp()
replace_variable_values_with_moving_averages(
tf.get_default_graph(), trained_checkpoint_prefix,
temp_checkpoint_prefix)
checkpoint_to_use = temp_checkpoint_prefix
else:
checkpoint_to_use = trained_checkpoint_prefix
saver = tf.train.Saver(**saver_kwargs)
input_saver_def = saver.as_saver_def()
_write_graph_and_checkpoint(
inference_graph_def=tf.get_default_graph().as_graph_def(),
model_path=model_path,
input_saver_def=input_saver_def,
trained_checkpoint_prefix=checkpoint_to_use)
if additional_output_tensor_names is not None:
        output_node_names = ','.join(list(outputs.keys()) + additional_output_tensor_names)
else:
output_node_names = ','.join(outputs.keys())
frozen_graph_def = freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_to_use,
output_node_names=output_node_names,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
clear_devices=True,
initializer_nodes='')
_write_frozen_graph(frozen_graph_path, frozen_graph_def)
_write_saved_model(saved_model_path, frozen_graph_def,
placeholder_tensor, outputs)
def export_inference_graph(input_type,
pipeline_config,
trained_checkpoint_prefix,
output_directory,
input_shape=None,
output_collection_name='inference_op',
additional_output_tensor_names=None):
"""Exports inference graph for the model specified in the pipeline config.
Args:
input_type: Type of input for the graph. Can be one of [`image_tensor`,
`tf_example`].
pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
trained_checkpoint_prefix: Path to the trained checkpoint file.
output_directory: Path to write outputs.
input_shape: Sets a fixed shape for an `image_tensor` input. If not
specified, will default to [None, None, None, 3].
output_collection_name: Name of collection to add output tensors to.
If None, does not add output tensors to a collection.
additional_output_tensor_names: list of additional output
tensors to include in the frozen graph.
"""
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
_export_inference_graph(input_type, detection_model,
pipeline_config.eval_config.use_moving_averages,
trained_checkpoint_prefix,
output_directory, additional_output_tensor_names,
input_shape, output_collection_name,
graph_hook_fn=None)
pipeline_config.eval_config.use_moving_averages = False
config_text = text_format.MessageToString(pipeline_config)
with tf.gfile.Open(
os.path.join(output_directory, 'pipeline.config'), 'wb') as f:
f.write(config_text)
|
the-stack_0_26340
|
import numpy as np
import scipy.optimize as opt
from melp.taft.utils.cosmic import get_cosmic_data_from_file
def correct_z_two_event(detector, cosmic_station, **kwargs) -> np.array:
print("*Two event correction")
a, b = get_cosmic_data_from_file(kwargs.get("cosmic_file"), detector, cosmic_station, **kwargs)
print("\n -> Cosmic Muons: ", len(a))
args = np.zeros(kwargs["cosmic_n_modes"]*2) + 0.0001
popt, cov = opt.curve_fit(fit_func, b, a, p0=args, method="lm")
station_offset = 200000
if cosmic_station == 2:
station_offset += 100000
for row in range(len(detector.TileDetector.column_ids(0, station_offset))):
for column in range(len(detector.TileDetector.row_ids(0, station_offset))):
current_tile_id = detector.TileDetector.id_from_row_col(row=row, column=column, station_offset=station_offset)
timing_correction = calibration_correction_z((column, row), *popt)
timing_correction -= calibration_correction_z((0, 0), *popt)
detector.TileDetector.tile[current_tile_id].update_calibration(timing_correction)
return popt
# def fourier_func(x: float or list, c: list, s: list, n: int) -> float or list:
def fourier_func(x: float or list, *args) -> float or np.array:
result = np.zeros(x.shape)
n = int(len(args) / 2)
    c = args[0:n]
s = args[n:2 * n]
for i in range(1, n + 1):
# result += c[i - 1] * np.cos(x * i) + s[i - 1] * np.sin(x * i)
result = np.add(result, c[i - 1] * np.cos(x * i) + s[i - 1] * np.sin(x * i))
return result
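# Parameter packing assumed above: for n modes, args = (c_1, ..., c_n, s_1, ..., s_n) and
#   f(x) = sum_{i=1..n} [ c_i * cos(i*x) + s_i * sin(i*x) ]
# e.g. fourier_func(np.array([0.0]), 1.0, 0.5, 0.0, 0.0) gives array([1.5]) for n = 2 modes.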
# def calibration_correction(x: tuple, c_phi: list, s_phi: list, c_z: list, s_z: list, n: int) -> float or list:
def calibration_correction_both(x: tuple, *args) -> float or np.array:
# v_fourier_func = np.vectorize(fourier_func, excluded=['c', 's', 'n'])
z, phi = x
# print(phi, z)
n = int(len(args) / 2)
    result_tmp1 = fourier_func(2 * np.pi * (np.array(phi) / 56), *args[0:n])
result_tmp2 = fourier_func(np.pi * (np.array(z) / 52), *args[n:2 * n])
result = np.multiply(result_tmp1, result_tmp2)
# return result
return result
def calibration_correction_z(x: tuple, *args) -> float or np.array:
# v_fourier_func = np.vectorize(fourier_func, excluded=['c', 's', 'n'])
z, phi = x
# print(phi, z)
result = fourier_func(np.pi * (np.array(z) / 52), *args)
#result += args[0]*z
return result
def fit_func(x: tuple, *args) -> float or np.array:
#if len(args) % 4 != 0:
# raise ValueError
# z1, phi1, z2, phi2 = x
z2, phi2, z1, phi1 = x
return calibration_correction_z((z1, phi1), *args) - calibration_correction_z((z2, phi2), *args)
|
the-stack_0_26341
|
import os
import signal
import sys
import tempfile
import time
from contextlib import contextmanager
import pendulum
import yaml
from dagster import Shape, check, composite_solid, pipeline, solid
from dagster.core.host_representation import ExternalPipeline
from dagster.core.host_representation.origin import ExternalPipelineOrigin
from dagster.core.instance import DagsterInstance
from dagster.core.launcher import RunLauncher
from dagster.core.launcher.default_run_launcher import DefaultRunLauncher
from dagster.core.run_coordinator import RunCoordinator
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from dagster.core.telemetry import cleanup_telemetry_logger
from dagster.serdes import ConfigurableClass
from dagster.utils.error import serializable_error_info_from_exc_info
def step_output_event_filter(pipe_iterator):
for step_event in pipe_iterator:
if step_event.is_successful_output:
yield step_event
def nesting_composite_pipeline(depth, num_children, *args, **kwargs):
"""Creates a pipeline of nested composite solids up to "depth" layers, with a fan-out of
num_children at each layer.
Total number of solids will be num_children ^ depth
"""
@solid
def leaf_node(_):
return 1
def create_wrap(inner, name):
@composite_solid(name=name)
def wrap():
for i in range(num_children):
solid_alias = "%s_node_%d" % (name, i)
inner.alias(solid_alias)()
return wrap
@pipeline(*args, **kwargs)
def nested_pipeline():
comp_solid = create_wrap(leaf_node, "layer_%d" % depth)
for i in range(depth):
comp_solid = create_wrap(comp_solid, "layer_%d" % (depth - (i + 1)))
comp_solid.alias("outer")()
return nested_pipeline
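# Usage sketch (illustrative only): with depth=2 and num_children=2 the generated pipeline
# holds 2^2 = 4 leaf solids; extra args/kwargs (e.g. name=...) are forwarded to @pipeline.
# nested = nesting_composite_pipeline(2, 2, name="nested_demo")
# result = execute_pipeline(nested)  # execute_pipeline assumed importable from dagster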
@contextmanager
def environ(env):
"""Temporarily set environment variables inside the context manager and
fully restore previous environment afterwards
"""
previous_values = {key: os.getenv(key) for key in env}
for key, value in env.items():
if value is None:
if key in os.environ:
del os.environ[key]
else:
os.environ[key] = value
try:
yield
finally:
for key, value in previous_values.items():
if value is None:
if key in os.environ:
del os.environ[key]
else:
os.environ[key] = value
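# Usage sketch: temporarily override a variable inside a test; the previous value
# (or its absence) is restored when the block exits.
# with environ({"SOME_FLAG": "1"}):
#     assert os.getenv("SOME_FLAG") == "1"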
@contextmanager
def instance_for_test(overrides=None):
with tempfile.TemporaryDirectory() as temp_dir:
with instance_for_test_tempdir(temp_dir, overrides) as instance:
yield instance
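# Usage sketch (overrides follow the dagster.yaml schema, e.g. the telemetry block):
# with instance_for_test(overrides={"telemetry": {"enabled": False}}) as instance:
#     ...  # assertions against the ephemeral instance go here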
@contextmanager
def instance_for_test_tempdir(temp_dir, overrides=None):
# Write any overrides to disk and set DAGSTER_HOME so that they will still apply when
# DagsterInstance.get() is called from a different process
with environ({"DAGSTER_HOME": temp_dir}):
with open(os.path.join(temp_dir, "dagster.yaml"), "w") as fd:
yaml.dump(overrides, fd, default_flow_style=False)
with DagsterInstance.get() as instance:
try:
yield instance
except:
sys.stderr.write(
"Test raised an exception, attempting to clean up instance:"
+ serializable_error_info_from_exc_info(sys.exc_info()).to_string()
+ "\n"
)
raise
finally:
cleanup_test_instance(instance)
def cleanup_test_instance(instance):
check.inst_param(instance, "instance", DagsterInstance)
# To avoid filesystem contention when we close the temporary directory, wait for
# all runs to reach a terminal state, and close any subprocesses or threads
# that might be accessing the run history DB.
instance.run_launcher.join()
if isinstance(instance.run_launcher, DefaultRunLauncher):
instance.run_launcher.cleanup_managed_grpc_servers()
cleanup_telemetry_logger()
def create_run_for_test(
instance,
pipeline_name=None,
run_id=None,
run_config=None,
mode=None,
solids_to_execute=None,
step_keys_to_execute=None,
status=None,
tags=None,
root_run_id=None,
parent_run_id=None,
pipeline_snapshot=None,
execution_plan_snapshot=None,
parent_pipeline_snapshot=None,
external_pipeline_origin=None,
):
return instance.create_run(
pipeline_name,
run_id,
run_config,
mode,
solids_to_execute,
step_keys_to_execute,
status,
tags,
root_run_id,
parent_run_id,
pipeline_snapshot,
execution_plan_snapshot,
parent_pipeline_snapshot,
external_pipeline_origin=external_pipeline_origin,
)
def register_managed_run_for_test(
instance,
pipeline_name=None,
run_id=None,
run_config=None,
mode=None,
solids_to_execute=None,
step_keys_to_execute=None,
tags=None,
root_run_id=None,
parent_run_id=None,
pipeline_snapshot=None,
execution_plan_snapshot=None,
parent_pipeline_snapshot=None,
):
return instance.register_managed_run(
pipeline_name,
run_id,
run_config,
mode,
solids_to_execute,
step_keys_to_execute,
tags,
root_run_id,
parent_run_id,
pipeline_snapshot,
execution_plan_snapshot,
parent_pipeline_snapshot,
)
def poll_for_finished_run(instance, run_id, timeout=20):
total_time = 0
interval = 0.01
while True:
run = instance.get_run_by_id(run_id)
if run.is_finished:
return run
else:
time.sleep(interval)
total_time += interval
if total_time > timeout:
raise Exception("Timed out")
def poll_for_step_start(instance, run_id, timeout=30):
poll_for_event(instance, run_id, event_type="STEP_START", message=None, timeout=timeout)
def poll_for_event(instance, run_id, event_type, message, timeout=30):
total_time = 0
backoff = 0.01
while True:
time.sleep(backoff)
logs = instance.all_logs(run_id)
matching_events = [
log_record.dagster_event
for log_record in logs
if log_record.dagster_event.event_type_value == event_type
]
if matching_events:
if message is None:
return
for matching_message in (event.message for event in matching_events):
if message in matching_message:
return
total_time += backoff
backoff = backoff * 2
if total_time > timeout:
raise Exception("Timed out")
@contextmanager
def new_cwd(path):
old = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old)
def today_at_midnight(timezone_name=None):
now = pendulum.now(timezone_name)
return pendulum.create(now.year, now.month, now.day, tz=now.timezone.name)
class ExplodingRunLauncher(RunLauncher, ConfigurableClass):
def __init__(self, inst_data=None):
self._inst_data = inst_data
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {}
@staticmethod
def from_config_value(inst_data, config_value):
return ExplodingRunLauncher(inst_data=inst_data)
def launch_run(self, instance, run, external_pipeline):
raise NotImplementedError("The entire purpose of this is to throw on launch")
def join(self, timeout=30):
"""Nothing to join on since all executions are synchronous."""
def can_terminate(self, run_id):
return False
def terminate(self, run_id):
check.not_implemented("Termination not supported")
class MockedRunLauncher(RunLauncher, ConfigurableClass):
def __init__(self, inst_data=None):
self._inst_data = inst_data
self._queue = []
def launch_run(self, instance, run, external_pipeline):
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(run, "run", PipelineRun)
check.inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
check.invariant(run.status == PipelineRunStatus.STARTING)
self._queue.append(run)
return run
def queue(self):
return self._queue
@classmethod
def config_type(cls):
return Shape({})
@classmethod
def from_config_value(cls, inst_data, config_value):
return cls(inst_data=inst_data,)
@property
def inst_data(self):
return self._inst_data
def can_terminate(self, run_id):
return False
def terminate(self, run_id):
check.not_implemented("Termintation not supported")
class MockedRunCoordinator(RunCoordinator, ConfigurableClass):
def __init__(self, inst_data=None):
self._inst_data = inst_data
self._queue = []
def submit_run(self, pipeline_run, external_pipeline):
check.inst_param(pipeline_run, "run", PipelineRun)
check.opt_inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
check.inst(pipeline_run.external_pipeline_origin, ExternalPipelineOrigin)
self._queue.append(pipeline_run)
return pipeline_run
def queue(self):
return self._queue
@classmethod
def config_type(cls):
return Shape({})
@classmethod
def from_config_value(cls, inst_data, config_value):
return cls(inst_data=inst_data,)
@property
def inst_data(self):
return self._inst_data
def can_cancel_run(self, run_id):
check.not_implemented("Cancellation not supported")
def cancel_run(self, run_id):
check.not_implemented("Cancellation not supported")
def get_terminate_signal():
if sys.platform == "win32":
return signal.SIGTERM
return signal.SIGKILL
def get_crash_signals():
if sys.platform == "win32":
return [
get_terminate_signal()
] # Windows keeps resources open after termination in a way that messes up tests
else:
return [get_terminate_signal(), signal.SIGINT]
_mocked_system_timezone = {"timezone": None}
@contextmanager
def mock_system_timezone(override_timezone):
with pendulum.tz.LocalTimezone.test(pendulum.Timezone.load(override_timezone)):
try:
_mocked_system_timezone["timezone"] = override_timezone
yield
finally:
_mocked_system_timezone["timezone"] = None
def get_mocked_system_timezone():
return _mocked_system_timezone["timezone"]
|
the-stack_0_26344
|
"""Tests that envs clean up after themselves on agent exit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gym.spaces import Discrete
import atexit
import gym
import os
import subprocess
import tempfile
import time
import ray
from ray.tune import run_experiments
from ray.tune.registry import register_env
# Dummy command to run as a subprocess with a unique name
UNIQUE_CMD = "sleep {}".format(str(time.time()))
_, UNIQUE_FILE_0 = tempfile.mkstemp("test_env_with_subprocess")
_, UNIQUE_FILE_1 = tempfile.mkstemp("test_env_with_subprocess")
_, UNIQUE_FILE_2 = tempfile.mkstemp("test_env_with_subprocess")
_, UNIQUE_FILE_3 = tempfile.mkstemp("test_env_with_subprocess")
class EnvWithSubprocess(gym.Env):
"""Our env that spawns a subprocess."""
def __init__(self, config):
self.action_space = Discrete(2)
self.observation_space = Discrete(2)
# Subprocess that should be cleaned up
self.subproc = subprocess.Popen(UNIQUE_CMD.split(" "), shell=False)
self.config = config
# Exit handler should be called
if config.worker_index == 0:
atexit.register(lambda: os.unlink(UNIQUE_FILE_0))
else:
atexit.register(lambda: os.unlink(UNIQUE_FILE_1))
atexit.register(lambda: self.subproc.kill())
def close(self):
if self.config.worker_index == 0:
os.unlink(UNIQUE_FILE_2)
else:
os.unlink(UNIQUE_FILE_3)
def reset(self):
return 0
def step(self, action):
return 0, 0, True, {}
def leaked_processes():
"""Returns whether any subprocesses were leaked."""
result = subprocess.check_output(
"ps aux | grep '{}' | grep -v grep || true".format(UNIQUE_CMD),
shell=True)
return result
if __name__ == "__main__":
register_env("subproc", lambda config: EnvWithSubprocess(config))
ray.init()
assert os.path.exists(UNIQUE_FILE_0)
assert os.path.exists(UNIQUE_FILE_1)
assert not leaked_processes()
run_experiments({
"demo": {
"run": "PG",
"env": "subproc",
"num_samples": 1,
"config": {
"num_workers": 1,
},
"stop": {
"training_iteration": 1
},
},
})
time.sleep(5.0)
leaked = leaked_processes()
assert not leaked, "LEAKED PROCESSES: {}".format(leaked)
assert not os.path.exists(UNIQUE_FILE_0), "atexit handler not called"
assert not os.path.exists(UNIQUE_FILE_1), "atexit handler not called"
assert not os.path.exists(UNIQUE_FILE_2), "close not called"
assert not os.path.exists(UNIQUE_FILE_3), "close not called"
print("OK")
|
the-stack_0_26345
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0127_auto_20160419_1649'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='message_type',
field=models.PositiveIntegerField(db_index=True, choices=[(0, b'Added a card'), (1, b'Idolized a card'), (2, b'Rank Up'), (3, b'Ranked in event'), (4, b'Verified'), (5, b'Trivia'), (6, b'Custom')]),
preserve_default=True,
),
]
|