the-stack_0_6273
#!/usr/bin/env python3
# IMPORTS
# system
import os, sys, time  # os is used by create_file_dir below
from copy import copy
from collections import defaultdict
import pdb
# math
import numpy as np
from scipy.spatial.transform import Rotation as R
# ros
from utils import *
class RaptorLogger:
"""
This helper class writes to /reads from log files.
* save_elms is a class var that defines what variables will be in the log files. There are different ones for estimation, ground truth, ssp, and params
* to write, the user will pass in an object name and a dict with keys corresponding to the second element of each tuple in save_elms
* to read, the user gives the object name, and a dict is passed back
* params are treated slightly differently, with their own read/write functions
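* illustrative write-path usage (object name and state vector are placeholders, not from this file):
    logger = RaptorLogger(mode="write", names=["quad1"], base_path="./logs/run1")
    logger.write_data_to_log({'time': 0.1, 'state_est': state_vec_13}, name="quad1", mode="est")
    logger.close_files()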
"""
def __init__(self, mode="write", names=None, base_path="./", b_ssp=False):
self.names = names
self.base_path = base_path
self.b_ssp = b_ssp
self.save_elms = {}
self.log_data = defaultdict(dict)
self.fh = None
self.fn = None
self.prm_fn = self.base_path + '_prms.log'
self.save_elms['est'] = [('Time (s)', 'time', 1), # list of tuples ("HEADER STRING", "DICT KEY STRING", # OF VALUES (int))
('Ado State Est', 'state_est', 13),
('Ego State Est', 'ego_state_est', 13),
('3D Corner Est (X|Y|Z)', 'corners_3d_est', 8*3),
('Corner 2D Projections Est (r|c)', 'proj_corners_est', 8*2),
('Angled BB (r|c|w|h|ang_deg)', 'abb', 5),
('Image Segmentation Mode', 'im_seg_mode', 1)]
self.save_elms['gt'] = [('Time (s)', 'time', 1), # list of tuples ("HEADER STRING", "DICT KEY STRING", # OF VALUES (int))
('Ado State GT', 'state_gt', 13),
('Ego State GT', 'ego_state_gt', 13),
('3D Corner GT (X|Y|Z)', 'corners_3d_gt', 8*3),
('Corner 2D Projections GT (r|c)', 'proj_corners_gt', 8*2),
('Angled BB (r|c|w|h|ang_deg)', 'abb', 5),
('Image Segmentation Mode', 'im_seg_mode', 1)]
self.save_elms['ssp'] = [('Time (s)', 'time', 1), # list of tuples ("HEADER STRING", "DICT KEY STRING", # OF VALUES (int))
('Ado State GT', 'state_gt', 13),
('Ado State Est', 'state_est', 13),
('Ego State Est', 'ego_state_est', 13),
('Ego State GT', 'ego_state_gt', 13),
('3D Corner Est (X|Y|Z)', 'corners_3d_est', 8*3),
('3D Corner GT (X|Y|Z)', 'corners_3d_gt', 8*3),
('Corner 2D Projections Est (r|c)', 'proj_corners_est', 8*2),
('Corner 2D Projections GT (r|c)', 'proj_corners_gt', 8*2)]
if not b_ssp:
self.modes = ['est', 'gt']
else:
self.modes = ['ssp']
if mode=="read":
self.init_read()
elif mode=="write":
if names is None:
raise RuntimeError("Must provide list of names for tracked object")
self.init_write()
else:
raise RuntimeError("Unrecognized logging mode")
def init_write(self):
all_name_str = ''
for n in self.names:
all_name_str += (n + ' ')
all_name_str = all_name_str[:-1]
self.save_elms['prms'] = [('Camera Intrinsics (K)', 'K', 4),
('tf_cam_ego', 'tf_cam_ego', 16),
('Object BB Size (len|wid|hei|diam) [{}]'.format(all_name_str), '3d_bb_dims', 4*len(self.names))]
# create files and write headers
self.fh = defaultdict(dict)
for m in self.modes:
for n in self.names:
# Create logs
fn = self.base_path + '_' + n + '_'+ m + '.log'
self.create_file_dir(fn)
self.fh[m][n] = open(fn,'w+') # doing this here makes it appendable
save_el_shape = (len(self.save_elms[m]), len(self.save_elms[m][0]))
data_header = ", ".join(np.reshape([*zip(self.save_elms[m])], save_el_shape)[:,0].tolist())
np.savetxt(self.fh[m][n], X=[], header=data_header) # write header
def init_read(self):
self.save_elms['prms'] = [('Camera Intrinsics (K)', 'K', 4),
('tf_cam_ego', 'tf_cam_ego', 16),
('Object BB Size (len|wid|hei|diam) []', '3d_bb_dims', -1)]
self.read_params()
self.fn = defaultdict(dict)
for m in self.modes:
for n in self.names:
self.fn[m][n] = self.base_path + '_' + n + '_'+ m + '.log'
def write_params(self, param_data, mode='prms'):
# write header
self.create_file_dir(self.prm_fn)
prm_fh = open(self.prm_fn,'w+') # doing this here makes it appendable
save_el_shape = (len(self.save_elms[mode]), len(self.save_elms[mode][0]))
data_header = ", ".join(np.reshape([*zip(self.save_elms[mode])], save_el_shape)[:,0].tolist())
np.savetxt(prm_fh, X=[], header=data_header) # write header
# write body
save_el_shape = (len(self.save_elms[mode]), len(self.save_elms[mode][0]))
num_to_write = np.sum(np.reshape([*zip(self.save_elms[mode])], save_el_shape)[:,2].astype(int))
out = np.ones((1, num_to_write)) * 1e10
ind = 0
for i, (header_str, dict_str, count) in enumerate(self.save_elms[mode]):
if dict_str in param_data:
try:
out[0, ind:(ind + count)] = param_data[dict_str]
except:
print("issue with {}".format(dict_str))
pdb.set_trace()
ind += count
out[out>1e5] = np.nan
np.savetxt(prm_fh, X=out, fmt='%.6f') # write to file
prm_fh.close()
def read_params(self, log_type='prms'):
# get header
f = open(self.prm_fn)
header_str = f.readline()
self.log_data[log_type]['ado_names'] = header_str.split('[')[1].split(']')[0].split(' ')
self.names = self.log_data[log_type]['ado_names']
# Read rest of file
data = np.loadtxt(self.prm_fn)
f.close()
ind = 0
for i, (header_str, dict_str, count) in enumerate(self.save_elms[log_type]):
if len(data.shape) > 1:
self.log_data[log_type][dict_str] = data[:, ind:(ind + count)]
else:
self.log_data[log_type][dict_str] = data[ind:(ind + count)]
ind += count
if dict_str == 'K': # Turn camera intrinsics back into a matrix
K = np.eye(3)
K[0, 0] = self.log_data[log_type][dict_str][0]
K[1, 1] = self.log_data[log_type][dict_str][1]
K[0, 2] = self.log_data[log_type][dict_str][2]
K[1, 2] = self.log_data[log_type][dict_str][3]
self.log_data[log_type][dict_str] = K
elif dict_str == 'tf_cam_ego':
self.log_data[log_type][dict_str] = np.reshape(self.log_data[log_type][dict_str], (4, 4))
elif dict_str == '3d_bb_dims':
all_sizes = np.asarray(data[ind : ind + 4*len(self.log_data[log_type]['ado_names'])])
bb_3d_dict_all = {}
for k, name in enumerate(self.log_data[log_type]['ado_names']):
bb_3d_dict_all[name] = all_sizes[4*k : 4*k+4] # len|wid|hei|diam
self.log_data[log_type][dict_str] = bb_3d_dict_all
return self.log_data[log_type]
def write_data_to_log(self, data, name, mode):
""" mode can be est, gt, ssp"""
if (not self.b_ssp and mode not in ['est', 'gt']) or (self.b_ssp and mode != 'ssp'):
raise RuntimeError("Mode {} not recognized".format(mode))
save_el_shape = (len(self.save_elms[mode]), len(self.save_elms[mode][0]))
num_to_write = np.sum(np.reshape([*zip(self.save_elms[mode])], save_el_shape)[:,2].astype(int))
out = np.ones((1, num_to_write)) * 1e10
ind = 0
for i, (header_str, dict_str, count) in enumerate(self.save_elms[mode]):
if dict_str in data:
try:
out[0, ind:(ind + count)] = data[dict_str]
except:
print("issue with {}".format(dict_str))
pdb.set_trace()
ind += count
out[out>1e5] = np.nan
np.savetxt(self.fh[mode][name], X=out, fmt='%.6f') # write to file
def read_logs(self, name):
"""
Return a dict with keys being log type (est /gt /prms). Each of these is a dict with the various types of values in the log
"""
if self.names is None:
    self.read_params()  # recover the object names from the params log header
for log_type in self.fn:
if not log_type in self.save_elms:
print("Warning: we are are missing the log file for {}".format(log_type))
continue
ind = 0
data = np.loadtxt(self.fn[log_type][name])
for i, (header_str, dict_str, count) in enumerate(self.save_elms[log_type]):
if len(data.shape) > 1:
self.log_data[log_type][dict_str] = data[:, ind:(ind + count)]
else:
self.log_data[log_type][dict_str] = data[ind:(ind + count)]
ind += count
return self.log_data
def close_files(self):
for fh_key in self.fh:
if fh_key == 'prms':
self.fh[fh_key].close()
continue
for n in self.names:
self.fh[fh_key][n].close()
def create_file_dir(self, fn_with_dir):
path = "/".join(fn_with_dir.split("/")[:-1])
if not os.path.exists( path ):
os.makedirs( path )
the-stack_0_6274
#!/usr/bin/env python
#
# This software is Copyright (c) 2010-2016
# Adam Maxwell. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# - Neither the name of Adam Maxwell nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from optparse import OptionParser
import os
import sys
from subprocess import call as sync_task
from shutil import copy2 as copyfile
import plistlib
LAUNCHCTL_PATH = "/bin/launchctl"
SCRIPT_NAME = "texliveupdatecheck"
PLIST_NAME = "com.googlecode.mactlmgr.update_check.plist"
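# Illustrative invocations (the script name and source paths are assumptions, not part of this file;
# the flags come from the OptionParser setup at the bottom):
#   python install_agent.py --install -p /path/to/com.googlecode.mactlmgr.update_check.plist -s /path/to/texliveupdatecheck
#   python install_agent.py --remove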
def log_message(msg):
"""write a message to standard output"""
sys.stderr.write("%s: %s\n" % (os.path.basename(sys.argv[0]), msg))
def installed_plist_path():
"""absolute path to the installed plist for the process owner"""
plist_dir = "/Library/LaunchAgents" if os.geteuid() == 0 else os.path.expanduser("~/Library/LaunchAgents")
return os.path.join(plist_dir, PLIST_NAME)
def installed_script_path():
"""absolute path to the installed script for the process owner"""
script_dir = "/Library/Application Support/TeX Live Utility"
if os.geteuid() != 0:
script_dir = os.path.expanduser("~" + script_dir)
return os.path.join(script_dir, SCRIPT_NAME)
def unload_agent():
"""returns zero in case of success or if the plist does not exist"""
plist_path = installed_plist_path()
ret = 0
if os.path.exists(plist_path):
ret = sync_task([LAUNCHCTL_PATH, "unload", "-w", "-S", "Aqua", plist_path])
else:
log_message("nothing to unload")
if ret:
log_message("unable to unload agent %s" % (plist_path))
return ret
def load_agent():
"""returns zero if the plist was loaded, raises if it does not exist"""
plist_path = installed_plist_path()
assert os.path.exists(plist_path), "%s does not exist" % (plist_path)
ret = sync_task([LAUNCHCTL_PATH, "load", "-w", "-S", "Aqua", plist_path])
if ret:
log_message("unable to load agent %s" % (plist_path))
return ret
def uninstall_agent():
"""returns nonzero if the plist exists and could not be unloaded"""
plist_path = installed_plist_path()
ret = 0
# nonexistence is not a failure
if os.path.exists(plist_path):
try:
os.remove(plist_path)
except Exception as e:
log_message("ERROR: failed to remove %s" % (plist_path))
ret = 1
else:
log_message("nothing to remove")
return ret
def sync_agent_program_name():
"""ensure the launch agent plist has the current program name"""
plist_path = installed_plist_path()
exec_path = installed_script_path()
# mainly for the change from Python update checker to Obj-C
if os.path.exists(plist_path) and os.path.exists(exec_path):
unload_agent()
try:
# Now edit the plist in-memory so it points to the correct path,
# then save it out to the destination directory (avoids modifying
# the passed-in file).
with open(plist_path, "rb") as plfile:
plist = plistlib.load(plfile)
# rewrite entire array
plist["ProgramArguments"] = [exec_path]
with open(plist_path, "wb") as plfile:
plistlib.dump(plist, plfile, fmt=plistlib.FMT_XML)
except Exception as e:
log_message("ERROR: failed to regenerate launchd plist %s with exception %s" % (plist_path, e))
else:
load_agent()
def install_agent(source_path):
"""argument is absolute path to the source property list"""
plist_path = installed_plist_path()
plist_dir = os.path.dirname(plist_path)
ret = 0
if os.path.exists(plist_dir) == False:
try:
os.makedirs(plist_dir)
except Exception as e:
log_message("ERROR: failed to create %s" % (plist_dir))
ret = 1
if ret == 0:
assert os.path.isdir(plist_dir), "%s is not a directory" % (plist_dir)
try:
# Now edit the plist in-memory so it points to the correct path,
# then save it out to the destination directory (avoids modifying
# the passed-in file).
with open(source_path, "rb") as plfile:
plist = plistlib.load(plfile)
# rewrite entire array
plist["ProgramArguments"] = [installed_script_path()]
with open(plist_path, "wb") as plfile:
plistlib.dump(plist, plfile, fmt=plistlib.FMT_XML)
except Exception as e:
log_message("ERROR: failed to copy %s --> %s" % (source_path, plist_path))
ret = 1
return ret
def install_script(source_path):
"""argument is absolute path to the source script"""
script_path = installed_script_path()
script_dir = os.path.dirname(script_path)
ret = 0
if os.path.exists(script_dir) == False:
try:
os.makedirs(script_dir)
except Exception as e:
log_message("ERROR: failed to create %s" % (script_dir))
ret = 1
if ret == 0:
assert os.path.isdir(script_dir), "%s is not a directory" % (script_dir)
try:
copyfile(source_path, script_path)
except Exception as e:
log_message("ERROR: failed to copy %s --> %s" % (source_path, script_path))
ret = 1
return ret
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-i", "--install", help="install agent", action="store_true", dest="install", default=False)
parser.add_option("-r", "--remove", help="remove agent", action="store_true", dest="remove", default=False)
parser.add_option("-p", "--plist", help="path of property list to install", action="store", type="string", dest="source_plist")
parser.add_option("-s", "--script", help="path of script to install", action="store", type="string", dest="source_script")
(options, args) = parser.parse_args()
if options.install == options.remove:
if options.install == False:
parser.error("an action (install or remove) must be specified")
else:
parser.error("only one action may be specified")
if options.install:
if options.source_plist is None and options.source_script is None:
parser.error("at least one of option -p or -s is required")
# if os.path.isabs(options.source_plist) == False or os.path.isabs(options.source_script) == False:
# parser.error("path arguments must be absolute")
if options.source_plist and not os.path.isfile(options.source_plist):
parser.error("path arguments cannot point to a directory")
assert os.path.basename(options.source_plist) == PLIST_NAME, "incorrect plist name defined"
if options.source_script and not os.path.isfile(options.source_script):
parser.error("path arguments cannot point to a directory")
assert os.path.basename(options.source_script) == SCRIPT_NAME, "incorrect script name defined"
status = 0
if options.remove:
status += unload_agent()
status += uninstall_agent()
else:
assert options.install, "inconsistent option checking"
# unload a previous version before installing
if options.source_plist:
status += unload_agent()
if options.source_script:
status += install_script(options.source_script)
# if unloaded and we have a plist, now try to install and load it
if status == 0 and options.source_plist:
status = install_agent(options.source_plist)
if status == 0:
status = load_agent()
# in case the name of the script has changed; will also unload/reload
if 0 == status:
sync_agent_program_name()
exit(status)
the-stack_0_6277
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A module for monitoring various qiskit functionality"""
import sys
import time
def _text_checker(job, interval, _interval_set=False, quiet=False, output=sys.stdout):
"""A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
output (file): The file like object to write status messages to.
By default this is sys.stdout.
"""
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=output)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if job.queue_position() is None:
interval = 2
elif not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
# Adjust length of message so there are no artifacts
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=output)
prev_msg = msg
if not quiet:
print('', file=output)
def job_monitor(job, interval=None, quiet=False, output=sys.stdout):
"""Monitor the status of a IBMQJob instance.
Args:
job (BaseJob): Job to monitor.
interval (int): Time interval between status queries.
quiet (bool): If True, do not print status messages.
output (file): The file like object to write status messages to.
By default this is sys.stdout.
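Example (illustrative; assumes the job was obtained from a backend elsewhere):
    job = execute(circuit, backend)
    job_monitor(job, interval=2)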
"""
if interval is None:
_interval_set = False
interval = 5
else:
_interval_set = True
_text_checker(job, interval, _interval_set,
quiet=quiet, output=output)
the-stack_0_6278
#! /usr/bin/env python3
# coding=utf-8
# This code is licensed under a non-commercial license.
import os
import sys
import argparse
from tqdm import trange
from torchtext import data as torchtext_data
from torchtext import datasets
import torch
import torch.utils.data as data
from torchtext.vocab import Vectors, GloVe, CharNGram, FastText
from nltk.tokenize.treebank import TreebankWordDetokenizer
import torch
import torch.optim
import torch.nn.functional as F
import numpy as np
from IPython import embed
from operator import add
from run_gpt2 import top_k_logits
from style_utils import to_var
import copy
import pickle
from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
import torch.optim as optim
torch.manual_seed(0)
np.random.seed(0)
lab_root = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..')
sys.path.insert(1, lab_root)
from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer
from torch.autograd import Variable
tokenizer = GPT2Tokenizer.from_pretrained('/content/drive/MyDrive/passage_generation_testing/gpt2-medium')
model = GPT2LMHeadModel.from_pretrained('/content/drive/MyDrive/passage_generation_testing/gpt2-medium')
class ClassificationHead(torch.nn.Module):
""" Language Model Head for the transformer """
def __init__(self, class_size=5, embed_size=2048):
super(ClassificationHead, self).__init__()
self.class_size = class_size
self.embed_size = embed_size
# self.mlp1 = torch.nn.Linear(embed_size, embed_size)
# self.mlp2 = (torch.nn.Linear(embed_size, class_size))
self.mlp = (torch.nn.Linear(embed_size, class_size))
def forward(self, hidden_state):
# Truncated Language modeling logits (we remove the last token)
# h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
# lm_logits = F.relu(self.mlp1(hidden_state))
# lm_logits = self.mlp2(lm_logits)
lm_logits = self.mlp(hidden_state)
return lm_logits
class Discriminator(torch.nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.classifierhead = ClassificationHead()
self.model = model
self.spltoken = Variable(torch.randn(1, 1, 1024).type(torch.FloatTensor), requires_grad=True)
self.spltoken = self.spltoken.repeat(10, 1, 1)
self.spltoken = self.spltoken.cuda()
def train(self):
for param in self.model.parameters():
param.requires_grad = False
pass
def forward(self, x):
x = model.forward_embed(x)
x = torch.cat((x, self.spltoken), dim=1)
_, x = model.forward_transformer_embed(x, add_one=True)
x = self.classifierhead(x[-1][:, -1, :])
x = F.log_softmax(x, dim=-1)
return x
class Discriminator2(torch.nn.Module):
def __init__(self, class_size=5, embed_size=1024):
super(Discriminator2, self).__init__()
self.classifierhead = ClassificationHead(class_size=class_size, embed_size=embed_size)
self.model = model
self.embed_size = embed_size
def get_classifier(self):
return self.classifierhead
def train_custom(self):
for param in self.model.parameters():
param.requires_grad = False
pass
self.classifierhead.train()
def forward(self, x):
x = model.forward_embed(x)
hidden, x = model.forward_transformer_embed(x)
x = torch.sum(hidden, dim=1)
x = self.classifierhead(x)
x = F.log_softmax(x, dim=-1)
return x
class Discriminator2mean(torch.nn.Module):
def __init__(self, class_size=5, embed_size=1024):
super(Discriminator2mean, self).__init__()
self.classifierhead = ClassificationHead(class_size=class_size, embed_size=embed_size)
self.model = model
self.embed_size = embed_size
def get_classifier(self):
return self.classifierhead
def train_custom(self):
for param in self.model.parameters():
param.requires_grad = False
pass
self.classifierhead.train()
def forward(self, x):
mask_src = 1 - x.eq(0).unsqueeze(1).type(torch.FloatTensor).cuda().detach()
mask_src = mask_src.repeat(1, self.embed_size, 1)
x = model.forward_embed(x)
hidden, x = model.forward_transformer_embed(x)
# Hidden has shape batch_size x length x embed-dim
hidden = hidden.permute(0, 2, 1)
_, _, batch_length = hidden.shape
hidden = hidden * mask_src # / torch.sum(mask_src, dim=-1).unsqueeze(2).repeat(1, 1, batch_length)
#
hidden = hidden.permute(0, 2, 1)
x = torch.sum(hidden, dim=1)/(torch.sum(mask_src, dim=-1).detach() + 1e-10)
x = self.classifierhead(x)
x = F.log_softmax(x, dim=-1)
return x
class Dataset(data.Dataset):
def __init__(self, X, y):
"""Reads source and target sequences from txt files."""
self.X = X
self.y = y
def __len__(self):
return len(self.X)
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
d = {}
d['X'] = self.X[index]
d['y'] = self.y[index]
return d
def collate_fn(data):
def merge(sequences):
lengths = [len(seq) for seq in sequences]
padded_seqs = torch.zeros(len(sequences), max(lengths)).long().cuda() # padding index 0
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq[:end]
return padded_seqs, lengths
data.sort(key=lambda x: len(x["X"]), reverse=True) # sort by source seq
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
# input
x_batch, _ = merge(item_info['X'])
y_batch = item_info['y']
return x_batch, torch.tensor(y_batch, device='cuda', dtype=torch.long)
def train_epoch(data_loader, discriminator, device='cuda', args=None, epoch=1):
optimizer = optim.Adam(discriminator.parameters(), lr=0.0001)
discriminator.train_custom()
for batch_idx, (data, target) in enumerate(data_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = discriminator(data)
loss = F.nll_loss(output, target)
loss.backward(retain_graph=True)
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Relu Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(data_loader.dataset),
100. * batch_idx / len(data_loader), loss.item()))
def test_epoch(data_loader, discriminator, device='cuda', args=None):
discriminator.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in data_loader:
data, target = data.to(device), target.to(device)
output = discriminator(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(data_loader.dataset)
print('\nRelu Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(data_loader.dataset),
100. * correct / len(data_loader.dataset)))
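# Illustrative invocation (the script name is an assumption; the flags come from the argparse setup in main()):
#   python train_discriminator.py --dataset-label SST --epochs 10 --batch-size 64 --save-model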
def main():
parser = argparse.ArgumentParser(description='Train a discriminator on top of GPT-2 representations')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='Number of training epochs')
parser.add_argument('--save-model', action='store_true', help='whether to save the model')
parser.add_argument('--dataset-label', type=str, default='SST',choices=('SST', 'clickbait', 'toxic'))
args = parser.parse_args()
batch_size = args.batch_size
device = 'cuda'
# load sst
if args.dataset_label == 'SST':
text = torchtext_data.Field()
label = torchtext_data.Field(sequential=False)
train_data, val_data, test_data = datasets.SST.splits(text, label, fine_grained=True, train_subtrees=True,
# filter_pred=lambda ex: ex.label != 'neutral'
)
x = []
y = []
d = {"positive": 0, "negative": 1, "very positive": 2, "very negative": 3, "neutral": 4}
for i in range(len(train_data)):
seq = TreebankWordDetokenizer().detokenize(vars(train_data[i])["text"])
seq = tokenizer.encode(seq)
seq = torch.tensor(seq, device=device, dtype=torch.long)
x.append(seq)
y.append(d[vars(train_data[i])["label"]])
dataset = Dataset(x, y)
test_x = []
test_y = []
for i in range(len(test_data)):
seq = TreebankWordDetokenizer().detokenize(vars(test_data[i])["text"])
seq = tokenizer.encode(seq)
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
test_x.append(seq)
test_y.append(d[vars(test_data[i])["label"]])
test_dataset = Dataset(test_x, test_y)
discriminator = Discriminator2mean(class_size=5).to(device)
elif args.dataset_label == 'clickbait':
# data = pickle.load(open("/home/gilocal/lab/exp/language/datasets/clickbait/clickbait.p", "r"))
with open("datasets/clickbait/clickbait_train_prefix.txt") as f:
data = []
for d in f:
try:
data.append(eval(d))
except:
continue
x = []
y = []
for d in data:
try:
# seq = tokenizer.encode("Apple's iOS 9 'App thinning' feature will give your phone's storage a boost")
try:
seq = tokenizer.encode(d["text"])
except:
continue
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
x.append(seq)
y.append(d['label'])
except:
pass
dataset = Dataset(x, y)
train_size = int(0.9 * len(dataset))
test_size = len(dataset) - train_size
dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])
discriminator = Discriminator2mean(class_size=2).to(device)
elif args.dataset_label == 'toxic':
# data = pickle.load(open("/home/gilocal/lab/exp/language/datasets/clickbait/clickbait.p", "r"))
with open("datasets/toxic/toxic_train.txt") as f:
data = []
for d in f:
data.append(eval(d))
x = []
y = []
for d in data:
try:
# seq = tokenizer.encode("Apple's iOS 9 'App thinning' feature will give your phone's storage a boost")
seq = tokenizer.encode(d["text"])
device = 'cuda'
if(len(seq)<100):
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
else:
continue
x.append(seq)
y.append(int(np.sum(d['label'])>0))
except:
pass
dataset = Dataset(x, y)
print(dataset)
print(len(dataset))
train_size = int(0.9 * len(dataset))
test_size = len(dataset) - train_size
dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])
discriminator = Discriminator2mean(class_size=2).to(device)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True, collate_fn=collate_fn)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size, collate_fn=collate_fn)
for epoch in range(args.epochs):
train_epoch(discriminator=discriminator, data_loader=data_loader, args=args, device=device, epoch=epoch)
test_epoch(data_loader=test_loader, discriminator=discriminator, args=args)
seq = tokenizer.encode("This is incredible! I love it, this is the best chicken I have ever had.")
seq = torch.tensor([seq], device=device, dtype=torch.long)
print(discriminator(seq))
if (args.save_model):
torch.save(discriminator.state_dict(),
"discrim_models/{}_mean_lin_discriminator_{}.pt".format(args.dataset_label, epoch))
torch.save(discriminator.get_classifier().state_dict(),
"discrim_models/{}_classifierhead.pt".format(args.dataset_label))
seq = tokenizer.encode("This is incredible! I love it, this is the best chicken I have ever had.")
seq = torch.tensor([seq], device=device, dtype=torch.long)
print(discriminator(seq))
if __name__ == '__main__':
main()
the-stack_0_6280
"""Compute the largest double precision number that doesn't cause
exp/cosh/sinh to overflow.
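On IEEE-754 doubles the thresholds are approximately 709.78 for exp and roughly
710.48 for cosh/sinh (cosh(x) ~ exp(x)/2 for large x, which gains about ln 2).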
"""
import numpy as np
np.seterr(all='ignore')
def find_overflow(f, a, b):
# Start with a binary search
while True:
mid = 0.5*(a + b)
if f(mid) == np.inf:
b = mid
else:
a = mid
if abs(b - a) < 1e-12:
break
# Polish with a brute force search
while True:
b = np.nextafter(a, np.inf)
res = f(b)  # polish with the function under test, not exp
if res == np.inf:
return a
else:
a = b
def main():
a = find_overflow(np.exp, 709.7, 709.9)
print("a = {:.20g}, np.exp(a) = {}".format(a, np.exp(a)))
a = find_overflow(np.cosh, 710, 711)
print("a = {:.20g}, np.cosh(a) = {}".format(a, np.cosh(a)))
a = find_overflow(np.sinh, 710, 711)
print("a = {:.20g}, np.sinh(a) = {}".format(a, np.sinh(a)))
if __name__ == '__main__':
main()
the-stack_0_6281
# load extern modules
import gym
import csv
import time
import numpy as np
from stable_baselines.bench import Monitor
from supervisors.utils import distance_from_obstacles
from env.utils import obs_lidar_pseudo
import threading
from supervisors.cbf import initialize_gp_dynamics, predict_successor_state_gp, initialize_svm_prediction
from supervisors.cbf import initialize_svm_prediction_gp
from operator import itemgetter
from skopt.space import Space
from skopt.sampler import Grid
# from qpsolvers import solve_qp
class Supervisor(Monitor):
"""
A monitor wrapper for Gym environments to save collision events
Parameters:
env (gym.Env): The environment
filename (Optional[str]): the location to save a log file, can be None for no log
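Illustrative wrapping (environment id and argument values are assumptions; the remaining
keyword arguments of __init__ must be supplied as well):
    env = Supervisor(env=gym.make(...), filename="./logs/", safety_distance=0.25, visibility=1.0,
                     safe_info=True, safe_states=False, supervisor="logical", ...)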
"""
def __init__(self,
env: gym.Env,
filename: str,
safety_distance: float or None,
visibility: float,
safe_info: bool,
safe_states: bool,
supervisor: str,
coordinates_static_obstacles: np.array,
lidar_num_bins: int,
which_static: int,
record_svm_gp: bool,
cbf_quadratic: bool,
cbf_learn_online: bool,
rewards: np.array or None,
num_prior_data_cbf: str or None,
search_space: str or None,
scale_reward: bool):
super(Supervisor, self).__init__(env=env, filename=filename)
self.safety_distance = safety_distance
self.visibility = visibility
self.supervisor = supervisor
self.lidar_num_bins = lidar_num_bins
self.Supervised = 0
self.Intervention = 0
self.SafelyDone = 0
self.scale_reward = scale_reward
self.record_svm_gp = record_svm_gp
self.Crashed = 0
self.last_teacher_crash = 0
self.timeOut = 0
self.which_static = which_static
self.safe_states = safe_states
self.total_distance = 0
if supervisor == 'cbf-gp-logical' or supervisor == 'cbf-gp-svm':
if num_prior_data_cbf is None:
num_prior_data_cbf = '2k'
self.gp = initialize_gp_dynamics('2k')
if supervisor == 'cbf-gp-svm':
if num_prior_data_cbf is None:
num_prior_data_cbf = 100000
self.svm = initialize_svm_prediction_gp(num_prior_data=num_prior_data_cbf)
self.unsafe_threshold = 0.5
search_space = 'grid'
if supervisor == 'cbf-svm':
if num_prior_data_cbf is None:
num_prior_data_cbf = 2000
self.svm = initialize_svm_prediction(num_prior_data=num_prior_data_cbf)
self.unsafe_threshold = 0.6
if supervisor == 'cbf-gp-logical':
self.unsafe_threshold = 0.85
search_space = 'random'
self.cbf_quadratic = cbf_quadratic
if search_space == 'grid':
space = Space([(-1., 1.), (-1., 1.)])
grid = Grid(border="include", use_full_layout=False)
action_manipulated = grid.generate(space.dimensions, 160)
action_manipulated = np.array(action_manipulated)
action_manipulated2 = \
np.append(action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[:, 1] < -0.3), :],
action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] > 0.3), :],
axis=0)
action_manipulated2 = \
np.append(action_manipulated2,
action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] < -0.3), :],
axis=0)
action_manipulated2 = \
np.append(action_manipulated2,
action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[:, 1] > 0.3), :],
axis=0)
self.action_manipulated = action_manipulated2
if search_space == 'random':
self.action_manipulated = np.array([[-0.1, 0],
[0.1, 0],
[0, 0.1],
[0, -0.1],
[-0.25, 0],
[0.25, 0],
[0, 0.25],
[0, -0.25],
[-0.1, 0.1],
[0.1, 0.1],
[-0.1, -0.1],
[0.1, -0.1],
[-0.25, 0.25],
[0.25, 0.25],
[-0.25, -0.25],
[0.25, -0.25], ###############
[0.1, 0.05],
[0.05, 0.1],
[0.05, -0.1],
[-0.25, 0.1],
[0.25, 0.8],
[0.6, 0.25],
[0.3, -0.25],
[-0.1, 0.7],
[0.9, 0.1],
[-0.1, -1],
[1, -0.1],
[-0.2, 0.75],
[0.5, 0.5],
[-0.5, -0.5],
[0.75, 0],
[0.15, 0.05],
[0.6, 0.1],
[0.4, -0.1],
[-0.25, 0.15],
[0.25, 0.9],
[-0.35, 0.25],
[0.5, -0.25],
[-0.19, 0.19],
[1, 1],
[-1, -1],
[0, 1],
[-1, 0],
[0.2, 0.75],
[-0.8, 0],
[0, -0.58]])
self.cbf_learn_online = cbf_learn_online
self.TotalCollisions = []
self.filename = filename
self.observation_storage = []
self.safe_info = safe_info
self.csv_writer_lock = threading.Lock()
self.coordinates_static_obstacles = coordinates_static_obstacles
self.rewards = rewards
# reward shaping
if self.rewards is not None:
self.reward_reached_target = self.rewards[0]
self.reward_distance = self.rewards[1]
self.reward_crashed = self.rewards[2]
if self.supervisor == "logical":
self.reward_logical = self.rewards[3]
if self.supervisor == "cbf-gp-logical" or self.supervisor == "cbf-svm" or self.supervisor == "cbf-gp-svm":
self.reward_cbf = self.rewards[3]
# default reward settings
if self.rewards is None:
self.reward_reached_target = 150
self.reward_distance = 1.5
self.reward_crashed = 150
self.reward_cbf = 0.5
self.reward_logical = 100
# add extra static obstacles as boundary of the garden
self.extra_obstacles = np.array([[-2.5, -.15],
[-2.5, -.35],
[-2.5, -.55],
[-2.5, -.75],
[-2.5, -.95],
[-2.5, -1.15],
[-2.5, -1.35],
[-2.5, -1.55],
[-2.5, -1.75],
[-2.5, -1.95],
[-2.5, -2.15],
[-2.5, -2.35],
[-2.5, -2.55],
[-2.5, -2.75],
[-2.5, -2.95],
[-2.5, -3.15],
[-2.5, -3.35],
[2.55, -.25],
[2.55, -.35],
[2.55, -.55],
[2.55, -.75],
[2.55, -.95],
[2.55, -1.15],
[2.55, -1.35],
[2.55, -1.55],
[2.55, -1.75],
[2.55, -1.95],
[2.55, -2.15],
[2.55, -2.35],
[2.55, -2.55],
[2.55, -2.75],
[2.55, -2.95],
[2.55, -3.15],
[2.55, -3.35],
[-2.50, -0.15],
[-2.30, -0.15],
[-2.10, -0.15],
[-1.90, -0.15],
[-1.70, -0.15],
[-1.50, -0.15],
[-1.30, -0.15],
[-1.10, -0.15],
[-.90, -0.15],
[-.70, -0.15],
[-.5, -0.15],
[-.3, -0.15],
[-.1, -0.15],
[0.7, -0.15],
[0.9, -0.15],
[1.1, -0.15],
[1.3, -0.15],
[1.5, -0.15],
[1.7, -0.15],
[1.9, -0.15],
[2.1, -0.15],
[2.3, -0.15],
[2.5, -0.15],
[-2.40, -3.4],
[-2.20, -3.4],
[-2.00, -3.4],
[-1.90, -3.4],
[-1.70, -3.4],
[-1.50, -3.4],
[-1.30, -3.4],
[-1.10, -3.4],
[-.90, -3.4],
[-.70, -3.4],
[-.50, -3.4],
[-.3, -3.4],
[-.1, -3.4],
[0.1, -3.4],
[0.3, -3.4],
[0.5, -3.4],
[0.7, -3.4],
[0.9, -3.4],
[1.1, -3.4],
[1.3, -3.4],
[1.5, -3.4],
[1.7, -3.4],
[1.9, -3.4],
[2.1, -3.4],
[2.3, -3.4],
[2.5, -3.4],
])
# add extra obstacles as tress in the middle of the garden
self.extra_obstacles = np.concatenate((self.extra_obstacles, self.coordinates_static_obstacles))
def step(self, action):
"""
Get information for the next RL step
Parameters:
action (float, float): Action vector (speed)
Returns:
[float]: Observation vector
float: reward value
observation, reward, done, info
"""
reward = 0
# check if env needs reset
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
# pass proposed action to the CBF (if CBF is active) and return the manipulated or the proposed action
if self.supervisor == 'cbf-gp-logical' or self.supervisor == 'cbf-svm' or self.supervisor == 'cbf-gp-svm':
index = int(len(self.observation_storage) - 1)
old_observation = self.observation_storage[index]
if self.cbf_learn_online is True:
if len(self.observation_storage) > 2:
state_x = self.observation_storage[index - 1]
state_x = state_x[0:self.lidar_num_bins]
state_x = np.asarray(state_x)
action_x = self.observation_storage[index]
action_x = action_x[28:30]
action_x = np.asarray(action_x)
state_action_x = np.append(state_x, action_x)
state_action_x = state_action_x.reshape(1, -1)
state_y = self.observation_storage[index]
state_y = state_y[26]
if state_y > 0:
state_y = int(1)
else:
state_y = int(-1)
state_y = np.asarray(state_y)
state_y = state_y.reshape(1, -1)
self.svm.fit(state_action_x, state_y)
change_proposed_action = False
if self.supervisor == 'cbf-gp-svm' or self.supervisor == 'cbf-gp-logical':
successor_state_worst = predict_successor_state_gp(self.gp,
observation=old_observation[0:self.lidar_num_bins],
action=action)
if self.supervisor == 'cbf-gp-svm':
successor_state_worst = np.sort(successor_state_worst)
successor_state_worst = successor_state_worst.reshape(1, -1)
probability = self.svm.predict_proba(successor_state_worst)
unsafe_probability = probability[0, 1]
# print(unsafe_probability)
if unsafe_probability > self.unsafe_threshold:
change_proposed_action = True
if self.supervisor == 'cbf-gp-logical':
change_proposed_action = max(successor_state_worst[0]) > self.unsafe_threshold
if self.supervisor == 'cbf-svm':
# here we only run the SVM prediction
state_action = np.concatenate((old_observation[0:self.lidar_num_bins], action), axis=0)
state_action = state_action.reshape(1, -1)
probability = self.svm.predict_proba(state_action)
unsafe = probability[0, 1]
if unsafe > self.unsafe_threshold:
change_proposed_action = True
if change_proposed_action:
if self.supervisor == 'cbf-gp-logical' or self.supervisor == 'cbf-gp-svm':
successor_state_worst_manipulated = []
manipulated = predict_successor_state_gp(self.gp,
observation=old_observation[0:self.lidar_num_bins],
action=self.action_manipulated)
if self.supervisor == 'cbf-gp-logical':
successor_state_worst_manipulated = np.amax(manipulated, axis=1)
if self.supervisor == 'cbf-gp-svm':
manipulated = np.sort(manipulated, axis=1)
probability = self.svm.predict_proba(manipulated)
successor_state_worst_manipulated = probability[:, 1]
if not self.cbf_quadratic:
index = np.argmin(successor_state_worst_manipulated)
distance_org = np.sqrt((self.action_manipulated[index, 0] - action[0]) ** 2 +
(self.action_manipulated[index, 1] - action[1]) ** 2)
self.total_distance += distance_org
action = self.action_manipulated[index]
if self.scale_reward:
reward -= self.reward_cbf * (distance_org / 2.8)
self.total_distance += distance_org
if self.cbf_quadratic:
if int(sum(successor_state_worst_manipulated < self.unsafe_threshold)) > 0.1:
safety_actions = self.action_manipulated[successor_state_worst_manipulated < self.unsafe_threshold]
else:
safety_actions = self.action_manipulated
distance_org = np.sqrt((safety_actions[:, 0] - action[0]) ** 2 +
(safety_actions[:, 1] - action[1]) ** 2)
index = min(enumerate(distance_org), key=itemgetter(1))[0]
action = safety_actions[index]
if self.scale_reward:
reward -= self.reward_cbf * (distance_org[index] / 2.8)
self.total_distance += distance_org[index]
#if self.supervisor == 'cbf-svm':
# successor_state_unsafe_prob_manipulated = []
# for j in range(0, len(self.action_manipulated)):
# state_action = np.concatenate((old_observation[0:self.lidar_num_bins],
# self.action_manipulated[j]), axis=0)
# state_action = state_action.reshape(1, -1)
# probability = self.svm.predict_proba(state_action)
# unsafe = probability[0, 1]
# successor_state_unsafe_prob_manipulated.append(unsafe)
# index = np.argmin(successor_state_unsafe_prob_manipulated)
# action = self.action_manipulated[index]
self.last_teacher_crash = 1
self.Intervention += 1
self.Supervised = 1
if not self.scale_reward:
reward -= self.reward_cbf
# step env
observation, reward_unity, done, info = self.env.step(action)
# manipulate reward with distance to target. maximum distance is ~5.
reward -= (observation[0] / 5) * self.reward_distance
# save org ibm obs
org_observation = observation
# check if safely done
if done:
self.SafelyDone += 1
reward += self.reward_reached_target
# compute distances to obstacles
distance = distance_from_obstacles(self, observation)
# check if drone crashed
if not done:
if np.min(distance) <= .2:
self.Crashed += 1
reward -= self.reward_crashed
done = True
# check if logical supervisor is active
if not done:
if self.supervisor == 'logical':
if not self.record_svm_gp:
if np.min(distance) <= (self.safety_distance + np.random.rand(1) / 25):
self.Supervised += 1
self.Intervention += 1
done = True
reward -= self.reward_logical
# the following is used when recording data
if self.record_svm_gp:
if np.min(distance) <= 0.29 + np.random.rand(1) / 25:
self.Supervised += 1
self.Intervention += 1
done = True
reward -= self.reward_logical
else:
if np.min(distance) <= 0.35 + np.random.rand(1) / 25:
self.Supervised += 1
# append reward
self.rewards.append(reward)
# episode hit the 120-step limit without ending safely -> time out
if len(self.rewards) == 120:
if not done:
done = True
self.timeOut += 1
# transform observation to lidar like observation
observation = obs_lidar_pseudo(self, observation, lidar_num_bins=self.lidar_num_bins, lidar_max_dist=None,
lidar_alias=True, lidar_exp_gain=1.0, distance=distance)
# append supervisor and save current observation to the storage
# first self.lidar_num_bins entries of the observation storage correspond to the lidar
# the next entry is the supervised indicator
# the next entry is the done indicator
# the last 23 entries correspond to the original oracle observation by IBM
observation_storage = np.append(observation, self.Supervised)
observation_storage = np.append(observation_storage, self.which_static)
observation_storage = np.append(observation_storage, self.Intervention)
observation_storage = np.append(observation_storage, self.Crashed)
observation_storage = np.append(observation_storage, done)
observation_storage = np.append(observation_storage, action)
observation_storage = np.append(observation_storage, org_observation)
self.observation_storage.append(observation_storage)
# append the observations of time step t-1 and t-2 to the observation of time step t
# when resetting the env the observations of time step t-1 and t-2 are np.zeros()
# in time step 1 the observation of time step t-2 are np.zeros()
if len(self.rewards) == 1:
observation = np.concatenate((observation, self.observation_storage[0][0:(self.lidar_num_bins + 8)]),
axis=0)
observation_dummy = np.zeros(self.lidar_num_bins + 8)
observation = np.concatenate((observation, observation_dummy), axis=0)
if len(self.rewards) > 1:
observation = np.concatenate((observation, self.observation_storage[len(self.rewards) - 2
][0:(self.lidar_num_bins + 8)]), axis=0)
observation = np.concatenate((observation, self.observation_storage[len(self.rewards) - 1
][0:(self.lidar_num_bins + 8)]), axis=0)
if done:
self.needs_reset = True
ep_rew = sum(self.rewards)
eplen = len(self.rewards)
if self.Crashed > 0 and self.last_teacher_crash == 1:
teacher_failed = 1
else:
teacher_failed = 0
ep_info = [round(ep_rew, 6), eplen, self.Crashed, self.Supervised, self.Intervention, self.SafelyDone,
self.timeOut, round(time.time(), 6), teacher_failed, self.total_distance]
self.episode_rewards.append(ep_rew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.t_start)
info['episode'] = ep_info
if self.safe_states: # and self.Crashed > 0:####################################################
with open(self.filename + 'states.csv', 'a', newline='') as f:
writer = csv.writer(f)
with self.csv_writer_lock:
writer.writerows(self.observation_storage)
if self.safe_info:
with open(self.filename + 'monitor.csv', 'a', newline='') as f:
writer = csv.writer(f)
with self.csv_writer_lock:
writer.writerow(ep_info)
self.TotalCollisions.append(self.Supervised)
self.Supervised = 0
self.SafelyDone = 0
self.Intervention = 0
self.Crashed = 0
self.total_distance = 0
self.timeOut = 0
self.rewards = []
self.observation_storage = []
if not done:
if self.supervisor == 'logical' and self.record_svm_gp:
self.Supervised = 0
self.last_teacher_crash = 0
self.total_steps += 1
return observation, reward, done, info
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
org_observation = observation
distance = distance_from_obstacles(self, observation)
while np.min(distance) < .35:
observation = self.env.reset(**kwargs)
org_observation = observation
distance = distance_from_obstacles(self, observation)
if np.min(distance) < .35:
self.needs_reset = True
if np.min(distance) >= .35:
self.needs_reset = False
self.rewards = []
observation = obs_lidar_pseudo(self, observation, lidar_num_bins=self.lidar_num_bins, lidar_max_dist=None,
lidar_alias=True, lidar_exp_gain=1.0, distance=distance)
# append supervisor and save current observation to the storage
supervised = False
observation_storage = np.append(observation, supervised)
observation_storage = np.append(observation_storage, self.which_static)
intervention = np.array([0])
observation_storage = np.append(observation_storage, intervention)
crash_dummy = np.array([0])
observation_storage = np.append(observation_storage, crash_dummy)
done = False
observation_storage = np.append(observation_storage, done)
action_dummy = np.array([0, 0])
observation_storage = np.append(observation_storage, action_dummy)
observation_storage = np.append(observation_storage, org_observation)
self.observation_storage.append(observation_storage)
observation_dummy = np.zeros(self.lidar_num_bins + 8)
observation = np.concatenate((observation, observation_dummy), axis=0)
observation = np.concatenate((observation, observation_dummy), axis=0)
return observation
the-stack_0_6282
import matplotlib
matplotlib.use("Agg")
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adagrad
from keras.utils import np_utils
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from net import CancerNet  # the local module that defines the CancerNet architecture
from configuration import config
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import os
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--plot", type=str, default="plot.png",
help="path to output loss/accuracy plot")
args = vars(ap.parse_args())
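# Illustrative invocation (the script name is an assumption): python train_model.py --plot plot.png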
NUM_EPOCHS = 20
INIT_LR = 1e-2
BS = 64
trainPaths = list(paths.list_images(config.TRAIN_PATH))
totalTrain = len(trainPaths)
totalVal = len(list(paths.list_images(config.VAL_PATH)))
totalTest = len(list(paths.list_images(config.TEST_PATH)))
trainLabels = [int(p.split(os.path.sep)[-2]) for p in trainPaths]
trainLabels = np_utils.to_categorical(trainLabels)
classTotals = trainLabels.sum(axis=0)
classWeight = classTotals.max() / classTotals
trainAug = ImageDataGenerator(
rescale=1 / 255.0,
rotation_range=20,
zoom_range=0.05,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.05,
horizontal_flip=True,
vertical_flip=True,
fill_mode="nearest")
valAug = ImageDataGenerator(rescale=1 / 255.0)
trainGen = trainAug.flow_from_directory(
config.TRAIN_PATH,
class_mode="categorical",
target_size=(48, 48),
color_mode="rgb",
shuffle=True,
batch_size=BS)
valGen = valAug.flow_from_directory(
config.VAL_PATH,
class_mode="categorical",
target_size=(48, 48),
color_mode="rgb",
shuffle=False,
batch_size=BS)
testGen = valAug.flow_from_directory(
config.TEST_PATH,
class_mode="categorical",
target_size=(48, 48),
color_mode="rgb",
shuffle=False,
batch_size=BS)
model = CancerNet.build(width=48, height=48, depth=3,
classes=2)
opt = Adagrad(lr=INIT_LR, decay=INIT_LR / NUM_EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt,
metrics=["accuracy"])
H = model.fit_generator(
trainGen,
steps_per_epoch=totalTrain // BS,
validation_data=valGen,
validation_steps=totalVal // BS,
class_weight=classWeight,
epochs=NUM_EPOCHS)
print("[INFO] evaluating network...")
testGen.reset()
predIdxs = model.predict_generator(testGen,
steps=(totalTest // BS) + 1)
predIdxs = np.argmax(predIdxs, axis=1)
print(classification_report(testGen.classes, predIdxs,
target_names=testGen.class_indices.keys()))
cm = confusion_matrix(testGen.classes, predIdxs)
total = sum(sum(cm))
acc = (cm[0, 0] + cm[1, 1]) / total
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
print(cm)
print("acc: {:.4f}".format(acc))
print("sensitivity: {:.4f}".format(sensitivity))
print("specificity: {:.4f}".format(specificity))
model.save('cancer.model')
N = NUM_EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
the-stack_0_6283
#!/usr/bin/python
"""
Wi-Fi protocol definitions
currently supported packet types are listed below
Management
-Probe Request
-Probe Response
-Beacon
Control
-RTS
-CTS
-Block Acknowledgement
Data
-QoS Data
Also have Radiotap support
http://www.radiotap.org/defined-fields
"""
import ctypes
import struct
import logging
import operator
import collections
# wlan.fc.type
_CATEGORIES_ = {0: 'management', 1: 'control', 2: 'data'}
_SUBTYPES_ = {}
# wlan.fc.type_subtype
_SUBTYPES_[0] = {
0: 'Association Request',
1: 'Association Response',
2: 'Reassociation Request',
3: 'Reassociation Response',
4: 'Probe Request',
5: 'Probe Response',
8: 'Beacon',
9: 'ATIM',
10: 'Disassociation',
11: 'Authentication',
12: 'Deauthentication',
13: 'Action',
14: 'Action No ACK'
}
_SUBTYPES_[1] = {
5: 'VHT NDP Announcement',
7: 'Control Wrapper',
8: 'BAR',
9: 'BACK',
10: 'PS-POLL',
11: 'RTS',
12: 'CTS',
13: 'ACK',
14: 'CF-end',
15: 'CF-end + CF-ack'
}
_SUBTYPES_[2] = {
0: 'Data',
1: 'Data + CF-ack',
2: 'Data + CF-poll',
3: 'Data+CF-ack+CF-poll',
4: 'Null',
5: 'CF-ack',
6: 'CF-poll',
7: 'CF-ack+CF-poll',
8: 'QoS data',
9: 'QoS data + CF-ack',
10: 'QoS data + CF-poll',
11: 'QoS data + CF-ack + CF-poll',
12: 'QoS Null',
13: 'Reserved',
14: 'Qos + CF-poll(no data)',
15: 'Qos + CF-ack(no data)'
}
# wlan_mgt.tag
MNGMT_TAGS = {
0: "TAG_SSID",
1: "TAG_SUPP_RATES",
2: "TAG_FH_PARAMETER",
3: "TAG_DS_PARAMETER",
4: "TAG_CF_PARAMETER",
5: "TAG_TIM",
6: "TAG_IBSS_PARAMETER",
7: "TAG_COUNTRY_INFO",
8: "TAG_FH_HOPPING_PARAMETER",
9: "TAG_FH_HOPPING_TABLE",
10: "TAG_REQUEST",
11: "TAG_QBSS_LOAD",
12: "TAG_EDCA_PARAM_SET",
13: "TAG_TSPEC",
14: "TAG_TCLAS",
15: "TAG_SCHEDULE",
16: "TAG_CHALLENGE_TEXT",
32: "TAG_POWER_CONSTRAINT",
33: "TAG_POWER_CAPABILITY",
34: "TAG_TPC_REQUEST",
35: "TAG_TPC_REPORT",
36: "TAG_SUPPORTED_CHANNELS",
37: "TAG_CHANNEL_SWITCH_ANN",
38: "TAG_MEASURE_REQ",
39: "TAG_MEASURE_REP",
40: "TAG_QUIET",
41: "TAG_IBSS_DFS",
42: "TAG_ERP_INFO",
43: "TAG_TS_DELAY",
44: "TAG_TCLAS_PROCESS",
45: "TAG_HT_CAPABILITY",
46: "TAG_QOS_CAPABILITY",
47: "TAG_ERP_INFO_OLD",
48: "TAG_RSN_IE",
50: "TAG_EXT_SUPP_RATES",
51: "TAG_AP_CHANNEL_REPORT",
52: "TAG_NEIGHBOR_REPORT",
53: "TAG_RCPI",
54: "TAG_MOBILITY_DOMAIN",
55: "TAG_FAST_BSS_TRANSITION",
56: "TAG_TIMEOUT_INTERVAL",
57: "TAG_RIC_DATA",
58: "TAG_DSE_REG_LOCATION",
59: "TAG_SUPPORTED_OPERATING_CLASSES",
60: "TAG_EXTENDED_CHANNEL_SWITCH_ANNOUNCEMENT",
61: "TAG_HT_INFO",
62: "TAG_SECONDARY_CHANNEL_OFFSET",
63: "TAG_BSS_AVG_ACCESS_DELAY",
64: "TAG_ANTENNA",
65: "TAG_RSNI",
66: "TAG_MEASURE_PILOT_TRANS",
67: "TAG_BSS_AVB_ADM_CAPACITY",
68: "TAG_BSS_AC_ACCESS_DELAY",
69: "TAG_TIME_ADV",
70: "TAG_RM_ENABLED_CAPABILITY",
71: "TAG_MULTIPLE_BSSID",
72: "TAG_20_40_BSS_CO_EX",
73: "TAG_20_40_BSS_INTOL_CH_REP",
74: "TAG_OVERLAP_BSS_SCAN_PAR",
75: "TAG_RIC_DESCRIPTOR",
76: "TAG_MMIE",
78: "TAG_EVENT_REQUEST",
79: "TAG_EVENT_REPORT",
80: "TAG_DIAGNOSTIC_REQUEST",
81: "TAG_DIAGNOSTIC_REPORT",
82: "TAG_LOCATION_PARAMETERS",
83: "TAG_NO_BSSID_CAPABILITY",
84: "TAG_SSID_LIST",
85: "TAG_MULTIPLE_BSSID_INDEX",
86: "TAG_FMS_DESCRIPTOR",
87: "TAG_FMS_REQUEST",
88: "TAG_FMS_RESPONSE",
89: "TAG_QOS_TRAFFIC_CAPABILITY",
90: "TAG_BSS_MAX_IDLE_PERIOD",
91: "TAG_TFS_REQUEST",
92: "TAG_TFS_RESPONSE",
93: "TAG_WNM_SLEEP_MODE",
94: "TAG_TIM_BROADCAST_REQUEST",
95: "TAG_TIM_BROADCAST_RESPONSE",
96: "TAG_COLLOCATED_INTER_REPORT",
97: "TAG_CHANNEL_USAGE",
98: "TAG_TIME_ZONE",
99: "TAG_DMS_REQUEST",
100: "TAG_DMS_RESPONSE",
101: "TAG_LINK_IDENTIFIER",
102: "TAG_WAKEUP_SCHEDULE",
104: "TAG_CHANNEL_SWITCH_TIMING",
105: "TAG_PTI_CONTROL",
106: "TAG_PU_BUFFER_STATUS",
107: "TAG_INTERWORKING",
108: "TAG_ADVERTISEMENT_PROTOCOL",
109: "TAG_EXPIDITED_BANDWIDTH_REQ",
110: "TAG_QOS_MAP_SET",
111: "TAG_ROAMING_CONSORTIUM",
112: "TAG_EMERGENCY_ALERT_ID",
113: "TAG_MESH_CONFIGURATION",
114: "TAG_MESH_ID",
115: "TAG_MESH_LINK_METRIC_REPORT",
116: "TAG_CONGESTION_NOTIFICATION",
117: "TAG_MESH_PEERING_MGMT",
118: "TAG_MESH_CHANNEL_SWITCH",
119: "TAG_MESH_AWAKE_WINDOW",
120: "TAG_BEACON_TIMING",
121: "TAG_MCCAOP_SETUP_REQUEST",
122: "TAG_MCCAOP_SETUP_REPLY",
123: "TAG_MCCAOP_ADVERTISEMENT",
124: "TAG_MCCAOP_TEARDOWN",
125: "TAG_GANN",
126: "TAG_RANN",
127: "TAG_EXTENDED_CAPABILITIES",
128: "TAG_AGERE_PROPRIETARY",
130: "TAG_MESH_PREQ",
131: "TAG_MESH_PREP",
132: "TAG_MESH_PERR",
133: "TAG_CISCO_CCX1_CKIP",
136: "TAG_CISCO_CCX2",
137: "TAG_PXU",
138: "TAG_PXUC",
139: "TAG_AUTH_MESH_PEERING_EXCH",
140: "TAG_MIC",
141: "TAG_DESTINATION_URI",
142: "TAG_U_APSD_COEX",
143: "TAG_WAKEUP_SCHEDULE_AD",
144: "TAG_EXTENDED_SCHEDULE",
145: "TAG_STA_AVAILABILITY",
146: "TAG_DMG_TSPEC",
147: "TAG_NEXT_DMG_ATI",
148: "TAG_DMG_CAPABILITIES",
149: "TAG_CISCO_CCX3",
150: "TAG_CISCO_VENDOR_SPECIFIC",
151: "TAG_DMG_OPERATION",
152: "TAG_DMG_BSS_PRAMTER_CHANGE",
153: "TAG_DMG_BEAM_REFINEMENT",
154: "TAG_CHANNEL_MEASURMENT_FB",
157: "TAG_AWAKE_WINDOW",
158: "TAG_MULTI_BAND",
159: "TAG_ADDBA_EXT",
160: "TAG_NEXTPCP_LIST",
161: "TAG_PCP_HANDOVER",
162: "TAG_DMG_LINK_MARGIN",
163: "TAG_SWITCHING_STREAM",
164: "TAG_SESSION_TRANSMISSION",
165: "TAG_DYN_TONE_PAIR_REP",
166: "TAG_CLUSTER_REP",
167: "TAG_RELAY_CAPABILITIES",
168: "TAG_RELAY_TRANSFER_PARAM",
169: "TAG_BEAMLINK_MAINTAINCE",
170: "TAG_MULTIPLE_MAC_SUBLAYERS",
171: "TAG_U_PID",
172: "TAG_DMG_LINK_ADAPTION_ACK",
173: "TAG_SYMBOL_PROPRIETARY",
174: "TAG_MCCAOP_ADVERTISEMENT_OV",
175: "TAG_QUIET_PERIOD_REQ",
177: "TAG_QUIET_PERIOD_RES",
182: "TAG_ECPAC_POLICY",
183: "TAG_CLUSTER_TIME_OFFSET",
190: "TAG_ANTENNA_SECTOR_ID",
191: "TAG_VHT_CAPABILITY",
192: "TAG_VHT_OPERATION",
193: "TAG_EXT_BSS_LOAD",
194: "TAG_WIDE_BW_CHANNEL_SWITCH",
195: "TAG_VHT_TX_PWR_ENVELOPE",
196: "TAG_CHANNEL_SWITCH_WRAPPER",
199: "TAG_OPERATING_MODE_NOTIFICATION",
221: "TAG_VENDOR_SPECIFIC_IE"
}
def WIFI(frame, no_rtap=False):
"""calls wifi packet discriminator and constructor.
:frame: ctypes.Structure
:no_rtap: Bool
:return: packet object on success
:return: int
-1 on known error
:return: int
-2 on unknown error
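example (illustrative; frame is a raw capture buffer that still carries its radiotap header):
    pkt = WIFI(frame)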
"""
pack = None
try:
pack = WiHelper.get_wifi_packet(frame, no_rtap)
except Exception as e:
logging.exception(e)
return pack
class WiHelper:
"""Wi-Fi packet discriminator class.
Identifies type and subtype of packet, then trigs
packet object creation.
"""
@staticmethod
def get_wifi_packet(frame, no_rtap=False):
"""Discriminates Wi-Fi packet and creates
packet object.
:frame: ctypes.Structure
:no_rtap: Bool
:return: obj
Wi-Fi packet
"""
_, packet = WiHelper._strip_rtap(frame)
frame_control = struct.unpack('BB', packet[:2])
cat = (frame_control[0] >> 2) & 0b0011
s_type = frame_control[0] >> 4
if cat not in _CATEGORIES_.keys():
logging.warning("unknown category: %d" % (cat))
return Unknown(frame, no_rtap)
if s_type not in _SUBTYPES_[cat].keys():
logging.warning("unknown subtype %d in %s category" % (s_type, _CATEGORIES_[cat]))
return Unknown(frame, no_rtap)
if cat == 0:
if s_type == 4:
return ProbeReq(frame, no_rtap)
elif s_type == 5:
return ProbeResp(frame, no_rtap)
elif s_type == 8:
return Beacon(frame, no_rtap)
else:
return Management(frame, no_rtap)
elif cat == 1:
if s_type == 11:
return RTS(frame, no_rtap)
elif s_type == 12:
return CTS(frame, no_rtap)
elif s_type == 9:
return BACK(frame, no_rtap)
else:
return Control(frame, no_rtap)
elif cat == 2:
if s_type == 8:
return QosData(frame, no_rtap, parse_amsdu=True)
else:
return Data(frame, no_rtap)
@staticmethod
def _strip_rtap(frame):
"""strip injected radiotap header.
:return: ctypes.Structure
radiotap header
:return: ctypes.Structure
actual layer 2 Wi-Fi payload
"""
rtap_len = WiHelper.__get_rtap_len(frame)
rtap = frame[:rtap_len]
packet = frame[rtap_len:]
return rtap, packet
@staticmethod
def __get_rtap_len(frame):
"""parse length of radiotap header.
:packet: ctypes.structure
:return: int
"""
r_len = struct.unpack('H', frame[2:4])
return r_len[0]
class Radiotap(ctypes.Structure):
"""Radiotap Header Parser Class.
Radiotap headers summarize physical layer parameters
of Wi-Fi packet, such as MCS(modulation and coding scheme),
NSS(number of spatial streams), BW(bandwidth) for all common
protocol types(802.11a, 802.11n, 802.11ac etc.)
see -> http://www.radiotap.org/defined-fields
see -> https://github.com/boundary/wireshark/blob/master/epan/dissectors/packet-ieee80211-radiotap-defs.h
"""
_rfields_ = [('vers', ctypes.c_uint8),
('pad', ctypes.c_uint8),
('len', ctypes.c_uint16),
('present.tsft', ctypes.c_bool),
('present.flags', ctypes.c_bool),
('present.rate', ctypes.c_bool),
('present.channel', ctypes.c_bool),
('present.fhss', ctypes.c_bool),
('present.dbm_antsignal', ctypes.c_bool),
('present.dbm_antnoise', ctypes.c_bool),
('present.lock_quality', ctypes.c_bool),
('present.tx_attenuation', ctypes.c_bool),
('present.db_tx_attenuation', ctypes.c_bool),
('present.dbm_tx_power', ctypes.c_bool),
('present.antenna', ctypes.c_bool),
('present.db_antsignal', ctypes.c_bool),
('present.db_antnoise', ctypes.c_bool),
('present.rxflags', ctypes.c_bool),
('present.txflags', ctypes.c_bool),
('present.rts_retries', ctypes.c_bool),
('present.data_retries', ctypes.c_bool),
('present.xchannel', ctypes.c_bool),
('present.mcs', ctypes.c_bool),
('present.ampdu', ctypes.c_bool),
('present.vht', ctypes.c_bool),
('present.rtap_ns', ctypes.c_bool),
('present.ven_ns', ctypes.c_bool),
('present.ext', ctypes.c_bool),
('mactime', ctypes.c_uint64),
('flags.cfp', ctypes.c_bool),
('flags.preamble', ctypes.c_bool),
('flags.wep', ctypes.c_bool),
('flags.fragmentation', ctypes.c_bool),
('flags.fcs', ctypes.c_bool),
('flags.datapad', ctypes.c_bool),
('flags.badfcs', ctypes.c_bool),
('flags.shortgi', ctypes.c_bool),
('rate', ctypes.c_uint),
('chan.freq', ctypes.c_uint),
('chan.turbo', ctypes.c_bool),
('chan.cck', ctypes.c_bool),
('chan.ofdm', ctypes.c_bool),
('chan.two_g', ctypes.c_bool),
('chan.five_g', ctypes.c_bool),
('chan.passive', ctypes.c_bool),
('chan.dynamic', ctypes.c_bool),
('chan.gfsk', ctypes.c_bool),
('chan.gsm', ctypes.c_bool),
('chan.static_turbo', ctypes.c_bool),
('chan.half_rate', ctypes.c_bool),
('chan.quarter_rate', ctypes.c_bool),
('fhss.hopset', ctypes.c_int),
('fhss.pattern', ctypes.c_uint),
('dbm_antsignal', ctypes.c_uint),
('dbm_antnoise', ctypes.c_uint),
('lock_quality', ctypes.c_uint),
('tx_attenuation', ctypes.c_uint),
('db_tx_attenuation', ctypes.c_uint),
('dbm_tx_power', ctypes.c_uint),
('antenna', ctypes.c_uint),
('db_antsignal', ctypes.c_uint),
('db_antnoise', ctypes.c_uint),
('rxflags.reserved', ctypes.c_bool),
('rxflags.badplcp', ctypes.c_bool),
('txflags', ctypes.c_uint),
('rts_retries', ctypes.c_uint),
('data_retries', ctypes.c_uint),
('xchannel.freq', ctypes.c_uint),
('xchannel.channel', ctypes.c_uint),
('xchannel.max_power', ctypes.c_uint),
('xchannel.flags.turbo', ctypes.c_bool),
('xchannel.flags.cck', ctypes.c_bool),
('xchannel.flags.ofdm', ctypes.c_bool),
('xchannel.flags.two_g', ctypes.c_bool),
('xchannel.flags.five_g', ctypes.c_bool),
('xchannel.flags.passive', ctypes.c_bool),
('xchannel.flags.dynamic', ctypes.c_bool),
('xchannel.flags.gfsk', ctypes.c_bool),
('xchannel.flags.gsm', ctypes.c_bool),
('xchannel.flags.sturbo', ctypes.c_bool),
('xchannel.flags.half', ctypes.c_bool),
('xchannel.flags.quarter', ctypes.c_bool),
('xchannel.flags.ht_20', ctypes.c_bool),
('xchannel.flags.ht_40u', ctypes.c_bool),
('xchannel.flags.ht_40d', ctypes.c_bool),
('mcs.known', ctypes.c_uint8),
('mcs.index', ctypes.c_uint8),
('mcs.have_bw', ctypes.c_bool),
('mcs.have_mcs', ctypes.c_bool),
('mcs.have_gi', ctypes.c_bool),
('mcs.have_format', ctypes.c_bool),
('mcs.have_fec', ctypes.c_bool),
('mcs.have_stbc', ctypes.c_bool),
('mcs.have_ness', ctypes.c_bool),
('mcs.ness_bit1', ctypes.c_bool),
('ampdu.refnum', ctypes.c_uint),
('ampdu.crc_val', ctypes.c_uint8),
('ampdu.reserved', ctypes.c_uint8),
('ampdu.flags.report_zerolen', ctypes.c_bool),
('ampdu.flags.is_zerolen', ctypes.c_bool),
('ampdu.flags.lastknown', ctypes.c_bool),
('ampdu.flags.last', ctypes.c_bool),
('ampdu.flags.delim_crc_error', ctypes.c_bool),
('vht.known_bits', ctypes.c_char_p),
('vht.have_stbc', ctypes.c_bool),
('vht.have_txop_ps', ctypes.c_bool),
('vht.have_gi', ctypes.c_bool),
('vht.have_sgi_nsym_da', ctypes.c_bool),
('vht.have_ldpc_extra', ctypes.c_bool),
('vht.have_beamformed', ctypes.c_bool),
('vht.have_bw', ctypes.c_bool),
('vht.have_gid', ctypes.c_bool),
('vht.have_paid', ctypes.c_bool),
('vht.flag_bits', ctypes.c_bool),
('vht.stbc', ctypes.c_bool),
('vht.txop_ps', ctypes.c_bool),
('vht.gi', ctypes.c_bool),
('vht.sgi_nysm_da', ctypes.c_bool),
('vht.ldpc_extra', ctypes.c_bool),
('vht.beamformed', ctypes.c_bool),
('vht.group_id', ctypes.c_bool),
('vht.partial_id', ctypes.c_bool),
('vht.bw', ctypes.c_uint),
('vht.user_0.nss', ctypes.c_bool),
('vht.user_0.mcs', ctypes.c_bool),
('vht.user_0.coding', ctypes.c_bool),
('vht.user_1.nss', ctypes.c_bool),
('vht.user_1.mcs', ctypes.c_bool),
('vht.user_1.coding', ctypes.c_bool),
('vht.user_2.nss', ctypes.c_bool),
('vht.user_2.mcs', ctypes.c_bool),
('vht.user_2.coding', ctypes.c_bool),
('vht.user_3.nss', ctypes.c_bool),
('vht.user_3.mcs', ctypes.c_bool),
('vht.user_3.coding', ctypes.c_bool),
('prot_type', ctypes.c_char_p)]
# Wireshark syntax conjugates of fields in object
_r_shark_ = {'radiotap.version': 'vers',
'radiotap.pad': 'pad',
'radiotap.length': 'len',
'radiotap.present.tsft': 'present.tsft',
'radiotap.present.flags': 'present.flags',
'radiotap.present.rate': 'present.rate',
'radiotap.present.channel': 'present.channel',
'radiotap.present.fhss': 'present.fhss',
'radiotap.present.dbm_antsignal': 'present.dbm_antsignal',
'radiotap.present.dbm_antnoise': 'present.dbm_antnoise',
'radiotap.present.lock_quality': 'present.lock_quality',
'radiotap.present.tx_attenuation': 'present.tx_attenuation',
'radiotap.present.db_tx_attenuation': 'present.db_tx_attenuation',
'radiotap.present.dbm_tx_power': 'present.dbm_tx_power',
'radiotap.present.antenna': 'present.antenna',
'radiotap.present.db_antsignal': 'present.db_antsignal',
'radiotap.present.db_antnoise': 'present.db_antnoise',
'radiotap.present.rxflags': 'present.rxflags',
'radiotap.present.xchannel': 'present.xchannel',
'radiotap.present.mcs': 'present.mcs',
'radiotap.present.ampdu': 'present.ampdu',
'radiotap.present.vht': 'present.vht',
'radiotap.present.rtap_ns': 'present.rtap_ns',
'radiotap.present.vendor_ns': 'present.ven_ns',
'radiotap.present.ext': 'present.ext',
'radiotap.mactime': 'mactime',
'radiotap.flags.cfp': 'flags.cfp',
'radiotap.flags.preamble': 'flags.preamble',
'radiotap.flags.wep': 'flags.wep',
'radiotap.flags.frag': 'flags.fragmentation',
'radiotap.flags.fcs': 'flags.fcs',
'radiotap.flags.datapad': 'flags.datapad',
'radiotap.flags.badfcs': 'flags.badfcs',
'radiotap.flags.shortgi': 'flags.shortgi',
'radiotap.datarate': 'rate',
'radiotap.channel.freq': 'chan.freq',
'radiotap.channel.flags.turbo': 'chan.turbo',
'radiotap.channel.flags.cck': 'chan.cck',
'radiotap.channel.flags.ofdm': 'chan.ofdm',
'radiotap.channel.flags.2ghz': 'chan.two_g',
'radiotap.channel.flags.5ghz': 'chan.five_g',
'radiotap.channel.flags.passive': 'chan.passive',
'radiotap.channel.flags.dynamic': 'chan.dynamic',
'radiotap.channel.flags.gfsk': 'chan.gfsk',
'radiotap.channel.flags.gsm': 'chan.gsm',
'radiotap.channel.flags.sturbo': 'chan.static_turbo',
'radiotap.channel.flags.half': 'chan.half_rate',
'radiotap.channel.flags.quarter': 'chan.quarter_rate',
'radiotap.fhss.hopset': 'fhss.hopset',
'radiotap.fhss.pattern': 'fhss.pattern',
'radiotap.dbm_antsignal': 'dbm_antsignal',
'radiotap.dbm_antnoise': 'dbm_antnoise',
'radiotap.antenna': 'antenna',
'radiotap.db_antsignal': 'db_antsignal',
'radiotap.db_antnoise': 'db_antnoise',
'radiotap.rxflags.badplcp': 'rxflags.badplcp',
'radiotap.xchannel.freq': 'xchannel.freq',
'radiotap.xchannel.channel': 'xchannel.channel',
'radiotap.xchannel.flags.turbo': 'xchannel.flags.turbo',
'radiotap.xchannel.flags.cck': 'xchannel.flags.cck',
'radiotap.xchannel.flags.ofdm': 'xchannel.flags.ofdm',
'radiotap.xchannel.flags.2ghz': 'xchannel.flags.two_g',
'radiotap.xchannel.flags.5ghz': 'xchannel.flags.five_g',
'radiotap.xchannel.flags.passive': 'xchannel.flags.passive',
'radiotap.xchannel.flags.dynamic': 'xchannel.flags.dynamic',
'radiotap.xchannel.flags.gfsk': 'xchannel.flags.gfsk',
'radiotap.xchannel.flags.gsm': 'xchannel.flags.gsm',
'radiotap.xchannel.flags.sturbo': 'xchannel.flags.sturbo',
'radiotap.xchannel.flags.half': 'xchannel.flags.half',
'radiotap.xchannel.flags.quarter': 'xchannel.flags.quarter',
'radiotap.xchannel.flags.ht20': 'xchannel.flags.ht_20',
'radiotap.xchannel.flags.ht40u': 'xchannel.flags.ht_40u',
'radiotap.xchannel.flags.ht40d': 'xchannel.flags.ht_40d',
'radiotap.mcs.known': 'mcs.known',
'radiotap.mcs.index': 'mcs.index',
'radiotap.mcs.have_bw': 'mcs.have_bw',
'radiotap.mcs.have_gi': 'mcs.have_gi',
'radiotap.mcs.have_format': 'mcs.have_format',
'radiotap.mcs.have_fec': 'mcs.have_fec',
'radiotap.mcs.have_stbc': 'mcs.have_stbc',
'radiotap.mcs.have_ness': 'mcs.have_ness',
'radiotap.mcs.ness_bit1': 'mcs.ness_bit1',
'radiotap.ampdu.reference': 'ampdu.refnum',
'radiotap.ampdu.crc_val': 'ampdu.crc_val',
'radiotap.ampdu.reserved': 'ampdu.reserved',
'radiotap.ampdu.flags.report_zerolen': 'ampdu.flags.report_zerolen',
'radiotap.ampdu.flags.is_zerolen': 'ampdu.flags.is_zerolen',
'radiotap.ampdu.flags.lastknown': 'ampdu.flags.lastknown',
'radiotap.ampdu.flags.last': 'ampdu.flags.last',
'radiotap.ampdu.flags.delim_crc_error': 'ampdu.flags.delim_crc_error',
'radiotap.vht.have_stbc': 'vht.have_stbc',
'radiotap.vht.have_txop_ps': 'vht.have_txop_ps',
'radiotap.vht.have_gi': 'vht.have_gi',
'radiotap.vht.have_sgi_nsym_da': 'vht.have_sgi_nsym_da',
                 'radiotap.vht.have_ldpc_extra': 'vht.have_ldpc_extra',  # wireshark does not use the have_ prefix here,
                 'radiotap.vht.have_beamformed': 'vht.have_beamformed',  # which would collide with ldpc_extra below;
                 'radiotap.vht.have_bw': 'vht.have_bw',                  # we keep the radiotap naming instead.
'radiotap.vht.have_gid': 'vht.have_gid',
'radiotap.vht.have_paid': 'vht.have_paid',
'radiotap.vht.stbc': 'vht.stbc',
'radiotap.vht.txop_ps': 'vht.txop_ps',
'radiotap.vht.gi': 'vht.gi',
'radiotap.vht.sgi_nysm_da': 'vht.sgi_nysm_da',
'radiotap.vht.ldpc_extra': 'vht.ldpc_extra',
'radiotap.vht.beamformed': 'vht.beamformed',
'radiotap.vht.gid': 'vht.group_id',
'radiotap.vht.paid': 'vht.partial_id',
'radiotap.vht.bw': 'vht.bw',
'radiotap.vht.nss.0': 'vht.user_0.nss',
'radiotap.vht.mcs.0': 'vht.user_0.mcs',
'radiotap.vht.coding.0': 'vht.user_0.coding',
'radiotap.vht.nss.1': 'vht.user_1.nss',
'radiotap.vht.mcs.1': 'vht.user_1.mcs',
'radiotap.vht.coding.1': 'vht.user_1.coding',
'radiotap.vht.nss.2': 'vht.user_2.nss',
'radiotap.vht.mcs.2': 'vht.user_2.mcs',
'radiotap.vht.coding.2': 'vht.user_2.coding',
'radiotap.vht.nss.3': 'vht.user_3.nss',
'radiotap.vht.mcs.3': 'vht.user_3.mcs',
'radiotap.vht.coding.3': 'vht.user_3.coding'}
def __init__(self, rtap_bytes):
"""Constructor method.
:rtap_bytes: ctypes.Structure
"""
super(Radiotap, self).__init__()
self._raw = {} # contains raw bytes, for debugging purposes
self._bits = {} # contains bitstrings, for debugging purposes
idx = 0
self._rtap = rtap_bytes
# parse radiotap headers
self.vers = Radiotap.strip_vers(self._rtap[idx:idx + 1])
idx += 1
self.pad = Radiotap.strip_pad(self._rtap[idx:idx + 1])
idx += 1
self.len = Radiotap.strip_len(self._rtap[idx:idx + 2])
idx += 2
self.present, self.present_bits = Radiotap.strip_present(self._rtap[idx:idx + 4])
idx += 4
# parse radiotap payload
if self.present.tsft: # 8 byte
idx, self.mactime = self.strip_tsft(idx)
if self.present.flags: # 1 byte
idx, self.flags = self.strip_flags(idx)
if self.present.rate: # 1 byte
idx, self.rate = self.strip_rate(idx)
if self.present.channel: # 2 byte (align 2 byte)
idx, self.chan = self.strip_chan(idx)
if self.present.fhss: # 2 byte
idx, self.fhss = self.strip_fhss(idx)
if self.present.dbm_antsignal: # 1 byte
idx, self.dbm_antsignal = self.strip_dbm_antsignal(idx)
if self.present.dbm_antnoise: # 1 byte
idx, self.dbm_antnoise = self.strip_dbm_antnoise(idx)
if self.present.lock_quality: # 2 byte (align 2 byte)
idx, self.lock_quality = self.strip_lock_quality(idx)
        if self.present.tx_attenuation: # 2 byte (align 2 byte)
            idx, self.tx_attenuation = self.strip_tx_attenuation(idx)
        if self.present.db_tx_attenuation: # 2 byte (align 2 byte)
            idx, self.db_tx_attenuation = self.strip_db_tx_attenuation(idx)
if self.present.dbm_tx_power: # 1 byte (align 1 byte)
idx, self.dbm_tx_power = self.strip_dbm_tx_power(idx)
if self.present.antenna: # 1 byte
idx, self.antenna = self.strip_antenna(idx)
if self.present.db_antsignal: # 1 byte
idx, self.db_antsignal = self.strip_db_antsignal(idx)
if self.present.db_antnoise: # 1 byte
idx, self.db_antnoise = self.strip_db_antnoise(idx)
if self.present.rxflags: # 2 byte (align 2 byte)
idx, self.rxflags = self.strip_rx_flags(idx)
        if self.present.txflags: # 2 byte (align 2 byte)
idx, self.txflags = self.strip_tx_flags(idx)
if self.present.rts_retries: # 1 byte
idx, self.rts_retries = self.strip_rts_retries(idx)
if self.present.data_retries: # 1 byte
idx, self.data_retries = self.strip_data_retries(idx)
        if self.present.xchannel: # 8 byte (align 2 byte)
idx, self.xchannel = self.strip_xchannel(idx)
if self.present.mcs: # 3 byte (align 1 byte)
idx, self.mcs = self.strip_mcs(idx)
if self.present.ampdu: # 8 byte (align 4 byte)
idx, self.ampdu = self.strip_ampdu(idx)
if self.present.vht: # 12 byte (align 2 byte)
idx, self.vht = self.strip_vht(idx)
self.prot_type = self.extract_protocol()
@staticmethod
def strip_vers(payload):
"""strip(1 byte) radiotap.version
:payload: ctypes.Structure
:return: int
"""
return struct.unpack('B', payload)[0]
@staticmethod
def strip_pad(payload):
"""strip(1 byte) radiotap.pad
:payload: ctypes.Structure
:return: int
"""
return struct.unpack('B', payload)[0]
@staticmethod
def strip_len(payload):
"""strip(2 byte) radiotap.length
:payload: ctypes.Structure
:return: int
"""
return struct.unpack('H', payload)[0]
@staticmethod
def strip_present(payload):
"""strip(4 byte) radiotap.present. Those are flags that
identify existence of incoming radiotap meta-data.
        :payload: ctypes.Structure
:return: str
:return: namedtuple
"""
present = collections.namedtuple(
'present', ['tsft', 'flags', 'rate', 'channel', 'fhss',
'dbm_antsignal', 'dbm_antnoise', 'lock_quality',
'tx_attenuation', 'db_tx_attenuation', 'dbm_tx_power',
'antenna', 'db_antsignal', 'db_antnoise', 'rxflags',
'txflags', 'rts_retries', 'data_retries', 'xchannel',
'mcs', 'ampdu', 'vht', 'rtap_ns', 'ven_ns', 'ext'])
val = struct.unpack('<L', payload)[0]
bits = format(val, '032b')[::-1]
present.tsft = int(bits[0]) # timer synchronization function
present.flags = int(bits[1]) # flags
present.rate = int(bits[2]) # rate
present.channel = int(bits[3]) # channel
        present.fhss = int(bits[4]) # frequency hopping spread spectrum
present.dbm_antsignal = int(bits[5]) # dbm antenna signal
        present.dbm_antnoise = int(bits[6]) # dbm antenna noise
present.lock_quality = int(bits[7]) # quality of barker code lock
present.tx_attenuation = int(bits[8]) # transmitter attenuation
present.db_tx_attenuation = int(bits[9]) # decibel transmit attenuation
present.dbm_tx_power = int(bits[10]) # dbm transmit power
present.antenna = int(bits[11]) # antenna
present.db_antsignal = int(bits[12]) # db antenna signal
present.db_antnoise = int(bits[13]) # db antenna noise
present.rxflags = int(bits[14]) # receiver flags
present.txflags = int(bits[15]) # transmitter flags
present.rts_retries = int(bits[16]) # rts(request to send) retries
present.data_retries = int(bits[17]) # data retries
present.xchannel = int(bits[18]) # xchannel
present.mcs = int(bits[19]) # modulation and coding scheme
present.ampdu = int(bits[20]) # aggregated mac protocol data unit
present.vht = int(bits[21]) # very high throughput
present.rtap_ns = int(bits[29]) # radiotap namespace
present.ven_ns = int(bits[30]) # vendor namespace
present.ext = int(bits[31]) # extension
return present, bits
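    # Worked example (hypothetical input): a common present mask is 0x0000482e,
    # i.e. payload b'\x2e\x48\x00\x00' in little-endian order. Reversing its bit
    # string marks flags, rate, channel, dbm_antsignal, antenna and rxflags as
    # present while tsft stays 0 -- the assignments above just index that reversed string.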
def strip_tsft(self, idx):
"""strip(8 byte) radiotap.mactime
:idx: int
:return: int
idx
:return: int
mactime
"""
idx = Radiotap.align(idx, 8)
mactime, = struct.unpack_from('<Q', self._rtap, idx)
return idx + 8, mactime
def strip_flags(self, idx):
"""strip(1 byte) radiotap.flags
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
flags = collections.namedtuple(
'flags', ['cfp', 'preamble', 'wep', 'fragmentation', 'fcs',
'datapad', 'badfcs', 'shortgi'])
val, = struct.unpack_from('<B', self._rtap, idx)
bits = format(val, '08b')[::-1]
flags.cfp = int(bits[0])
flags.preamble = int(bits[1])
flags.wep = int(bits[2])
flags.fragmentation = int(bits[3])
flags.fcs = int(bits[4])
flags.datapad = int(bits[5])
flags.badfcs = int(bits[6])
flags.shortgi = int(bits[7])
return idx + 1, flags
def strip_rate(self, idx):
"""strip(1 byte) radiotap.datarate
        note that the unit of this field is originally 0.5 Mbps
:idx: int
:return: int
idx
:return: double
rate in terms of Mbps
"""
val, = struct.unpack_from('<B', self._rtap, idx)
rate_unit = float(1) / 2 # Mbps
return idx + 1, rate_unit * val
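    # Example: a raw rate byte of 108 (0x6c) yields 108 * 0.5 = 54.0 Mbps and a
    # value of 11 yields 5.5 Mbps -- the field counts in 500 kbps units.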
def strip_chan(self, idx):
"""strip(2 byte) radiotap.channel.flags
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
chan = collections.namedtuple(
'chan', ['freq', 'turbo', 'cck', 'ofdm', 'two_g', 'five_g',
'passive', 'dynamic', 'gfsk', 'gsm', 'static_turbo',
'half_rate', 'quarter_rate'])
idx = Radiotap.align(idx, 2)
freq, flags, = struct.unpack_from('<HH', self._rtap, idx)
chan.freq = freq
bits = format(flags, '016b')[::-1]
chan.turbo = int(bits[4])
chan.cck = int(bits[5])
chan.ofdm = int(bits[6])
chan.two_g = int(bits[7])
chan.five_g = int(bits[8])
chan.passive = int(bits[9])
chan.dynamic = int(bits[10])
chan.gfsk = int(bits[11])
chan.gsm = int(bits[12])
chan.static_turbo = int(bits[13])
chan.half_rate = int(bits[14])
chan.quarter_rate = int(bits[15])
return idx + 4, chan
def strip_fhss(self, idx):
"""strip (2 byte) radiotap.fhss.hopset(1 byte) and
radiotap.fhss.pattern(1 byte)
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
fhss = collections.namedtuple('fhss', ['hopset', 'pattern'])
fhss.hopset, fhss.pattern, = struct.unpack_from('<bb', self._rtap, idx)
return idx + 2, fhss
def strip_dbm_antsignal(self, idx):
"""strip(1 byte) radiotap.dbm.ant_signal
:idx: int
:return: int
idx
:return: int
"""
dbm_antsignal, = struct.unpack_from('<b', self._rtap, idx)
return idx + 1, dbm_antsignal
def strip_dbm_antnoise(self, idx):
"""strip(1 byte) radiotap.dbm_antnoise
:idx: int
:return: int
idx
:return: int
"""
dbm_antnoise, = struct.unpack_from('<b', self._rtap, idx)
return idx + 1, dbm_antnoise
def strip_lock_quality(self, idx):
"""strip(2 byte) lock quality
:idx: int
:return: int
idx
:return: int
"""
idx = Radiotap.align(idx, 2)
lock_quality, = struct.unpack_from('<H', self._rtap, idx)
return idx + 2, lock_quality
def strip_tx_attenuation(self, idx):
"""strip(1 byte) tx_attenuation
:idx: int
:return: int
idx
:return: int
"""
idx = Radiotap.align(idx, 2)
tx_attenuation, = struct.unpack_from('<H', self._rtap, idx)
return idx + 2, tx_attenuation
def strip_db_tx_attenuation(self, idx):
"""strip(1 byte) db_tx_attenuation
:return: int
idx
:return: int
"""
idx = Radiotap.align(idx, 2)
db_tx_attenuation, = struct.unpack_from('<H', self._rtap, idx)
return idx + 2, db_tx_attenuation
def strip_dbm_tx_power(self, idx):
"""strip(1 byte) dbm_tx_power
:return: int
idx
:return: int
"""
idx = Radiotap.align(idx, 1)
dbm_tx_power, = struct.unpack_from('<b', self._rtap, idx)
return idx + 1, dbm_tx_power
def strip_antenna(self, idx):
"""strip(1 byte) radiotap.antenna
:return: int
idx
:return: int
"""
antenna, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, antenna
def strip_db_antsignal(self, idx):
"""strip(1 byte) radiotap.db_antsignal
:return: int
idx
:return: int
"""
db_antsignal, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, db_antsignal
def strip_db_antnoise(self, idx):
"""strip(1 byte) radiotap.db_antnoise
:return: int
idx
:return: int
"""
db_antnoise, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, db_antnoise
def strip_rx_flags(self, idx):
"""strip(2 byte) radiotap.rxflags
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
rx_flags = collections.namedtuple('rx_flags', ['reserved', 'badplcp'])
idx = Radiotap.align(idx, 2)
flags, = struct.unpack_from('<H', self._rtap, idx)
flag_bits = format(flags, '08b')[::-1]
rx_flags.reserved = int(flag_bits[0])
rx_flags.badplcp = int(flag_bits[1])
return idx + 2, rx_flags
def strip_tx_flags(self, idx):
"""strip(1 byte) tx_flags
:idx: int
:return: int
idx
:return: int
"""
idx = Radiotap.align(idx, 2)
        tx_flags, = struct.unpack_from('<H', self._rtap, idx)
        return idx + 2, tx_flags
def strip_rts_retries(self, idx):
"""strip(1 byte) rts_retries
:idx: int
:return: int
idx
:return: int
"""
rts_retries, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, rts_retries
def strip_data_retries(self, idx):
"""strip(1 byte) data_retries
:idx: int
:return: int
idx
:return: int
"""
data_retries, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, data_retries
def strip_xchannel(self, idx):
"""strip(7 bytes) radiotap.xchannel.channel(1 byte),
radiotap.xchannel.freq(2 bytes) and radiotap.xchannel.flags(4 bytes)
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
xchannel = collections.namedtuple(
'xchannel', ['flags', 'freq', 'channel', 'max_power'])
flags = collections.namedtuple(
'flags', ['turbo', 'cck', 'ofdm', 'two_g', 'five_g', 'passive',
                      'dynamic', 'gfsk', 'gsm', 'sturbo', 'half', 'quarter',
'ht_20', 'ht_40u', 'ht_40d'])
idx = Radiotap.align(idx, 2)
flag_val, freq, channel, max_power = struct.unpack_from('<lHBB', self._rtap, idx)
xchannel.freq = freq
xchannel.channel = channel
xchannel.max_power = max_power
bits = format(flag_val, '032b')[::-1]
flags.turbo = int(bits[4])
flags.cck = int(bits[5])
flags.ofdm = int(bits[6])
flags.two_g = int(bits[7])
flags.five_g = int(bits[8])
flags.passive = int(bits[9])
flags.dynamic = int(bits[10])
flags.gfsk = int(bits[11])
flags.gsm = int(bits[12])
flags.sturbo = int(bits[13])
flags.half = int(bits[14])
flags.quarter = int(bits[15])
flags.ht_20 = int(bits[16])
flags.ht_40u = int(bits[17])
flags.ht_40d = int(bits[18])
xchannel.flags = flags
return idx + 8, xchannel
def strip_mcs(self, idx):
"""strip(3 byte) radiotap.mcs which contains 802.11n bandwidth,
mcs(modulation and coding scheme) and stbc(space time block coding)
information.
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
mcs = collections.namedtuple(
'mcs', ['known', 'index', 'have_bw', 'have_mcs', 'have_gi',
'have_format', 'have_fec', 'have_stbc', 'have_ness',
'ness_bit1'])
idx = Radiotap.align(idx, 1)
known, flags, index = struct.unpack_from('<BBB', self._rtap, idx)
bits = format(flags, '032b')[::-1]
mcs.known = known # Known MCS information
mcs.index = index # MCS index
mcs.have_bw = int(bits[0]) # Bandwidth
mcs.have_mcs = int(bits[1]) # MCS
mcs.have_gi = int(bits[2]) # Guard Interval
mcs.have_format = int(bits[3]) # Format
mcs.have_fec = int(bits[4]) # FEC(Forward Error Correction) type
mcs.have_stbc = int(bits[5]) # Space Time Block Coding
mcs.have_ness = int(bits[6]) # Number of Extension Spatial Streams
mcs.ness_bit1 = int(bits[7]) # Number of Extension Spatial Streams bit 1
return idx + 3, mcs
def strip_ampdu(self, idx):
"""strip(8 byte) radiotap.ampdu
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
ampdu = collections.namedtuple(
            'ampdu', ['reference', 'crc_val', 'reserved', 'flags'])
flags = collections.namedtuple(
'flags', ['report_zerolen', 'is_zerolen', 'lastknown', 'last',
'delim_crc_error'])
idx = Radiotap.align(idx, 4)
refnum, flag_vals, crc_val, reserved = struct.unpack_from('<LHBB', self._rtap, idx)
ampdu.flags = flags
ampdu.reference = refnum
ampdu.crc_val = crc_val
ampdu.reserved = reserved
bits = format(flag_vals, '032b')[::-1]
ampdu.flags.report_zerolen = int(bits[0])
ampdu.flags.is_zerolen = int(bits[1])
ampdu.flags.lastknown = int(bits[2])
ampdu.flags.last = int(bits[3])
ampdu.flags.delim_crc_error = int(bits[4])
return idx + 8, ampdu
def strip_vht(self, idx):
"""strip(12 byte) radiotap.vht
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
vht = collections.namedtuple(
'vht', ['known_bits', 'have_stbc', 'have_txop_ps', 'have_gi',
'have_sgi_nsym_da', 'have_ldpc_extra', 'have_beamformed',
'have_bw', 'have_gid', 'have_paid', 'stbc', 'txop_ps', 'gi',
'sgi_nysm_da', 'ldpc_extra', 'group_id', 'partial_id',
'beamformed', 'user_0', 'user_1', 'user_2', 'user_3'])
user = collections.namedtuple('user', ['nss', 'mcs', 'coding'])
idx = Radiotap.align(idx, 2)
known, flags, bw = struct.unpack_from('<HBB', self._rtap, idx)
mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3 = struct.unpack_from('<BBBB', self._rtap, idx + 4)
coding, group_id, partial_id = struct.unpack_from('<BBH', self._rtap, idx + 8)
known_bits = format(known, '032b')[::-1]
vht.known_bits = known_bits
vht.have_stbc = int(known_bits[0]) # Space Time Block Coding
vht.have_txop_ps = int(known_bits[1]) # TXOP_PS_NOT_ALLOWD
vht.have_gi = int(known_bits[2]) # Short/Long Guard Interval
vht.have_sgi_nsym_da = int(known_bits[3]) # Short Guard Interval Nsym Disambiguation
vht.have_ldpc_extra = int(known_bits[4]) # LDPC(Low Density Parity Check)
vht.have_beamformed = int(known_bits[5]) # Beamformed
vht.have_bw = int(known_bits[6]) # Bandwidth
vht.have_gid = int(known_bits[7]) # Group ID
vht.have_paid = int(known_bits[8]) # Partial AID
flag_bits = format(flags, '032b')[::-1]
vht.flag_bits = flag_bits
vht.stbc = int(flag_bits[0])
vht.txop_ps = int(flag_bits[1])
vht.gi = int(flag_bits[2])
vht.sgi_nysm_da = int(flag_bits[3])
vht.ldpc_extra = int(flag_bits[4])
vht.beamformed = int(flag_bits[5])
vht.group_id = group_id
vht.partial_id = partial_id
vht.bw = bw
vht.user_0 = user(None, None, None)
vht.user_1 = user(None, None, None)
vht.user_2 = user(None, None, None)
vht.user_3 = user(None, None, None)
        for (i, mcs_nss) in enumerate([mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3]):
            if mcs_nss:
                nss = mcs_nss & 0x0f  # NSS sits in the low nibble
                mcs = (mcs_nss & 0xf0) >> 4  # MCS sits in the high nibble
                user_coding = (coding >> i) & 0x01  # per-user coding bit; keep `coding` intact for later users
                if i == 0:
                    vht.user_0 = user(nss, mcs, user_coding)
                elif i == 1:
                    vht.user_1 = user(nss, mcs, user_coding)
                elif i == 2:
                    vht.user_2 = user(nss, mcs, user_coding)
                elif i == 3:
                    vht.user_3 = user(nss, mcs, user_coding)
return idx + 12, vht
def extract_protocol(self):
"""extract 802.11 protocol from radiotap.channel.flags
:return: str
protocol name
one of below in success
[.11a, .11b, .11g, .11n, .11ac]
None in fail
"""
if self.present.mcs:
return '.11n'
if self.present.vht:
return '.11ac'
if self.present.channel and hasattr(self, 'chan'):
if self.chan.five_g:
if self.chan.ofdm:
return '.11a'
elif self.chan.two_g:
if self.chan.cck:
return '.11b'
elif self.chan.ofdm or self.chan.dynamic:
return '.11g'
return 'None'
@staticmethod
def align(val, align):
"""
:val: int
:align: int
:return: int
"""
return (val + align - 1) & ~(align - 1)
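    # align() rounds an offset up to the next multiple of the field alignment,
    # e.g. align(5, 4) == 8, align(8, 4) == 8 and align(3, 2) == 4; radiotap
    # fields are padded this way relative to the start of the header.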
class Wifi(ctypes.Structure):
"""Base Wi-Fi Packet"""
_fields_ = [('name', ctypes.c_char_p), # name of packet
('vers', ctypes.c_ushort), # version
('category', ctypes.c_ushort), # category
('subtype', ctypes.c_ushort), # subtype
('ds', ctypes.c_char_p), # distribution system
('to_ds', ctypes.c_bool), # to distribution system -> wlan.fc.ds[0]
('from_ds', ctypes.c_bool), # from distribution system -> wlan.fc.ds[1]
('frag', ctypes.c_bool), # more flag
('retry', ctypes.c_bool), # retry
('power_mgmt', ctypes.c_bool), # power management
('order', ctypes.c_bool), # order
('wep', ctypes.c_bool), # wired equivalent privacy
('duration', ctypes.c_uint)] # duration
# Wireshark syntax conjugates of fields in object (base)
_shark_ = {'wlan.fc.version': 'vers',
'wlan.fc.type': 'category',
'wlan.fc.type_subtype': 'subtype',
'wlan.fc.ds': 'ds',
'wlan.fc.frag': 'frag',
'wlan.fc.retry': 'retry',
'wlan.fc.pwrmgt': 'power_mgmt',
'wlan.fc.wep': 'wep',
'wlan.fc.order': 'order',
'wlan.duration': 'duration'}
def __init__(self, frame, no_rtap=False):
"""Constructor method.
Parse common headers of all Wi-Fi frames.
:frame: ctypes.Structure
"""
super(Wifi, self).__init__()
self._raw = {}
if not no_rtap:
rtap_bytes, self._packet = WiHelper._strip_rtap(frame)
self.radiotap = Radiotap(rtap_bytes)
else:
self._packet = frame
self.radiotap = None
f_cntrl = struct.unpack('BB', self._packet[:2]) # frame control
flags = f_cntrl[1]
self.vers = f_cntrl[0] & 0b0011
self.category = (f_cntrl[0] >> 2) & 0b0011
self.subtype = f_cntrl[0] >> 4
flag_bits = format(flags, '08b')[::-1]
self.to_ds = int(flag_bits[0])
self.from_ds = int(flag_bits[1])
self.ds = b''.join([(flag_bits[0]).encode('ascii'),
(flag_bits[1]).encode('ascii')])
self.frag = int(flag_bits[2])
self.retry = int(flag_bits[3])
self.power_mgmt = int(flag_bits[4])
self.more_data = int(flag_bits[5])
self.wep = int(flag_bits[6])
self.order = int(flag_bits[7])
# TODO: parse duration with respect to field/subfield
# since some bits might be reserved for types like data (0x20)
# https://community.arubanetworks.com/t5/Technology-Blog/802-11-Duration-ID-Field/ba-p/235872
self.duration = struct.unpack('H', self._packet[2:4])[0] # us
self.name = None
if self.category == 0:
if self.subtype in _SUBTYPES_[0].keys():
self.name = _SUBTYPES_[0][self.subtype].encode('ascii')
elif self.category == 1:
if self.subtype in _SUBTYPES_[1].keys():
self.name = _SUBTYPES_[1][self.subtype].encode('ascii')
elif self.category == 2:
if self.subtype in _SUBTYPES_[2].keys():
self.name = _SUBTYPES_[2][self.subtype].encode('ascii')
def get_shark_field(self, fields):
"""get parameters via wireshark syntax.
out = x.get_shark_field('wlan.fc.type')
out = x.get_shark_field(['wlan.fc.type', 'wlan.seq'])
:fields: str or str[]
:return: dict
out[fields[0]] = val[0] or None
out[fields[1]] = val[1] or None ...
"""
keys, exist, out = None, {}, None
if isinstance(fields, str):
fields = [fields]
elif not isinstance(fields, list):
logging.error('invalid input type')
return None
out = dict.fromkeys(fields)
if hasattr(self, '_shark_'):
exist.update(self._shark_)
if hasattr(self, '_s_shark_'):
exist.update(self._s_shark_)
if hasattr(self.radiotap, '_r_shark_'):
exist.update(self.radiotap._r_shark_)
keys = exist.keys()
for elem in fields:
if elem in keys:
obj_field, tmp = exist[elem], None
try:
tmp = operator.attrgetter(obj_field)(self)
except AttributeError:
tmp = None
if not tmp:
try:
tmp = operator.attrgetter(obj_field)(self.radiotap)
except AttributeError:
tmp = None
out[elem] = tmp
return out
@staticmethod
def get_mac_addr(mac_addr):
"""converts bytes to mac addr format
:mac_addr: ctypes.structure
:return: str
mac addr in format
11:22:33:aa:bb:cc
"""
mac_addr = bytearray(mac_addr)
mac = b':'.join([('%02x' % o).encode('ascii') for o in mac_addr])
return mac
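    # Example: Wifi.get_mac_addr(b'\x00\x11\x22\xaa\xbb\xcc') returns the byte
    # string b'00:11:22:aa:bb:cc' (the input here is an arbitrary made-up address).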
def get_hex_repr(self):
"""wlan.fc.type_subtype hex representation
:return: str
"""
return hex(self.category * 16 + self.subtype)
def strip_mac_addrs(self):
"""strip mac address(each 6 byte) information.
(wlan.ta, wlan.ra, wlan.sa, wlan.da)
(transmitter, receiver, source, destination)
:return: int
index of sequence control
:return: int
index after mac addresses
:return: str
source address (sa)
:return: str
transmitter address (ta)
:return: str
receiver address (ra)
:return: str
destination address (da)
:return: str
            basic service set identifier (bssid)
"""
qos_idx, seq_idx = 0, 0
sa, ta, ra, da, bssid = None, None, None, None, None
if self.to_ds == 1 and self.from_ds == 1:
(ra, ta, da) = struct.unpack('!6s6s6s', self._packet[4:22])
sa = struct.unpack('!6s', self._packet[24:30])[0]
qos_idx = 30
seq_idx = 22
elif self.to_ds == 0 and self.from_ds == 1:
(ra, ta, sa) = struct.unpack('!6s6s6s', self._packet[4:22])
qos_idx = 24
seq_idx = 22
elif self.to_ds == 1 and self.from_ds == 0:
(ra, ta, da) = struct.unpack('!6s6s6s', self._packet[4:22])
qos_idx = 24
seq_idx = 22
elif self.to_ds == 0 and self.from_ds == 0:
(ra, ta, bssid) = struct.unpack('!6s6s6s', self._packet[4:22])
qos_idx = 24
seq_idx = 22
if ta is not None:
ta = Wifi.get_mac_addr(ta)
if ra is not None:
ra = Wifi.get_mac_addr(ra)
if sa is not None:
sa = Wifi.get_mac_addr(sa)
if da is not None:
da = Wifi.get_mac_addr(da)
if bssid is not None:
bssid = Wifi.get_mac_addr(bssid)
return seq_idx, qos_idx, sa, ta, ra, da, bssid
def strip_seq_cntrl(self, idx):
"""strip(2 byte) wlan.seq(12 bit) and wlan.fram(4 bit)
number information.
:seq_cntrl: ctypes.Structure
:return: int
sequence number
:return: int
fragment number
"""
seq_cntrl = struct.unpack('H', self._packet[idx:idx + 2])[0]
seq_num = seq_cntrl >> 4
frag_num = seq_cntrl & 0x000f
return seq_num, frag_num
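    # Example: a sequence-control value of 0x1234 decodes to sequence number
    # 0x123 (291) and fragment number 4, since the low 4 bits carry the fragment.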
def __repr__(self, show_rfields=True):
"""
:show_rfields: bool
whether to show radiotap fields too.
"""
out_str = ''
all_fields = []
if hasattr(self, '_fields_'):
all_fields += self._fields_
if hasattr(self, '_sfields_'):
all_fields += self._sfields_
if all_fields:
for elem in all_fields:
key = elem[0]
try:
val = operator.attrgetter(key)(self)
except Exception:
val = None
if isinstance(val, list):
if val:
out_str += "{} <list>[{}]\n".format(key, type(val[0]))
else:
out_str += "{} <list>\n".format(str(key))
else:
out_str += "{}: {}\n".format(str(key), str(val))
else:
logging.error('instance does not have any field')
return None
if show_rfields and hasattr(self.radiotap, '_rfields_'):
for elem in self.radiotap._rfields_:
key = elem[0]
try:
val = operator.attrgetter(key)(self.radiotap)
except Exception:
val = None
if val is not None:
out_str += "radiotap.{}: {}\n".format(key, val)
return out_str
class Data(Wifi):
"""Base Data Packet (type: 2)"""
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:packet: ctypes.Structure
:no_rtap: Bool
shall parse radiotap headers
"""
Wifi.__init__(self, frame, no_rtap)
class QosData(Data):
"""Qos Data (type: 2, subtype: 8)"""
_sfields_ = [('sa', ctypes.c_char_p), # source address
('ta', ctypes.c_char_p), # transmitter address
('ra', ctypes.c_char_p), # receiver address
                 ('da', ctypes.c_char_p), # destination address
('seq_num', ctypes.c_uint), # sequence number
('frag_num', ctypes.c_uint), # fragment number
                 ('qos_pri', ctypes.c_uint), # quality of service priority
('qos_bit', ctypes.c_bool), # quality of service bit
('qos_ack', ctypes.c_uint), # quality of service ack
('amsdupresent', ctypes.c_bool), # aggregated mac service data unit
                 ('ccmp_extiv', ctypes.c_uint64), # CCMP extended initialization vector
('payload', list)] # payload
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.sa': 'sa',
'wlan.ta': 'ta',
'wlan.ra': 'ra',
'wlan.da': 'da',
'wlan.seq': 'seq_num',
'wlan.frag': 'frag_num',
'wlan.qos.priority': 'qos_pri',
'wlan.qos.bit4': 'qos_bit',
'wlan.qos.ack': 'qos_ack',
'wlan.qos.amsdupresent': 'amsdupresent',
'wlan.ccmp.extiv': 'ccmp_extiv'}
def __init__(self, frame, no_rtap=False, parse_amsdu=True):
"""Constructor method.
:frame: ctypes.Structure
:parse_amsdu: Bool
shall parse aggregated mac service data unit
"""
Data.__init__(self, frame, no_rtap)
idx = 0
self.sa = self.ta = self.ra = self.da = None
self.seq_num = self.frag_num = None
self.qos_pri = self.qos_bit = self.qos_ack = None
self.ccmp_extiv = None
self.payload = []
seq_idx, qos_idx, self.sa, self.ta, self.ra, self.da, _ = self.strip_mac_addrs()
self.seq_num, self.frag_num = self.strip_seq_cntrl(seq_idx)
idx = qos_idx
incr, self.qos_pri, self.qos_bit, self.qos_ack, self.amsdupresent =\
self.strip_qos_cntrl(idx, self.radiotap.prot_type)
idx += incr
if self.wep == 1:
incr, self.ccmp_extiv = self.strip_ccmp(idx)
idx += incr
if parse_amsdu:
if self.amsdupresent != 0 and self.wep == 0:
while idx < len(self._packet):
msdu, offset = self.strip_msdu(idx)
self.payload.append(msdu)
idx += offset
else:
if self.wep == 0:
msdu = {}
offset, llc = self.strip_llc(idx)
msdu['llc'] = llc
msdu['payload'] = self._packet[idx + offset:]
self.payload.append(msdu)
else:
self.payload.append({'payload': self._packet[idx:]})
def strip_qos_cntrl(self, idx, prot_type):
"""strip(2 byte) wlan.qos
:idx: int
:prot_type: string
802.11 protocol type(.11ac, .11a, .11n, etc)
:return: int
number of processed bytes
:return: int
qos priority
:return: int
qos bit
:return: int
qos acknowledgement
:return: int
amsdupresent(aggregated mac service data unit)
"""
qos_cntrl, = struct.unpack('H', self._packet[idx:idx + 2])
qos_cntrl_bits = format(qos_cntrl, '016b')[::-1]
qos_pri = qos_cntrl & 0x000f
qos_bit = int(qos_cntrl_bits[5])
qos_ack = int(qos_cntrl_bits[6:8], 2)
amsdupresent = 0
if prot_type == '.11ac':
amsdupresent = int(qos_cntrl_bits[7])
return 2, qos_pri, qos_bit, qos_ack, amsdupresent
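    # Example: a QoS control value of 0x0005 gives qos_pri 5 with qos_bit,
    # qos_ack and amsdupresent all 0; the A-MSDU bit is only consulted for
    # frames whose radiotap header identified the protocol as .11ac.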
def strip_ccmp(self, idx):
"""strip(8 byte) wlan.ccmp.extiv
CCMP Extended Initialization Vector
:return: int
number of processed bytes
:return: ctypes.raw
ccmp vector
"""
ccmp_extiv = None
if len(self._packet[idx:]) >= 8:
raw_bytes = self._packet[idx:idx + 8]
ccmp_extiv, = struct.unpack_from('Q', raw_bytes, 0)
return 8, ccmp_extiv
def strip_msdu(self, idx):
"""strip single mac servis data unit(msdu)
see -> https://mrncciew.com/2014/11/01/cwap-802-11-data-frame-aggregation/
:idx: int
:return: dict
msdu
:return: int
number of processed bytes
"""
        # the length of each msdu subframe has to be a multiple of 4;
        # this is guaranteed with padding
padding = 0
len_payload = 0
msdu = {
'llc': {},
'wlan.da': None,
'wlan.sa': None,
'payload': None,
'length': 0
}
(da_mac, sa_mac) = struct.unpack('!6s6s', self._packet[idx:idx + 12])
msdu['wlan.da'] = Wifi.get_mac_addr(da_mac)
msdu['wlan.sa'] = Wifi.get_mac_addr(sa_mac)
idx += 12
msdu['length'] = struct.unpack('!H', self._packet[idx:idx + 2])[0]
idx += 2
offset, msdu['llc'] = self.strip_llc(idx)
idx += offset
len_payload = msdu['length'] - offset
msdu['payload'] = self._packet[idx:idx + len_payload]
padding = 4 - (len_payload % 4)
return msdu, msdu['length'] + padding + 12
def strip_llc(self, idx):
"""strip(4 or 8 byte) logical link control headers
:return: int
number of processed bytes
:return: dict
llc information
see -> http://www.wildpackets.com/resources/compendium/ethernet/frame_snap_iee8023
ABBRVS.
ssap: source service access point
dsap: destination service access point
        SNAP (Subnetwork Access Protocol)
"""
llc = {}
snap = 170
llc_dsap = struct.unpack('B', self._packet[idx:idx + 1])[0]
llc['dsap.dsap'] = llc_dsap >> 1
llc['dsap.ig'] = llc_dsap & 0b01
idx += 1
llc_ssap = struct.unpack('B', self._packet[idx:idx + 1])[0]
llc['ssap.sap'] = llc_ssap >> 1
llc['ssap.cr'] = llc_ssap & 0b01
idx += 1
if llc_dsap == snap and llc_ssap == snap:
llc_control = struct.unpack('B', self._packet[idx:idx + 1])[0]
llc['control.u_modifier_cmd'] = llc_control >> 2
llc['control.ftype'] = llc_control & 0x03
idx += 1
llc['organization_code'] = self._packet[idx:idx + 3]
idx += 3
llc['type'] = self._packet[idx:idx + 2]
return 8, llc
else:
return 4, llc
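    # Example (hypothetical bytes): an LLC/SNAP header of
    # b'\xaa\xaa\x03\x00\x00\x00\x08\x00' is recognised as SNAP (dsap and ssap
    # both 0xaa), so strip_llc() consumes 8 bytes and reports type b'\x08\x00'
    # (IPv4) in llc['type']; a non-SNAP header only accounts for the first 4 bytes.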
def __str__(self):
frame = "%s (sa: %s, ta: %s, ra: %s, da: %s, ds: %s, seq: %s)"
frame = frame % (self.name, self.sa, self.ta, self.ra, self.da, self.ds, self.seq_num)
return frame
class Management(Wifi):
"""Management Packet (type: 0)"""
# commonly exists in some of the subtypes
_capabilities_ = [('ess', ctypes.c_bool), # extended service set
                      ('ibss', ctypes.c_bool), # independent basic service set
('priv', ctypes.c_bool), # privacy
('short_pre', ctypes.c_bool), # short preamble
('pbcc', ctypes.c_bool), # packet binary convolutional code
('chan_agility', ctypes.c_bool), # channel agility
('spec_man', ctypes.c_bool), # spectrum management
('short_slot', ctypes.c_bool), # short slot time
('apsd', ctypes.c_bool), # automatic power save delivery
('radio_meas', ctypes.c_bool), # radio measurement
('dss_ofdm', ctypes.c_bool), # direct spread spectrum
('del_back', ctypes.c_bool), # delayed block acknowledgement
('imm_back', ctypes.c_bool)] # immediate block acknowledgement
_scapabilities_ = {'wlan_mgt.fixed.capabilities.ess': 'ess',
'wlan_mgt.fixed.capabilities.ibss': 'ibss',
'wlan_mgt.fixed.capabilities.priv': 'priv',
'wlan_mgt.fixed.capabilities.preamble': 'short_pre',
'wlan_mgt.fixed.capabilities.pbcc': 'pbcc',
'wlan_mgt.fixed.capabilities.agility': 'chan_agility',
'wlan_mgt.fixed.capabilities.spec_man': 'spec_man',
'wlan_mgt.fixed.capabilities.short_slot_time': 'short_slot',
'wlan_mgt.fixed.capabilities.apsd': 'apsd',
'wlan_mgt.fixed.capabilities.radio_measurement': 'radio_meas',
'wlan_mgt.fixed.capabilities.dss_ofdm': 'dss_ofdm',
'wlan_mgt.fixed.capabilities.del_blk_ack': 'del_back',
                       'wlan_mgt.fixed.capabilities.imm_blk_ack': 'imm_back'}
def __init__(self, frame, no_rtap=False):
"""Constructor Method
:frame: ctypes.Structure
:subtype: int
"""
Wifi.__init__(self, frame, no_rtap)
self.tagged_params = []
self._raw_tagged_params = None
self.timestamp = None
self.interval = None
self.fixed_capabils = None
def __str__(self):
return self.name
@staticmethod
def parse_tagged_params(raw_tagged_params):
"""strip tagged information elements wlan_mgt.tag
which has generic type-length-value structure
[type, length, value]
type(1 byte), length(1 byte), value(varies)
[wlan_mgt.tag.number, wlan_mgt.tag.length, payload]
structured fields.
:return: dict[]
list of tagged params
:return: int
            0 on success, 1 when a tag length points outside the buffer
"""
fcs_len = 4 # wlan.fcs (4 bytes)
idx = 0
tagged_params = []
while idx < len(raw_tagged_params) - fcs_len:
tag_num, tag_len = struct.unpack('BB', raw_tagged_params[idx:idx + 2])
idx += 2
if len(raw_tagged_params) >= idx + tag_len:
param = {}
param['number'], param['length'] = tag_num, tag_len
payload = raw_tagged_params[idx:idx + tag_len]
if tag_num in MNGMT_TAGS:
param['name'] = MNGMT_TAGS[tag_num]
if MNGMT_TAGS[tag_num] == 'TAG_VENDOR_SPECIFIC_IE':
param['payload'] = Management.parse_vendor_ie(payload)
else:
param['payload'] = payload
else:
param['name'] = None
tagged_params.append(param)
idx += tag_len
else:
                logging.warning('tag length header points out of boundary')
log_msg = 'index: {p_idx}, pack_len: {p_len}'
log_msg = log_msg.format(p_idx=idx + tag_len,
p_len=len(raw_tagged_params))
logging.warning(log_msg)
return 1, tagged_params
return 0, tagged_params
@staticmethod
def get_fixed_capabils(payload):
"""strip(2 byte) wlan_mgt.fixed.capabilities
:payload: ctypes.structure
2 byte
:return: dict
None in error
"""
if len(payload) != 2:
return None
capabils = {}
fix_cap = struct.unpack('H', payload)[0]
cap_bits = format(fix_cap, '016b')[::-1]
capabils['ess'] = int(cap_bits[0]) # Extended Service Set
capabils['ibss'] = int(cap_bits[1]) # Independent Basic Service Set
capabils['priv'] = int(cap_bits[4]) # Privacy
capabils['short_preamble'] = int(cap_bits[5]) # Short Preamble
capabils['pbcc'] = int(cap_bits[6]) # Packet Binary Convolutional Code
capabils['chan_agility'] = int(cap_bits[7]) # Channel Agility
capabils['spec_man'] = int(cap_bits[8]) # Spectrum Management
capabils['short_slot'] = int(cap_bits[10]) # Short Slot Time
capabils['apsd'] = int(cap_bits[11]) # Automatic Power Save Delivery
capabils['radio_meas'] = int(cap_bits[12]) # Radio Measurement
capabils['dss_ofdm'] = int(cap_bits[13]) # Direct Spread Spectrum
capabils['del_back'] = int(cap_bits[14]) # Delayed Block Acknowledgement
capabils['imm_back'] = int(cap_bits[15]) # Immediate Block Acknowledgement
return capabils
@staticmethod
def parse_vendor_ie(payload):
"""parse vendor specific information element
oui -> organizationally unique identifier
first 3 bytes of mac addresses
see:https://www.wireshark.org/tools/oui-lookup.html
strip wlan_mgt.tag.oui(3 bytes),
wlan_mgt.tag.vendor.oui.type(1 byte)
wlan_mgt.tag.vendor.data (varies)
:payload: ctypes.structure
:return: dict
{'oui':00-11-22, 'oui_type':1, 'oui_data':ctypes.structure}
"""
output = {}
oui = struct.unpack('BBB', payload[0:3])
oui = b'-'.join([('%02x' % o).encode('ascii') for o in oui])
oui_type = struct.unpack('B', payload[3:4])[0]
oui_data = payload[4:]
output['oui'] = oui.upper()
output['oui_type'] = oui_type
output['oui_data'] = oui_data
return output
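    # Example (made-up IE payload): parse_vendor_ie(b'\x00\x50\xf2\x01' + data)
    # yields {'oui': b'00-50-F2', 'oui_type': 1, 'oui_data': data}, i.e. the
    # Microsoft OUI with vendor type 1 and whatever vendor data followed.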
@staticmethod
def get_timestamp(payload):
"""strip wlan_mgt.fixed.timestamp(8 bytes)
:payload: ctypes.structure
:return: int
None on error
"""
if len(payload) != 8:
return None
timestamp = struct.unpack('Q', payload)[0]
return timestamp
@staticmethod
def get_interval(payload):
"""strip wlan_mgt.fixed.beacoN(2 bytes)
beacon interval
:payload: ctypes.structure
:return: int
None on error
"""
if len(payload) != 2:
return None
interval = struct.unpack('H', payload)[0]
return interval
@staticmethod
def strip_fixed_params(payload):
"""strip(12 byte) wlan_mgt.fixed.all
:payload: ctypes.structure
:return: int
timestamp
:return: int
beacon interval
:return: dict
capabilities
"""
if len(payload) != 12:
return None, None, None
idx = 0
timestamp = Management.get_timestamp(payload[idx:idx + 8])
idx += 8
interval = Management.get_interval(payload[idx:idx + 2])
idx += 2
capabils = Management.get_fixed_capabils(payload[idx:idx + 2])
return timestamp, interval, capabils
@staticmethod
def is_valid_mac_oui(mac_block):
"""checks whether mac block is in format of
00-11-22 or 00:11:22.
:return: int
"""
if len(mac_block) != 8:
return 0
if ':' in mac_block:
if len(mac_block.split(':')) != 3:
return 0
elif '-' in mac_block:
if len(mac_block.split('-')) != 3:
return 0
return 1
def set_fixed_capabils(self, capabils):
"""set keys of capabils into fields of object
:capabils: dict
"""
self.ess = capabils['ess']
self.ibss = capabils['ibss']
self.priv = capabils['priv']
self.short_preamble = capabils['short_preamble']
self.pbcc = capabils['pbcc']
self.chan_agility = capabils['chan_agility']
self.spec_man = capabils['spec_man']
self.short_slot = capabils['short_slot']
self.apsd = capabils['apsd']
self.radio_meas = capabils['radio_meas']
self.dss_ofdm = capabils['dss_ofdm']
self.del_back = capabils['del_back']
self.imm_back = capabils['imm_back']
def get_vendor_ies(self, mac_block=None, oui_type=None):
"""vendor information element querying
:mac_block: str
first 3 bytes of mac addresses in format of
00-11-22 or 00:11:22 or 001122
:oui_type: int
vendors ie type
        :return: dict[]
            list of matching vendor information elements
        :return: None
            if mac_block has an invalid format
"""
vendor_ies = []
if mac_block is not None:
if Management.is_valid_mac_oui(mac_block):
mac_block = mac_block.upper()
if ':' in mac_block:
                    mac_block = mac_block.replace(':', '-')
else:
logging.warning("invalid oui macblock")
return None
for elem in self.tagged_params:
tag_num = elem['number']
if MNGMT_TAGS[tag_num] == 'TAG_VENDOR_SPECIFIC_IE':
if mac_block is None:
vendor_ies.append(elem)
elif elem['payload']['oui'] == mac_block.encode('ascii'):
if oui_type is None:
vendor_ies.append(elem)
elif elem['payload']['oui_type'] == oui_type:
vendor_ies.append(elem)
return vendor_ies
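    # Usage sketch (not from the original docs): given a parsed Beacon or
    # ProbeResp `mgmt`, `mgmt.get_vendor_ies('00-50-F2')` would return only the
    # vendor IEs carrying the Microsoft OUI, and passing oui_type as well
    # narrows the result to that vendor-specific element type.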
class ProbeResp(Management):
"""Probe Response (type: 0, subtype: 5)"""
_sfields_ = [('ra', ctypes.c_char_p), # receiver address
('ta', ctypes.c_char_p), # transmitter address
('bssid', ctypes.c_char_p), # basic service set identifier
('frag_num', ctypes.c_uint), # fragment number
('seq_num', ctypes.c_uint), # sequence number
('timestamp', ctypes.c_uint64), # timestamp
('interval', ctypes.c_uint), # interval
('tagged_params', list)] # tagged parameters
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.ta': 'ta',
'wlan.ra': 'ra',
'wlan.bssid': 'bssid',
'wlan.frag': 'frag_num',
'wlan.seq': 'seq_num',
'wlan_mgt.fixed.timestamp': 'timestamp',
'wlan_mgt.fixed.beacon': 'interval',
'wlan_mgt.tagged.all': 'tagged_params'}
_sfields_ += Management._capabilities_
_s_shark_.update(Management._scapabilities_)
def __init__(self, frame, no_rtap=False):
"""
"""
Management.__init__(self, frame, no_rtap)
idx = 0
self.ta = self.ra = self.bssid = None
self.seq_num = self.frag_num = None
self.timestamp = self.interval = None
# fixed capability fields
self.ess = self.ibss = None
self.privacy = None
self.priv = self.short_pre = self.pbcc = self.chan_agility = None
self.spec_man = self.short_slot = self.apsd = self.radio_meas = None
self.dss_ofdm = self.del_back = self.imm_back = None
seq_idx, _, _, self.ta, self.ra, _, self.bssid = self.strip_mac_addrs()
idx = seq_idx
self.seq_num, self.frag_num = self.strip_seq_cntrl(idx)
idx += 2
payload = self._packet[idx:idx + 12]
timestamp, interval, fixed_capabils = self.strip_fixed_params(payload)
if all([timestamp, interval, fixed_capabils]):
self.timestamp = timestamp
self.interval = interval
self.set_fixed_capabils(fixed_capabils)
idx += 12
else:
logging.error("failed to parse fixed parameters")
return
if idx < len(self._packet):
self._raw_tagged_params = self._packet[idx:]
is_out_bound, tagged_params = self.parse_tagged_params(self._raw_tagged_params)
if len(tagged_params):
self.tagged_params = tagged_params
if is_out_bound:
logging.error("tag_len header not matched with raw byte counts")
class ProbeReq(Management):
"""Probe Request (type: 0, subtype:4)"""
_sfields_ = [('ra', ctypes.c_char_p), # receiver address
('ta', ctypes.c_char_p), # transmitter address
('bssid', ctypes.c_char_p), # basic service set identifier
('frag_num', ctypes.c_uint), # fragment number
('seq_num', ctypes.c_uint), # sequence number
('tagged_params', list)] # tagged parameters
_s_shark_ = {'wlan.ra': 'ra',
'wlan.ta': 'ta',
'wlan.bssid': 'bssid',
'wlan.frag': 'frag_num',
'wlan.seq': 'seq_num',
'wlan_mgt.tagged.all': 'tagged_params'}
def __init__(self, frame, no_rtap=False):
"""
"""
Management.__init__(self, frame, no_rtap)
idx = 0
self.ta = self.ra = self.bssid = None
self.seq_num = self.frag_num = None
seq_idx, _, _, self.ta, self.ra, _, self.bssid = self.strip_mac_addrs()
idx = seq_idx
self.seq_num, self.frag_num = self.strip_seq_cntrl(idx)
idx += 2
if idx < len(self._packet):
self._raw_tagged_params = self._packet[idx:]
is_out_bound, tagged_params = self.parse_tagged_params(self._raw_tagged_params)
if len(tagged_params):
self.tagged_params = tagged_params
if is_out_bound:
logging.error("tag_len header not matched with raw byte counts")
class Beacon(Management):
"""Beacon (type: 0, subtype: 0)"""
_sfields_ = [('ra', ctypes.c_char_p), # receiver address
('ta', ctypes.c_char_p), # transmitter address
('bssid', ctypes.c_char_p), # basic service set identifier
('frag_num', ctypes.c_uint), # fragment number
('seq_num', ctypes.c_uint), # sequence number
('timestamp', ctypes.c_uint64), # timestamp
('interval', ctypes.c_uint), # interval
('tagged_params', list)] # tagged parameters
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.ta': 'ta',
'wlan.ra': 'ra',
'wlan.bssid': 'bssid',
'wlan.frag': 'frag_num',
'wlan.seq': 'seq_num',
'wlan_mgt.fixed.timestamp': 'timestamp',
'wlan_mgt.fixed.beacon': 'interval',
'wlan_mgt.tagged.all': 'tagged_params'}
_sfields_ += Management._capabilities_
_s_shark_.update(Management._scapabilities_)
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:frame: ctypes.Structure
"""
Management.__init__(self, frame, no_rtap)
idx = 0
self.timestamp = self.interval = None
self.ta = self.ra = self.bssid = None
self.seq_num = self.frag_num = None
# fixed capability fields
self.ess = self.ibss = None
self.privacy = None
self.priv = self.short_preamble = self.pbcc = self.chan_agility = None
self.spec_man = self.short_slot = self.apsd = self.radio_meas = None
self.dss_ofdm = self.del_back = self.imm_back = None
seq_idx, _, _, self.ta, self.ra, _, self.bssid = self.strip_mac_addrs()
idx = seq_idx
self.seq_num, self.frag_num = self.strip_seq_cntrl(idx)
idx += 2
payload = self._packet[idx:idx + 12]
timestamp, interval, fixed_capabils = self.strip_fixed_params(payload)
if all([timestamp, interval, fixed_capabils]):
self.timestamp = timestamp
self.interval = interval
self.set_fixed_capabils(fixed_capabils)
idx += 12
else:
logging.warning("failed to parse fixed parameters")
return
if idx < len(self._packet):
self._raw_tagged_params = self._packet[idx:]
is_out_bound, tagged_params = self.parse_tagged_params(self._raw_tagged_params)
if len(tagged_params):
self.tagged_params = tagged_params
if is_out_bound:
logging.warning("tag_len header not matched with raw byte counts")
def __str__(self):
frame = "%s from %s (tstamp: %d, interval: %d)"
frame = frame % (self.name, self.bssid, self.timestamp, self.interval)
return frame
class Control(Wifi):
"""Control Frames (type: 1)"""
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:frame: ctypes.Structure
"""
Wifi.__init__(self, frame, no_rtap)
def __str__(self):
return self.name
class RTS(Control):
"""Request to Send Frame (type: 1, subtype: 1)"""
_sfields_ = [('ta', ctypes.c_char_p), # transmitter address
('ra', ctypes.c_char_p)] # receiver address
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.ta': 'ta',
'wlan.ra': 'ra'}
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:frame: ctypes.Structure
"""
Control.__init__(self, frame, no_rtap)
(ra_mac, ta_mac) = struct.unpack('!6s6s', self._packet[4:16])
self.ra = Wifi.get_mac_addr(ra_mac)
self.ta = Wifi.get_mac_addr(ta_mac)
def __str__(self):
frame = '%s from %s to %s (duration: %d us)'
frame = frame % (self.name, self.ta, self.ra, self.duration)
return frame
class CTS(Control):
"""Clear to Send Frame (type: 1, subtype: 2)"""
_sfields_ = [('ra', ctypes.c_char_p)] # receiver address -> wlan.ra
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.ra': 'ra'}
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:frame: ctypes.Structure
"""
Control.__init__(self, frame, no_rtap)
ra_mac = struct.unpack('!6s', self._packet[4:10])[0]
self.ra = Wifi.get_mac_addr(ra_mac)
def __str__(self):
frame = '%s to %s (duration: %d us)'
frame = frame % (self.name, self.ra, self.duration)
return frame
class BACK(Control):
    """Block Acknowledgement Frame (type: 1, subtype: 9)"""
    _sfields_ = [('ra', ctypes.c_char_p), # receiver address
('ta', ctypes.c_char_p), # transmitter address
('ackpolicy', ctypes.c_bool), # acknowledgement policy
('multitid', ctypes.c_bool), # multiple traffic identifier
('ssc_frag', ctypes.c_uint), # starting sequence number fragment
('ssc_seq', ctypes.c_uint), # starting sequence number
('bitmap_str', ctypes.c_char_p), # bitmap string -> in wlan.ba.bm
('acked_seqs', list)] # acknowledged strings -> in wlan.ba.bm and wlan_mgt.fixed.ssc.sequence
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.ra': 'ra',
'wlan.ta': 'ta',
'wlan.ba.control.ackpolicy': 'ackpolicy',
'wlan.ba.control.multitid': 'multitid',
'wlan_mgt.fixed.ssc.fragment': 'ssc_frag',
'wlan_mgt.ssc.sequence': 'ssc_seq'}
"""Block Acknowledgement Frame (type: 1, subtype: 9)"""
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:frame: ctypes.Structure
"""
Control.__init__(self, frame, no_rtap)
(ra_mac, ta_mac) = struct.unpack('!6s6s', self._packet[4:16])
self.ra = self.ta = None
self.ackpolicy = self.multitid = None
self.ssc_frag = self.ssc_seq = None
self.bitmap_str = None
self.acked_seqs = []
self.ra = Wifi.get_mac_addr(ra_mac)
self.ta = Wifi.get_mac_addr(ta_mac)
idx = 16
payload = self._packet[idx:idx + 2]
self.ackpolicy, self.multitid = BACK.strip_cntrl(payload)
idx += 2
payload = self._packet[idx:idx + 2]
self.ssc_seq, self.ssc_frag = BACK.strip_ssc(payload)
idx += 2
payload = self._packet[idx:idx + 8]
self.bitmap_str = BACK.strip_bitmap_str(payload)
idx += 8
self.acked_seqs = BACK.extract_acked_seqs(self.bitmap_str, self.ssc_seq)
def get_shark_field(self, fields):
"""
:fields: str[]
"""
out = super(BACK, self).get_shark_field(fields)
out.update({'acked_seqs': self.acked_seqs,
'bitmap_str': self.bitmap_str})
return out
@staticmethod
def strip_cntrl(payload):
"""strip(2 byte) wlan.ba.control
:payload: ctypes.structure
:return: int
multitid (tid: traffic indicator)
:return: int
ackpolicy
"""
cntrl = struct.unpack('H', payload)[0]
cntrl_bits = format(cntrl, '016b')[::-1]
ackpolicy = int(cntrl_bits[0])
multitid = int(cntrl_bits[1])
return ackpolicy, multitid
@staticmethod
def strip_ssc(payload):
"""strip(2 byte) wlan_mgt.fixed.ssc
:payload: ctypes.structure
:return: int
ssc_seq (starting sequence control sequence)
:return: int
ssc_frag (starting sequence control fragment number)
"""
ssc = struct.unpack('H', payload)[0]
ssc_seq = ssc >> 4
ssc_frag = ssc & 0x000f
return ssc_seq, ssc_frag
@staticmethod
def strip_bitmap_str(payload):
"""strip(8 byte) wlan.ba.bm
:payload: ctypes.structure
:return: str
bitmap
"""
bitmap = struct.unpack('BBBBBBBB', payload)
bitmap_str = ''
for elem in bitmap:
bitmap_str += format(elem, '08b')[::-1]
return bitmap_str
@staticmethod
def extract_acked_seqs(bitmap, ssc_seq):
"""extracts acknowledged sequences from bitmap and
starting sequence number.
:bitmap: str
:ssc_seq: int
:return: int[]
acknowledged sequence numbers
"""
acked_seqs = []
for idx, val in enumerate(bitmap):
if int(val) == 1:
seq = (ssc_seq + idx) % 4096
acked_seqs.append(seq)
return acked_seqs
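# Illustrative example (values assumed): with ssc_seq=100 and a bitmap string whose
# positions 0, 2 and 5 are '1', extract_acked_seqs returns [100, 102, 105]
# (sequence numbers wrap modulo 4096).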
def __str__(self):
frame = '%s from %s to %s (starting seq: %d, num_acked: %d)'
frame = frame % (self.name, self.ta, self.ra,
self.ssc_seq, len(self.acked_seqs))
return frame
class Unknown(Wifi):
"""
un-identified packet
"""
def __init__(self, frame, no_rtap):
Wifi.__init__(self, frame, no_rtap)
self.name = "Unkown"
|
the-stack_0_6285 | import json
import pandas as pd
import numpy as np
import requests
from cleanup import bubi_coredata
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
from bs4 import BeautifulSoup
# fetches the collection id for a collection entry
def get_ezb_id(collection):
# if no collection is given, return an empty string
if collection == '':
return ''
# build the URL for the EZB
url = 'https://ezb.ur.de/api/collections/' + collection
# fetch the page
request = requests.get(url)
# if the request was successful (status = 200), read out the values
if request.status_code == 200:
# parse the page content (= request.content) with BeautifulSoup
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
parsed_html = BeautifulSoup(request.content, features="lxml")
# read the JSON-formatted content from the p tag. Since it sometimes contains notices instead, wrap the
# whole thing in a try/except block. If the content cannot be parsed as JSON, an empty string is
# returned; otherwise the "coll_id" field is read from the JSON part and returned
try:
json_object = json.loads(parsed_html.find("p").get_text())
return json_object['coll_id']
except:
return ''
# if the page request was not successful (status is not 200), an empty string is returned.
else:
return ""
def collect_data(filename):
# the path to the original file, in this case in the subfolder data/input, relative to the location of this file
path = 'data/input/{}'.format(filename)
# read the file, making sure that the package:collection column is read in as text
df = pd.read_excel(path, dtype={'package:collection': str, 'zdb_id': str})
# alle "Not a number" (nan) durch leere Textfelder ersetzen
df = df.replace(np.nan, "", regex=True)
# prepare the list of rows to be written
extended_rows = []
# iterate over all rows
for index, row in df.iterrows():
# for every hundredth entry, report the current progress on the command line.
if index % 100 == 0:
print("processing line {} of {}".format(index, len(df)))
# look up the EZB collection id by calling the function defined above
ezb_collection_id = get_ezb_id(row['package:collection'])
# store it as a new value in the "collection_id" column
row['collection_id'] = ezb_collection_id
# append this row to the list of rows to be written
extended_rows.append(row)
# convert the list of rows to be written into a pandas DataFrame
output_df = pd.DataFrame(extended_rows)
# write the DataFrame to a file. It has the same name as the original file, prefixed with "out_", and
# is located in the folder data/output relative to this file
output_df.to_excel('data/output/out_{}'.format(filename))
# standard Python entry point for the script
if __name__ == '__main__':
# the file name of the file to be extended, in the folder data/input relative to this file
filename = 'Grunddaten_Essen'
# call the function and collect the data
bubi_coredata.transform_coredata(filename, 'E')
|
the-stack_0_6286 | from ibm_cloud_security_advisor import NotificationsApiV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
authenticator = IAMAuthenticator(
apikey='abc')
notifications_service =NotificationsApiV1(authenticator=authenticator)
notifications_service.set_service_url("https://us-south.secadvisor.cloud.ibm.com/notifications")
data = {
"name": "sdk_test_notification1",
"description": "test1 description",
"type": "Webhook",
"endpoint": "http://test.com",
"enabled": True,
"severity": [
"high",
"medium",
"low",
"critical"
],
"alert_source": [
{
"provider_name": "VA",
"finding_types": [
"ALL"
]
},
{
"provider_name": "CERT",
"finding_types": [
"ALL"
]
}
]
}
response = notifications_service.create_notification_channel(
account_id="abc",
**data
)
print(response)
|
the-stack_0_6287 | #! /usr/bin/env python
from os import environ
from insightlab import Insight, InsightObjects
TOKEN = environ.get("INSIGHT_TOKEN", "")
## Set login
i = Insight.API(TOKEN, "4")
## Load the object
my_server = i.load("IDLAB-5709")
print(f"Current hostname: {my_server.attribute_value_by_name('Hostname')}")
## Find the attribute's id
id = my_server.attribute_id_by_name("Hostname")
## Create the attribute object and add the value
attr = InsightObjects.Attributes(id)
attr.add_value("new_hostname.test.idlab.org")
## Update the attribute's value
i.update_attribute(my_server.id, attr)
# Reload the object
my_server = i.load("IDLAB-5709")
print(f"New hostname: {my_server.attribute_value_by_name('Hostname')}")
input("Press Enter to continue...")
# Reset to original
i.update_attribute(my_server.id, id, ["test.server.idlab.org"])
|
the-stack_0_6292 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint, cstr
import re
from six import string_types
from frappe.model import log_types
def set_new_name(doc):
"""
Sets the `name` property for the document based on various rules.
1. If amended doc, set suffix.
2. If `autoname` method is declared, then call it.
3. If `autoname` property is set in the DocType (`meta`), then build it using the `autoname` property.
4. If no rule defined, use hash.
:param doc: Document to be named.
"""
doc.run_method("before_naming")
autoname = frappe.get_meta(doc.doctype).autoname or ""
if autoname.lower() != "prompt" and not frappe.flags.in_import:
doc.name = None
if getattr(doc, "amended_from", None):
_set_amended_name(doc)
return
elif getattr(doc.meta, "issingle", False):
doc.name = doc.doctype
elif getattr(doc.meta, "istable", False):
doc.name = make_autoname("hash", doc.doctype)
if not doc.name:
set_naming_from_document_naming_rule(doc)
if not doc.name:
doc.run_method("autoname")
if not doc.name and autoname:
set_name_from_naming_options(autoname, doc)
# if the autoname option is 'field:' and no name was derived, we need to
# notify
if not doc.name and autoname.startswith("field:"):
fieldname = autoname[6:]
frappe.throw(_("{0} is required").format(doc.meta.get_label(fieldname)))
# at this point, we fall back to name generation with the hash option
if not doc.name and autoname == "hash":
doc.name = make_autoname("hash", doc.doctype)
if not doc.name:
doc.name = make_autoname("hash", doc.doctype)
doc.name = validate_name(
doc.doctype,
doc.name,
frappe.get_meta(doc.doctype).get_field("name_case")
)
def set_name_from_naming_options(autoname, doc):
"""
Get a name based on the autoname field option
"""
_autoname = autoname.lower()
if _autoname.startswith("field:"):
doc.name = _field_autoname(autoname, doc)
elif _autoname.startswith("naming_series:"):
set_name_by_naming_series(doc)
elif _autoname.startswith("prompt"):
_prompt_autoname(autoname, doc)
elif _autoname.startswith("format:"):
doc.name = _format_autoname(autoname, doc)
elif "#" in autoname:
doc.name = make_autoname(autoname, doc=doc)
def set_naming_from_document_naming_rule(doc):
'''
Evaluate rules based on "Document Naming Series" doctype
'''
if doc.doctype in log_types:
return
# ignore_ddl if naming is not yet bootstrapped
for d in frappe.get_all('Document Naming Rule',
dict(document_type=doc.doctype, disabled=0), order_by='priority desc', ignore_ddl=True):
frappe.get_cached_doc('Document Naming Rule', d.name).apply(doc)
if doc.name:
break
def set_name_by_naming_series(doc):
"""Sets name by the `naming_series` property"""
if not doc.naming_series:
doc.naming_series = get_default_naming_series(doc.doctype)
if not doc.naming_series:
frappe.throw(frappe._("Naming Series mandatory"))
doc.name = make_autoname(doc.naming_series+".#####", "", doc)
def make_autoname(key="", doctype="", doc=""):
"""
Creates an autoname from the given key:
**Autoname rules:**
* The key is separated by '.'
* '####' represents a series. The string before this part becomes the prefix:
Example: ABC.#### creates a series ABC0001, ABC0002 etc
* 'MM' represents the current month
* 'YY' and 'YYYY' represent the current year
*Example:*
* DE/./.YY./.MM./.##### will create a series like
DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
"""
if key == "hash":
return frappe.generate_hash(doctype, 10)
if "#" not in key:
key = key + ".#####"
elif "." not in key:
error_message = _("Invalid naming series (. missing)")
if doctype:
error_message = _("Invalid naming series (. missing) for {0}").format(doctype)
frappe.throw(error_message)
parts = key.split('.')
n = parse_naming_series(parts, doctype, doc)
return n
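# Illustrative usage (assumes an empty series and the current year 2021; the actual output
# depends on the state of `tabSeries`):
# make_autoname("SINV-.YYYY.-.#####", "Sales Invoice") -> "SINV-2021-00001"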
def parse_naming_series(parts, doctype='', doc=''):
n = ''
if isinstance(parts, string_types):
parts = parts.split('.')
series_set = False
today = now_datetime()
for e in parts:
part = ''
if e.startswith('#'):
if not series_set:
digits = len(e)
part = getseries(n, digits)
series_set = True
elif e == 'YY':
part = today.strftime('%y')
elif e == 'MM':
part = today.strftime('%m')
elif e == 'DD':
part = today.strftime("%d")
elif e == 'YYYY':
part = today.strftime('%Y')
elif e == 'timestamp':
part = str(today)
elif e == 'FY':
part = frappe.defaults.get_user_default("fiscal_year")
elif e.startswith('{') and doc:
e = e.replace('{', '').replace('}', '')
part = doc.get(e)
elif doc and doc.get(e):
part = doc.get(e)
else:
part = e
if isinstance(part, string_types):
n += part
return n
def getseries(key, digits):
# series created ?
current = frappe.db.sql("SELECT `current` FROM `tabSeries` WHERE `name`=%s FOR UPDATE", (key,))
if current and current[0][0] is not None:
current = current[0][0]
# yes, update it
frappe.db.sql("UPDATE `tabSeries` SET `current` = `current` + 1 WHERE `name`=%s", (key,))
current = cint(current) + 1
else:
# no, create it
frappe.db.sql("INSERT INTO `tabSeries` (`name`, `current`) VALUES (%s, 1)", (key,))
current = 1
return ('%0'+str(digits)+'d') % current
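# Illustrative example (assumes the series 'ABC' currently stands at 6):
# getseries('ABC', 5) increments the counter and returns '00007'.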
def revert_series_if_last(key, name, doc=None):
if ".#" in key:
prefix, hashes = key.rsplit(".", 1)
if "#" not in hashes:
return
else:
prefix = key
if '.' in prefix:
prefix = parse_naming_series(prefix.split('.'), doc=doc)
count = cint(name.replace(prefix, ""))
current = frappe.db.sql("SELECT `current` FROM `tabSeries` WHERE `name`=%s FOR UPDATE", (prefix,))
if current and current[0][0]==count:
frappe.db.sql("UPDATE `tabSeries` SET `current` = `current` - 1 WHERE `name`=%s", prefix)
def get_default_naming_series(doctype):
"""get default value for `naming_series` property"""
naming_series = frappe.get_meta(doctype).get_field("naming_series").options or ""
if naming_series:
naming_series = naming_series.split("\n")
return naming_series[0] or naming_series[1]
else:
return None
def validate_name(doctype, name, case=None, merge=False):
if not name:
frappe.throw(_("No Name Specified for {0}").format(doctype))
if name.startswith("New "+doctype):
frappe.throw(_("There were some errors setting the name, please contact the administrator"), frappe.NameError)
if case == "Title Case":
name = name.title()
if case == "UPPER CASE":
name = name.upper()
name = name.strip()
if not frappe.get_meta(doctype).get("issingle") and (doctype == name) and (name != "DocType"):
frappe.throw(_("Name of {0} cannot be {1}").format(doctype, name), frappe.NameError)
special_characters = "<>"
if re.findall("[{0}]+".format(special_characters), name):
message = ", ".join("'{0}'".format(c) for c in special_characters)
frappe.throw(_("Name cannot contain special characters like {0}").format(message), frappe.NameError)
return name
def append_number_if_name_exists(doctype, value, fieldname="name", separator="-", filters=None):
if not filters:
filters = dict()
filters.update({fieldname: value})
exists = frappe.db.exists(doctype, filters)
regex = "^{value}{separator}\d+$".format(value=re.escape(value), separator=separator)
if exists:
last = frappe.db.sql("""SELECT `{fieldname}` FROM `tab{doctype}`
WHERE `{fieldname}` {regex_character} %s
ORDER BY length({fieldname}) DESC,
`{fieldname}` DESC LIMIT 1""".format(
doctype=doctype,
fieldname=fieldname,
regex_character=frappe.db.REGEX_CHARACTER),
regex)
if last:
count = str(cint(last[0][0].rsplit(separator, 1)[1]) + 1)
else:
count = "1"
value = "{0}{1}{2}".format(value, separator, count)
return value
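# Illustrative example (assumes a 'ToDo' named "Task" exists and "Task-1" is the highest
# suffixed match): append_number_if_name_exists('ToDo', 'Task') returns "Task-2".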
def _set_amended_name(doc):
am_id = 1
am_prefix = doc.amended_from
if frappe.db.get_value(doc.doctype, doc.amended_from, "amended_from"):
am_id = cint(doc.amended_from.split("-")[-1]) + 1
am_prefix = "-".join(doc.amended_from.split("-")[:-1]) # except the last hyphen
doc.name = am_prefix + "-" + str(am_id)
return doc.name
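# Illustrative example (document names assumed): amending "SINV-0001" yields "SINV-0001-1";
# amending "SINV-0001-1" (itself an amendment) yields "SINV-0001-2".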
def _field_autoname(autoname, doc, skip_slicing=None):
"""
Generate a name using `DocType` field. This is called when the doctype's
`autoname` field starts with 'field:'
"""
fieldname = autoname if skip_slicing else autoname[6:]
name = (cstr(doc.get(fieldname)) or "").strip()
return name
def _prompt_autoname(autoname, doc):
"""
Generate a name using Prompt option. This simply means the user will have to set the name manually.
This is called when the doctype's `autoname` field starts with 'prompt'.
"""
# set from __newname in save.py
if not doc.name:
frappe.throw(_("Name not set via prompt"))
def _format_autoname(autoname, doc):
"""
Generate autoname by replacing all instances of braced params (fields, date params ('DD', 'MM', 'YY'), series)
Independent of remaining string or separators.
Example pattern: 'format:LOG-{MM}-{fieldname1}-{fieldname2}-{#####}'
"""
first_colon_index = autoname.find(":")
autoname_value = autoname[first_colon_index + 1:]
def get_param_value_for_match(match):
param = match.group()
# trim braces
trimmed_param = param[1:-1]
return parse_naming_series([trimmed_param], doc=doc)
# Replace braced params with their parsed value
name = re.sub(r"(\{[\w | #]+\})", get_param_value_for_match, autoname_value)
return name
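# Illustrative example for the docstring pattern 'format:LOG-{MM}-{fieldname1}-{fieldname2}-{#####}'
# (field values and series state assumed): with month 11, fieldname1 = "A", fieldname2 = "B" and a
# fresh series, the result is "LOG-11-A-B-00001".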
|
the-stack_0_6293 | # Version 1.0; Erik Husby; Polar Geospatial Center, University of Minnesota; 2017
from __future__ import division
import os
import numbers
from operator import itemgetter
import gdal, ogr, osgeo, osr
import numpy as np
PROJREF_POLAR_STEREO = """PROJCS["unnamed",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",-70],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]]]"""
RASTER_DEFAULT_PROJREF = PROJREF_POLAR_STEREO
gdal.UseExceptions()
class RasterIOError(Exception):
def __init__(self, msg=""):
self.msg = msg
def __str__(self):
return repr(self.msg)
class InvalidArgumentError(Exception):
def __init__(self, msg=""):
self.msg = msg
def __str__(self):
return repr(self.msg)
class Raster:
"""
*** NOTE THAT ONLY 'NORTH-UP' GEOTIFF IMAGES ARE FULLY SUPPORTED AT THIS TIME ***
Contains methods to extract pixel data, geotransform, projection, corner coordinates, geometry,
and other associated information from a raster image, built on the framework provided by GDAL's
GDALDataset class.
Additionally, 'smart' getter and setter methods are provided for all data members listed in the
class initialization (data members are referred to as a raster's 'parameters' hereafter) that
make it possible to store and modify the values of useful parameters while maintaining a
self-consistent dataset.
A Raster instance starts with all parameters set to None, except for those whose names are
provided to the initialization call as additional arguments beyond the first.
As for the first argument, if it is a path to a valid raster file (or equivalently, if it is an
osgeo.gdal.Dataset object), all values of those parameters which are to be set will be extracted
directly from the provided raster dataset. If 'ds' is not included in that list (or equivalently,
'all' and 'no-ds' are included), the class will not keep a reference to the raster dataset in
its 'ds' parameter after initialization is complete.
If the first argument is instead None, those parameters which are to be set will be set to their
respective default values (as retrieved from the getter methods mentioned later).
After initialization, setting individual parameters should be done via the Raster.set_param()
method. Since changing the value of one parameter of a raster image (such as the 'x' array of
horizontal grid coordinates) may affect the (x-)size of the image in pixel SHAPE, the RESOLUTION
of the image pixels (in the x-direction, dx), and the geographic EXTENT of the image (in its
x-coordinates), to maintain a self-consistent dataset any modifications should be propagated to
those parameters that are based on the same core values of SHAPE, RESOLUTION, or EXTENT.
This is done by default, but may be turned off by passing the 'prop' keyword argument as False.
Core values for each property -- SHAPE ('shape'), RESOLUTION ('dx', 'dy', 'res', the dx and dy
parts of 'geo_trans'), EXTENT (the xmin and ymax parts of 'geo_trans') -- may be set (remember,
every parameter is initialized to None unless specifically set) automatically by passing the
'set_core' keyword argument as True when using Raster.set_param() to set a higher-level
(non-core) parameter.
Furthermore...
When setting a parameter that directly sets a value(s) in only one of the three Raster property
domains SHAPE, RESOLUTION, or EXTENT, it must be determined which of the other two properties
will be held constant (or as close as possible to constant in the case of changing SHAPE/EXTENT
when RESOLUTION is held constant). By default, EXTENT is preserved when setting SHAPE/RESOLUTION
and RESOLUTION is preserved when setting EXTENT. This behavior may be changed when setting any
applicable parameter by passing the 'hold' keyword argument as the name of the property you wish
to preserve ('shape', 'res', or 'extent').
Setting a parameter with Raster.set_param() in 'default mode' (by passing None as the 'value'
argument with the 'set_default' keyword argument set to True) will attempt to use the values of
other already-set parameters to determine a value for the new parameter. This is done to try to
keep the Raster in a self-consistent state. Getter methods for each parameter work to accomplish
this task, and may be used by themselves to extract wanted information from the Raster without
setting any unneeded parameters.
NOTE: These getter methods will give no warning if there are inconsistencies among the parameter
values, and should be used at the risk of the programmer.
Since no copying is done when setting parameters to values that are mutable objects, multiple
references may exist in a program that point to the value of a Raster parameter and one must be
careful. However, since it is highly beneficial to be able to make direct modifications to such
items (without copying, modifying, and passing the result into Raster.set_param() over and over),
calling Raster.prop_param() after making direct modifications to the value of a parameter will
essentially propagate those changes to other parameters in the Raster by forcing the getter
methods to ignore the modified parameter when looking for values that should be held constant
through the propagation.
At this time, changes are not propagated through to the pixel data parameter 'z' after z is set.
"""
def __init__(self, rasterFile_or_ds=None, *set_params):
self.ds = None
self.shape = None
self.z = None
self.x = None
self.y = None
self.dx = None
self.dy = None
self.res = None
self.geo_trans = None
self.corner_coords = None
self.proj_ref = None
self.spat_ref = None
self.geom = None
set_params_unique = list(set(set_params))
if 'all' in set_params_unique:
set_params_unique = ['ds', 'shape', 'z', 'x', 'y', 'dx', 'dy', 'res',
'geo_trans', 'corner_coords', 'proj_ref', 'spat_ref', 'geom']
if 'no-ds' in set_params:
if 'ds' in set_params_unique:
set_params_unique.remove('ds')
if 'no-ds' in set_params_unique:
set_params_unique.remove('no-ds')
if rasterFile_or_ds is not None:
self.set_param('ds', self.open_ds(rasterFile_or_ds))
if set_params_unique:
self.extract_and_set(*set_params_unique)
if 'ds' not in set_params_unique:
self.ds = None
elif set_params_unique:
if 'ds' in set_params_unique:
raise InvalidArgumentError("`ds` parameter cannot be set when `rasterFile_or_ds`"
" argument is None")
self.set_params(*set_params_unique)
@staticmethod
def open_ds(rasterFile_or_ds):
ds = None
if isinstance(rasterFile_or_ds, str):
if not os.path.isfile(rasterFile_or_ds):
raise RasterIOError("No such `rasterFile`: '{}'".format(rasterFile_or_ds))
ds = gdal.Open(rasterFile_or_ds, gdal.GA_ReadOnly)
elif type(rasterFile_or_ds) == osgeo.gdal.Dataset:
ds = rasterFile_or_ds
else:
raise InvalidArgumentError("Invalid input type for `rasterFile_or_ds`: {}".format(
type(rasterFile_or_ds)))
return ds
def extract_z(self):
return self.ds.GetRasterBand(1).ReadAsArray() if self.ds is not None else None
def extract_shape(self):
return (self.ds.RasterYSize, self.ds.RasterXSize) if self.ds is not None else None
def extract_geo_trans(self):
return np.array(self.ds.GetGeoTransform()) if self.ds is not None else None
def extract_proj_ref(self):
return self.ds.GetProjectionRef() if self.ds is not None else None
def wkt(self, corner_coords=None):
if corner_coords is None:
corner_coords = self.get_corner_coords()
return 'POLYGON (({}))'.format(
','.join([" ".join([str(c) for c in cc]) for cc in corner_coords])
)
def wkt_to_coords(self, wkt):
eval_str = 'np.array({})'.format(
wkt.replace('POLYGON ','').replace('(','[').replace(')',']').replace(',','],[').replace(' ',',')
)
return eval(eval_str)
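# Illustrative example (coordinates assumed): corner coordinates
# [[0, 10], [5, 10], [5, 0], [0, 0], [0, 10]] correspond to the WKT string
# 'POLYGON ((0 10,5 10,5 0,0 0,0 10))', and wkt_to_coords() reverses the conversion.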
def extract_param(self, pname):
if self.ds is None:
raise RasterIOError("Raster must have a raster dataset reference in its 'ds'"
" data member before parameters may be extracted")
pname = pname.lower()
value = None
if pname in ('shape', 'x', 'y', 'corner_coords'):
shape = self.extract_shape()
if pname in ('x', 'y', 'dx', 'dy', 'res', 'geo_trans', 'corner_coords'):
geo_trans = self.extract_geo_trans()
if pname in ('proj_ref', 'spat_ref'):
proj_ref = self.extract_proj_ref()
if pname == 'ds':
value = self.ds
elif pname == 'shape':
value = shape
elif pname == 'z':
value = self.extract_z()
elif pname == 'x':
value = geo_trans[0] + np.arange(shape[1]) * geo_trans[1]
elif pname == 'y':
value = geo_trans[3] + np.arange(shape[0]) * geo_trans[5]
elif pname == 'dx':
value = abs(geo_trans[1])
elif pname == 'dy':
value = abs(geo_trans[5])
elif pname == 'res':
value = abs(geo_trans[1]) if abs(geo_trans[1]) == abs(geo_trans[5]) else np.nan
elif pname == 'geo_trans':
value = geo_trans
elif pname == 'corner_coords':
value = self.get_corner_coords(geo_trans, shape)
elif pname == 'proj_ref':
value = proj_ref
elif pname == 'spat_ref':
value = osr.SpatialReference(proj_ref) if proj_ref is not None else None
elif pname == 'geom':
value = ogr.Geometry(wkt=self.wkt(self.extract_param('corner_coords')))
elif pname == 'geom_sr':
value = self.extract_param('geom')
spat_ref = self.extract_param('spat_ref')
if spat_ref is not None:
value.AssignSpatialReference(spat_ref)
else:
print("WARNING: Spatial reference could not be extracted from raster dataset,"
" so extracted geometry has not been assigned a spatial reference.")
else:
raise InvalidArgumentError("Invalid parameter for extraction: {}".format(pname))
return value
def extract_params(self, *params):
if self.ds is None:
raise RasterIOError("Raster must have a raster dataset reference in its 'ds'"
" data member before parameters may be extracted")
pset = set(params)
valid_pnames = vars(self).keys()
valid_pnames.append('geom_sr')
invalid_pnames = pset.difference(set(valid_pnames))
if invalid_pnames:
raise InvalidArgumentError("Invalid parameter(s) for extraction: {}".format(invalid_pnames))
if pset.intersection({'shape', 'x', 'y', 'corner_coords', 'geom', 'geom_sr'}):
shape = self.extract_shape()
if pset.intersection({'x', 'y', 'dx', 'dy', 'res', 'geo_trans', 'corner_coords', 'geom', 'geom_sr'}):
geo_trans = self.extract_geo_trans()
if pset.intersection({'proj_ref', 'spat_ref', 'geom_sr'}):
proj_ref = self.extract_proj_ref()
if pset.intersection({'corner_coords', 'geom', 'geom_sr'}):
corner_coords = self.get_corner_coords(geo_trans, shape)
if pset.intersection({'spat_ref', 'geom_sr'}):
spat_ref = osr.SpatialReference(proj_ref) if proj_ref is not None else None
if pset.intersection({'geom', 'geom_sr'}):
geom = ogr.Geometry(wkt=self.wkt(corner_coords))
value_list = []
for pname in params:
pname = pname.lower()
value = None
if pname == 'ds':
value = self.ds
elif pname == 'shape':
value = shape
elif pname == 'z':
value = self.extract_z()
elif pname == 'x':
value = geo_trans[0] + np.arange(shape[1]) * geo_trans[1]
elif pname == 'y':
value = geo_trans[3] + np.arange(shape[0]) * geo_trans[5]
elif pname == 'dx':
value = abs(geo_trans[1])
elif pname == 'dy':
value = abs(geo_trans[5])
elif pname == 'res':
value = abs(geo_trans[1]) if abs(geo_trans[1]) == abs(geo_trans[5]) else np.nan
elif pname == 'geo_trans':
value = geo_trans
elif pname == 'corner_coords':
value = corner_coords
elif pname == 'proj_ref':
value = proj_ref
elif pname == 'spat_ref':
value = spat_ref
elif pname == 'geom':
value = geom
elif pname == 'geom_sr':
value = geom.Clone() if 'geom' in params else geom
if spat_ref is not None:
value.AssignSpatialReference(spat_ref)
else:
print("WARNING: Spatial reference could not be extracted from raster dataset,"
" so extracted geometry has not been assigned a spatial reference.")
value_list.append(value)
return value_list
def set_params(self, *params):
set_core = False
params_copy = params
if 'all' in params:
params_copy = ('z', 'x', 'y', 'corner_coords', 'spat_ref', 'geom')
set_core = True
params_copy = tuple(set(params_copy))
for p in params_copy:
self.set_param(p, set_core=set_core)
def set_params_and_values(self, *pname_value):
pnames = list(pname_value[0::2])
values = pname_value[1::2]
if len(pnames) != len(values):
raise InvalidArgumentError("Unequal number of parameter names and parameter values")
valid_parameters = vars(self).keys()
for i in range(len(pnames)):
p = pnames[i]
if isinstance(p, str):
if p in valid_parameters:
continue
elif p == 'geom_sr':
pnames[i] = 'geom'
continue
raise InvalidArgumentError("Starting with the first argument, every other argument "
"must be a valid string name of a Raster parameter")
for i in range(len(pnames)):
exec('self.{} = values[{}]'.format(pnames[i], i))
def extract_and_set(self, *params):
self.set_params_and_values(*[a for b in zip(params, self.extract_params(*params)) for a in b])
def clear_params(self):
params = vars(self).keys()
params.remove('ds')
for p in params:
exec('self.{} = None'.format(p))
def get_shape(self, caller_function=None):
if self.shape is not None:
return self.shape
elif self.z is not None:
return self.z.shape
elif caller_function == 'get_res':
return None
xsize, ysize = None, None
if self.x is not None:
xsize = len(self.x)
if self.y is not None:
ysize = len(self.y)
if (xsize is None or ysize is None) and self.corner_coords is not None:
if xsize is None:
dx = self.get_res('dx', 'get_shape')
if not np.isnan(dx):
cc_x = self.corner_coords[:, 0]
if cc_x[2] is not None and cc_x[0] is not None:
xsize = (cc_x[2] - cc_x[0]) / dx
if ysize is None:
dy = self.get_res('dy', 'get_shape')
if not np.isnan(dy):
cc_y = self.corner_coords[:, 1]
if cc_y[2] is not None and cc_y[0] is not None:
ysize = -(cc_y[2] - cc_y[0]) / dy
if xsize is None:
xsize = 0
if ysize is None:
ysize = 0
return ysize, xsize
def get_res(self, param='res', caller_function=None):
if param not in ('dx', 'dy', 'res'):
raise InvalidArgumentError("Invalid `param` argument: {}".format(param))
value = eval('self.{}'.format(param))
if value is not None:
return value
if param in ('dx', 'dy'):
if self.res is not None and not np.isnan(self.res):
value = self.res
elif param == 'dx':
if self.geo_trans is not None:
value = self.geo_trans[1]
elif self.corner_coords is not None and caller_function != 'get_shape':
cc_x = self.corner_coords[:, 0]
shape = self.get_shape('get_res')
if shape is not None:
xsize = shape[1]
value = np.nan if xsize == 0 else (cc_x[2] - cc_x[0]) / xsize
elif self.x is not None:
value = (self.x[1] - self.x[0]) if len(self.x) > 1 else np.nan
elif param == 'dy':
if self.geo_trans is not None:
value = -self.geo_trans[5]
elif self.corner_coords is not None and caller_function != 'get_shape':
cc_y = self.corner_coords[:, 1]
shape = self.get_shape('get_res')
if shape is not None:
ysize = shape[0]
value = np.nan if ysize == 0 else -(cc_y[2] - cc_y[0]) / ysize
elif self.y is not None:
value = (self.y[0] - self.y[1]) if len(self.y) > 1 else np.nan
elif param == 'res':
dx = self.get_res('dx')
dy = self.get_res('dy')
value = dx if dx == dy else np.nan
if value is None:
value = np.nan
return value
def get_xmin_ymax(self):
xmin, ymax = None, None
if self.geo_trans is not None:
xmin, ymax = itemgetter(0, 3)(self.geo_trans)
elif self.corner_coords is not None:
xmin, ymax = self.corner_coords[0]
else:
if self.geom is not None:
corner_coords = self.wkt_to_coords(self.geom.ExportToWkt())
if corner_coords.shape[0] == 5:
xmin, ymax = corner_coords[0]
if xmin is None or ymax is None:
xmin = self.x[0] if (self.x is not None and len(self.x) > 0) else np.nan
ymax = self.y[0] if (self.y is not None and len(self.y) > 0) else np.nan
return np.array([xmin, ymax])
def get_xmax_ymin(self):
xmax, ymin = None, None
if self.corner_coords is not None:
xmax, ymin = self.corner_coords[2]
else:
if self.geom is not None:
corner_coords = self.wkt_to_coords(self.geom.ExportToWkt())
if corner_coords.shape[0] == 5:
xmax, ymin = corner_coords[2]
if xmax is None or ymin is None:
dx = self.get_res('dx')
dy = self.get_res('dy')
xmax = (self.x[-1] + dx) if (self.x is not None and len(self.x) > 0) else np.nan
ymin = (self.y[-1] - dy) if (self.y is not None and len(self.y) > 0) else np.nan
if np.isnan(xmax) or np.isnan(ymin):
xmin, ymax = self.get_xmin_ymax()
ysize, xsize = self.get_shape()
if np.isnan(xmax):
xmax = xmin + xsize*dx
if np.isnan(ymin):
ymin = ymax - ysize*dy
return np.array([xmax, ymin])
def get_x(self, xmin=None, xsize=None, dx=None):
if self.x is not None \
and (xmin is None and xsize is None and dx is None):
return self.x
else:
if xmin is None:
xmin = self.get_xmin_ymax()[0]
if xsize is None:
xsize = self.get_shape()[1]
if dx is None:
dx = self.get_res('dx')
x = xmin + np.arange(xsize)*dx
if xsize > 0:
x[0] = xmin
return x
def get_y(self, ymax=None, ysize=None, dy=None):
if self.y is not None \
and (ymax is None and ysize is None and dy is None):
return self.y
else:
if ymax is None:
ymax = self.get_xmin_ymax()[1]
if ysize is None:
ysize = self.get_shape()[0]
if dy is None:
dy = self.get_res('dy')
y = ymax - np.arange(ysize)*dy
if ysize > 0:
y[0] = ymax
return y
def get_geo_trans(self):
if self.geo_trans is not None:
return self.geo_trans
else:
xmin, ymax = self.get_xmin_ymax()
dx = self.get_res('dx')
dy = self.get_res('dy')
rot1, rot2 = 0, 0
geo_trans = np.array([
xmin,
dx,
rot1,
ymax,
rot2,
-dy
]).astype(float)
return geo_trans
def get_corner_coords(self, geo_trans=None, shape=None):
if geo_trans is None and self.corner_coords is not None:
return self.corner_coords
else:
if geo_trans is None and self.geom is not None:
corner_coords = self.wkt_to_coords(self.geom.ExportToWkt())
if corner_coords.shape[0] == 5:
return corner_coords
gt = geo_trans if geo_trans is not None else self.geo_trans
if gt is not None and (geo_trans is not None or (gt[2] != 0 or gt[4] != 0)):
top_left_x = np.full((5, 1), gt[0])
top_left_y = np.full((5, 1), gt[3])
top_left_mat = np.concatenate((top_left_x, top_left_y), axis=1)
ysize, xsize = shape if shape is not None else self.get_shape()
raster_XY_size_mat = np.array([
[0, 0],
[xsize, 0],
[xsize, ysize],
[0, ysize],
[0, 0]
])
gt_mat = np.array([
[gt[1], gt[4]],
[gt[2], gt[5]]
])
return top_left_mat + np.dot(raster_XY_size_mat, gt_mat)
else:
xmin, ymax = self.get_xmin_ymax()
xmax, ymin = self.get_xmax_ymin()
corner_coords = np.array([
[xmin, ymax],
[xmax, ymax],
[xmax, ymin],
[xmin, ymin],
[xmin, ymax]
])
return corner_coords
def get_proj_ref(self):
if self.proj_ref is not None:
return self.proj_ref
else:
proj_ref = None
spat_ref = self.spat_ref
if spat_ref is None and self.geom is not None:
spat_ref = self.geom.GetSpatialReference()
if spat_ref is not None:
proj_ref = spat_ref.ExportToWkt()
return proj_ref
def get_spat_ref(self):
if self.spat_ref is not None:
return self.spat_ref
else:
spat_ref = None
if self.proj_ref is not None:
spat_ref = osr.SpatialReference(self.proj_ref)
elif self.geom is not None:
spat_ref = self.geom.GetSpatialReference()
return spat_ref
def get_geom(self):
if self.geom is not None:
return self.geom
else:
geom_cc = self.get_corner_coords()
if np.any(np.isnan(geom_cc)):
geom_cc = np.array([[0, 0]])
geom = ogr.Geometry(wkt=self.wkt(geom_cc))
spat_ref = self.spat_ref
if spat_ref is None and self.proj_ref is not None:
spat_ref = osr.SpatialReference(self.proj_ref)
if spat_ref is not None:
geom.AssignSpatialReference(spat_ref)
return geom
def set_shape(self, shape, hold, set_core=True):
if type(shape) not in (tuple, list) or len(shape) != 2 \
or False in [(type(n) in (int, long) and n >= 0) for n in shape]:
raise InvalidArgumentError("`shape` must be a numeric tuple or list of length 2")
if hold != 'off':
new_ysize, new_xsize = shape
xmin, ymax = self.get_xmin_ymax()
dx = None
dy = None
if hold == 'res':
dx = self.get_res('dx')
dy = self.get_res('dy')
self.set_extent((xmin, ymax), (xmin + new_xsize*dx, ymax - new_ysize*dy), 'off', False)
elif hold == 'extent':
xmax, ymin = self.get_xmax_ymin()
new_dx = (xmax-xmin)/new_xsize
new_dy = (ymax-ymin)/new_ysize
self.set_res('dx', new_dx, 'off', False)
self.set_res('dy', new_dy, 'off', False)
dx, dy = new_dx, new_dy
else:
raise InvalidArgumentError("Invalid `hold` argument: {}".format(hold))
if self.x is not None and new_xsize != len(self.x):
self.set_param('x', self.get_x(xmin, new_xsize, dx), False)
if self.y is not None and new_ysize != len(self.y):
self.set_param('y', self.get_y(ymax, new_ysize, dy), False)
if self.shape is not None or set_core:
self.shape = shape
def set_res(self, pname, res, hold, set_core=True, skip_gt=False):
if pname not in ('dx', 'dy', 'res'):
raise InvalidArgumentError("Invalid `pname` argument: {}".format(pname))
if not isinstance(res, numbers.Number) or res < 0 or res == float('inf'):
raise InvalidArgumentError("{} must be a positive, finite number".format(pname))
new_dx = res if pname in ('dx', 'res') else self.get_res('dx')
new_dy = res if pname in ('dy', 'res') else self.get_res('dy')
if hold != 'off':
xmin, ymax = self.get_xmin_ymax()
ysize, xsize = None, None
if hold == 'shape':
ysize, xsize = self.get_shape()
self.set_extent((xmin, ymax), (xmin + xsize*new_dx, ymax - ysize*new_dy), 'off', False)
elif hold == 'extent':
xmax, ymin = self.get_xmax_ymin()
new_xsize = (xmax-xmin)/new_dx
new_ysize = (ymax-ymin)/new_dy
new_xsize = int(new_xsize) if not np.isnan(new_xsize) else 0
new_ysize = int(new_ysize) if not np.isnan(new_ysize) else 0
self.set_shape((new_ysize, new_xsize), 'off', False)
self.set_extent((xmin, ymax), (xmin + new_xsize*new_dx, ymax - new_ysize*new_dy), 'off', False)
ysize, xsize = new_ysize, new_xsize
else:
raise InvalidArgumentError("Invalid `hold` argument: {}".format(hold))
if self.x is not None and len(self.x) > 1 and new_dx != (self.x[1]-self.x[0]):
self.set_param('x', self.get_x(xmin, xsize, new_dx), False)
if self.y is not None and len(self.y) > 1 and new_dy != (self.y[0]-self.y[1]):
self.set_param('y', self.get_y(ymax, ysize, new_dy), False)
if not skip_gt and (self.geo_trans is not None or set_core):
if self.geo_trans is None:
self.set_param('geo_trans')
new_geo_trans = np.array([
self.geo_trans[0],
new_dx,
self.geo_trans[2],
self.geo_trans[3],
self.geo_trans[4],
-new_dy
])
self.set_param('geo_trans', new_geo_trans, False)
if eval('self.{}'.format(pname)) is not None or set_core:
exec('self.{} = res'.format(pname))
if pname == 'res':
if self.dx is not None or set_core:
self.dx = res
if self.dy is not None or set_core:
self.dy = res
elif self.res is not None or set_core:
if self.dx == self.dy and self.dx is not None:
self.res = self.dx
else:
self.res = np.nan
def set_extent(self, xmin_ymax, xmax_ymin, hold, set_core=True,
skip_gt=False, skip_cc=False, skip_geom=False):
if hold in ('off', 'shape', 'res'):
pass
elif hold is None and xmax_ymin is None:
pass
else:
raise InvalidArgumentError("Invalid `hold` argument: {}".format(hold))
arg_check = [np.array(xmin_ymax)]
if xmax_ymin is None:
# Translation will be performed.
hold = None
else:
arg_check.append(np.array(xmax_ymin))
if True in [(p.ndim != 1 or len(p) != 2 or not np.issubdtype(p.dtype, np.number))
for p in arg_check]:
raise InvalidArgumentError("`xmin_ymax`, `xmax_ymin` must be convertible into a "
"numeric numpy.ndarray with ndim=1 and length 2")
new_xmin, new_ymax = xmin_ymax
new_xmax, new_ymin = None, None
if xmax_ymin is not None:
new_xmax, new_ymin = xmax_ymin
else:
ysize, xsize = self.get_shape()
new_xmax = new_xmin + xsize*self.get_res('dx')
new_ymin = new_ymax - ysize*self.get_res('dy')
littleX = True if (self.x is not None and len(self.x) < 2) else False
littleY = True if (self.y is not None and len(self.y) < 2) else False
if hold != 'off':
ysize, xsize = None, None
dx = None
dy = None
if hold == 'shape':
ysize, xsize = self.get_shape()
new_dx = (new_xmax-new_xmin)/xsize
new_dy = (new_ymax-new_ymin)/ysize
self.set_res('dx', new_dx, 'off', False)
self.set_res('dy', new_dy, 'off', False)
dx, dy = new_dx, new_dy
elif hold == 'res':
dx = self.get_res('dx')
dy = self.get_res('dy')
new_xsize = (new_xmax-new_xmin)/dx
new_ysize = (new_ymax-new_ymin)/dy
new_xsize = int(new_xsize) if not np.isnan(new_xsize) else 0
new_ysize = int(new_ysize) if not np.isnan(new_ysize) else 0
self.set_shape((new_ysize, new_xsize), 'off', False)
new_xmax = new_xmin + new_xsize*dx
new_ymin = new_ymax - new_ysize*dy
ysize, xsize = new_ysize, new_xsize
if hold is None:
# Perform translation.
if xmax_ymin is None:
if not littleX and self.x is not None and new_xmin != self.x[0]:
self.set_param('x', self.x + (new_xmin - self.x[0]), False)
self.x[0] = new_xmin
if not littleY and self.y is not None and new_ymax != self.y[0]:
self.set_param('y', self.y + (new_ymax - self.y[0]), False)
self.y[0] = new_ymax
else:
if not littleX and self.x is not None \
and (new_xmin != self.x[0] or new_xmax != (self.x[-1] + (self.x[1] - self.x[0]))):
self.set_param('x', self.get_x(new_xmin, xsize, dx), False)
if not littleY and self.y is not None \
and (new_ymax != self.y[0] or new_ymin != (self.y[-1] - (self.y[0] - self.y[1]))):
self.set_param('y', new_ymax - np.arange(ysize)*dy, False)
if littleX and len(self.x) == 1:
self.set_param('x', self.get_x(new_xmin, 1, 0), False)
if littleY and len(self.y) == 1:
self.set_param('y', self.get_y(new_ymax, 1, 0), False)
if not skip_gt and (self.geo_trans is not None or set_core):
if self.geo_trans is None:
self.set_param('geo_trans')
new_geo_trans = np.array([
new_xmin,
self.geo_trans[1],
self.geo_trans[2],
new_ymax,
self.geo_trans[4],
self.geo_trans[5]
])
self.set_param('geo_trans', new_geo_trans, False)
if not (skip_cc and skip_geom) and (self.corner_coords is not None or self.geom is not None):
corner_coords = np.array([
[new_xmin, new_ymax],
[new_xmax, new_ymax],
[new_xmax, new_ymin],
[new_xmin, new_ymin],
[new_xmin, new_ymax]
])
if not skip_cc and self.corner_coords is not None:
self.set_param('corner_coords', corner_coords, False)
if not skip_geom and self.geom is not None:
spat_ref = self.geom.GetSpatialReference()
geom_cc = corner_coords if not np.any(np.isnan(corner_coords)) else np.array([[0, 0]])
self.set_param('geom', ogr.Geometry(wkt=self.wkt(geom_cc)), False)
if spat_ref is not None:
self.geom.AssignSpatialReference(spat_ref)
def set_projection(self, proj_ref, set_core=True, skip_sr=False, skip_geom=False):
try:
spat_ref = osr.SpatialReference(proj_ref)
spat_ref.IsProjected()
except:
raise InvalidArgumentError("`proj_ref` must be a WKT projection string that can be "
"converted into an osgeo.osr.SpatialReference object")
if not skip_sr and self.spat_ref is not None:
self.set_param('spat_ref', spat_ref, False)
if not skip_geom and self.geom is not None:
self.geom.AssignSpatialReference(spat_ref)
if self.proj_ref is not None or set_core:
self.proj_ref = proj_ref
def set_param(self, pname, value=None, prop=True, hold=None, set_core=False, set_default=True):
if pname not in vars(self).keys():
raise InvalidArgumentError("Raster does not have param `pname` '{}'".format(pname))
if value is None:
# Set default value for parameter.
if not set_default:
return
elif eval('self.{}'.format(pname)) is not None:
# The parameter is already set. Without a value argument, there is nothing to do.
print("This Raster's '{}' data member is already set".format(pname))
return
elif isinstance(value, str) and value == 'extract':
value = self.extract_param(pname)
if value is None:
prop = False
if not prop:
hold = 'off'
if set_core:
prop = True
errmsg = None
if pname in ('all', 'no-ds'):
pass
elif pname == 'ds':
if value is None:
raise InvalidArgumentError("`ds` has no default to be set")
ds = value
if type(ds) != osgeo.gdal.Dataset:
errmsg = "{} must be an osgeo.gdal.Dataset"
else:
self.ds = ds
elif pname == 'shape':
shape = value if value is not None else self.get_shape()
if hold is None:
hold = 'extent'
self.set_shape(shape, hold)
elif pname == 'z':
z = value if value is not None else np.zeros(self.get_shape())
if type(z) != np.ndarray or not np.issubdtype(z.dtype, np.number) or z.ndim != 2:
errmsg = "{} must be a numeric numpy.ndarray with ndim=2".format(pname)
else:
if prop:
if hold is None:
hold = 'extent'
self.set_shape(z.shape, hold, set_core)
self.z = z
elif pname == 'x':
x = value if value is not None else self.get_x()
if type(x) != np.ndarray or not np.issubdtype(x.dtype, np.number) or x.ndim != 1 \
or (len(x) > 1 and np.any(~np.isnan(x)) \
and len(np.unique(np.round((x[1:] - x[:-1]), 8))) > 1):
errmsg = "{} must be a numeric numpy.ndarray with ndim=1 and regular spacing".format(pname)
else:
if prop:
old_ysize, old_xsize = self.get_shape()
old_dx = self.get_res('dx')
old_xmin, old_ymax = self.get_xmin_ymax()
old_xmax, old_ymin = self.get_xmax_ymin()
new_xsize = len(x)
new_dx = None
if len(x) == 0:
new_dx = np.nan
elif len(x) == 1:
new_dx = old_dx
else:
new_dx = (x[1] - x[0])
new_xmin = x[0] if len(x) > 0 else np.nan
new_xmax = new_xmin + new_xsize*new_dx
if new_xsize != old_xsize:
self.set_shape((old_ysize, new_xsize), 'off', set_core)
if new_dx != old_dx:
self.set_res('dx', new_dx, 'off', set_core)
if new_xmin != old_xmin or new_xmax != old_xmax:
self.set_extent((new_xmin, old_ymax), (new_xmax, old_ymin), 'off', set_core)
self.x = x
elif pname == 'y':
y = value if value is not None else self.get_y()
if type(y) != np.ndarray or not np.issubdtype(y.dtype, np.number) or y.ndim != 1 \
or (len(y) > 1 and np.any(~np.isnan(y)) \
and len(np.unique(np.round((y[1:] - y[:-1]), 8))) > 1):
errmsg = "{} must be of type numpy.ndarray with ndim=1 and regular spacing".format(pname)
else:
if prop:
old_ysize, old_xsize = self.get_shape()
old_dy = self.get_res('dy')
old_xmin, old_ymax = self.get_xmin_ymax()
old_xmax, old_ymin = self.get_xmax_ymin()
new_ysize = len(y)
new_dy = None
if len(y) == 0:
new_dy = np.nan
elif len(y) == 1:
new_dy = old_dy
else:
new_dy = (y[0] - y[1])
new_ymax = y[0] if len(y) > 0 else np.nan
new_ymin = new_ymax - new_ysize*new_dy
if new_ysize != old_ysize:
self.set_shape((new_ysize, old_xsize), 'off', set_core)
if new_dy != old_dy:
self.set_res('dy', new_dy, 'off', set_core)
if new_ymax != old_ymax or new_ymin != old_ymin:
self.set_extent((old_xmin, new_ymax), (old_xmax, new_ymin), 'off', set_core)
self.y = y
elif pname in ('dx', 'dy', 'res'):
val = value if value is not None else self.get_res(pname)
if prop:
if hold is None:
hold = 'extent'
self.set_res(pname, value, hold)
else:
if not isinstance(val, numbers.Number) or val < 0 or val == float('inf'):
errmsg = "{} must be a positive, finite number".format(pname)
else:
exec('self.{} = val'.format(pname))
elif pname == "geo_trans":
geo_trans = value if value is not None else self.get_geo_trans()
if type(geo_trans) != np.ndarray or not np.issubdtype(geo_trans.dtype, np.number) \
or geo_trans.shape != (6,):
errmsg = "{} must be a numeric numpy.ndarray with shape (6,)".format(pname)
else:
if prop:
if hold is None:
hold = 'extent'
old_xmin, old_ymax = self.get_xmin_ymax()
old_dx = self.get_res('dx')
old_dy = self.get_res('dy')
new_xmin, new_ymax = itemgetter(0, 3)(geo_trans)
new_dx = geo_trans[1]
new_dy = -geo_trans[5]
if new_dx != old_dx:
self.set_res('dx', new_dx, hold, set_core, skip_gt=True)
if new_dy != old_dy:
self.set_res('dy', new_dy, hold, set_core, skip_gt=True)
if new_xmin != old_xmin or new_ymax != old_ymax:
self.set_extent((new_xmin, new_ymax), None, None, set_core, skip_gt=True)
self.geo_trans = geo_trans
elif pname == 'corner_coords':
corner_coords = value if value is not None else self.get_corner_coords()
if type(corner_coords) != np.ndarray or not np.issubdtype(corner_coords.dtype, np.number) \
or not corner_coords.shape == (5, 2):
errmsg = "{} must be a numeric numpy.ndarray with shape (5, 2)".format(pname)
else:
if prop:
if hold is None:
hold = 'res'
self.set_extent(corner_coords[0], corner_coords[2], hold, set_core, skip_cc=True)
self.corner_coords = corner_coords
elif pname == 'proj_ref':
proj_ref = value if value is not None else RASTER_DEFAULT_PROJREF
if prop:
self.set_projection(proj_ref)
else:
try:
spat_ref = osr.SpatialReference(proj_ref)
spat_ref.IsProjected()
self.proj_ref = proj_ref
except:
raise InvalidArgumentError("{} must be a WKT projection string that can be"
" converted into an osgeo.osr.SpatialReference"
" object".format(pname))
elif pname == 'spat_ref':
spat_ref = value if value is not None else osr.SpatialReference(RASTER_DEFAULT_PROJREF)
try:
if type(spat_ref) != osgeo.osr.SpatialReference:
raise InvalidArgumentError
spat_ref.IsProjected()
except:
errmsg = "{} must be a projected osgeo.osr.SpatialReference object".format(pname)
if errmsg is None:
if prop:
self.set_projection(spat_ref.ExportToWkt(), set_core, skip_sr=True)
self.spat_ref = spat_ref
elif pname in ('geom', 'geom_sr'):
geom = value if value is not None else self.get_geom()
try:
if type(geom) != osgeo.ogr.Geometry \
or geom.GetDimension() != 2 or geom.GetCoordinateDimension() != 2:
raise InvalidArgumentError
wkt = geom.ExportToWkt()
if len(wkt.split(',')) != 5:
prop = False
except:
errmsg = "{} must be a 2D osgeo.ogr.Geometry object"\
" containing 5 pairs of 2D coordinates".format(pname)
if errmsg is None:
if prop:
if hold is None:
hold = 'res'
corner_coords = self.wkt_to_coords(wkt)
self.set_extent(corner_coords[0], corner_coords[2], hold, set_core, skip_geom=True)
spat_ref = geom.GetSpatialReference()
if spat_ref is not None:
self.set_projection(spat_ref.ExportToWkt(), set_core, skip_geom=True)
self.geom = geom
else:
errmsg = "No setter mechanism has been implemented yet for parameter '{}'".format(pname)
if errmsg is not None:
if value is not None:
raise InvalidArgumentError(errmsg)
else:
raise RasterIOError(errmsg)
def refresh_param(self, pname):
if pname not in vars(self).keys():
raise InvalidArgumentError("Raster does not have param `pname` '{}'".format(pname))
exec('self.{} = None'.format(pname))
self.set_param(pname)
def prop_param(self, pname, hold=None, set_core=False):
if pname not in vars(self).keys():
raise InvalidArgumentError("Raster does not have param `pname` '{}'".format(pname))
value = eval('self.{}'.format(pname))
if value is None:
print("No value is stored in this Raster's '{}' parameter to propagate".format(pname))
return
exec('self.{} = None'.format(pname))
self.set_param(pname, value, True, hold, set_core, False)
|
the-stack_0_6294 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import decimal
import functools
import operator
import queue
import warnings
import pkg_resources
import mock
try:
import pandas
import pandas.api.types
import pandas.testing
except ImportError: # pragma: NO COVER
pandas = None
import pyarrow
import pyarrow.types
try:
import geopandas
except ImportError: # pragma: NO COVER
geopandas = None
import pytest
from google import api_core
from google.cloud import bigquery_storage
from google.cloud.bigquery import _helpers
from google.cloud.bigquery import schema
PANDAS_MINIUM_VERSION = pkg_resources.parse_version("1.0.0")
if pandas is not None:
PANDAS_INSTALLED_VERSION = pkg_resources.get_distribution("pandas").parsed_version
else:
# Set to less than MIN version.
PANDAS_INSTALLED_VERSION = pkg_resources.parse_version("0.0.0")
@pytest.fixture
def module_under_test():
from google.cloud.bigquery import _pandas_helpers
return _pandas_helpers
def is_none(value):
return value is None
def is_datetime(type_):
# See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#datetime-type
return all_(
pyarrow.types.is_timestamp,
lambda type_: type_.unit == "us",
lambda type_: type_.tz is None,
)(type_)
def is_numeric(type_):
# See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
return all_(
pyarrow.types.is_decimal,
lambda type_: type_.precision == 38,
lambda type_: type_.scale == 9,
)(type_)
def is_bignumeric(type_):
# See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
return all_(
pyarrow.types.is_decimal,
lambda type_: type_.precision == 76,
lambda type_: type_.scale == 38,
)(type_)
def is_timestamp(type_):
# See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type
return all_(
pyarrow.types.is_timestamp,
lambda type_: type_.unit == "us",
lambda type_: type_.tz == "UTC",
)(type_)
def do_all(functions, value):
return all((func(value) for func in functions))
def all_(*functions):
return functools.partial(do_all, functions)
def test_is_datetime():
assert is_datetime(pyarrow.timestamp("us", tz=None))
assert not is_datetime(pyarrow.timestamp("ms", tz=None))
assert not is_datetime(pyarrow.timestamp("us", tz="UTC"))
assert not is_datetime(pyarrow.timestamp("ns", tz="UTC"))
assert not is_datetime(pyarrow.string())
def test_do_all():
assert do_all((lambda _: True, lambda _: True), None)
assert not do_all((lambda _: True, lambda _: False), None)
assert not do_all((lambda _: False,), None)
def test_all_():
assert all_(lambda _: True, lambda _: True)(None)
assert not all_(lambda _: True, lambda _: False)(None)
@pytest.mark.parametrize(
"bq_type,bq_mode,is_correct_type",
[
("STRING", "NULLABLE", pyarrow.types.is_string),
("STRING", None, pyarrow.types.is_string),
("string", "NULLABLE", pyarrow.types.is_string),
("StRiNg", "NULLABLE", pyarrow.types.is_string),
("BYTES", "NULLABLE", pyarrow.types.is_binary),
("INTEGER", "NULLABLE", pyarrow.types.is_int64),
("INT64", "NULLABLE", pyarrow.types.is_int64),
("FLOAT", "NULLABLE", pyarrow.types.is_float64),
("FLOAT64", "NULLABLE", pyarrow.types.is_float64),
("NUMERIC", "NULLABLE", is_numeric),
("BIGNUMERIC", "NULLABLE", is_bignumeric),
("BOOLEAN", "NULLABLE", pyarrow.types.is_boolean),
("BOOL", "NULLABLE", pyarrow.types.is_boolean),
("TIMESTAMP", "NULLABLE", is_timestamp),
("DATE", "NULLABLE", pyarrow.types.is_date32),
("TIME", "NULLABLE", pyarrow.types.is_time64),
("DATETIME", "NULLABLE", is_datetime),
("GEOGRAPHY", "NULLABLE", pyarrow.types.is_string),
("UNKNOWN_TYPE", "NULLABLE", is_none),
# Use pyarrow.list_(item_type) for repeated (array) fields.
(
"STRING",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_string(type_.value_type),
),
),
(
"STRING",
"repeated",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_string(type_.value_type),
),
),
(
"STRING",
"RePeAtEd",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_string(type_.value_type),
),
),
(
"BYTES",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_binary(type_.value_type),
),
),
(
"INTEGER",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_int64(type_.value_type),
),
),
(
"INT64",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_int64(type_.value_type),
),
),
(
"FLOAT",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_float64(type_.value_type),
),
),
(
"FLOAT64",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_float64(type_.value_type),
),
),
(
"NUMERIC",
"REPEATED",
all_(pyarrow.types.is_list, lambda type_: is_numeric(type_.value_type)),
),
(
"BIGNUMERIC",
"REPEATED",
all_(pyarrow.types.is_list, lambda type_: is_bignumeric(type_.value_type)),
),
(
"BOOLEAN",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_boolean(type_.value_type),
),
),
(
"BOOL",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_boolean(type_.value_type),
),
),
(
"TIMESTAMP",
"REPEATED",
all_(pyarrow.types.is_list, lambda type_: is_timestamp(type_.value_type)),
),
(
"DATE",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_date32(type_.value_type),
),
),
(
"TIME",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_time64(type_.value_type),
),
),
(
"DATETIME",
"REPEATED",
all_(pyarrow.types.is_list, lambda type_: is_datetime(type_.value_type)),
),
(
"GEOGRAPHY",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_string(type_.value_type),
),
),
("RECORD", "REPEATED", is_none),
("UNKNOWN_TYPE", "REPEATED", is_none),
],
)
def test_bq_to_arrow_data_type(module_under_test, bq_type, bq_mode, is_correct_type):
field = schema.SchemaField("ignored_name", bq_type, mode=bq_mode)
actual = module_under_test.bq_to_arrow_data_type(field)
assert is_correct_type(actual)
@pytest.mark.parametrize("bq_type", ["RECORD", "record", "STRUCT", "struct"])
def test_bq_to_arrow_data_type_w_struct(module_under_test, bq_type):
fields = (
schema.SchemaField("field01", "STRING"),
schema.SchemaField("field02", "BYTES"),
schema.SchemaField("field03", "INTEGER"),
schema.SchemaField("field04", "INT64"),
schema.SchemaField("field05", "FLOAT"),
schema.SchemaField("field06", "FLOAT64"),
schema.SchemaField("field07", "NUMERIC"),
schema.SchemaField("field08", "BIGNUMERIC"),
schema.SchemaField("field09", "BOOLEAN"),
schema.SchemaField("field10", "BOOL"),
schema.SchemaField("field11", "TIMESTAMP"),
schema.SchemaField("field12", "DATE"),
schema.SchemaField("field13", "TIME"),
schema.SchemaField("field14", "DATETIME"),
schema.SchemaField("field15", "GEOGRAPHY"),
)
field = schema.SchemaField("ignored_name", bq_type, mode="NULLABLE", fields=fields)
actual = module_under_test.bq_to_arrow_data_type(field)
expected = (
pyarrow.field("field01", pyarrow.string()),
pyarrow.field("field02", pyarrow.binary()),
pyarrow.field("field03", pyarrow.int64()),
pyarrow.field("field04", pyarrow.int64()),
pyarrow.field("field05", pyarrow.float64()),
pyarrow.field("field06", pyarrow.float64()),
pyarrow.field("field07", module_under_test.pyarrow_numeric()),
pyarrow.field("field08", module_under_test.pyarrow_bignumeric()),
pyarrow.field("field09", pyarrow.bool_()),
pyarrow.field("field10", pyarrow.bool_()),
pyarrow.field("field11", module_under_test.pyarrow_timestamp()),
pyarrow.field("field12", pyarrow.date32()),
pyarrow.field("field13", module_under_test.pyarrow_time()),
pyarrow.field("field14", module_under_test.pyarrow_datetime()),
pyarrow.field("field15", pyarrow.string()),
)
expected = pyarrow.struct(expected)
assert pyarrow.types.is_struct(actual)
assert actual.num_fields == len(fields)
assert actual.equals(expected)
@pytest.mark.parametrize("bq_type", ["RECORD", "record", "STRUCT", "struct"])
def test_bq_to_arrow_data_type_w_array_struct(module_under_test, bq_type):
fields = (
schema.SchemaField("field01", "STRING"),
schema.SchemaField("field02", "BYTES"),
schema.SchemaField("field03", "INTEGER"),
schema.SchemaField("field04", "INT64"),
schema.SchemaField("field05", "FLOAT"),
schema.SchemaField("field06", "FLOAT64"),
schema.SchemaField("field07", "NUMERIC"),
schema.SchemaField("field08", "BIGNUMERIC"),
schema.SchemaField("field09", "BOOLEAN"),
schema.SchemaField("field10", "BOOL"),
schema.SchemaField("field11", "TIMESTAMP"),
schema.SchemaField("field12", "DATE"),
schema.SchemaField("field13", "TIME"),
schema.SchemaField("field14", "DATETIME"),
schema.SchemaField("field15", "GEOGRAPHY"),
)
field = schema.SchemaField("ignored_name", bq_type, mode="REPEATED", fields=fields)
actual = module_under_test.bq_to_arrow_data_type(field)
expected = (
pyarrow.field("field01", pyarrow.string()),
pyarrow.field("field02", pyarrow.binary()),
pyarrow.field("field03", pyarrow.int64()),
pyarrow.field("field04", pyarrow.int64()),
pyarrow.field("field05", pyarrow.float64()),
pyarrow.field("field06", pyarrow.float64()),
pyarrow.field("field07", module_under_test.pyarrow_numeric()),
pyarrow.field("field08", module_under_test.pyarrow_bignumeric()),
pyarrow.field("field09", pyarrow.bool_()),
pyarrow.field("field10", pyarrow.bool_()),
pyarrow.field("field11", module_under_test.pyarrow_timestamp()),
pyarrow.field("field12", pyarrow.date32()),
pyarrow.field("field13", module_under_test.pyarrow_time()),
pyarrow.field("field14", module_under_test.pyarrow_datetime()),
pyarrow.field("field15", pyarrow.string()),
)
expected_value_type = pyarrow.struct(expected)
assert pyarrow.types.is_list(actual)
assert pyarrow.types.is_struct(actual.value_type)
assert actual.value_type.num_fields == len(fields)
assert actual.value_type.equals(expected_value_type)
def test_bq_to_arrow_data_type_w_struct_unknown_subfield(module_under_test):
fields = (
schema.SchemaField("field1", "STRING"),
schema.SchemaField("field2", "INTEGER"),
        # Don't know what to convert UNKNOWN_TYPE to, so let type inference
        # work instead.
schema.SchemaField("field3", "UNKNOWN_TYPE"),
)
field = schema.SchemaField("ignored_name", "RECORD", mode="NULLABLE", fields=fields)
with warnings.catch_warnings(record=True) as warned:
actual = module_under_test.bq_to_arrow_data_type(field)
assert actual is None
assert len(warned) == 1
warning = warned[0]
assert "field3" in str(warning)
@pytest.mark.parametrize(
"bq_type,rows",
[
("STRING", ["abc", None, "def", None]),
("BYTES", [b"abc", None, b"def", None]),
("INTEGER", [123, None, 456, None]),
("INT64", [-9223372036854775808, None, 9223372036854775807, 123]),
("FLOAT", [1.25, None, 3.5, None]),
(
"NUMERIC",
[
decimal.Decimal("-99999999999999999999999999999.999999999"),
None,
decimal.Decimal("99999999999999999999999999999.999999999"),
decimal.Decimal("999.123456789"),
],
),
(
"BIGNUMERIC",
[
decimal.Decimal("-{d38}.{d38}".format(d38="9" * 38)),
None,
decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
decimal.Decimal("3.141592653589793238462643383279"),
],
),
("BOOLEAN", [True, None, False, None]),
("BOOL", [False, None, True, None]),
(
"TIMESTAMP",
[
datetime.datetime(1, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
None,
datetime.datetime(
9999, 12, 31, 23, 59, 59, 999999, tzinfo=datetime.timezone.utc
),
datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
],
),
(
"DATE",
[
datetime.date(1, 1, 1),
None,
datetime.date(9999, 12, 31),
datetime.date(1970, 1, 1),
],
),
(
"TIME",
[
datetime.time(0, 0, 0),
None,
datetime.time(23, 59, 59, 999999),
datetime.time(12, 0, 0),
],
),
(
"DATETIME",
[
datetime.datetime(1, 1, 1, 0, 0, 0),
datetime.datetime(9999, 12, 31, 23, 59, 59, 999999),
None,
datetime.datetime(1970, 1, 1, 0, 0, 0),
datetime.datetime(1999, 3, 14, 15, 9, 26, 535898),
],
),
(
"GEOGRAPHY",
[
"POINT(30 10)",
None,
"LINESTRING (30 10, 10 30, 40 40)",
"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))",
],
),
],
)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_bq_to_arrow_array_w_nullable_scalars(module_under_test, bq_type, rows):
series = pandas.Series(rows, dtype="object")
bq_field = schema.SchemaField("field_name", bq_type)
arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
roundtrip = arrow_array.to_pylist()
assert rows == roundtrip
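# Note: dtype="object" above keeps the raw Python values (including None)
# untouched, which is what lets bq_to_arrow_array round-trip them exactly.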
@pytest.mark.parametrize(
"bq_type,rows",
[
(
"TIMESTAMP",
[
"1971-09-28T23:59:07+00:00",
"1975-04-09T23:59:02+00:00",
"1979-08-17T23:59:05+00:00",
"NaT",
"1983-05-09T13:00:00+00:00",
],
),
(
"DATETIME",
[
"1971-09-28T23:59:07",
"1975-04-09T23:59:02",
"1979-08-17T23:59:05",
"NaT",
"1983-05-09T13:00:00",
],
),
],
)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_bq_to_arrow_array_w_pandas_timestamp(module_under_test, bq_type, rows):
rows = [pandas.Timestamp(row) for row in rows]
series = pandas.Series(rows)
bq_field = schema.SchemaField("field_name", bq_type)
arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
roundtrip = arrow_array.to_pandas()
assert series.equals(roundtrip)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_bq_to_arrow_array_w_arrays(module_under_test):
rows = [[1, 2, 3], [], [4, 5, 6]]
series = pandas.Series(rows, dtype="object")
bq_field = schema.SchemaField("field_name", "INTEGER", mode="REPEATED")
arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
roundtrip = arrow_array.to_pylist()
assert rows == roundtrip
@pytest.mark.parametrize("bq_type", ["RECORD", "record", "STRUCT", "struct"])
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_bq_to_arrow_array_w_structs(module_under_test, bq_type):
rows = [
{"int_col": 123, "string_col": "abc"},
None,
{"int_col": 456, "string_col": "def"},
]
series = pandas.Series(rows, dtype="object")
bq_field = schema.SchemaField(
"field_name",
bq_type,
fields=(
schema.SchemaField("int_col", "INTEGER"),
schema.SchemaField("string_col", "STRING"),
),
)
arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
roundtrip = arrow_array.to_pylist()
assert rows == roundtrip
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_bq_to_arrow_array_w_special_floats(module_under_test):
bq_field = schema.SchemaField("field_name", "FLOAT64")
rows = [float("-inf"), float("nan"), float("inf"), None]
series = pandas.Series(rows, dtype="object")
arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
roundtrip = arrow_array.to_pylist()
assert len(rows) == len(roundtrip)
assert roundtrip[0] == float("-inf")
# Since we are converting from pandas, NaN is treated as NULL in pyarrow
# due to pandas conventions.
# https://arrow.apache.org/docs/python/data.html#none-values-and-nan-handling
assert roundtrip[1] is None
assert roundtrip[2] == float("inf")
assert roundtrip[3] is None
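# For reference, the NaN-to-NULL behavior asserted above can be reproduced with
# pyarrow directly (a minimal sketch, assuming only that pyarrow is importable):
#
#   arr = pyarrow.array([float("nan")], from_pandas=True)
#   assert arr.to_pylist() == [None]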
@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
def test_bq_to_arrow_array_w_geography_dtype(module_under_test):
from shapely import wkb, wkt
bq_field = schema.SchemaField("field_name", "GEOGRAPHY")
series = geopandas.GeoSeries([None, wkt.loads("point(0 0)")])
array = module_under_test.bq_to_arrow_array(series, bq_field)
# The result is binary, because we use wkb format
assert array.type == pyarrow.binary()
assert array.to_pylist() == [None, wkb.dumps(series[1])]
# All na:
series = geopandas.GeoSeries([None, None])
array = module_under_test.bq_to_arrow_array(series, bq_field)
assert array.type == pyarrow.string()
assert array.to_pylist() == list(series)
@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
def test_bq_to_arrow_array_w_geography_type_shapely_data(module_under_test):
from shapely import wkb, wkt
bq_field = schema.SchemaField("field_name", "GEOGRAPHY")
series = pandas.Series([None, wkt.loads("point(0 0)")])
array = module_under_test.bq_to_arrow_array(series, bq_field)
# The result is binary, because we use wkb format
assert array.type == pyarrow.binary()
assert array.to_pylist() == [None, wkb.dumps(series[1])]
# All na:
series = pandas.Series([None, None])
array = module_under_test.bq_to_arrow_array(series, bq_field)
assert array.type == pyarrow.string()
assert array.to_pylist() == list(series)
@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
def test_bq_to_arrow_array_w_geography_type_wkb_data(module_under_test):
from shapely import wkb, wkt
bq_field = schema.SchemaField("field_name", "GEOGRAPHY")
series = pandas.Series([None, wkb.dumps(wkt.loads("point(0 0)"))])
array = module_under_test.bq_to_arrow_array(series, bq_field)
# The result is binary, because we use wkb format
assert array.type == pyarrow.binary()
assert array.to_pylist() == list(series)
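# Taken together, the three GEOGRAPHY tests above document the type choice:
# binary (WKB) whenever shapely geometries or WKB bytes are present, and string
# when the series is all-NA (in the earlier nullable-scalar test, plain WKT
# strings likewise stay strings).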
def test_bq_to_arrow_schema_w_unknown_type(module_under_test):
fields = (
schema.SchemaField("field1", "STRING"),
schema.SchemaField("field2", "INTEGER"),
        # Don't know what to convert UNKNOWN_TYPE to, so let type inference
        # work instead.
schema.SchemaField("field3", "UNKNOWN_TYPE"),
)
with warnings.catch_warnings(record=True) as warned:
actual = module_under_test.bq_to_arrow_schema(fields)
assert actual is None
assert len(warned) == 1
warning = warned[0]
assert "field3" in str(warning)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_not_found(module_under_test):
dataframe = pandas.DataFrame({"not_the_column_youre_looking_for": [1, 2, 3]})
with pytest.raises(ValueError, match="col_is_missing"):
module_under_test.get_column_or_index(dataframe, "col_is_missing")
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_multiindex_not_found(module_under_test):
dataframe = pandas.DataFrame(
{"column_name": [1, 2, 3, 4, 5, 6]},
index=pandas.MultiIndex.from_tuples(
[("a", 0), ("a", 1), ("b", 0), ("b", 1), ("c", 0), ("c", 1)]
),
)
with pytest.raises(ValueError, match="not_in_df"):
module_under_test.get_column_or_index(dataframe, "not_in_df")
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_both_prefers_column(module_under_test):
dataframe = pandas.DataFrame(
{"some_name": [1, 2, 3]}, index=pandas.Index([0, 1, 2], name="some_name")
)
series = module_under_test.get_column_or_index(dataframe, "some_name")
expected = pandas.Series([1, 2, 3], name="some_name")
pandas.testing.assert_series_equal(series, expected)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_column(module_under_test):
dataframe = pandas.DataFrame({"column_name": [1, 2, 3], "other_column": [4, 5, 6]})
series = module_under_test.get_column_or_index(dataframe, "column_name")
expected = pandas.Series([1, 2, 3], name="column_name")
pandas.testing.assert_series_equal(series, expected)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_named_index(module_under_test):
dataframe = pandas.DataFrame(
{"column_name": [1, 2, 3]}, index=pandas.Index([4, 5, 6], name="index_name")
)
series = module_under_test.get_column_or_index(dataframe, "index_name")
expected = pandas.Series([4, 5, 6], name="index_name")
pandas.testing.assert_series_equal(series, expected)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_datetimeindex(module_under_test):
datetimes = [
datetime.datetime(2000, 1, 2, 3, 4, 5, 101),
datetime.datetime(2006, 7, 8, 9, 10, 11, 202),
datetime.datetime(2012, 1, 14, 15, 16, 17, 303),
]
dataframe = pandas.DataFrame(
{"column_name": [1, 2, 3]},
index=pandas.DatetimeIndex(datetimes, name="index_name"),
)
series = module_under_test.get_column_or_index(dataframe, "index_name")
expected = pandas.Series(datetimes, name="index_name")
pandas.testing.assert_series_equal(series, expected)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_multiindex(module_under_test):
dataframe = pandas.DataFrame(
{"column_name": [1, 2, 3, 4, 5, 6]},
index=pandas.MultiIndex.from_tuples(
[("a", 0), ("a", 1), ("b", 0), ("b", 1), ("c", 0), ("c", 1)],
names=["letters", "numbers"],
),
)
series = module_under_test.get_column_or_index(dataframe, "letters")
expected = pandas.Series(["a", "a", "b", "b", "c", "c"], name="letters")
pandas.testing.assert_series_equal(series, expected)
series = module_under_test.get_column_or_index(dataframe, "numbers")
expected = pandas.Series([0, 1, 0, 1, 0, 1], name="numbers")
pandas.testing.assert_series_equal(series, expected)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_list_columns_and_indexes_without_named_index(module_under_test):
df_data = collections.OrderedDict(
[
("a_series", [1, 2, 3, 4]),
("b_series", [0.1, 0.2, 0.3, 0.4]),
("c_series", ["a", "b", "c", "d"]),
]
)
dataframe = pandas.DataFrame(df_data)
columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
expected = [
("a_series", pandas.api.types.pandas_dtype("int64")),
("b_series", pandas.api.types.pandas_dtype("float64")),
("c_series", pandas.api.types.pandas_dtype("object")),
]
assert columns_and_indexes == expected
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_list_columns_and_indexes_with_named_index_same_as_column_name(
module_under_test,
):
df_data = collections.OrderedDict(
[
("a_series", [1, 2, 3, 4]),
("b_series", [0.1, 0.2, 0.3, 0.4]),
("c_series", ["a", "b", "c", "d"]),
]
)
dataframe = pandas.DataFrame(
df_data,
# Use same name as an integer column but a different datatype so that
# we can verify that the column is listed but the index isn't.
index=pandas.Index([0.1, 0.2, 0.3, 0.4], name="a_series"),
)
columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
expected = [
("a_series", pandas.api.types.pandas_dtype("int64")),
("b_series", pandas.api.types.pandas_dtype("float64")),
("c_series", pandas.api.types.pandas_dtype("object")),
]
assert columns_and_indexes == expected
@pytest.mark.skipif(
pandas is None or PANDAS_INSTALLED_VERSION < PANDAS_MINIUM_VERSION,
reason="Requires `pandas version >= 1.0.0` which introduces pandas.NA",
)
def test_dataframe_to_json_generator(module_under_test):
utcnow = datetime.datetime.utcnow()
df_data = collections.OrderedDict(
[
("a_series", [pandas.NA, 2, 3, 4]),
("b_series", [0.1, float("NaN"), 0.3, 0.4]),
("c_series", ["a", "b", pandas.NA, "d"]),
("d_series", [utcnow, utcnow, utcnow, pandas.NaT]),
("e_series", [True, False, True, None]),
]
)
dataframe = pandas.DataFrame(
df_data, index=pandas.Index([4, 5, 6, 7], name="a_index")
)
dataframe = dataframe.astype({"a_series": pandas.Int64Dtype()})
rows = module_under_test.dataframe_to_json_generator(dataframe)
expected = [
{"b_series": 0.1, "c_series": "a", "d_series": utcnow, "e_series": True},
{"a_series": 2, "c_series": "b", "d_series": utcnow, "e_series": False},
{"a_series": 3, "b_series": 0.3, "d_series": utcnow, "e_series": True},
{"a_series": 4, "b_series": 0.4, "c_series": "d"},
]
assert list(rows) == expected
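# Note: the expected rows above omit keys whose values are pandas.NA, NaN, NaT,
# or None, i.e. dataframe_to_json_generator drops missing values rather than
# emitting explicit nulls.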
def test_dataframe_to_json_generator_repeated_field(module_under_test):
pytest.importorskip(
"pandas",
minversion=str(PANDAS_MINIUM_VERSION),
reason=(
f"Requires `pandas version >= {PANDAS_MINIUM_VERSION}` "
"which introduces pandas.NA"
),
)
df_data = [
collections.OrderedDict(
[("repeated_col", [pandas.NA, 2, None, 4]), ("not_repeated_col", "first")]
),
collections.OrderedDict(
[
("repeated_col", ["a", "b", mock.sentinel.foo, "d"]),
("not_repeated_col", "second"),
]
),
]
dataframe = pandas.DataFrame(df_data)
rows = module_under_test.dataframe_to_json_generator(dataframe)
expected = [
{"repeated_col": [pandas.NA, 2, None, 4], "not_repeated_col": "first"},
{
"repeated_col": ["a", "b", mock.sentinel.foo, "d"],
"not_repeated_col": "second",
},
]
assert list(rows) == expected
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_list_columns_and_indexes_with_named_index(module_under_test):
df_data = collections.OrderedDict(
[
("a_series", [1, 2, 3, 4]),
("b_series", [0.1, 0.2, 0.3, 0.4]),
("c_series", ["a", "b", "c", "d"]),
]
)
dataframe = pandas.DataFrame(
df_data, index=pandas.Index([4, 5, 6, 7], name="a_index")
)
columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
expected = [
("a_index", pandas.api.types.pandas_dtype("int64")),
("a_series", pandas.api.types.pandas_dtype("int64")),
("b_series", pandas.api.types.pandas_dtype("float64")),
("c_series", pandas.api.types.pandas_dtype("object")),
]
assert columns_and_indexes == expected
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_list_columns_and_indexes_with_multiindex(module_under_test):
df_data = collections.OrderedDict(
[
("a_series", [1, 2, 3, 4]),
("b_series", [0.1, 0.2, 0.3, 0.4]),
("c_series", ["a", "b", "c", "d"]),
]
)
dataframe = pandas.DataFrame(
df_data,
index=pandas.MultiIndex.from_tuples(
[(0, 0, 41), (0, 0, 42), (1, 0, 41), (1, 1, 41)],
names=[
"a_index",
# Use same name as column, but different dtype so we can verify
# the column type is included.
"b_series",
"c_index",
],
),
)
columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
expected = [
("a_index", pandas.api.types.pandas_dtype("int64")),
("c_index", pandas.api.types.pandas_dtype("int64")),
("a_series", pandas.api.types.pandas_dtype("int64")),
("b_series", pandas.api.types.pandas_dtype("float64")),
("c_series", pandas.api.types.pandas_dtype("object")),
]
assert columns_and_indexes == expected
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_bq_schema_dict_sequence(module_under_test):
df_data = collections.OrderedDict(
[
("str_column", ["hello", "world"]),
("int_column", [42, 8]),
("bool_column", [True, False]),
]
)
dataframe = pandas.DataFrame(df_data)
dict_schema = [
{"name": "str_column", "type": "STRING", "mode": "NULLABLE"},
{"name": "bool_column", "type": "BOOL", "mode": "REQUIRED"},
]
returned_schema = module_under_test.dataframe_to_bq_schema(dataframe, dict_schema)
expected_schema = (
schema.SchemaField("str_column", "STRING", "NULLABLE"),
schema.SchemaField("int_column", "INTEGER", "NULLABLE"),
schema.SchemaField("bool_column", "BOOL", "REQUIRED"),
)
assert returned_schema == expected_schema
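# Note: int_column is absent from dict_schema above, so its expected type
# (INTEGER, NULLABLE) comes from dtype-based detection rather than from the
# supplied schema.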
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_arrow_with_multiindex(module_under_test):
bq_schema = (
schema.SchemaField("str_index", "STRING"),
# int_index is intentionally omitted, to verify that it's okay to be
# missing indexes from the schema.
schema.SchemaField("dt_index", "DATETIME"),
schema.SchemaField("int_col", "INTEGER"),
schema.SchemaField("nullable_int_col", "INTEGER"),
schema.SchemaField("str_col", "STRING"),
)
df_data = collections.OrderedDict(
[
("int_col", [1, 2, 3, 4, 5, 6]),
("nullable_int_col", [6.0, float("nan"), 7.0, float("nan"), 8.0, 9.0]),
("str_col", ["apple", "banana", "cherry", "durian", "etrog", "fig"]),
]
)
df_index = pandas.MultiIndex.from_tuples(
[
("a", 0, datetime.datetime(1999, 12, 31, 23, 59, 59, 999999)),
("a", 0, datetime.datetime(2000, 1, 1, 0, 0, 0)),
("a", 1, datetime.datetime(1999, 12, 31, 23, 59, 59, 999999)),
("b", 1, datetime.datetime(2000, 1, 1, 0, 0, 0)),
("b", 0, datetime.datetime(1999, 12, 31, 23, 59, 59, 999999)),
("b", 0, datetime.datetime(2000, 1, 1, 0, 0, 0)),
],
names=["str_index", "int_index", "dt_index"],
)
dataframe = pandas.DataFrame(df_data, index=df_index)
arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)
assert arrow_table.schema.names == [
"str_index",
"dt_index",
"int_col",
"nullable_int_col",
"str_col",
]
arrow_data = arrow_table.to_pydict()
assert arrow_data["str_index"] == ["a", "a", "a", "b", "b", "b"]
expected_dt_index = [
pandas.Timestamp(dt)
for dt in (
datetime.datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime.datetime(2000, 1, 1, 0, 0, 0),
datetime.datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime.datetime(2000, 1, 1, 0, 0, 0),
datetime.datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime.datetime(2000, 1, 1, 0, 0, 0),
)
]
assert arrow_data["dt_index"] == expected_dt_index
assert arrow_data["int_col"] == [1, 2, 3, 4, 5, 6]
assert arrow_data["nullable_int_col"] == [6, None, 7, None, 8, 9]
assert arrow_data["str_col"] == [
"apple",
"banana",
"cherry",
"durian",
"etrog",
"fig",
]
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_arrow_with_required_fields(module_under_test):
bq_schema = (
schema.SchemaField("field01", "STRING", mode="REQUIRED"),
schema.SchemaField("field02", "BYTES", mode="REQUIRED"),
schema.SchemaField("field03", "INTEGER", mode="REQUIRED"),
schema.SchemaField("field04", "INT64", mode="REQUIRED"),
schema.SchemaField("field05", "FLOAT", mode="REQUIRED"),
schema.SchemaField("field06", "FLOAT64", mode="REQUIRED"),
schema.SchemaField("field07", "NUMERIC", mode="REQUIRED"),
schema.SchemaField("field08", "BIGNUMERIC", mode="REQUIRED"),
schema.SchemaField("field09", "BOOLEAN", mode="REQUIRED"),
schema.SchemaField("field10", "BOOL", mode="REQUIRED"),
schema.SchemaField("field11", "TIMESTAMP", mode="REQUIRED"),
schema.SchemaField("field12", "DATE", mode="REQUIRED"),
schema.SchemaField("field13", "TIME", mode="REQUIRED"),
schema.SchemaField("field14", "DATETIME", mode="REQUIRED"),
schema.SchemaField("field15", "GEOGRAPHY", mode="REQUIRED"),
)
data = {
"field01": ["hello", "world"],
"field02": [b"abd", b"efg"],
"field03": [1, 2],
"field04": [3, 4],
"field05": [1.25, 9.75],
"field06": [-1.75, -3.5],
"field07": [decimal.Decimal("1.2345"), decimal.Decimal("6.7891")],
"field08": [
decimal.Decimal("-{d38}.{d38}".format(d38="9" * 38)),
decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
],
"field09": [True, False],
"field10": [False, True],
"field11": [
datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
datetime.datetime(2012, 12, 21, 9, 7, 42, tzinfo=datetime.timezone.utc),
],
"field12": [datetime.date(9999, 12, 31), datetime.date(1970, 1, 1)],
"field13": [datetime.time(23, 59, 59, 999999), datetime.time(12, 0, 0)],
"field14": [
datetime.datetime(1970, 1, 1, 0, 0, 0),
datetime.datetime(2012, 12, 21, 9, 7, 42),
],
"field15": ["POINT(30 10)", "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))"],
}
dataframe = pandas.DataFrame(data)
arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)
arrow_schema = arrow_table.schema
assert len(arrow_schema) == len(bq_schema)
for arrow_field in arrow_schema:
assert not arrow_field.nullable
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_arrow_with_unknown_type(module_under_test):
bq_schema = (
schema.SchemaField("field00", "UNKNOWN_TYPE"),
schema.SchemaField("field01", "STRING"),
schema.SchemaField("field02", "BYTES"),
schema.SchemaField("field03", "INTEGER"),
)
dataframe = pandas.DataFrame(
{
"field00": ["whoami", "whatami"],
"field01": ["hello", "world"],
"field02": [b"abd", b"efg"],
"field03": [1, 2],
}
)
with warnings.catch_warnings(record=True) as warned:
arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)
arrow_schema = arrow_table.schema
assert len(warned) == 1
warning = warned[0]
assert "field00" in str(warning)
assert len(arrow_schema) == len(bq_schema)
assert arrow_schema[0].name == "field00"
assert arrow_schema[1].name == "field01"
assert arrow_schema[2].name == "field02"
assert arrow_schema[3].name == "field03"
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_arrow_dict_sequence_schema(module_under_test):
dict_schema = [
{"name": "field01", "type": "STRING", "mode": "REQUIRED"},
{"name": "field02", "type": "BOOL", "mode": "NULLABLE"},
]
dataframe = pandas.DataFrame(
{"field01": ["hello", "world"], "field02": [True, False]}
)
arrow_table = module_under_test.dataframe_to_arrow(dataframe, dict_schema)
arrow_schema = arrow_table.schema
expected_fields = [
pyarrow.field("field01", "string", nullable=False),
pyarrow.field("field02", "bool", nullable=True),
]
assert list(arrow_schema) == expected_fields
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_parquet_w_extra_fields(module_under_test):
with pytest.raises(ValueError) as exc_context:
module_under_test.dataframe_to_parquet(
pandas.DataFrame(), (schema.SchemaField("not_in_df", "STRING"),), None
)
message = str(exc_context.value)
assert "bq_schema contains fields not present in dataframe" in message
assert "not_in_df" in message
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_parquet_w_missing_fields(module_under_test):
with pytest.raises(ValueError) as exc_context:
module_under_test.dataframe_to_parquet(
pandas.DataFrame({"not_in_bq": [1, 2, 3]}), (), None
)
message = str(exc_context.value)
assert "bq_schema is missing fields from dataframe" in message
assert "not_in_bq" in message
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_parquet_compression_method(module_under_test):
bq_schema = (schema.SchemaField("field00", "STRING"),)
dataframe = pandas.DataFrame({"field00": ["foo", "bar"]})
write_table_patch = mock.patch.object(
module_under_test.pyarrow.parquet, "write_table", autospec=True
)
with write_table_patch as fake_write_table:
module_under_test.dataframe_to_parquet(
dataframe, bq_schema, None, parquet_compression="ZSTD"
)
call_args = fake_write_table.call_args
assert call_args is not None
assert call_args.kwargs.get("compression") == "ZSTD"
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_bq_schema_fallback_needed_w_pyarrow(module_under_test):
dataframe = pandas.DataFrame(
data=[
{"id": 10, "status": "FOO", "created_at": datetime.date(2019, 5, 10)},
{"id": 20, "status": "BAR", "created_at": datetime.date(2018, 9, 12)},
]
)
with warnings.catch_warnings(record=True) as warned:
detected_schema = module_under_test.dataframe_to_bq_schema(
dataframe, bq_schema=[]
)
expected_schema = (
schema.SchemaField("id", "INTEGER", mode="NULLABLE"),
schema.SchemaField("status", "STRING", mode="NULLABLE"),
schema.SchemaField("created_at", "DATE", mode="NULLABLE"),
)
by_name = operator.attrgetter("name")
assert sorted(detected_schema, key=by_name) == sorted(expected_schema, key=by_name)
# there should be no relevant warnings
unwanted_warnings = [
warning for warning in warned if "could not determine" in str(warning).lower()
]
assert not unwanted_warnings
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_bq_schema_pyarrow_fallback_fails(module_under_test):
dataframe = pandas.DataFrame(
data=[
{"struct_field": {"one": 2}, "status": "FOO"},
{"struct_field": {"two": "222"}, "status": "BAR"},
]
)
with warnings.catch_warnings(record=True) as warned:
detected_schema = module_under_test.dataframe_to_bq_schema(
dataframe, bq_schema=[]
)
assert detected_schema is None
# a warning should also be issued
expected_warnings = [
warning for warning in warned if "could not determine" in str(warning).lower()
]
assert len(expected_warnings) == 1
assert "struct_field" in str(expected_warnings[0])
@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
def test_dataframe_to_bq_schema_geography(module_under_test):
from shapely import wkt
df = geopandas.GeoDataFrame(
pandas.DataFrame(
dict(
name=["foo", "bar"],
geo1=[None, None],
geo2=[None, wkt.loads("Point(1 1)")],
)
),
geometry="geo1",
)
bq_schema = module_under_test.dataframe_to_bq_schema(df, [])
assert bq_schema == (
schema.SchemaField("name", "STRING"),
schema.SchemaField("geo1", "GEOGRAPHY"),
schema.SchemaField("geo2", "GEOGRAPHY"),
)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test__first_array_valid_no_valid_items(module_under_test):
series = pandas.Series([None, pandas.NA, float("NaN")])
result = module_under_test._first_array_valid(series)
assert result is None
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test__first_array_valid_valid_item_exists(module_under_test):
series = pandas.Series([None, [0], [1], None])
result = module_under_test._first_array_valid(series)
assert result == 0
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test__first_array_valid_all_nan_items_in_first_valid_candidate(module_under_test):
import numpy
series = pandas.Series(
[
None,
[None, float("NaN"), pandas.NA, pandas.NaT, numpy.nan],
None,
[None, None],
[None, float("NaN"), pandas.NA, pandas.NaT, numpy.nan, 42, None],
[1, 2, 3],
None,
]
)
result = module_under_test._first_array_valid(series)
assert result == 42
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test__first_array_valid_no_arrays_with_valid_items(module_under_test):
series = pandas.Series([[None, None], [None, None]])
result = module_under_test._first_array_valid(series)
assert result is None
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_augment_schema_type_detection_succeeds(module_under_test):
dataframe = pandas.DataFrame(
data=[
{
"bool_field": False,
"int_field": 123,
"float_field": 3.141592,
"time_field": datetime.time(17, 59, 47),
"timestamp_field": datetime.datetime(2005, 5, 31, 14, 25, 55),
"date_field": datetime.date(2005, 5, 31),
"bytes_field": b"some bytes",
"string_field": "some characters",
"numeric_field": decimal.Decimal("123.456"),
"bignumeric_field": decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
}
]
)
    # NOTE: In a pandas DataFrame, the dtype of Python's datetime instances is
    # set to "datetime64[ns]", and pyarrow converts that to pyarrow.TimestampArray.
    # We thus cannot expect to get a DATETIME type when converting back to the
    # BigQuery type.
current_schema = (
schema.SchemaField("bool_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("int_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("float_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("time_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("timestamp_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("date_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("bytes_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("string_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("numeric_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("bignumeric_field", field_type=None, mode="NULLABLE"),
)
with warnings.catch_warnings(record=True) as warned:
augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
# there should be no relevant warnings
unwanted_warnings = [
warning for warning in warned if "Pyarrow could not" in str(warning)
]
assert not unwanted_warnings
# the augmented schema must match the expected
expected_schema = (
schema.SchemaField("bool_field", field_type="BOOL", mode="NULLABLE"),
schema.SchemaField("int_field", field_type="INT64", mode="NULLABLE"),
schema.SchemaField("float_field", field_type="FLOAT64", mode="NULLABLE"),
schema.SchemaField("time_field", field_type="TIME", mode="NULLABLE"),
schema.SchemaField("timestamp_field", field_type="TIMESTAMP", mode="NULLABLE"),
schema.SchemaField("date_field", field_type="DATE", mode="NULLABLE"),
schema.SchemaField("bytes_field", field_type="BYTES", mode="NULLABLE"),
schema.SchemaField("string_field", field_type="STRING", mode="NULLABLE"),
schema.SchemaField("numeric_field", field_type="NUMERIC", mode="NULLABLE"),
schema.SchemaField(
"bignumeric_field", field_type="BIGNUMERIC", mode="NULLABLE"
),
)
by_name = operator.attrgetter("name")
assert sorted(augmented_schema, key=by_name) == sorted(expected_schema, key=by_name)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_augment_schema_repeated_fields(module_under_test):
dataframe = pandas.DataFrame(
data=[
# Include some values useless for type detection to make sure the logic
# indeed finds the value that is suitable.
{"string_array": None, "timestamp_array": None, "datetime_array": None},
{
"string_array": [None],
"timestamp_array": [None],
"datetime_array": [None],
},
{"string_array": None, "timestamp_array": None, "datetime_array": None},
{
"string_array": [None, "foo"],
"timestamp_array": [
None,
datetime.datetime(
2005, 5, 31, 14, 25, 55, tzinfo=datetime.timezone.utc
),
],
"datetime_array": [None, datetime.datetime(2005, 5, 31, 14, 25, 55)],
},
{"string_array": None, "timestamp_array": None, "datetime_array": None},
]
)
current_schema = (
schema.SchemaField("string_array", field_type=None, mode="NULLABLE"),
schema.SchemaField("timestamp_array", field_type=None, mode="NULLABLE"),
schema.SchemaField("datetime_array", field_type=None, mode="NULLABLE"),
)
with warnings.catch_warnings(record=True) as warned:
augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
# there should be no relevant warnings
unwanted_warnings = [
warning for warning in warned if "Pyarrow could not" in str(warning)
]
assert not unwanted_warnings
# the augmented schema must match the expected
expected_schema = (
schema.SchemaField("string_array", field_type="STRING", mode="REPEATED"),
schema.SchemaField("timestamp_array", field_type="TIMESTAMP", mode="REPEATED"),
schema.SchemaField("datetime_array", field_type="DATETIME", mode="REPEATED"),
)
by_name = operator.attrgetter("name")
assert sorted(augmented_schema, key=by_name) == sorted(expected_schema, key=by_name)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_augment_schema_type_detection_fails(module_under_test):
dataframe = pandas.DataFrame(
data=[
{
"status": "FOO",
"struct_field": {"one": 1},
"struct_field_2": {"foo": "123"},
},
{
"status": "BAR",
"struct_field": {"two": "111"},
"struct_field_2": {"bar": 27},
},
]
)
current_schema = [
schema.SchemaField("status", field_type="STRING", mode="NULLABLE"),
schema.SchemaField("struct_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("struct_field_2", field_type=None, mode="NULLABLE"),
]
with warnings.catch_warnings(record=True) as warned:
augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
assert augmented_schema is None
expected_warnings = [
warning for warning in warned if "could not determine" in str(warning)
]
assert len(expected_warnings) == 1
warning_msg = str(expected_warnings[0])
assert "pyarrow" in warning_msg.lower()
assert "struct_field" in warning_msg and "struct_field_2" in warning_msg
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_augment_schema_type_detection_fails_array_data(module_under_test):
dataframe = pandas.DataFrame(
data=[{"all_none_array": [None, float("NaN")], "empty_array": []}]
)
current_schema = [
schema.SchemaField("all_none_array", field_type=None, mode="NULLABLE"),
schema.SchemaField("empty_array", field_type=None, mode="NULLABLE"),
]
with warnings.catch_warnings(record=True) as warned:
augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
assert augmented_schema is None
expected_warnings = [
warning for warning in warned if "could not determine" in str(warning)
]
assert len(expected_warnings) == 1
warning_msg = str(expected_warnings[0])
assert "pyarrow" in warning_msg.lower()
assert "all_none_array" in warning_msg and "empty_array" in warning_msg
def test_dataframe_to_parquet_dict_sequence_schema(module_under_test):
pandas = pytest.importorskip("pandas")
dict_schema = [
{"name": "field01", "type": "STRING", "mode": "REQUIRED"},
{"name": "field02", "type": "BOOL", "mode": "NULLABLE"},
]
dataframe = pandas.DataFrame(
{"field01": ["hello", "world"], "field02": [True, False]}
)
write_table_patch = mock.patch.object(
module_under_test.pyarrow.parquet, "write_table", autospec=True
)
to_arrow_patch = mock.patch.object(
module_under_test, "dataframe_to_arrow", autospec=True
)
with write_table_patch, to_arrow_patch as fake_to_arrow:
module_under_test.dataframe_to_parquet(dataframe, dict_schema, None)
expected_schema_arg = [
schema.SchemaField("field01", "STRING", mode="REQUIRED"),
schema.SchemaField("field02", "BOOL", mode="NULLABLE"),
]
schema_arg = fake_to_arrow.call_args.args[1]
assert schema_arg == expected_schema_arg
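# The assertion above shows that dict-style schemas are normalized into
# schema.SchemaField objects before being handed to dataframe_to_arrow.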
def test__download_table_bqstorage_stream_includes_read_session(
monkeypatch, module_under_test
):
import google.cloud.bigquery_storage_v1.reader
import google.cloud.bigquery_storage_v1.types
monkeypatch.setattr(_helpers.BQ_STORAGE_VERSIONS, "_installed_version", None)
monkeypatch.setattr(bigquery_storage, "__version__", "2.5.0")
bqstorage_client = mock.create_autospec(
bigquery_storage.BigQueryReadClient, instance=True
)
reader = mock.create_autospec(
google.cloud.bigquery_storage_v1.reader.ReadRowsStream, instance=True
)
bqstorage_client.read_rows.return_value = reader
session = google.cloud.bigquery_storage_v1.types.ReadSession()
module_under_test._download_table_bqstorage_stream(
module_under_test._DownloadState(),
bqstorage_client,
session,
google.cloud.bigquery_storage_v1.types.ReadStream(name="test"),
queue.Queue(),
mock.Mock(),
)
reader.rows.assert_called_once_with(session)
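# The companion test below covers google-cloud-bigquery-storage >= 2.6.0, where
# the read session no longer has to be passed back into ReadRowsStream.rows().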
@pytest.mark.skipif(
not _helpers.BQ_STORAGE_VERSIONS.is_read_session_optional,
reason="Requires `google-cloud-bigquery-storage` >= 2.6.0",
)
def test__download_table_bqstorage_stream_omits_read_session(
monkeypatch, module_under_test
):
import google.cloud.bigquery_storage_v1.reader
import google.cloud.bigquery_storage_v1.types
monkeypatch.setattr(_helpers.BQ_STORAGE_VERSIONS, "_installed_version", None)
monkeypatch.setattr(bigquery_storage, "__version__", "2.6.0")
bqstorage_client = mock.create_autospec(
bigquery_storage.BigQueryReadClient, instance=True
)
reader = mock.create_autospec(
google.cloud.bigquery_storage_v1.reader.ReadRowsStream, instance=True
)
bqstorage_client.read_rows.return_value = reader
session = google.cloud.bigquery_storage_v1.types.ReadSession()
module_under_test._download_table_bqstorage_stream(
module_under_test._DownloadState(),
bqstorage_client,
session,
google.cloud.bigquery_storage_v1.types.ReadStream(name="test"),
queue.Queue(),
mock.Mock(),
)
reader.rows.assert_called_once_with()
@pytest.mark.parametrize(
"stream_count,maxsize_kwarg,expected_call_count,expected_maxsize",
[
(3, {"max_queue_size": 2}, 3, 2), # custom queue size
(4, {}, 4, 4), # default queue size
(7, {"max_queue_size": None}, 7, 0), # infinite queue size
],
)
def test__download_table_bqstorage(
module_under_test,
stream_count,
maxsize_kwarg,
expected_call_count,
expected_maxsize,
):
from google.cloud.bigquery import dataset
from google.cloud.bigquery import table
queue_used = None # A reference to the queue used by code under test.
bqstorage_client = mock.create_autospec(
bigquery_storage.BigQueryReadClient, instance=True
)
    fake_session = mock.Mock(streams=[f"stream/s{i}" for i in range(stream_count)])
bqstorage_client.create_read_session.return_value = fake_session
table_ref = table.TableReference(
dataset.DatasetReference("project-x", "dataset-y"),
"table-z",
)
def fake_download_stream(
download_state, bqstorage_client, session, stream, worker_queue, page_to_item
):
nonlocal queue_used
queue_used = worker_queue
try:
worker_queue.put_nowait("result_page")
except queue.Full: # pragma: NO COVER
pass
download_stream = mock.Mock(side_effect=fake_download_stream)
with mock.patch.object(
module_under_test, "_download_table_bqstorage_stream", new=download_stream
):
result_gen = module_under_test._download_table_bqstorage(
"some-project", table_ref, bqstorage_client, **maxsize_kwarg
)
list(result_gen)
# Timing-safe, as the method under test should block until the pool shutdown is
# complete, at which point all download stream workers have already been submitted
# to the thread pool.
assert download_stream.call_count == stream_count # once for each stream
assert queue_used.maxsize == expected_maxsize
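# Note on the maxsize expectations above: queue.Queue treats maxsize=0 as
# unbounded, which is why max_queue_size=None maps to an expected maxsize of 0.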
def test_download_arrow_row_iterator_unknown_field_type(module_under_test):
fake_page = api_core.page_iterator.Page(
parent=mock.Mock(),
items=[{"page_data": "foo"}],
item_to_value=api_core.page_iterator._item_to_value_identity,
)
fake_page._columns = [[1, 10, 100], [2.2, 22.22, 222.222]]
pages = [fake_page]
bq_schema = [
schema.SchemaField("population_size", "INTEGER"),
schema.SchemaField("alien_field", "ALIEN_FLOAT_TYPE"),
]
results_gen = module_under_test.download_arrow_row_iterator(pages, bq_schema)
with warnings.catch_warnings(record=True) as warned:
result = next(results_gen)
unwanted_warnings = [
warning
for warning in warned
if "please pass schema= explicitly" in str(warning).lower()
]
assert not unwanted_warnings
assert len(result.columns) == 2
col = result.columns[0]
assert type(col) is pyarrow.lib.Int64Array
assert col.to_pylist() == [1, 10, 100]
col = result.columns[1]
assert type(col) is pyarrow.lib.DoubleArray
assert col.to_pylist() == [2.2, 22.22, 222.222]
def test_download_arrow_row_iterator_known_field_type(module_under_test):
fake_page = api_core.page_iterator.Page(
parent=mock.Mock(),
items=[{"page_data": "foo"}],
item_to_value=api_core.page_iterator._item_to_value_identity,
)
fake_page._columns = [[1, 10, 100], ["2.2", "22.22", "222.222"]]
pages = [fake_page]
bq_schema = [
schema.SchemaField("population_size", "INTEGER"),
schema.SchemaField("non_alien_field", "STRING"),
]
results_gen = module_under_test.download_arrow_row_iterator(pages, bq_schema)
with warnings.catch_warnings(record=True) as warned:
result = next(results_gen)
unwanted_warnings = [
warning
for warning in warned
if "please pass schema= explicitly" in str(warning).lower()
]
assert not unwanted_warnings
assert len(result.columns) == 2
col = result.columns[0]
assert type(col) is pyarrow.lib.Int64Array
assert col.to_pylist() == [1, 10, 100]
col = result.columns[1]
assert type(col) is pyarrow.lib.StringArray
assert col.to_pylist() == ["2.2", "22.22", "222.222"]
def test_download_arrow_row_iterator_dict_sequence_schema(module_under_test):
fake_page = api_core.page_iterator.Page(
parent=mock.Mock(),
items=[{"page_data": "foo"}],
item_to_value=api_core.page_iterator._item_to_value_identity,
)
fake_page._columns = [[1, 10, 100], ["2.2", "22.22", "222.222"]]
pages = [fake_page]
dict_schema = [
{"name": "population_size", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "non_alien_field", "type": "STRING", "mode": "NULLABLE"},
]
results_gen = module_under_test.download_arrow_row_iterator(pages, dict_schema)
result = next(results_gen)
assert len(result.columns) == 2
col = result.columns[0]
assert type(col) is pyarrow.lib.Int64Array
assert col.to_pylist() == [1, 10, 100]
col = result.columns[1]
assert type(col) is pyarrow.lib.StringArray
assert col.to_pylist() == ["2.2", "22.22", "222.222"]
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_download_dataframe_row_iterator_dict_sequence_schema(module_under_test):
fake_page = api_core.page_iterator.Page(
parent=mock.Mock(),
items=[{"page_data": "foo"}],
item_to_value=api_core.page_iterator._item_to_value_identity,
)
fake_page._columns = [[1, 10, 100], ["2.2", "22.22", "222.222"]]
pages = [fake_page]
dict_schema = [
{"name": "population_size", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "non_alien_field", "type": "STRING", "mode": "NULLABLE"},
]
results_gen = module_under_test.download_dataframe_row_iterator(
pages, dict_schema, dtypes={}
)
result = next(results_gen)
expected_result = pandas.DataFrame(
collections.OrderedDict(
[
("population_size", [1, 10, 100]),
("non_alien_field", ["2.2", "22.22", "222.222"]),
]
)
)
assert result.equals(expected_result)
with pytest.raises(StopIteration):
result = next(results_gen)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_table_data_listpage_to_dataframe_skips_stop_iteration(module_under_test):
dataframe = module_under_test._row_iterator_page_to_dataframe([], [], {})
assert isinstance(dataframe, pandas.DataFrame)
def test_bq_to_arrow_field_type_override(module_under_test):
# When loading pandas data, we may need to override the type
# decision based on data contents, because GEOGRAPHY data can be
# stored as either text or binary.
assert (
module_under_test.bq_to_arrow_field(schema.SchemaField("g", "GEOGRAPHY")).type
== pyarrow.string()
)
assert (
module_under_test.bq_to_arrow_field(
schema.SchemaField("g", "GEOGRAPHY"),
pyarrow.binary(),
).type
== pyarrow.binary()
)
@pytest.mark.parametrize(
"field_type, metadata",
[
("datetime", {b"ARROW:extension:name": b"google:sqlType:datetime"}),
(
"geography",
{
b"ARROW:extension:name": b"google:sqlType:geography",
b"ARROW:extension:metadata": b'{"encoding": "WKT"}',
},
),
],
)
def test_bq_to_arrow_field_metadata(module_under_test, field_type, metadata):
assert (
module_under_test.bq_to_arrow_field(
schema.SchemaField("g", field_type)
).metadata
== metadata
)
def test_verify_pandas_imports_no_pandas(module_under_test, monkeypatch):
monkeypatch.setattr(module_under_test, "pandas", None)
with pytest.raises(ValueError, match="Please install the 'pandas' package"):
module_under_test.verify_pandas_imports()
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_verify_pandas_imports_no_db_dtypes(module_under_test, monkeypatch):
monkeypatch.setattr(module_under_test, "db_dtypes", None)
with pytest.raises(ValueError, match="Please install the 'db-dtypes' package"):
module_under_test.verify_pandas_imports()
|
the-stack_0_6295 | #!/usr/bin/env python
"""
Created by howie.hu at 2021/4/10.
Description: commonly used scheduling functions
- Run: execute from the project root; choose the environment file (pro.env here) according to your actual setup
- Command: PIPENV_DOTENV_LOCATION=./pro.env pipenv run python src/schedule_task/all_tasks.py
Changelog: all notable changes to this file will be documented
"""
import time
from src.classifier import model_predict_factory
from src.collector.collect_factory import collect_factory
from src.config import Config
from src.databases import MongodbManager
from src.processor import fetch_keyword_list
from src.sender import send_factory
from src.utils.log import LOGGER
def update_wechat_doc():
"""
    Fetch the latest articles and persist them to the database.
:param wechat_list:
:return:
"""
    # TODO: manage this configuration in one central place
t_collect_type = "wechat_sougou"
t_collect_config = {
"wechat_list": Config.WECHAT_LIST,
"delta_time": 5,
# playwright
"spider_type": "playwright",
}
collect_factory(t_collect_type, t_collect_config)
def update_ads_tag(is_force=False):
"""
    Tag subscribed articles that are identified as advertisements.
    :param is_force: whether to force re-evaluation of already tagged articles
:return:
"""
mongo_base = MongodbManager.get_mongo_base(mongodb_config=Config.MONGODB_CONFIG)
coll = mongo_base.get_collection(coll_name="liuli_articles")
if is_force:
query = {}
else:
query = {"cos_model": {"$exists": False}}
    # Find articles that have not been tagged yet and judge them with the similarity model
for each_data in coll.find(query):
doc_name = each_data["doc_name"]
doc_link = each_data["doc_link"]
doc_source_name = each_data["doc_source_name"]
doc_content = each_data["doc_content"]
doc_keywords = each_data.get("doc_keywords")
if not doc_keywords:
keyword_list = fetch_keyword_list(doc_content)
doc_keywords = " ".join(keyword_list)
each_data["doc_keywords"] = doc_keywords
        # Based on cosine similarity
cos_model_resp = model_predict_factory(
model_name="cos",
model_path="",
input_dict={"text": doc_name + doc_keywords, "cos_value": Config.COS_VALUE},
# input_dict={"text": doc_name, "cos_value": Config.COS_VALUE},
).to_dict()
each_data["cos_model"] = cos_model_resp
if cos_model_resp["result"] == 1:
LOGGER.info(
f"[{doc_source_name}] {doc_name} 被识别为广告[{cos_model_resp['probability']}],链接为:{each_data['doc_link']}"
)
coll.update_one(
filter={"doc_id": each_data["doc_id"]},
update={"$set": each_data},
upsert=True,
)
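# The "cos_model" field persisted above is what send_doc() reads later. A hedged
# sketch of its expected shape, inferred from the usage in this file (not a
# guarantee of the model API):
#
#   {"result": 1, "probability": 0.93, ...}  # result == 1 marks the doc as an ad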
def send_doc():
"""
    Distribute articles to the configured senders.
:return:
"""
if Config.SENDER_LIST:
        # Whether the distributors are enabled
mongo_base = MongodbManager.get_mongo_base(mongodb_config=Config.MONGODB_CONFIG)
coll = mongo_base.get_collection(coll_name="liuli_articles")
cur_ts = time.time()
filter_dict = {
            # Time range; after the first run this can actually be removed
"doc_ts": {"$gte": cur_ts - (2 * 24 * 60 * 60), "$lte": cur_ts},
            # At least one model tag has been applied
"cos_model": {"$exists": True},
}
        # Find all articles eligible for distribution
for each_data in coll.find(filter_dict):
            # Distribute to each target separately
for send_type in Config.SENDER_LIST:
                # Temporarily fixed, for testing
send_config = {}
each_data["doc_cus_des"] = "🤓非广告"
cos_model_resp = each_data["cos_model"]
if cos_model_resp["result"] == 1:
                    # Ad tag
each_data[
"doc_cus_des"
] = f"👿广告[概率:{cos_model_resp['probability']}]"
send_factory(
send_type=send_type, send_config=send_config, send_data=each_data
)
else:
LOGGER.info("未配置分发器!")
if __name__ == "__main__":
# 第一次启动请执行
# update_wechat_doc()
# 每次强制重新打标签
# update_ads_tag(is_force=False)
send_doc()
|
the-stack_0_6296 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A module that implements the "theanolm train" command.
"""
import sys
import mmap
import logging
import h5py
import numpy
import theano
from theanolm import Vocabulary, Architecture, Network
from theanolm.backend import TextFileType, get_default_device
from theanolm.parsing import LinearBatchIterator
from theanolm.training import Trainer, create_optimizer, CrossEntropyCost, \
NCECost, BlackoutCost
from theanolm.scoring import TextScorer
from theanolm.vocabulary import compute_word_counts
def add_arguments(parser):
"""Specifies the command line arguments supported by the "theanolm train"
command.
:type parser: argparse.ArgumentParser
:param parser: a command line argument parser
"""
argument_group = parser.add_argument_group("data")
argument_group.add_argument(
'model_path', metavar='MODEL-FILE', type=str,
help='path where the best model state will be saved in HDF5 binary '
'data format')
argument_group.add_argument(
'--training-set', metavar='FILE', type=TextFileType('r'), nargs='+',
required=True,
help='text files containing training data (UTF-8, one sentence per '
'line, assumed to be compressed if the name ends in ".gz")')
argument_group.add_argument(
'--validation-file', metavar='VALID-FILE', type=TextFileType('r'),
default=None,
help='text file containing validation data for early stopping (UTF-8, '
'one sentence per line, assumed to be compressed if the name ends '
'in ".gz")')
argument_group = parser.add_argument_group("vocabulary")
argument_group.add_argument(
'--vocabulary', metavar='FILE', type=str, default=None,
help='word or class vocabulary to be used in the neural network input '
'and output, in the format specified by the --vocabulary-format '
'argument (UTF-8 text, default is to use all the words from the '
'training data)')
argument_group.add_argument(
'--vocabulary-format', metavar='FORMAT', type=str, default='words',
choices=['words', 'classes', 'srilm-classes'],
help='format of the file specified with --vocabulary argument, one of '
'"words" (one word per line, default), "classes" (word and class '
'ID per line), "srilm-classes" (class name, membership '
'probability, and word per line)')
argument_group.add_argument(
'--num-classes', metavar='N', type=int, default=None,
help='generate N classes using a simple word frequency based algorithm '
'when --vocabulary argument is not given (default is to not use '
'word classes)')
argument_group = parser.add_argument_group("network architecture")
argument_group.add_argument(
'--architecture', metavar='FILE', type=str, default='lstm300',
help='path to neural network architecture description, or a standard '
'architecture name, "lstm300" or "lstm1500" (default "lstm300")')
argument_group = parser.add_argument_group("training process")
argument_group.add_argument(
'--sampling', metavar='FRACTION', type=float, nargs='*', default=[],
help='randomly sample only FRACTION of each training file on each '
'epoch (list the fractions in the same order as the training '
'files)')
argument_group.add_argument(
'--sequence-length', metavar='N', type=int, default=100,
help='ignore sentences longer than N words (default 100)')
argument_group.add_argument(
'--batch-size', metavar='N', type=int, default=16,
help='each mini-batch will contain N sentences (default 16)')
argument_group.add_argument(
'--validation-frequency', metavar='N', type=int, default='5',
help='cross-validate for reducing learning rate or early stopping N '
'times per training epoch (default 5)')
argument_group.add_argument(
'--patience', metavar='N', type=int, default=4,
help='allow perplexity to increase N consecutive cross-validations, '
'before decreasing learning rate; if less than zero, never '
'decrease learning rate (default 4)')
argument_group.add_argument(
'--random-seed', metavar='N', type=int, default=None,
help='seed to initialize the random state (default is to seed from a '
             'random source provided by the operating system)')
argument_group = parser.add_argument_group("optimization")
argument_group.add_argument(
'--optimization-method', metavar='NAME', type=str, default='adagrad',
choices=['sgd', 'nesterov', 'adagrad', 'adadelta', 'rmsprop-sgd',
'rmsprop-nesterov', 'adam'],
help='optimization method, one of "sgd", "nesterov", "adagrad", '
'"adadelta", "rmsprop-sgd", "rmsprop-nesterov", "adam" '
'(default "adagrad")')
argument_group.add_argument(
'--learning-rate', metavar='ALPHA', type=float, default=0.1,
help='initial learning rate (default 0.1)')
argument_group.add_argument(
'--l1-regularization', metavar='LAMBDA', type=float, default=None,
help='add L1 regularization term with weight LAMBDA to the cost')
argument_group.add_argument(
'--l2-regularization', metavar='LAMBDA', type=float, default=None,
help='add L2 regularization term with weight LAMBDA to the cost')
argument_group.add_argument(
'--momentum', metavar='BETA', type=float, default=0.9,
help='momentum coefficient for momentum optimization methods (default '
'0.9)')
argument_group.add_argument(
'--gradient-decay-rate', metavar='GAMMA', type=float, default=0.9,
help='geometric rate for averaging gradients (default 0.9)')
argument_group.add_argument(
'--sqr-gradient-decay-rate', metavar='GAMMA', type=float, default=0.999,
help='geometric rate for averaging squared gradients in Adam optimizer '
'(default 0.999)')
argument_group.add_argument(
'--numerical-stability-term', metavar='EPSILON', type=float,
default=1e-6,
help='a value that is used to prevent instability when dividing by '
'very small numbers (default 1e-6)')
argument_group.add_argument(
'--gradient-normalization', metavar='THRESHOLD', type=float,
default=5,
help='scale down the gradients if necessary to make sure their norm '
'(normalized by mini-batch size) will not exceed THRESHOLD '
'(default 5)')
argument_group.add_argument(
'--cost', metavar='NAME', type=str, default='cross-entropy',
choices=['cross-entropy', 'nce', 'blackout'],
help='cost function, one of "cross-entropy" (default), "nce" '
'(noise-contrastive estimation), or "blackout"')
argument_group.add_argument(
'--num-noise-samples', metavar='K', type=int, default=5,
help='sampling based costs sample K noise words per one training word '
'(default 5)')
argument_group.add_argument(
'--noise-distribution', metavar='DIST', type=str, default='uniform',
choices=['uniform', 'log-uniform', 'unigram'],
help='sample noise from DIST; one of "uniform" (default, but less '
'accurate), "log-uniform" (the vocabulary should be ordered by '
'decreasing frequency), "unigram" (unigram distribution of words '
'in training data, slow)')
argument_group.add_argument(
'--noise-dampening', metavar='ALPHA', type=float, default=0.5,
help='the empirical unigram distribution is raised to the power ALPHA '
'before sampling noise words; 0.0 corresponds to the uniform '
'distribution and 1.0 corresponds to the unigram distribution '
'(only applicable with --noise-distribution=unigram, default 0.5)')
argument_group.add_argument(
'--noise-sharing', metavar='SHARING', type=str, default=None,
choices=['seq', 'batch', None],
help='can be "seq" for sharing noise samples between mini-batch '
'sequences, or "batch" for sharing noise samples across einter '
'mini-batch for improved speed (default is no sharing, which is '
'very slow)')
argument_group.add_argument(
'--exclude-unk', action="store_true",
help="exclude <unk> tokens from cost and perplexity computations")
argument_group.add_argument(
'--weights', metavar='LAMBDA', type=float, nargs='*', default=[],
help='scale a mini-batch update by LAMBDA if the data is from the '
'corresponding training file (list the weights in the same order '
'as the training files)')
argument_group = parser.add_argument_group("early stopping")
argument_group.add_argument(
'--stopping-criterion', metavar='NAME', type=str,
default='annealing-count',
choices=['epoch-count', 'no-improvement', 'annealing-count'],
help='selects a criterion for early-stopping, one of "epoch-count" '
'(fixed number of epochs), "no-improvement" (no improvement since '
'learning rate was decreased), "annealing-count" (default, '
'learning rate is decreased a fixed number of times)')
argument_group.add_argument(
'--min-epochs', metavar='N', type=int, default=1,
help='perform at least N training epochs (default 1)')
argument_group.add_argument(
'--max-epochs', metavar='N', type=int, default=100,
help='perform at most N training epochs (default 100)')
argument_group.add_argument(
'--max-annealing-count', metavar='N', type=int, default=0,
help='when using annealing-count stopping criterion, continue training '
'after decreasing learning rate at most N times (default 0)')
argument_group = parser.add_argument_group("configuration")
argument_group.add_argument(
'--default-device', metavar='DEVICE', type=str, default=None,
help='when multiple GPUs are present, use DEVICE as default')
argument_group = parser.add_argument_group("logging and debugging")
argument_group.add_argument(
'--log-file', metavar='FILE', type=str, default='-',
help='path where to write log file (default is standard output)')
argument_group.add_argument(
'--log-level', metavar='LEVEL', type=str, default='info',
choices=['debug', 'info', 'warn'],
help='minimum level of events to log, one of "debug", "info", "warn" '
'(default "info")')
argument_group.add_argument(
'--log-interval', metavar='N', type=int, default=1000,
help='print statistics of every Nth mini-batch update; quiet if less '
'than one (default 1000)')
argument_group.add_argument(
'--debug', action="store_true",
help='use test values to get better error messages from Theano')
argument_group.add_argument(
'--print-graph', action="store_true",
help='print Theano computation graph')
argument_group.add_argument(
'--profile', action="store_true",
help='enable profiling Theano functions')
argument_group.add_argument(
'--load-and-train', action="store_true",
help='load the weight matrices from the MODEL and retrain')
def _read_vocabulary(args, state):
"""If ``state`` contains data, reads the vocabulary from the HDF5 state.
Otherwise reads a vocabulary file or constructs the vocabulary from the
training set and writes it to the HDF5 state.
    If the state does not contain data and the --vocabulary argument is given, reads
the vocabulary from the file given after the argument. The rest of the words
in the training set will be added as out-of-shortlist words.
If the state does not contain data and no vocabulary is given, constructs a
vocabulary that contains all the training set words. In that case,
--num-classes argument can be used to control the number of classes.
:type args: argparse.Namespace
:param args: a collection of command line arguments
:type state: hdf5.File
:param state: HDF5 file where the vocabulary should be saved
:rtype: Vocabulary
:returns: the created vocabulary
"""
if state.keys():
logging.info("Reading vocabulary from existing network state.")
result = Vocabulary.from_state(state)
if not result.has_unigram_probs():
# This is for backward compatibility. Remove at some point.
logging.info("Computing unigram word probabilities from training "
"set.")
word_counts = compute_word_counts(args.training_set)
shortlist_words = list(result.id_to_word)
shortlist_set = set(shortlist_words)
oos_words = [x for x in word_counts.keys()
if x not in shortlist_set]
result.id_to_word = numpy.asarray(shortlist_words + oos_words,
dtype=object)
result.word_to_id = {word: word_id
for word_id, word in enumerate(result.id_to_word)}
result.compute_probs(word_counts, update_class_probs=False)
result.get_state(state)
elif args.vocabulary is None:
logging.info("Constructing vocabulary from training set.")
word_counts = compute_word_counts(args.training_set)
result = Vocabulary.from_word_counts(word_counts, args.num_classes)
result.get_state(state)
else:
logging.info("Reading vocabulary from %s.", args.vocabulary)
word_counts = compute_word_counts(args.training_set)
oos_words = word_counts.keys()
with open(args.vocabulary, 'rt', encoding='utf-8') as vocab_file:
result = Vocabulary.from_file(vocab_file,
args.vocabulary_format,
oos_words=oos_words)
if args.vocabulary_format == 'classes':
logging.info("Computing class membership probabilities and unigram "
"probabilities for out-of-shortlist words.")
update_class_probs = True
else:
logging.info("Computing unigram probabilities for out-of-shortlist "
"words.")
update_class_probs = False
result.compute_probs(word_counts,
update_class_probs=update_class_probs)
result.get_state(state)
logging.info("Number of words in vocabulary: %d",
result.num_words())
logging.info("Number of words in shortlist: %d",
result.num_shortlist_words())
logging.info("Number of word classes: %d",
result.num_classes())
return result
def log_options(training_options, optimization_options, args):
"""Writes the command line arguments to debug log.
"""
logging.debug("Training options:")
for option_name, option_value in sorted(training_options.items()):
logging.debug(" %s: %s", option_name, str(option_value))
logging.debug("Optimization options:")
for option_name, option_value in sorted(optimization_options.items()):
logging.debug(" %s=%s", option_name, str(option_value))
logging.debug(" cost_function=%s", args.cost)
logging.debug(" noise_distribution=%s", args.noise_distribution)
    logging.debug("  noise_dampening=%f", args.noise_dampening)
logging.debug(" noise_sharing=%s", args.noise_sharing
if args.noise_sharing is not None
else 'no')
logging.debug(" exclude_unk=%s", 'yes' if args.exclude_unk else 'no')
logging.debug(" l1_regularization=%f", args.l1_regularization
if args.l1_regularization is not None
else 0.0)
logging.debug(" l2_regularization=%f", args.l2_regularization
if args.l2_regularization is not None
else 0.0)
logging.debug("Data sampling: %s", str(numpy.array(args.sampling)))
def train(args):
"""A function that performs the "theanolm train" command.
:type args: argparse.Namespace
:param args: a collection of command line arguments
"""
numpy.random.seed(args.random_seed)
log_file = args.log_file
log_level = getattr(logging, args.log_level.upper(), None)
if not isinstance(log_level, int):
print("Invalid logging level requested:", args.log_level)
sys.exit(1)
log_format = '%(asctime)s %(funcName)s: %(message)s'
if args.log_file == '-':
logging.basicConfig(stream=sys.stdout, format=log_format, level=log_level)
else:
logging.basicConfig(filename=log_file, format=log_format, level=log_level)
if args.debug:
theano.config.compute_test_value = 'warn'
logging.info("Enabled computing test values for tensor variables.")
logging.warning("GpuArray backend will fail random number generation!")
else:
theano.config.compute_test_value = 'off'
theano.config.profile = args.profile
theano.config.profile_memory = args.profile
with h5py.File(args.model_path, 'a', driver='core') as state:
vocabulary = _read_vocabulary(args, state)
if args.num_noise_samples > vocabulary.num_classes():
print("Number of noise samples ({}) is larger than the number of "
"classes. This doesn't make sense and would cause unigram "
"sampling to fail.".format(args.num_noise_samples))
sys.exit(1)
num_training_files = len(args.training_set)
if len(args.weights) > num_training_files:
print("You specified more weights than training files.")
sys.exit(1)
weights = numpy.ones(num_training_files).astype(theano.config.floatX)
for index, weight in enumerate(args.weights):
weights[index] = weight
if len(args.sampling) > num_training_files:
print("You specified more sampling coefficients than training "
"files.")
sys.exit(1)
training_options = {
'batch_size': args.batch_size,
'sequence_length': args.sequence_length,
'validation_frequency': args.validation_frequency,
'patience': args.patience,
'stopping_criterion': args.stopping_criterion,
'max_epochs': args.max_epochs,
'min_epochs': args.min_epochs,
'max_annealing_count': args.max_annealing_count
}
optimization_options = {
'method': args.optimization_method,
'epsilon': args.numerical_stability_term,
'gradient_decay_rate': args.gradient_decay_rate,
'sqr_gradient_decay_rate': args.sqr_gradient_decay_rate,
'learning_rate': args.learning_rate,
'weights': weights,
'momentum': args.momentum,
'max_gradient_norm': args.gradient_normalization,
'num_noise_samples': args.num_noise_samples,
'noise_sharing': args.noise_sharing,
}
log_options(training_options, optimization_options, args)
logging.info("Creating trainer.")
trainer = Trainer(training_options, vocabulary, args.training_set,
args.sampling)
trainer.set_logging(args.log_interval)
logging.info("Building neural network.")
if args.architecture == 'lstm300' or args.architecture == 'lstm1500':
architecture = Architecture.from_package(args.architecture)
else:
with open(args.architecture, 'rt', encoding='utf-8') as arch_file:
architecture = Architecture.from_description(arch_file)
default_device = get_default_device(args.default_device)
network = Network(architecture, vocabulary, trainer.class_prior_probs,
default_device=default_device,
profile=args.profile)
network.set_sampling(args.noise_distribution, args.noise_dampening,
args.noise_sharing)
logging.info("Building optimizer.")
exclude_id = vocabulary.word_to_id['<unk>'] if args.exclude_unk \
else None
epsilon = args.numerical_stability_term
if args.cost == 'cross-entropy':
cost_function = CrossEntropyCost(network, exclude_id,
args.l1_regularization,
args.l2_regularization, epsilon)
elif args.cost == 'nce':
cost_function = NCECost(network, exclude_id, args.l1_regularization,
args.l2_regularization, epsilon)
else:
assert args.cost == 'blackout'
cost_function = BlackoutCost(network, exclude_id,
args.l1_regularization,
args.l2_regularization, epsilon)
try:
optimizer = create_optimizer(optimization_options, network,
cost_function, profile=args.profile)
except theano.gradient.DisconnectedInputError as e:
print("Cannot train the neural network because some of the "
"parameters are disconnected from the output. Make sure all "
"the layers are correctly connected in the network "
"architecture. The error message was: `{}´".format(e))
if args.print_graph:
print("Cost function computation graph:")
theano.printing.debugprint(optimizer.gradient_update_function)
trainer.initialize(network, state, optimizer, args.load_and_train)
if args.validation_file is not None:
logging.info("Building text scorer for cross-validation.")
scorer = TextScorer(network, use_shortlist=True,
exclude_unk=args.exclude_unk,
profile=args.profile)
logging.info("Validation text: %s", args.validation_file.name)
validation_mmap = mmap.mmap(args.validation_file.fileno(),
0,
prot=mmap.PROT_READ)
validation_iter = \
LinearBatchIterator(validation_mmap,
vocabulary,
batch_size=args.batch_size,
max_sequence_length=args.sequence_length,
map_oos_to_unk=False)
trainer.set_validation(validation_iter, scorer)
else:
logging.info("Cross-validation will not be performed.")
validation_iter = None
logging.info("Training neural network.")
trainer.train()
if 'layers' not in state.keys():
print("The model has not been trained. No cross-validations were "
"performed or training did not improve the model.")
elif validation_iter is not None:
network.set_state(state)
perplexity = scorer.compute_perplexity(validation_iter)
print("Best validation set perplexity:", perplexity)
|
the-stack_0_6300 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from symbol.resnet import *
from symbol.config import config
from symbol.processing import bbox_pred, clip_boxes, nms
import face_embedding
from mapr_streams_python import Consumer, KafkaError, Producer
import numpy as np
import cv2, os, json, time, sys, pickle
import mxnet as mx
import argparse, random, sklearn
import tensorflow as tf
from scipy import misc
from sklearn.decomposition import PCA
from time import sleep
from easydict import EasyDict as edict
from mtcnn_detector import MtcnnDetector
import face_image, face_preprocess
from flask import Flask, Response
app = Flask(__name__)
@app.route('/')
def index():
return Response(kafkastream(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def ch_dev(arg_params, aux_params, ctx):
new_args = dict()
new_auxs = dict()
for k, v in arg_params.items():
new_args[k] = v.as_in_context(ctx)
for k, v in aux_params.items():
new_auxs[k] = v.as_in_context(ctx)
return new_args, new_auxs
def resize(im, target_size, max_size):
"""
only resize input image to target size and return scale
:param im: BGR image input by opencv
:param target_size: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:return:
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
return im, im_scale
def get_face_embedding(filename, arg_params, aux_params, sym, model, ctx):
img_orig = cv2.imread(filename)
img_orig = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
img, scale = resize(img_orig.copy(), 600, 1000)
im_info = np.array([[img.shape[0], img.shape[1], scale]], dtype=np.float32) # (h, w, scale)
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2) # change to (c, h, w) order
img = img[np.newaxis, :] # extend to (n, c, h, w)
arg_params["data"] = mx.nd.array(img, ctx)
arg_params["im_info"] = mx.nd.array(im_info, ctx)
exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
exe.forward(is_train=False)
output_dict = {name: nd for name, nd in zip(sym.list_outputs(), exe.outputs)}
rois = output_dict['rpn_rois_output'].asnumpy()[:, 1:] # first column is index
scores = output_dict['cls_prob_reshape_output'].asnumpy()[0]
bbox_deltas = output_dict['bbox_pred_reshape_output'].asnumpy()[0]
pred_boxes = bbox_pred(rois, bbox_deltas)
pred_boxes = clip_boxes(pred_boxes, (im_info[0][0], im_info[0][1]))
cls_boxes = pred_boxes[:, 4:8]
cls_scores = scores[:, 1]
keep = np.where(cls_scores >0.6)[0]
cls_boxes = cls_boxes[keep, :]
cls_scores = cls_scores[keep]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets.astype(np.float32), 0.3)
dets = dets[keep, :]
bbox = dets[0, :4]
roundfunc = lambda t: int(round(t/scale))
vfunc = np.vectorize(roundfunc)
bbox = vfunc(bbox)
f_vector, jpeg = model.get_feature(img_orig, bbox, None)
fT = f_vector.T
return fT
def kafkastream():
if args.gpuid >= 0:
ctx = mx.gpu(args.gpuid)
else:
ctx = mx.cpu()
_, arg_params, aux_params = mx.model.load_checkpoint('mxnet-face-fr50', 0)
arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
sym = resnet_50(num_class=2)
model = face_embedding.FaceModel(args.gpuid)
f1T = get_face_embedding(args.filename, arg_params, aux_params, sym, model, ctx)
c = Consumer({'group.id': args.groupid,
'default.topic.config': {'auto.offset.reset': 'earliest', 'enable.auto.commit': 'false'}})
c.subscribe([args.readstream+':'+args.readtopic])
running = True
p = Producer({'streams.producer.default.stream': args.writestream})
while running:
msg = c.poll(timeout=0)
if msg is None: continue
if not msg.error():
pickle_vector = pickle.loads(msg.value())
            nparr = np.frombuffer(pickle_vector[0], np.uint8)
img_orig = cv2.imdecode(nparr, 1)
bbox_vector = pickle_vector[1]
print(len(bbox_vector))
embedding_vector = pickle_vector[2]
if len(embedding_vector) > 0:
sim_vector = [np.dot(f, f1T) for f in embedding_vector]
idx = sim_vector.index(max(sim_vector))
bbox = bbox_vector[idx]
sim = sim_vector[idx]
if sim > args.threshold:
img = cv2.cvtColor(img_orig, cv2.COLOR_RGB2BGR)
cv2.rectangle(img, (int(round(bbox[0])), int(round(bbox[1]))),
(int(round(bbox[2])), int(round(bbox[3]))), (0, 255, 0), 2)
ret, jpeg = cv2.imencode('.png', img)
bytecode = jpeg.tobytes()
time.sleep(args.timeout)
yield (b'--frame\r\n'
b'Content-Type: image/png\r\n\r\n' + bytecode + b'\r\n\r\n')
if args.writetostream:
                        p.produce(args.writetopic, jpeg.tobytes())
print(args.writetopic)
elif msg.error().code() != KafkaError._PARTITION_EOF:
print(msg.error())
running = False
c.close()
p.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='mapr consumer settings')
parser.add_argument('--groupid', default='dong001', help='mapr consumer to read from')
parser.add_argument('--gpuid', default='-1', type=int, help='')
parser.add_argument('--port', default='5013', type=int, help='')
parser.add_argument('--threshold', default='0.3', type=float, help='')
parser.add_argument('--readstream', default='/tmp/processedvideostream', help='')
parser.add_argument('--writestream', default='/tmp/identifiedstream', help='')
parser.add_argument('--timeout', default='0.3', type=float, help='')
parser.add_argument('--writetostream', default='0', type=int, help='')
parser.add_argument('--writetopic', default='sam', help='topic to write to')
    parser.add_argument('--readtopic', default='topic1', help='topic to read from')
parser.add_argument('--filename', default='sam_.jpg', help='')
args = parser.parse_args()
app.run(host='0.0.0.0', port=args.port, debug=True)
|
the-stack_0_6301 | # additional transforms for okutama-action dataset
import random
from PIL import Image, ImageOps
class GroupRandomVerticalFlip(object):
"""
Randomly vertical flips the given PIL.Image with a probability of 0.5
"""
def __init__(self, is_flow=False):
self.is_flow = is_flow
def __call__(self, img_group, is_flow=False):
v = random.random()
if v < 0.5:
ret = [img.transpose(Image.FLIP_TOP_BOTTOM) for img in img_group]
if self.is_flow:
for i in range(1, len(ret), 2):
# invert y_flow pixel values when flipping
ret[i] = ImageOps.invert(ret[i])
return ret
else:
return img_group
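# A minimal usage sketch added for illustration (not part of the original
# transform module): it builds a few blank PIL frames and applies the group
# flip; the frame size and count are arbitrary placeholders.
if __name__ == "__main__":
    demo_frames = [Image.new("RGB", (64, 64)) for _ in range(4)]
    flip = GroupRandomVerticalFlip(is_flow=False)
    flipped = flip(demo_frames)
    print(len(flipped), flipped[0].size)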
|
the-stack_0_6303 | from flask import Flask, render_template, request, json
from module import utils
from os import remove
import face_recognition
app = Flask(
__name__,
static_url_path="",
static_folder="static",
template_folder="template"
)
@app.route("/", methods=["GET","POST"])
def index():
if request.method == "GET":
return render_template("index.html")
else:
encoding = []
local = "template/media/ori.jpg"
path = utils.b64_img(request.form['image'])
for i in [local, path]:
encoding.append(face_recognition.face_encodings(face_recognition.load_image_file(i))[0])
remove(path)
        if face_recognition.compare_faces([encoding[0]], encoding[1])[0]:
            result = "Wajah cocok"  # Indonesian: "Face matches"
        else:
            result = "Wajah tidak cocok"  # Indonesian: "Face does not match"
return app.response_class(
response=json.dumps({
'status': result
}),
mimetype='application/json'
)
if __name__ == "__main__":
app.run() |
the-stack_0_6304 | """Script to produce catalogues for use in stacking analysis.
The catalogues themselves are randomly produced for the purpose of trialing
the code. Modification of variable n can produces a catalogue with an
arbitrary number of sources.
"""
import numpy as np
import os
import logging
import random
import zlib
from flarestack.shared import catalogue_dir
cat_dtype = [
("ra_rad", np.float), ("dec_rad", np.float),
("base_weight", np.float),
("injection_weight_modifier", np.float),
("ref_time_mjd", np.float),
("start_time_mjd", np.float),
("end_time_mjd", np.float),
('distance_mpc', np.float), ('source_name', 'a30'),
]
def single_source(sindec, ra_rad=np.pi):
"""Produces a catalogue with a single source_path.
:param sindec: Sin(Declination) of Source
:param ra: Right Ascension in radians
:return: Source Array
"""
sources = np.empty(
1, dtype=cat_dtype)
ref_time = 55800.4164699
sources['ra_rad'] = np.array([ra_rad])
sources['dec_rad'] = np.arcsin(sindec)
sources['base_weight'] = np.array([1.])
sources['injection_weight_modifier'] = np.array([1.])
sources['distance_mpc'] = np.array([1.0])
sources['ref_time_mjd'] = (np.array([ref_time]))
sources['start_time_mjd'] = (np.array([ref_time - 50]))
sources['end_time_mjd'] = (np.array([ref_time + 100]))
sources['source_name'] = 'PS_dec=' + str(sindec)
return sources
def build_ps_cat_name(sindec):
return catalogue_dir + "single_source/sindec_" + '{0:.2f}'.format(sindec)\
+ ".npy"
def build_ps_stack_cat_name(sindecs):
return f"{catalogue_dir}multi_source/{zlib.adler32(str(list(sindecs)).encode())}.npy"
def make_single_source(sindec):
cat = single_source(sindec)
save_path = build_ps_cat_name(sindec)
try:
os.makedirs(os.path.dirname(save_path))
except FileExistsError:
pass
logging.info("Saving to {0}".format(save_path))
np.save(save_path, cat)
def ps_catalogue_name(sindec):
name = build_ps_cat_name(sindec)
if not os.path.isfile(name):
make_single_source(sindec)
return name
def make_stacked_source(sindecs):
cat = []
for sindec in sindecs:
ra_rad = random.random() ** 2 * np.pi
cat.append(single_source(sindec, ra_rad=ra_rad))
cat = np.array(cat, dtype=cat[0].dtype).T[0]
save_path = build_ps_stack_cat_name(sindecs)
try:
os.makedirs(os.path.dirname(save_path))
except FileExistsError:
pass
logging.info("Saving to {0}".format(save_path))
np.save(save_path, cat)
def ps_stack_catalogue_name(*args):
name = build_ps_stack_cat_name(args)
if not os.path.isfile(name):
make_stacked_source(args)
return name
def make_single_sources():
"""Makes single-source catalogues for a variety of sindec intervals."""
logging.info("Making single-source catalogues for the following sin(declinations):")
sindecs = np.linspace(1.00, -1.00, 41)
logging.info(sindecs)
try:
os.makedirs(os.path.dirname(ps_catalogue_name(0.0)))
except OSError:
pass
for sindec in sindecs:
make_single_source(sindec)
logging.info("Single Source catalogues created!")
def custom_sources(name, ra, dec, weight, distance,
injection_modifier=None, ref_time=np.nan,
start_time=np.nan, end_time=np.nan):
"""Creates a catalogue array,
:param name: Source Name
:param ra: Right Ascension (Degrees)
:param dec: Declination (Degrees)
:param weight: Relative Weights
:param distance: Distance to source (a.u.)
:param ref_time: Reference Time (MJD)
:param start_time: Start Time for window (MJD)
:param end_time: End Time for window (MJD)
:return: Catalogue Array
"""
sources = np.empty(np.array([ra]).__len__(), dtype=cat_dtype)
sources['ra_rad'] = np.deg2rad(np.array([ra]))
sources['dec_rad'] = np.deg2rad(np.array([dec]))
# If some sources are to be brighter than others, a non-uniform weight
# array can be passed.
sources['base_weight'] = np.array([weight])
# The source distance can be provided, in arbitrary units. The injector
# and reconstructor will weight sources according to 1/ (distance ^ 2).
sources['distance_mpc'] = np.array([distance])
# The sources can have a modified injection weight. This means the
# weights used in the likelihood will not match the weights used in the
# injection stage
if injection_modifier is not None:
sources["injection_weight_modifier"] = np.array(injection_modifier)
else:
sources["injection_weight_modifier"] = np.ones_like(ra)
# The source reference time can be arbitrarily defined, for example as
# the discovery date or the date of lightcurve peak. It is important that
# this is consistently defined between sources. Box Time PDFs can be defined
# relative to this point.
sources['ref_time_mjd'] = (np.array([ref_time]))
    # The source can also be assigned fixed start and end times. Fixed Box
# Time PDFs can be defined relative to these values. This allows for the
# Time PDF duration to vary between sources.
sources['start_time_mjd'] = (np.array([start_time]))
sources['end_time_mjd'] = (np.array([end_time]))
sources['source_name'] = np.array([name])
return sources
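# A short usage sketch added for illustration (not part of flarestack): it
# builds a single-source catalogue with made-up coordinates, weight and
# distance, and prints a few fields of the resulting record array.
if __name__ == "__main__":
    demo_cat = custom_sources(
        name="SN_demo",
        ra=120.0,
        dec=-30.0,
        weight=1.0,
        distance=40.0,
        ref_time=55800.0,
        start_time=55750.0,
        end_time=55900.0,
    )
    print(demo_cat["source_name"], demo_cat["ra_rad"], demo_cat["distance_mpc"])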
|
the-stack_0_6305 | import json
from herbieapp.services import logging, SchemaRegistry, SchemaPackage
from herbieapp.models import Schema
class SchemaImporter:
def __init__(self):
self._logger = logging.getLogger(__name__)
self._schema_package = SchemaPackage()
def import_schemas(self):
schema_list = self._schema_package.get_all_json_schemas()
        if len(schema_list) == 0:
self._logger.error('No schemas defined!')
return 0
self._logger.info('Schema import started!')
for schema in schema_list:
schema_data = json.loads(schema)
self._create_update_json_schema(schema_data['business_entity'], schema_data['version'], schema_data['data'])
self._logger.info('Schemas imported successfully!')
return 0
def _create_update_json_schema(self, business_entity: str, version: str, data: str):
schema = Schema.objects.filter(name=business_entity, version=version)
reg = SchemaRegistry()
reg.find_schema(business_entity, version)
schema_data = json.loads(data) if data != '' else {}
if schema.exists() is False:
json_schema = Schema()
json_schema.name = business_entity
json_schema.version = version
json_schema.content = schema_data
json_schema.save()
else:
schema.update(name=business_entity, version=version, content=schema_data)
|
the-stack_0_6306 | import numpy as np
from opytimizer.optimizers.science import eo
from opytimizer.spaces import search
def test_eo_params():
params = {
'a1': 2.0,
'a2': 1.0,
'GP': 0.5,
'V': 1.0
}
new_eo = eo.EO(params=params)
assert new_eo.a1 == 2.0
assert new_eo.a2 == 1.0
assert new_eo.GP == 0.5
assert new_eo.V == 1.0
def test_eo_params_setter():
new_eo = eo.EO()
try:
new_eo.a1 = 'a'
except:
new_eo.a1 = 2.0
try:
new_eo.a1 = -1
except:
new_eo.a1 = 2.0
assert new_eo.a1 == 2.0
try:
new_eo.a2 = 'b'
except:
new_eo.a2 = 1.0
try:
new_eo.a2 = -1
except:
new_eo.a2 = 1.0
assert new_eo.a2 == 1.0
try:
new_eo.GP = 'c'
except:
new_eo.GP = 0.5
try:
new_eo.GP = -1
except:
new_eo.GP = 0.5
assert new_eo.GP == 0.5
try:
new_eo.V = 'd'
except:
new_eo.V = 1.0
try:
new_eo.V = -1
except:
new_eo.V = 1.0
assert new_eo.V == 1.0
def test_eo_compile():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_eo = eo.EO()
new_eo.compile(search_space)
try:
new_eo.C = 1
except:
new_eo.C = []
assert new_eo.C == []
def test_eo_calculate_equilibrium():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_eo = eo.EO()
new_eo.compile(search_space)
new_eo._calculate_equilibrium(search_space.agents)
def test_eo_average_concentration():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_eo = eo.EO()
new_eo.compile(search_space)
C_avg = new_eo._average_concentration(square)
assert type(C_avg).__name__ == 'Agent'
def test_eo_update():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_eo = eo.EO()
new_eo.compile(search_space)
new_eo.update(search_space, square, 1, 10)
|
the-stack_0_6307 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Base class for MIME specializations."""
__all__ = ['MIMEBase']
import email.policy
from email import message
class MIMEBase(message.Message):
"""Base class for MIME specializations."""
def __init__(self, _maintype, _subtype, *, policy=None, **_params):
"""This constructor adds a Content-Type: and a MIME-Version: header.
The Content-Type: header is taken from the _maintype and _subtype
arguments. Additional parameters for this header are taken from the
keyword arguments.
"""
if policy is None:
policy = email.policy.compat32
message.Message.__init__(self, policy=policy)
ctype = '%s/%s' % (_maintype, _subtype)
self.add_header('Content-Type', ctype, **_params)
self['MIME-Version'] = '1.0'
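# A brief usage sketch added for illustration (not part of the email package
# source): MIMEBase is normally subclassed, but it can be instantiated
# directly for types without a dedicated helper; the "payload.bin" parameter
# is a made-up example value.
if __name__ == "__main__":
    part = MIMEBase("application", "octet-stream", name="payload.bin")
    print(part["Content-Type"])
    print(part["MIME-Version"])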
|
the-stack_0_6309 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import shutil
import tempfile
import subprocess
from typing import List, Any, Union, Optional, Dict
from pathlib import Path
class TemporaryDirectoryCopy(tempfile.TemporaryDirectory): # type: ignore
"""Creates a full copy of a directory inside a temporary directory
This class can be used as TemporaryDirectory but:
- the created copy path is available through the copyname attribute
- the contextmanager returns the clean copy path
- the directory where the temporary directory will be created
can be controlled through the CLEAN_COPY_DIRECTORY environment
variable
"""
key = "CLEAN_COPY_DIRECTORY"
@classmethod
def set_clean_copy_environment_variable(cls, directory: Union[Path, str]) -> None:
"""Sets the CLEAN_COPY_DIRECTORY environment variable in
order for subsequent calls to use this directory as base for the
copies.
"""
assert Path(directory).exists(), "Directory does not exist"
os.environ[cls.key] = str(directory)
# pylint: disable=redefined-builtin
def __init__(self, source: Union[Path, str], dir: Optional[Union[Path, str]] = None) -> None:
if dir is None:
dir = os.environ.get(self.key, None)
super().__init__(prefix="tmp_clean_copy_", dir=dir)
self.copyname = Path(self.name) / Path(source).name
shutil.copytree(str(source), str(self.copyname))
def __enter__(self) -> Path:
super().__enter__()
return self.copyname
class FailedJobError(RuntimeError):
"""Job failed during processing
"""
class CommandFunction:
"""Wraps a command as a function in order to make sure it goes through the
pipeline and notify when it is finished.
The output is a string containing everything that has been sent to stdout
Parameters
----------
command: list
command to run, as a list
verbose: bool
prints the command and stdout at runtime
cwd: Path/str
path to the location where the command must run from
Returns
-------
str
Everything that has been sent to stdout
"""
def __init__(self, command: List[str], verbose: bool = False, cwd: Optional[Union[str, Path]] = None,
env: Optional[Dict[str, str]] = None) -> None:
if not isinstance(command, list):
raise TypeError("The command must be provided as a list")
self.command = command
self.verbose = verbose
self.cwd = None if cwd is None else str(cwd)
self.env = env
def __call__(self, *args: Any, **kwargs: Any) -> str:
"""Call the cammand line with addidional arguments
The keyword arguments will be sent as --{key}={val}
The logs are bufferized. They will be printed if the job fails, or sent as output of the function
Errors are provided with the internal stderr
"""
# TODO make the following command more robust (probably fails in multiple cases)
full_command = self.command + [str(x) for x in args] + ["--{}={}".format(x, y) for x, y in kwargs.items()]
if self.verbose:
print(f"The following command is sent: {full_command}")
outlines: List[str] = []
with subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, cwd=self.cwd, env=self.env) as process:
try:
for line in iter(process.stdout.readline, b''):
if not line:
break
outlines.append(line.decode().strip())
if self.verbose:
print(outlines[-1], flush=True)
except Exception: # pylint: disable=broad-except
process.kill()
process.wait()
raise FailedJobError("Job got killed for an unknown reason.")
stderr = process.communicate()[1] # we already got stdout
stdout = "\n".join(outlines)
retcode = process.poll()
if stderr and (retcode or self.verbose):
print(stderr.decode(), file=sys.stderr)
if retcode:
subprocess_error = subprocess.CalledProcessError(retcode, process.args, output=stdout, stderr=stderr)
raise FailedJobError(stderr.decode()) from subprocess_error
return stdout
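# A minimal usage sketch added for illustration (not part of the original
# module): it wraps the POSIX `echo` command, so it assumes `echo` is on PATH;
# extra keyword arguments would be forwarded as --key=value flags.
if __name__ == "__main__":
    echo = CommandFunction(["echo", "hello"], verbose=True)
    captured = echo("world")
    print("captured:", captured)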
|
the-stack_0_6310 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENT_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
"""Test that publicly available ingredients API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login required to
access ingredients"""
res = self.client.get(INGREDIENT_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
"""Test the private ingredients API"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'test123'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredients_list(self):
"""Test retrieving a list of ingredients"""
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENT_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_limited_to_user(self):
"""Test that ingredients are returned
for the authenticated user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
res = self.client.get(INGREDIENT_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
"""Test create a new ingredient"""
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENT_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
        """Test creating an ingredient with an invalid payload fails"""
        payload = {'name': ''}
res = self.client.post(INGREDIENT_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredients_assigned_to_recipe(self):
"""Testing filtering ingredient by assigned recipe"""
ingredient1 = Ingredient.objects.create(
user=self.user, name='Apple'
)
ingredient2 = Ingredient.objects.create(
user=self.user, name='Turkey'
)
recipe = Recipe.objects.create(
title='Apple crumble',
time_minutes=5,
price=10,
user=self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredients_assigned_unique(self):
"""Test filtering ingredient by assigned return unique items"""
ingredient = Ingredient.objects.create(user=self.user, name='Eggs')
Ingredient.objects.create(user=self.user, name="Cheese")
recipe1 = Recipe.objects.create(
title='Eggs benedict',
time_minutes=20,
price=12.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=20,
price=5.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
|
the-stack_0_6312 | from asyncio import Lock, create_task
from time import time
from pyrogram import filters
from pyrogram.types import Message
from wbb import BOT_ID, SUDOERS
from wbb.core.sections import bold, section, w
tasks = {}
TASKS_LOCK = Lock()
arrow = lambda x: (x.text if x else "") + "\n`→`"
def all_tasks():
return tasks
async def add_task(
taskFunc,
task_name,
*args,
**kwargs,
):
async with TASKS_LOCK:
global tasks
task_id = (list(tasks.keys())[-1] + 1) if tasks else 0
task = create_task(
taskFunc(*args, **kwargs),
name=task_name,
)
tasks[task_id] = task, int(time())
return task, task_id
async def rm_task(task_id=None):
global tasks
async with TASKS_LOCK:
for key, value in list(tasks.items()):
if value[0].done() or value[0].cancelled():
del tasks[key]
if (task_id is not None) and (task_id in tasks):
task = tasks[task_id][0]
if not task.done():
task.cancel()
del tasks[task_id]
async def _get_tasks_text():
await rm_task() # Clean completed tasks
if not tasks:
return f"{arrow('')} No pending task"
text = bold("Tasks") + "\n"
for i, task in enumerate(list(tasks.items())):
indent = w * 4
t, started = task[1]
elapsed = round(time() - started)
info = t._repr_info()
id = task[0]
text += section(
f"{indent}Task {i}",
body={
"Name": t.get_name(),
"Task ID": id,
"Status": info[0].capitalize(),
"Origin": info[2].split("/")[-1].replace(">", ""),
"Running since": f"{elapsed}s",
},
indent=8,
)
return text
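# A small usage sketch added for illustration (not part of the original bot
# module); it assumes the wbb imports at the top of this file resolve, and it
# only runs when the file is executed directly.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        async def dummy_job(seconds):
            await asyncio.sleep(seconds)

        _, task_id = await add_task(dummy_job, "dummy-job", 0.1)
        print(await _get_tasks_text())
        await rm_task(task_id)

    asyncio.run(_demo())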
|
the-stack_0_6313 | import subprocess
from text2speech.modules import TTS, TTSValidator
class ESpeakNG(TTS):
audio_ext = "wav"
def __init__(self, config=None):
config = config or {"lang": "en-us", "voice": "m1"}
super(ESpeakNG, self).__init__(config, ESpeakNGValidator(self),
ssml_tags=["speak", "say-as", "voice",
"audio", "prosody", "break",
"emphasis", "sub",
"tts:style", "p", "s",
"mark"])
@property
def gender(self):
return self.voice[0]
def modify_tag(self, tag):
"""Override to modify each supported ssml tag"""
if "%" in tag:
if "-" in tag:
val = tag.split("-")[1].split("%")[0]
tag = tag.replace("-", "").replace("%", "")
new_val = int(val) / 100
tag = tag.replace(val, new_val)
elif "+" in tag:
val = tag.split("+")[1].split("%")[0]
tag = tag.replace("+", "").replace("%", "")
new_val = int(val) / 100
tag = tag.replace(val, new_val)
return tag
def get_tts(self, sentence, wav_file):
subprocess.call(
['espeak-ng', '-m', "-w", wav_file, '-v', self.lang + '+' +
self.voice, sentence])
return wav_file, None
def describe_voices(self):
output = subprocess.check_output(["espeak-ng", "--voices"]).decode(
"utf-8")
voices = {}
for v in output.split("\n")[1:]:
if len(v.split()) < 3:
continue
_, lang_code = v.split()[:2]
voices[lang_code] = ["m1", "m2", "m3", "m4", "m5", "m6", "m7",
"f1", "f2", "f3", "f4", "f5", "croak",
"whisper"]
return voices
class ESpeakNGValidator(TTSValidator):
def __init__(self, tts):
super(ESpeakNGValidator, self).__init__(tts)
def validate_connection(self):
try:
subprocess.call(['espeak-ng', '--version'])
except:
raise Exception(
'ESpeak is not installed. Run: sudo apt-get install espeak-ng')
def get_tts_class(self):
return ESpeakNG
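# A hedged usage sketch added for illustration (not part of the original
# module): it assumes espeak-ng is installed and that the text2speech TTS base
# class maps the config's lang/voice onto self.lang/self.voice as used above;
# the output path is a placeholder.
if __name__ == "__main__":
    tts = ESpeakNG({"lang": "en-us", "voice": "m1"})
    print("languages known to espeak-ng:", len(tts.describe_voices()))
    wav_path, _ = tts.get_tts("hello world", "/tmp/espeak_demo.wav")
    print("wrote", wav_path)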
|
the-stack_0_6314 | import datetime
import json
import operator
import time
from typing import Any, Callable, Generator, List, Optional, Tuple, Union
from sqlalchemy import inspect
from wtforms import Form, ValidationError, fields, widgets
from sqladmin import widgets as sqladmin_widgets
from sqladmin.helpers import as_str
__all__ = [
"DateField",
"DateTimeField",
"JSONField",
"QuerySelectField",
"QuerySelectMultipleField",
"Select2Field",
"Select2TagsField",
"TimeField",
]
class DateField(fields.DateField):
"""
Add custom DatePickerWidget for data-format and data-date-format fields
"""
widget = sqladmin_widgets.DatePickerWidget()
class DateTimeField(fields.DateTimeField):
"""
Allows modifying the datetime format of a DateTimeField using form_args.
"""
widget = sqladmin_widgets.DateTimePickerWidget()
def __init__(
self,
label: str = None,
validators: list = None,
format: str = None,
**kwargs: Any,
) -> None:
"""
Constructor
:param label:
Label
:param validators:
Field validators
:param format:
Format for text to date conversion. Defaults to '%Y-%m-%d %H:%M:%S'
:param kwargs:
Any additional parameters
"""
super().__init__(label, validators, **kwargs)
self.format = format or "%Y-%m-%d %H:%M:%S"
class TimeField(fields.Field):
"""
A text field which stores a `datetime.time` object.
Accepts time string in multiple formats: 20:10, 20:10:00, 10:00 am, 9:30pm, etc.
"""
widget = sqladmin_widgets.TimePickerWidget()
def __init__(
self,
label: str = None,
validators: list = None,
formats: List[str] = None,
default_format: str = None,
**kwargs: Any,
) -> None:
"""
Constructor
:param label:
Label
:param validators:
Field validators
:param formats:
            Supported time formats, as an enumerable.
:param default_format:
Default time format. Defaults to '%H:%M:%S'
:param kwargs:
Any additional parameters
"""
super().__init__(label, validators, **kwargs)
self.formats = formats or (
"%H:%M:%S",
"%H:%M",
"%I:%M:%S%p",
"%I:%M%p",
"%I:%M:%S %p",
"%I:%M %p",
)
self.default_format = default_format or "%H:%M:%S"
self.data: Optional[datetime.time]
def _value(self) -> str:
if self.raw_data:
return " ".join(self.raw_data)
elif self.data is not None:
return self.data.strftime(self.default_format)
else:
return ""
def process_formdata(self, valuelist: List[str]) -> None:
if valuelist:
date_str = " ".join(valuelist)
if date_str.strip():
for format in self.formats:
try:
timetuple = time.strptime(date_str, format)
self.data = datetime.time(
timetuple.tm_hour, timetuple.tm_min, timetuple.tm_sec
)
return
except ValueError:
pass
raise ValueError("Invalid time format")
else:
self.data = None
class Select2Field(fields.SelectField):
"""
`Select2 <https://github.com/select2/select2>`_ styled select widget.
"""
widget = sqladmin_widgets.Select2Widget()
def __init__(
self,
label: str = None,
validators: list = None,
coerce: type = str,
choices: Union[list, Callable] = None,
allow_blank: bool = False,
blank_text: str = None,
**kwargs: Any,
) -> None:
super().__init__(label, validators, coerce, choices, **kwargs)
self.allow_blank = allow_blank
self.blank_text = blank_text or " "
def iter_choices(self) -> Generator[Tuple[str, str, bool], None, None]:
choices = self.choices or []
if self.allow_blank:
yield ("__None", self.blank_text, self.data is None)
for choice in choices:
if isinstance(choice, tuple):
yield (choice[0], choice[1], self.coerce(choice[0]) == self.data)
else:
yield (
choice.value,
choice.name,
self.coerce(choice.value) == self.data,
)
def process_formdata(self, valuelist: List[str]) -> None:
if valuelist:
if valuelist[0] == "__None":
self.data = None
else:
try:
self.data = self.coerce(valuelist[0])
except ValueError:
raise ValueError(self.gettext("Invalid Choice: could not coerce"))
def pre_validate(self, form: Form) -> None:
if self.allow_blank and self.data is None:
return
super().pre_validate(form)
class Select2TagsField(fields.StringField):
"""
`Select2 <https://github.com/select2/select2>`_ styled text field.
"""
widget = sqladmin_widgets.Select2TagsWidget()
def __init__(
self,
label: str = None,
validators: list = None,
save_as_list: bool = False,
coerce: type = str,
**kwargs: Any,
) -> None:
"""
Initialization
:param save_as_list:
            If `True`, populate ``obj`` with a list, otherwise with a string
"""
self.save_as_list = save_as_list
self.coerce = coerce
super().__init__(label, validators, **kwargs)
def process_formdata(self, valuelist: List[str]) -> None:
if valuelist:
if self.save_as_list:
self.data = [
self.coerce(v.strip()) for v in valuelist[0].split(",") if v.strip()
]
else:
self.data = self.coerce(valuelist[0])
def _value(self) -> str:
if isinstance(self.data, (list, tuple)):
return ",".join(as_str(v) for v in self.data)
elif self.data:
return as_str(self.data)
else:
return ""
class JSONField(fields.TextAreaField):
def _value(self) -> str:
if self.raw_data:
return self.raw_data[0]
elif self.data:
return as_str(json.dumps(self.data, ensure_ascii=False))
else:
return "{}"
def process_formdata(self, valuelist: List[str]) -> None:
if valuelist:
value = valuelist[0]
# allow saving blank field as None
if not value:
self.data = None
return
try:
self.data = json.loads(valuelist[0])
except ValueError:
raise ValueError(self.gettext("Invalid JSON"))
class QuerySelectField(fields.SelectFieldBase):
"""
Will display a select drop-down field to choose between ORM results in a
sqlalchemy `Query`. The `data` property actually will store/keep an ORM
model instance, not the ID. Submitting a choice which is not in the query
will result in a validation error.
This field only works for queries on models whose primary key column(s)
have a consistent string representation. This means it mostly only works
for those composed of string, unicode, and integer types. For the most
part, the primary keys will be auto-detected from the model, alternately
pass a one-argument callable to `get_pk` which can return a unique
comparable key.
Specify `get_label` to customize the label associated with each option. If
a string, this is the name of an attribute on the model object to use as
the label text. If a one-argument callable, this callable will be passed
model instance and expected to return the label text. Otherwise, the model
object's `__str__` will be used.
If `allow_blank` is set to `True`, then a blank choice will be added to the
top of the list. Selecting this choice will result in the `data` property
being `None`. The label for this blank choice can be set by specifying the
`blank_text` parameter.
"""
widget = widgets.Select()
def __init__(
self,
object_list: list = None,
label: str = None,
validators: list = None,
get_label: Union[Callable, str] = None,
allow_blank: bool = False,
blank_text: str = "",
**kwargs: Any,
) -> None:
super().__init__(label=label, validators=validators, **kwargs)
self._object_list = object_list or []
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, str):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self._data: Optional[tuple]
self._formdata: Optional[Union[str, List[str]]]
@property
def data(self) -> Optional[tuple]:
if self._formdata is not None:
for pk, obj in self._object_list:
if pk == self._formdata:
self.data = obj
break
return self._data
@data.setter
def data(self, data: tuple) -> None:
self._data = data
self._formdata = None
def iter_choices(self) -> Generator[Tuple[str, str, bool], None, None]:
if self.allow_blank:
yield ("__None", self.blank_text, self.data is None)
identity = inspect(self.data).identity[0] if self.data else "__None"
for pk, obj in self._object_list:
yield (pk, self.get_label(obj), pk == str(identity))
def process_formdata(self, valuelist: List[str]) -> None:
if valuelist:
if self.allow_blank and valuelist[0] == "__None":
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form: Form) -> None:
data = self.data
if data is not None:
for _, obj in self._object_list:
if data == obj:
break
else: # pragma: no cover
raise ValidationError(self.gettext("Not a valid choice"))
elif self._formdata or not self.allow_blank:
raise ValidationError(self.gettext("Not a valid choice"))
class QuerySelectMultipleField(QuerySelectField):
"""
Very similar to QuerySelectField with the difference that this will
display a multiple select. The data property will hold a list with ORM
model instances and will be an empty list when no value is selected.
If any of the items in the data list or submitted form data cannot be
found in the query, this will result in a validation error.
"""
widget = widgets.Select(multiple=True)
def __init__(
self,
object_list: list = None,
label: str = None,
validators: list = None,
default: Any = None,
**kwargs: Any,
) -> None:
default = default or []
super().__init__(label=label, validators=validators, default=default, **kwargs)
self._object_list = object_list or []
if kwargs.get("allow_blank", False):
import warnings
warnings.warn(
"allow_blank=True does not do anything for QuerySelectMultipleField."
)
self._invalid_formdata = False
self._formdata: Optional[List[str]] = None
self._data: Optional[tuple] = None
@property
def data(self) -> Optional[tuple]:
formdata = self._formdata
if formdata is not None:
data = []
for pk, obj in self._object_list:
if not formdata:
break
elif pk in formdata:
formdata.remove(pk)
data.append(obj)
if formdata:
self._invalid_formdata = True
self.data = data or self._data # type: ignore
return self._data
@data.setter
def data(self, data: tuple) -> None:
self._data = data
self._formdata = None
def iter_choices(self) -> Generator[Tuple[str, Any, bool], None, None]:
if self.data is not None:
primary_keys = [str(inspect(m).identity[0]) for m in self.data]
for pk, obj in self._object_list:
yield (pk, self.get_label(obj), pk in primary_keys)
def process_formdata(self, valuelist: List[str]) -> None:
self._formdata = list(set(valuelist))
def pre_validate(self, form: Form) -> None:
if self._invalid_formdata:
raise ValidationError(self.gettext("Not a valid choice"))
elif self.data:
pk_list = [x[0] for x in self._object_list]
for v in self.data:
identity = inspect(v).identity
if identity and str(identity[0]) not in pk_list: # pragma: no cover
raise ValidationError(self.gettext("Not a valid choice"))
|
the-stack_0_6315 | _base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_icdar2021.py'
rpn_weight = 0.7
model = dict(
rpn_head=dict(
_delete_=True,
type='CascadeRPNHead',
num_stages=2,
stages=[
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[1.0],
strides=[4, 8, 16, 32, 64]),
adapt_cfg=dict(type='dilation', dilation=3),
bridged_feature=True,
sampling=False,
with_cls=False,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.1, 0.1, 0.5, 0.5)),
loss_bbox=dict(
type='IoULoss', linear=True,
loss_weight=10.0 * rpn_weight)),
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
adapt_cfg=dict(type='offset'),
bridged_feature=False,
sampling=True,
with_cls=True,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.05, 0.05, 0.1, 0.1)),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0 * rpn_weight),
loss_bbox=dict(
type='IoULoss', linear=True,
loss_weight=10.0 * rpn_weight))
]),
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(2000, 900), (2000, 600)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 724),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
dataset_type = 'Icdar2021Dataset'
# data_root = '/home/weibaole/disk1/gpu/Workspace/Datas/ICDAR2021/'
data_root = '/home/wbl/workspace/data/ICDAR2021/'
classes = ('embedded', 'isolated',)
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'TrM_isolated.json',
img_prefix=data_root + 'TrM/',
classes=classes,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'VaM_isolated.json',
img_prefix=data_root + 'VaM/',
classes=classes,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'VaM_isolated.json',
img_prefix=data_root + 'VaM/',
classes=classes,
pipeline=test_pipeline))
# model training and testing settings
train_cfg = dict(
rpn=[
dict(
assigner=dict(
type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False)
],
rpn_proposal=dict(max_num=300, nms_thr=0.8),
rcnn=dict(
assigner=dict(pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(type='RandomSampler', num=256)))
test_cfg = dict(rpn=dict(max_num=300, nms_thr=0.8), rcnn=dict(score_thr=1e-3))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(step=[16, 22])
total_epochs = 24
|
the-stack_0_6317 | #!/usr/bin/python
#-*- coding: utf-8 -*-
# Library: pip3 install opencv-python
import cv2
# Load the cascade
# /Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/cv2/data/haarcascade_frontalface_alt.xml
face_cascade = cv2.CascadeClassifier('face_detector.xml')
# Read the input image
img = cv2.imread('img_test.jpg')
# Detect faces in the image
faces = face_cascade.detectMultiScale(img, 1.1, 4)
# Draw rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 250, 205), 2)
# Export the result
cv2.imwrite('img_test.png', img)
print('Found {0} face(s)!'.format(len(faces)), '\nSuccessfully saved')
|
the-stack_0_6318 | #!/usr/bin/env python3
from marshmallow import Schema, fields, RAISE
from marshmallow import ValidationError
from marshmallow.validate import Range
class BytesField(fields.Field):
def _validate(self, value):
if not isinstance(value, bytes):
raise ValidationError('Invalid input type.')
if value is None or value == b'':
raise ValidationError('Invalid value')
class CacheSchema(Schema):
content = BytesField(required=True)
status_code = fields.Integer(required=True, validate=Range(min=100, max=599))
headers = fields.Dict(required=True)
class Meta:
unknown = RAISE
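# A small usage sketch added for illustration (not part of the original
# module): it validates a made-up cached-response payload and shows the error
# raised when the content is not bytes.
if __name__ == "__main__":
    schema = CacheSchema()
    ok = schema.load({"content": b"<html></html>",
                      "status_code": 200,
                      "headers": {"Content-Type": "text/html"}})
    print(ok)
    try:
        schema.load({"content": "not-bytes", "status_code": 200, "headers": {}})
    except ValidationError as err:
        print(err.messages)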
|
the-stack_0_6319 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
import json
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.tests \
import utils as tables_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.\
constraints.check_constraint.tests import utils as chk_constraint_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.\
constraints.exclusion_constraint.tests import utils as exclusion_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.\
constraints.foreign_key.tests import utils as fk_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.\
constraints.index_constraint.tests import utils as index_constraint_utils
from . import utils as constraints_utils
class ConstraintDeleteMultipleTestCase(BaseTestGenerator):
"""This class will delete constraints under table node."""
url = '/browser/constraints/nodes/'
# Generates scenarios from cast_test_data.json file
scenarios = utils.generate_scenarios("constraints_get_nodes",
constraints_utils.test_cases)
def setUp(self):
# Load test data
self.data = self.test_data
# Create db connection
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database to add a table.")
# Create schema
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema to add a table.")
# Create table
self.table_name = "table_constraint_delete_%s" % \
(str(uuid.uuid4())[1:8])
self.table_id = tables_utils.create_table(self.server,
self.db_name,
self.schema_name,
self.table_name)
# Create Check Constraints
self.check_constraint_name = "test_constraint_delete_%s" % \
(str(uuid.uuid4())[1:8])
self.check_constraint_id = \
chk_constraint_utils.create_check_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.check_constraint_name)
self.check_constraint_name_1 = "test_constraint_delete1_%s" % (
str(uuid.uuid4())[1:8])
self.check_constraint_id_1 = \
chk_constraint_utils.create_check_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.check_constraint_name_1)
# Create Exclusion Constraint
self.exclustion_constraint_name = "test_exclusion_get_%s" % (
str(uuid.uuid4())[1:8])
self.exclustion_constraint_id = \
exclusion_utils.create_exclusion_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.exclustion_constraint_name
)
# Create Foreign Key
self.foreign_table_name = "foreign_table_foreignkey_get_%s" % \
(str(uuid.uuid4())[1:8])
self.foreign_table_id = tables_utils.create_table(
self.server, self.db_name, self.schema_name,
self.foreign_table_name)
self.foreign_key_name = "test_foreignkey_get_%s" % \
(str(uuid.uuid4())[1:8])
self.foreign_key_id = fk_utils.create_foreignkey(
self.server, self.db_name, self.schema_name, self.table_name,
self.foreign_table_name)
# Create Primary Key
self.primary_key_name = "test_primary_key_get_%s" % \
(str(uuid.uuid4())[1:8])
self.primary_key_id = \
index_constraint_utils.create_index_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.primary_key_name, "PRIMARY KEY")
# Create Unique Key constraint
self.unique_constraint_name = "test_unique_constraint_get_%s" % (
str(uuid.uuid4())[1:8])
self.unique_constraint_id = \
index_constraint_utils.create_index_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.unique_constraint_name, "UNIQUE")
def runTest(self):
"""This function will delete constraints under table node."""
if self.is_positive_test:
response = constraints_utils.api_get(self)
# Assert response
utils.assert_status_code(self, response)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
|
the-stack_0_6322 | import extract_sift, extract_global, retrieval, config
import os, shutil, argparse
def parse_arguments():
parser = argparse.ArgumentParser(description='Evaluate dataset')
parser.add_argument(
'--sift_mode', # mode = 0 -> SIFT detector; 1 -> Hessian affine detector
type=int,
required=False,
default=1
)
parser.add_argument(
'--num_threads',
type=int,
required=False,
default=8
)
args = parser.parse_args()
return args
def main(args):
videosearch_dir = '../videosearch'
db_dir = os.path.join(config.DATASET_DIR, 'model_frames')
query_dir = os.path.join(config.DATASET_DIR, 'queries')
extract_sift.extract(videosearch_dir, db_dir, args.sift_mode, args.num_threads)
extract_sift.extract(videosearch_dir, query_dir, args.sift_mode, args.num_threads)
extract_global.generateFrameLists(config.DATASET_DIR)
if __name__ == '__main__':
main(parse_arguments())
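# Illustrative invocation (assumptions: the script name below is hypothetical and
# config.DATASET_DIR points at a dataset containing 'model_frames' and 'queries'
# sub-directories, as used in main() above):
#
#     python evaluate_dataset.py --sift_mode 1 --num_threads 8
#
# --sift_mode 0 selects the plain SIFT detector, 1 the Hessian-affine detector.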
|
the-stack_0_6323 | #Built in Python
import os
import sys
import glob
#Standard Packages
from astropy.io import ascii
from astropy import table
from astropy.time import Time
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
matplotlib.style.use('seaborn-colorblind')
from scipy.interpolate import interp1d
#Installed for this project
import extinction
#Mine
import visualization
#get_ipython().magic('matplotlib inline')
import connect_to_sndavis
import define_filters
import supernova
FIG_DIR = '../figures'
def build_sn_list():
db, cursor = connect_to_sndavis.get_cursor()
# 6 = II, 17=IIP-like, 18=IIL-like
query_str = '{} {} {} {} {} {} {} {} {} {}'.format(
'SELECT DISTINCT idsupernovae.`id`, sntype,name, slope, slopetype',
'FROM idsupernovae',
'JOIN supernovanames',
'ON idsupernovae.`id` = supernovanames.`targetid`',
'JOIN snslope',
'ON idsupernovae.`id` = snslope.`targetid` ',
'WHERE (sntype = 6 ',
'OR sntype = 17',
'OR sntype = 18)',
"AND slopetype = 's50';")
query = cursor.execute(query_str)
results = cursor.fetchall()
id = []
name = []
slope = []
for idict in results:
id.append(idict['id'])
name.append(idict['name'])
slope.append(idict['slope'])
tbdata = table.Table([id, name, slope], names = ['id', 'name', 's50'])
return tbdata
def find_closest_slope(tbdata):
slope_diff = np.abs((tbdata['s50'] - tbdata['s50'][tbdata['name']=='ASASSN-15oz']))
indx = np.argsort(slope_diff)
return indx
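# Minimal sketch of the ranking logic (hypothetical numbers, not pipeline data):
# find_closest_slope() orders rows by |s50 - s50(ASASSN-15oz)|, so index 0 is always
# ASASSN-15oz itself and the following indices are the closest slope matches.
#
#     demo = table.Table([[1, 2, 3],
#                         ['ASASSN-15oz', 'SN-A', 'SN-B'],
#                         [0.50, 0.48, 0.90]],
#                        names=['id', 'name', 's50'])
#     find_closest_slope(demo)  # -> array([0, 1, 2]); SN-A is the best match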
def compare_sn(snname1, snname2, rank, band='all', sn2_phase_offset = 0):
sn1 = supernova.LightCurve2(snname1)
sn1.get_photometry(band=band)
sn2 = supernova.LightCurve2(snname2)
sn2.get_photometry(band=band)
    common_bands = set(sn1.apparent_mag.keys()) & set(sn2.apparent_mag.keys())
fig = plt.figure(figsize=[8.5, 11])
for plot_num, iband in enumerate(common_bands):
if plot_num == 5:
plt.savefig(os.path.join(FIG_DIR, 'similar_lc_{}_2.pdf'.format(sn2.name)))
plt.close()
fig = plt.figure(figsize=[8.5, 11])
ax = fig.add_subplot(3, 2, plot_num%6+1)
ax.plot(sn1.phase[iband], sn1.apparent_mag[iband]/sn1.apparent_mag[iband].min(), 'o', label = sn1.name, markersize=2)
ax.plot(sn2.phase[iband]+sn2_phase_offset, sn2.apparent_mag[iband]/sn2.apparent_mag[iband].min(), 's', label = sn2.name, markersize=2)
ax.set_title('{}, {} band, rank={}'.format(sn2.name, iband, rank), fontsize='small')
ax.set_ylim(ymax=np.max((sn1.apparent_mag[iband])[sn1.phase[iband] < 100])/sn1.apparent_mag[iband].min()+0.05)
ax.set_ylim(ax.get_ylim()[::-1])
ax.set_xlim(0, 100)
ax.legend(loc='best', fontsize='xx-small')
fig.tight_layout()
plt.savefig(os.path.join(FIG_DIR, 'similar_lc_{}.pdf'.format(sn2.name)))
plt.close()
if __name__ == "__main__":
num_sn = 10
tbdata = build_sn_list()
best_match_indx = find_closest_slope(tbdata)
print(tbdata[best_match_indx[1:num_sn+1]])
for rank, sn_indx in enumerate(best_match_indx[1:num_sn+1]): #start at 1 b/c the SN is always the best match to itself
compare_sn('ASASSN-15oz', tbdata['name'][sn_indx], rank+1)
compare_sn('ASASSN-15oz', '2016zb', 1, sn2_phase_offset = 8)
|
the-stack_0_6324 | import time
import urllib
import urllib2
from bs4 import BeautifulSoup
from google import search
from slackclient import SlackClient
# from nltk.sentiment.vader import SentimentIntensityAnalyzer
import config
bot_name = 'ninja'
bot_id = config.bot_id['BOT_ID']  # the bot's user id string, not a wrapped client
at_bot = "<@" + str(bot_id) + ">:"
slack_client = SlackClient(config.slack_token['SLACK_TOKEN'])
def parse_data(slack_data):
inputdata = slack_data
if inputdata and len(inputdata) > 0:
for data in inputdata:
            # React only to message events that carry text and were not sent by the bot
            if data and 'text' in data and data.get('user') != bot_id:
return data['text'], data['channel']
return None, None
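# Example of the RTM payload shape parse_data() expects (hypothetical ids, for
# illustration only): slack_client.rtm_read() returns a list of event dicts, and any
# event that carries a 'text' key and was not sent by the bot yields (text, channel).
#
#     events = [{'type': 'message', 'channel': 'C12345', 'user': 'U67890',
#                'text': '<@NINJABOT> how to reverse a list in python'}]
#     parse_data(events)  # -> ('<@NINJABOT> how to reverse a list in python', 'C12345')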
def chat(input_command, channel):
input_command = input_command.replace("<@" + str(bot_id) + "> ", "")
so_url = "http://stackoverflow.com"
for url in search(urllib.quote_plus(input_command.encode('utf8'))):
if "http://stackoverflow.com/" in url:
so_url = url
slack_client.api_call("chat.postMessage", channel=channel, text=str(url), as_user=True)
break
else:
continue
try:
page = urllib2.urlopen(so_url)
soup = BeautifulSoup(page.read())
result = soup.find(attrs={'class': 'answer accepted-answer'})
if result is not None:
res = result.find(attrs={'class': 'post-text'})
for a in res:
if a.string is None:
a.string = ' '
slack_client.api_call("chat.postMessage", channel=channel, text="```" + res.get_text() + "```",
as_user=True)
# slack_client.api_call("chat.postMessage", channel=channel,
# text="```" + sentimentalAnalyser(res.get_text()) + "```",
# as_user=True)
# print(sentimentalAnalyser(res.get_text()))
except IndexError:
page = urllib2.urlopen(so_url)
soup = BeautifulSoup(page.read())
result = soup.find(attrs={'class': 'answer'})
if result is not None:
res = result.find(attrs={'class': 'post-text'})
for a in res:
if a.string is None:
a.string = ' '
slack_client.api_call("chat.postMessage", channel=channel, text="```" + res.get_text() + "```",
as_user=True)
# slack_client.api_call("chat.postMessage", channel=channel,
# text="```" + "Sentiment: " + sentimentalAnalyser(res.get_text()) + "```",
# as_user=True)
# print(sentimentalAnalyser(res.get_text()))
except:
print("Could not parse")
slack_client.api_call("chat.postMessage", channel=channel, text="Could not find a relevant link", as_user=True)
raise
# def sentimentalAnalyser(data):
# sresult = []
# stringData = data
# sid = SentimentIntensityAnalyzer()
# ss = sid.polarity_scores(stringData)
# '''for k in sorted(ss):
# print('{0}: {1}, '.format(k, ss[k]))
# print()'''
# for k in sorted(ss):
# sresult.append('{0}'.format(ss[k]))
# print(sresult[0])
# return sresult[0]
def ninjafy():
if slack_client.rtm_connect():
print("Connected")
while True:
input_command, channel = parse_data(slack_client.rtm_read())
if input_command and channel:
chat(input_command, channel)
time.sleep(1)
else:
print("Connection failed")
if __name__ == '__main__':
ninjafy()
|
the-stack_0_6325 | import tkinter as tk
from tkinter import messagebox
class FillAllFields(Exception):
pass
class StudentAlreadyRegistered(Exception):
pass
class EmptyField(Exception):
pass
class MatriculaRepeated(Exception):
pass
class Estudante:
def __init__(self, nroMatric, nome):
self.__nroMatric = nroMatric
self.__nome = nome
def getNroMatric(self):
return self.__nroMatric
def getNome(self):
return self.__nome
class LimiteInsereEstudantes(tk.Toplevel):
def __init__(self, controle):
tk.Toplevel.__init__(self)
self.geometry('250x100')
self.title("Estudante")
self.controle = controle
self.frameNro = tk.Frame(self)
self.frameNome = tk.Frame(self)
self.frameButton = tk.Frame(self)
self.frameNro.pack()
self.frameNome.pack()
self.frameButton.pack()
self.labelNro = tk.Label(self.frameNro, text="Nro Matrícula: ")
self.labelNome = tk.Label(self.frameNome, text="Nome: ")
self.labelNro.pack(side="left")
self.labelNome.pack(side="left")
self.inputNro = tk.Entry(self.frameNro, width=20)
self.inputNro.pack(side="left")
self.inputNome = tk.Entry(self.frameNome, width=20)
self.inputNome.pack(side="left")
self.buttonSubmit = tk.Button(self.frameButton, text="Enter")
self.buttonSubmit.pack(side="left")
self.buttonSubmit.bind("<Button>", controle.enterHandler)
self.buttonClear = tk.Button(self.frameButton, text="Clear")
self.buttonClear.pack(side="left")
self.buttonClear.bind("<Button>", controle.clearHandler)
self.buttonFecha = tk.Button(self.frameButton, text="Concluído")
self.buttonFecha.pack(side="left")
self.buttonFecha.bind("<Button>", controle.fechaHandler)
def mostraJanela(self, titulo, msg):
messagebox.showinfo(titulo, msg)
class LimiteMostraEstudantes():
def __init__(self, str):
messagebox.showinfo('Lista de alunos', str)
class LimiteConsultaEstudantes(tk.Toplevel):
def __init__(self, controle):
tk.Toplevel.__init__(self)
self.geometry('250x100')
self.title("Consultar estudante")
self.controle = controle
self.frameNro = tk.Frame(self)
self.frameButton = tk.Frame(self)
self.frameNro.pack()
self.frameButton.pack()
self.labelNro = tk.Label(self.frameNro, text='Nro Matrícula: ')
self.labelNro.pack(side='left')
self.inputNro = tk.Entry(self.frameNro, width=20)
self.inputNro.pack(side='left')
self.buttonConsulta = tk.Button(
self.frameButton, text='Consultar', font=('Negrito', 9))
self.buttonConsulta.pack(side='left')
self.buttonConsulta.bind("<Button>", controle.consultaHandler)
self.buttonConcluido = tk.Button(
self.frameButton, text='Concluído', font=('Negrito', 9))
self.buttonConcluido.pack(side='left')
self.buttonConcluido.bind("<Button>", controle.concluiHandler)
def mostraJanela(self, titulo, msg):
messagebox.showinfo(titulo, msg)
class CtrlEstudante():
def __init__(self):
self.listaEstudantes = [
Estudante('1001', 'Joao Santos'),
Estudante('1002', 'Marina Cintra'),
Estudante('1003', 'Felipe Reis'),
Estudante('1004', 'Ana Souza')
]
def getEstudante(self, nroMatric):
estRet = None
for est in self.listaEstudantes:
if est.getNroMatric() == nroMatric:
estRet = est
return estRet
def getListaNroMatric(self):
listaNro = []
for est in self.listaEstudantes:
listaNro.append(est.getNroMatric())
return listaNro
def insereEstudantes(self):
self.limiteIns = LimiteInsereEstudantes(self)
def mostraEstudantes(self):
if len(self.listaEstudantes) == 0:
str = "Não existem alunos cadastrados"
self.limiteLista = LimiteMostraEstudantes(str)
else:
str = "Nro Matric. -- Nome\n"
for est in self.listaEstudantes:
str += est.getNroMatric() + ' -- ' + est.getNome() + '\n'
self.limiteLista = LimiteMostraEstudantes(str)
def consultaEstudantes(self):
self.limiteCon = LimiteConsultaEstudantes(self)
def enterHandler(self, event):
try:
if len(self.limiteIns.inputNro.get()) == 0 or len(self.limiteIns.inputNome.get()) == 0:
raise FillAllFields()
for estud in self.listaEstudantes:
if estud.getNroMatric() == self.limiteIns.inputNro.get() and estud.getNome() == self.limiteIns.inputNome.get():
raise StudentAlreadyRegistered()
if estud.getNroMatric() == self.limiteIns.inputNro.get():
raise MatriculaRepeated()
except StudentAlreadyRegistered:
self.limiteIns.mostraJanela(
'Cuidado, atenção!', 'Estudante já cadastrado!')
except FillAllFields:
self.limiteIns.mostraJanela(
'Cuidado, atenção!', 'Por favor, preencha todos os campos!')
except MatriculaRepeated:
self.limiteIns.mostraJanela(
                'Cuidado, atenção!', 'Número de matrícula já existe!')
else:
nroMatric = self.limiteIns.inputNro.get()
nome = self.limiteIns.inputNome.get()
estudante = Estudante(nroMatric, nome)
self.listaEstudantes.append(estudante)
self.limiteIns.mostraJanela(
'Sucesso', 'Estudante cadastrado com sucesso')
self.clearHandler(event)
def clearHandler(self, event):
self.limiteIns.inputNro.delete(0, len(self.limiteIns.inputNro.get()))
self.limiteIns.inputNome.delete(0, len(self.limiteIns.inputNome.get()))
def fechaHandler(self, event):
self.limiteIns.destroy()
def consultaHandler(self, event):
try:
if len(self.limiteCon.inputNro.get()) == 0:
raise EmptyField()
except EmptyField:
str = 'Campo de matrícula vazio! Por favor, digite um número de matrícula!'
self.limiteCon.mostraJanela('Erro', str)
else:
nroMatric = self.limiteCon.inputNro.get()
est = self.getEstudante(nroMatric)
            if est is None:
str = (f'Não existe aluno com a matrícula {nroMatric}')
self.limiteCon.mostraJanela('Aluno não encontrado', str)
self.limiteCon.inputNro.delete(
0, len(self.limiteCon.inputNro.get()))
else:
str = 'Informações do aluno consultado:\n'
str += 'Nro Matric. -- Nome\n'
str += est.getNroMatric() + ' -- ' + est.getNome()
self.limiteCon.mostraJanela('Aluno encontrado', str)
self.limiteCon.inputNro.delete(
0, len(self.limiteCon.inputNro.get()))
def concluiHandler(self, event):
self.limiteCon.destroy()
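if __name__ == '__main__':
    # Minimal launcher sketch (not in the original file): the Toplevel dialogs above
    # need a Tk root to attach to, so a hypothetical entry point could look like this.
    root = tk.Tk()
    root.withdraw()  # keep the empty root window hidden
    ctrl = CtrlEstudante()
    ctrl.insereEstudantes()  # open the "insert student" dialog
    root.mainloop()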
|
the-stack_0_6327 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
from turtle import Turtle  # import the Turtle class from the turtle library
import turtle
p = Turtle()
p.speed(2)  # set drawing speed
p.pensize(3)  # set line thickness
p.color('black', 'yellow')  # pen colour and fill colour
p.begin_fill()  # start filling
for i in range(5):  # five edges
    p.fd(200)  # move forward 200
    p.right(144)  # turn right 144 degrees (same result as turning left 216 degrees)
p.end_fill()  # finish filling
turtle.done()
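# Why 144 degrees: over the five edges the heading sweeps two full turns, so each
# exterior turn is 2 * 360 / 5 = 144 degrees (equivalently 180 - 36). A hypothetical
# generalisation to an n-pointed {n/2} star (n odd) would turn 2 * 360 / n per edge.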
|
the-stack_0_6331 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2014 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The PyBuilder reactor module.
Operates a build process by instrumenting an ExecutionManager from the
execution module.
"""
import imp
import os.path
from pybuilder.core import (TASK_ATTRIBUTE, DEPENDS_ATTRIBUTE,
DESCRIPTION_ATTRIBUTE, AFTER_ATTRIBUTE,
BEFORE_ATTRIBUTE, INITIALIZER_ATTRIBUTE,
ACTION_ATTRIBUTE, ONLY_ONCE_ATTRIBUTE,
Project, NAME_ATTRIBUTE, ENVIRONMENTS_ATTRIBUTE)
from pybuilder.errors import PyBuilderException, ProjectValidationFailedException
from pybuilder.pluginloader import (BuiltinPluginLoader,
DispatchingPluginLoader,
ThirdPartyPluginLoader,
DownloadingPluginLoader)
from pybuilder.utils import as_list
from pybuilder.execution import Action, Initializer, Task
class BuildSummary(object):
def __init__(self, project, task_execution_summaries):
self.project = project
self.task_summaries = task_execution_summaries
class Reactor(object):
_current_instance = None
@staticmethod
def current_instance():
return Reactor._current_instance
def __init__(self, logger, execution_manager, plugin_loader=None):
self.logger = logger
self.execution_manager = execution_manager
if not plugin_loader:
builtin_plugin_loader = BuiltinPluginLoader(self.logger)
installed_thirdparty_plugin_loader = ThirdPartyPluginLoader(self.logger)
downloading_thirdparty_plugin_loader = DownloadingPluginLoader(self.logger)
self.plugin_loader = DispatchingPluginLoader(
self.logger, builtin_plugin_loader, installed_thirdparty_plugin_loader, downloading_thirdparty_plugin_loader)
else:
self.plugin_loader = plugin_loader
self._plugins = []
self.project = None
def require_plugin(self, plugin):
if plugin not in self._plugins:
try:
self._plugins.append(plugin)
self.import_plugin(plugin)
except: # NOQA
self._plugins.remove(plugin)
raise
def get_plugins(self):
return self._plugins
def get_tasks(self):
return self.execution_manager.tasks
def validate_project(self):
validation_messages = self.project.validate()
if len(validation_messages) > 0:
raise ProjectValidationFailedException(validation_messages)
def prepare_build(self,
property_overrides=None,
project_directory=".",
project_descriptor="build.py"):
if not property_overrides:
property_overrides = {}
Reactor._current_instance = self
project_directory, project_descriptor = self.verify_project_directory(
project_directory, project_descriptor)
self.logger.debug("Loading project module from %s", project_descriptor)
self.project = Project(basedir=project_directory)
self.project_module = self.load_project_module(project_descriptor)
self.apply_project_attributes()
self.override_properties(property_overrides)
self.logger.debug("Have loaded plugins %s", ", ".join(self._plugins))
self.collect_tasks_and_actions_and_initializers(self.project_module)
self.execution_manager.resolve_dependencies()
def build(self, tasks=None, environments=None):
if not tasks:
tasks = []
if not environments:
environments = []
Reactor._current_instance = self
if environments:
self.logger.info(
"Activated environments: %s", ", ".join(environments))
self.execution_manager.execute_initializers(
environments, logger=self.logger, project=self.project)
self.log_project_properties()
self.validate_project()
tasks = as_list(tasks)
if not len(tasks):
if self.project.default_task:
tasks += as_list(self.project.default_task)
else:
raise PyBuilderException("No default task given.")
execution_plan = self.execution_manager.build_execution_plan(tasks)
self.logger.debug("Execution plan is %s", ", ".join(
[task.name for task in execution_plan]))
self.logger.info(
"Building %s version %s", self.project.name, self.project.version)
self.logger.info("Executing build in %s", self.project.basedir)
if len(tasks) == 1:
self.logger.info("Going to execute task %s", tasks[0])
else:
list_of_tasks = ", ".join(tasks)
self.logger.info("Going to execute tasks: %s", list_of_tasks)
task_execution_summaries = self.execution_manager.execute_execution_plan(
execution_plan,
logger=self.logger,
project=self.project,
reactor=self)
return BuildSummary(self.project, task_execution_summaries)
def execute_task(self, task_name):
execution_plan = self.execution_manager.build_execution_plan(task_name)
self.execution_manager.execute_execution_plan(execution_plan,
logger=self.logger,
project=self.project,
reactor=self)
def override_properties(self, property_overrides):
for property_override in property_overrides:
self.project.set_property(
property_override, property_overrides[property_override])
def log_project_properties(self):
formatted = ""
for key in sorted(self.project.properties):
formatted += "\n%40s : %s" % (key, self.project.get_property(key))
self.logger.debug("Project properties: %s", formatted)
def import_plugin(self, plugin):
self.logger.debug("Loading plugin '%s'", plugin)
plugin_module = self.plugin_loader.load_plugin(self.project, plugin)
self.collect_tasks_and_actions_and_initializers(plugin_module)
def collect_tasks_and_actions_and_initializers(self, project_module):
for name in dir(project_module):
candidate = getattr(project_module, name)
if hasattr(candidate, NAME_ATTRIBUTE):
name = getattr(candidate, NAME_ATTRIBUTE)
elif hasattr(candidate, "__name__"):
name = candidate.__name__
description = getattr(candidate, DESCRIPTION_ATTRIBUTE) if hasattr(
candidate, DESCRIPTION_ATTRIBUTE) else ""
if hasattr(candidate, TASK_ATTRIBUTE) and getattr(candidate, TASK_ATTRIBUTE):
dependencies = getattr(candidate, DEPENDS_ATTRIBUTE) if hasattr(
candidate, DEPENDS_ATTRIBUTE) else None
self.logger.debug("Found task %s", name)
self.execution_manager.register_task(
Task(name, candidate, dependencies, description))
elif hasattr(candidate, ACTION_ATTRIBUTE) and getattr(candidate, ACTION_ATTRIBUTE):
before = getattr(candidate, BEFORE_ATTRIBUTE) if hasattr(
candidate, BEFORE_ATTRIBUTE) else None
after = getattr(candidate, AFTER_ATTRIBUTE) if hasattr(
candidate, AFTER_ATTRIBUTE) else None
only_once = False
if hasattr(candidate, ONLY_ONCE_ATTRIBUTE):
only_once = getattr(candidate, ONLY_ONCE_ATTRIBUTE)
self.logger.debug("Found action %s", name)
self.execution_manager.register_action(
Action(name, candidate, before, after, description, only_once))
elif hasattr(candidate, INITIALIZER_ATTRIBUTE) and getattr(candidate, INITIALIZER_ATTRIBUTE):
environments = []
if hasattr(candidate, ENVIRONMENTS_ATTRIBUTE):
environments = getattr(candidate, ENVIRONMENTS_ATTRIBUTE)
self.execution_manager.register_initializer(
Initializer(name, candidate, environments, description))
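    # For orientation (illustrative only, not part of PyBuilder itself): the attributes
    # inspected above are the ones set by the decorators in pybuilder.core, so a
    # hypothetical build.py fragment like the following would be registered here as a
    # Task named "say_hello" that depends on "prepare":
    #
    #     from pybuilder.core import task, depends, description
    #
    #     @task
    #     @depends("prepare")
    #     @description("Print a greeting")
    #     def say_hello(project, logger):
    #         logger.info("Hello from %s", project.name)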
def apply_project_attributes(self):
self.propagate_property("name")
self.propagate_property("version")
self.propagate_property("default_task")
self.propagate_property("summary")
self.propagate_property("home_page")
self.propagate_property("description")
self.propagate_property("authors")
self.propagate_property("license")
self.propagate_property("url")
def propagate_property(self, property):
if hasattr(self.project_module, property):
value = getattr(self.project_module, property)
setattr(self.project, property, value)
@staticmethod
def load_project_module(project_descriptor):
try:
return imp.load_source("build", project_descriptor)
except ImportError as e:
raise PyBuilderException(
"Error importing project descriptor %s: %s" % (project_descriptor, e))
@staticmethod
def verify_project_directory(project_directory, project_descriptor):
project_directory = os.path.abspath(project_directory)
if not os.path.exists(project_directory):
raise PyBuilderException(
"Project directory does not exist: %s", project_directory)
if not os.path.isdir(project_directory):
raise PyBuilderException(
"Project directory is not a directory: %s", project_directory)
project_descriptor_full_path = os.path.join(
project_directory, project_descriptor)
if not os.path.exists(project_descriptor_full_path):
raise PyBuilderException(
"Project directory does not contain descriptor file: %s",
project_descriptor_full_path)
if not os.path.isfile(project_descriptor_full_path):
raise PyBuilderException(
"Project descriptor is not a file: %s", project_descriptor_full_path)
return project_directory, project_descriptor_full_path
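# Rough usage sketch (hypothetical wiring, not part of this module): a caller such as
# the PyBuilder command line is expected to drive the reactor roughly like this, where
# `logger` and `execution_manager` come from PyBuilder's logging and execution modules
# and "publish" / "ci" are placeholder task and environment names:
#
#     reactor = Reactor(logger, execution_manager)
#     reactor.prepare_build(property_overrides={"verbose": "True"},
#                           project_directory=".",
#                           project_descriptor="build.py")
#     summary = reactor.build(tasks=["publish"], environments=["ci"])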
|
the-stack_0_6332 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.devtools.testing_v1.types import test_execution
from .base import TestExecutionServiceTransport, DEFAULT_CLIENT_INFO
class TestExecutionServiceGrpcTransport(TestExecutionServiceTransport):
"""gRPC backend transport for TestExecutionService.
A service for requesting test executions and querying their
status.
This service is part of Firebase Test Lab. To learn about how to
use the product, and how to integrate it with your system,
visit https://firebase.google.com/docs/test-lab.
Each test execution will wait for available capacity. It will
then be invoked as described. The test may be invoked multiple
times if an infrastructure failure is detected. Results and
other files generated by the test will be stored in an external
storage system.
The TestExecutionService models this behavior using two resource
types:
- TestMatrix: a group of one or more TestExecutions, built by
taking a product of values over a pre-defined set of axes. In
the case of Android Tests, for example, device model and OS
version are two axes of the matrix.
- TestExecution: a single execution of one or more test targets
on a single device. These are created automatically when a
TestMatrix is created.
This service returns any error codes from the canonical error
space (i.e. google.rpc.Code). The errors which may be returned
are specified on each method. In addition, any method may return
UNAVAILABLE or INTERNAL.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'testing.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'testing.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def create_test_matrix(self) -> Callable[
[test_execution.CreateTestMatrixRequest],
test_execution.TestMatrix]:
r"""Return a callable for the create test matrix method over gRPC.
Creates and runs a matrix of tests according to the given
specifications. Unsupported environments will be returned in the
state UNSUPPORTED. A test matrix is limited to use at most 2000
devices in parallel.
May return any of the following canonical error codes:
- PERMISSION_DENIED - if the user is not authorized to write to
project
- INVALID_ARGUMENT - if the request is malformed or if the
matrix tries to use too many simultaneous devices.
Returns:
Callable[[~.CreateTestMatrixRequest],
~.TestMatrix]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_test_matrix' not in self._stubs:
self._stubs['create_test_matrix'] = self.grpc_channel.unary_unary(
'/google.devtools.testing.v1.TestExecutionService/CreateTestMatrix',
request_serializer=test_execution.CreateTestMatrixRequest.serialize,
response_deserializer=test_execution.TestMatrix.deserialize,
)
return self._stubs['create_test_matrix']
@property
def get_test_matrix(self) -> Callable[
[test_execution.GetTestMatrixRequest],
test_execution.TestMatrix]:
r"""Return a callable for the get test matrix method over gRPC.
Checks the status of a test matrix.
May return any of the following canonical error codes:
- PERMISSION_DENIED - if the user is not authorized to read
project
- INVALID_ARGUMENT - if the request is malformed
- NOT_FOUND - if the Test Matrix does not exist
Returns:
Callable[[~.GetTestMatrixRequest],
~.TestMatrix]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_test_matrix' not in self._stubs:
self._stubs['get_test_matrix'] = self.grpc_channel.unary_unary(
'/google.devtools.testing.v1.TestExecutionService/GetTestMatrix',
request_serializer=test_execution.GetTestMatrixRequest.serialize,
response_deserializer=test_execution.TestMatrix.deserialize,
)
return self._stubs['get_test_matrix']
@property
def cancel_test_matrix(self) -> Callable[
[test_execution.CancelTestMatrixRequest],
test_execution.CancelTestMatrixResponse]:
r"""Return a callable for the cancel test matrix method over gRPC.
Cancels unfinished test executions in a test matrix. This call
returns immediately and cancellation proceeds asynchronously. If
the matrix is already final, this operation will have no effect.
May return any of the following canonical error codes:
- PERMISSION_DENIED - if the user is not authorized to read
project
- INVALID_ARGUMENT - if the request is malformed
- NOT_FOUND - if the Test Matrix does not exist
Returns:
Callable[[~.CancelTestMatrixRequest],
~.CancelTestMatrixResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'cancel_test_matrix' not in self._stubs:
self._stubs['cancel_test_matrix'] = self.grpc_channel.unary_unary(
'/google.devtools.testing.v1.TestExecutionService/CancelTestMatrix',
request_serializer=test_execution.CancelTestMatrixRequest.serialize,
response_deserializer=test_execution.CancelTestMatrixResponse.deserialize,
)
return self._stubs['cancel_test_matrix']
def close(self):
self.grpc_channel.close()
__all__ = (
'TestExecutionServiceGrpcTransport',
)
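# Hedged usage sketch (illustrative only; a real call needs Google Cloud credentials
# and a project with Firebase Test Lab enabled; ids and field values below are
# hypothetical, field names assumed from the request messages in test_execution):
#
#     transport = TestExecutionServiceGrpcTransport()
#     request = test_execution.GetTestMatrixRequest(
#         project_id="my-project", test_matrix_id="matrix-123")
#     matrix = transport.get_test_matrix(request)
#     transport.close()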
|
the-stack_0_6335 | """
sphinx.domains.c
~~~~~~~~~~~~~~~~
The C language domain.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from typing import (Any, Callable, Dict, Generator, Iterator, List, Optional, Tuple, TypeVar,
Union, cast)
from docutils import nodes
from docutils.nodes import Element, Node, TextElement, system_message
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.addnodes import pending_xref
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.deprecation import RemovedInSphinx60Warning
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.environment import BuildEnvironment
from sphinx.locale import _, __
from sphinx.roles import SphinxRole, XRefRole
from sphinx.transforms import SphinxTransform
from sphinx.transforms.post_transforms import ReferencesResolver
from sphinx.util import logging
from sphinx.util.cfamily import (ASTAttribute, ASTBaseBase, ASTBaseParenExprList, BaseParser,
DefinitionError, NoOldIdError, StringifyTransform,
UnsupportedMultiCharacterCharLiteral, anon_identifier_re,
binary_literal_re, char_literal_re, float_literal_re,
float_literal_suffix_re, hex_literal_re, identifier_re,
integer_literal_re, integers_literal_suffix_re,
octal_literal_re, verify_description_mode)
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import make_refnode
from sphinx.util.typing import OptionSpec
logger = logging.getLogger(__name__)
T = TypeVar('T')
DeclarationType = Union[
"ASTStruct", "ASTUnion", "ASTEnum", "ASTEnumerator",
"ASTType", "ASTTypeWithInit", "ASTMacro",
]
# https://en.cppreference.com/w/c/keyword
_keywords = [
'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do', 'double',
'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', 'inline', 'int', 'long',
'register', 'restrict', 'return', 'short', 'signed', 'sizeof', 'static', 'struct',
'switch', 'typedef', 'union', 'unsigned', 'void', 'volatile', 'while',
'_Alignas', '_Alignof', '_Atomic', '_Bool', '_Complex',
'_Decimal32', '_Decimal64', '_Decimal128',
'_Generic', '_Imaginary', '_Noreturn', '_Static_assert', '_Thread_local',
]
# These are only keyword'y when the corresponding headers are included.
# They are used as default value for c_extra_keywords.
_macroKeywords = [
'alignas', 'alignof', 'bool', 'complex', 'imaginary', 'noreturn', 'static_assert',
'thread_local',
]
# these are ordered by precedence
_expression_bin_ops = [
['||', 'or'],
['&&', 'and'],
['|', 'bitor'],
['^', 'xor'],
['&', 'bitand'],
['==', '!=', 'not_eq'],
['<=', '>=', '<', '>'],
['<<', '>>'],
['+', '-'],
['*', '/', '%'],
['.*', '->*']
]
_expression_unary_ops = ["++", "--", "*", "&", "+", "-", "!", "not", "~", "compl"]
_expression_assignment_ops = ["=", "*=", "/=", "%=", "+=", "-=",
">>=", "<<=", "&=", "and_eq", "^=", "xor_eq", "|=", "or_eq"]
_max_id = 1
_id_prefix = [None, 'c.', 'Cv2.']
# Ids are used in lookup keys which are used across pickled files,
# so when _max_id changes, make sure to update the ENV_VERSION.
_string_re = re.compile(r"[LuU8]?('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
# bool, complex, and imaginary are macro "keywords", so they are handled separately
_simple_type_specifiers_re = re.compile(r"""(?x)
\b(
void|_Bool
|signed|unsigned
|short|long
|char
|int
|__uint128|__int128
|__int(8|16|32|64|128) # extension
|float|double
|_Decimal(32|64|128)
|_Complex|_Imaginary
|__float80|_Float64x|__float128|_Float128|__ibm128 # extension
|__fp16 # extension
|_Sat|_Fract|fract|_Accum|accum # extension
)\b
""")
class _DuplicateSymbolError(Exception):
def __init__(self, symbol: "Symbol", declaration: "ASTDeclaration") -> None:
assert symbol
assert declaration
self.symbol = symbol
self.declaration = declaration
def __str__(self) -> str:
return "Internal C duplicate symbol error:\n%s" % self.symbol.dump(0)
class ASTBase(ASTBaseBase):
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
raise NotImplementedError(repr(self))
# Names
################################################################################
class ASTIdentifier(ASTBaseBase):
def __init__(self, identifier: str) -> None:
assert identifier is not None
assert len(identifier) != 0
self.identifier = identifier
def __eq__(self, other: Any) -> bool:
return type(other) is ASTIdentifier and self.identifier == other.identifier
def is_anon(self) -> bool:
return self.identifier[0] == '@'
# and this is where we finally make a difference between __str__ and the display string
def __str__(self) -> str:
return self.identifier
def get_display_string(self) -> str:
return "[anonymous]" if self.is_anon() else self.identifier
def describe_signature(self, signode: TextElement, mode: str, env: "BuildEnvironment",
prefix: str, symbol: "Symbol") -> None:
# note: slightly different signature of describe_signature due to the prefix
verify_description_mode(mode)
if self.is_anon():
node = addnodes.desc_sig_name(text="[anonymous]")
else:
node = addnodes.desc_sig_name(self.identifier, self.identifier)
if mode == 'markType':
targetText = prefix + self.identifier
pnode = addnodes.pending_xref('', refdomain='c',
reftype='identifier',
reftarget=targetText, modname=None,
classname=None)
pnode['c:parent_key'] = symbol.get_lookup_key()
pnode += node
signode += pnode
elif mode == 'lastIsName':
nameNode = addnodes.desc_name()
nameNode += node
signode += nameNode
elif mode == 'noneIsName':
signode += node
else:
raise Exception('Unknown description mode: %s' % mode)
class ASTNestedName(ASTBase):
def __init__(self, names: List[ASTIdentifier], rooted: bool) -> None:
assert len(names) > 0
self.names = names
self.rooted = rooted
@property
def name(self) -> "ASTNestedName":
return self
def get_id(self, version: int) -> str:
return '.'.join(str(n) for n in self.names)
def _stringify(self, transform: StringifyTransform) -> str:
res = '.'.join(transform(n) for n in self.names)
if self.rooted:
return '.' + res
else:
return res
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
# just print the name part, with template args, not template params
if mode == 'noneIsName':
if self.rooted:
assert False, "Can this happen?" # TODO
signode += nodes.Text('.')
for i in range(len(self.names)):
if i != 0:
assert False, "Can this happen?" # TODO
signode += nodes.Text('.')
n = self.names[i]
n.describe_signature(signode, mode, env, '', symbol)
elif mode == 'param':
assert not self.rooted, str(self)
assert len(self.names) == 1
self.names[0].describe_signature(signode, 'noneIsName', env, '', symbol)
elif mode == 'markType' or mode == 'lastIsName' or mode == 'markName':
# Each element should be a pending xref targeting the complete
# prefix.
prefix = ''
first = True
names = self.names[:-1] if mode == 'lastIsName' else self.names
# If lastIsName, then wrap all of the prefix in a desc_addname,
# else append directly to signode.
# TODO: also for C?
# NOTE: Breathe previously relied on the prefix being in the desc_addname node,
# so it can remove it in inner declarations.
dest = signode
if mode == 'lastIsName':
dest = addnodes.desc_addname()
if self.rooted:
prefix += '.'
if mode == 'lastIsName' and len(names) == 0:
signode += addnodes.desc_sig_punctuation('.', '.')
else:
dest += addnodes.desc_sig_punctuation('.', '.')
for i in range(len(names)):
ident = names[i]
if not first:
dest += addnodes.desc_sig_punctuation('.', '.')
prefix += '.'
first = False
txt_ident = str(ident)
if txt_ident != '':
ident.describe_signature(dest, 'markType', env, prefix, symbol)
prefix += txt_ident
if mode == 'lastIsName':
if len(self.names) > 1:
dest += addnodes.desc_sig_punctuation('.', '.')
signode += dest
self.names[-1].describe_signature(signode, mode, env, '', symbol)
else:
raise Exception('Unknown description mode: %s' % mode)
################################################################################
# Expressions
################################################################################
class ASTExpression(ASTBase):
pass
# Primary expressions
################################################################################
class ASTLiteral(ASTExpression):
pass
class ASTBooleanLiteral(ASTLiteral):
def __init__(self, value: bool) -> None:
self.value = value
def _stringify(self, transform: StringifyTransform) -> str:
if self.value:
return 'true'
else:
return 'false'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode += addnodes.desc_sig_keyword(txt, txt)
class ASTNumberLiteral(ASTLiteral):
def __init__(self, data: str) -> None:
self.data = data
def _stringify(self, transform: StringifyTransform) -> str:
return self.data
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode += addnodes.desc_sig_literal_number(txt, txt)
class ASTCharLiteral(ASTLiteral):
def __init__(self, prefix: str, data: str) -> None:
self.prefix = prefix # may be None when no prefix
self.data = data
decoded = data.encode().decode('unicode-escape')
if len(decoded) == 1:
self.value = ord(decoded)
else:
raise UnsupportedMultiCharacterCharLiteral(decoded)
def _stringify(self, transform: StringifyTransform) -> str:
if self.prefix is None:
return "'" + self.data + "'"
else:
return self.prefix + "'" + self.data + "'"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode += addnodes.desc_sig_literal_char(txt, txt)
class ASTStringLiteral(ASTLiteral):
def __init__(self, data: str) -> None:
self.data = data
def _stringify(self, transform: StringifyTransform) -> str:
return self.data
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode += addnodes.desc_sig_literal_string(txt, txt)
class ASTIdExpression(ASTExpression):
def __init__(self, name: ASTNestedName):
# note: this class is basically to cast a nested name as an expression
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def get_id(self, version: int) -> str:
return self.name.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.name.describe_signature(signode, mode, env, symbol)
class ASTParenExpr(ASTExpression):
def __init__(self, expr):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return '(' + transform(self.expr) + ')'
def get_id(self, version: int) -> str:
return self.expr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_punctuation('(', '(')
self.expr.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
# Postfix expressions
################################################################################
class ASTPostfixOp(ASTBase):
pass
class ASTPostfixCallExpr(ASTPostfixOp):
def __init__(self, lst: Union["ASTParenExprList", "ASTBracedInitList"]) -> None:
self.lst = lst
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.lst)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.lst.describe_signature(signode, mode, env, symbol)
class ASTPostfixArray(ASTPostfixOp):
def __init__(self, expr: ASTExpression) -> None:
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return '[' + transform(self.expr) + ']'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_punctuation('[', '[')
self.expr.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(']', ']')
class ASTPostfixInc(ASTPostfixOp):
def _stringify(self, transform: StringifyTransform) -> str:
return '++'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_operator('++', '++')
class ASTPostfixDec(ASTPostfixOp):
def _stringify(self, transform: StringifyTransform) -> str:
return '--'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_operator('--', '--')
class ASTPostfixMemberOfPointer(ASTPostfixOp):
def __init__(self, name):
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return '->' + transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_operator('->', '->')
self.name.describe_signature(signode, 'noneIsName', env, symbol)
class ASTPostfixExpr(ASTExpression):
def __init__(self, prefix: ASTExpression, postFixes: List[ASTPostfixOp]):
self.prefix = prefix
self.postFixes = postFixes
def _stringify(self, transform: StringifyTransform) -> str:
res = [transform(self.prefix)]
for p in self.postFixes:
res.append(transform(p))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.prefix.describe_signature(signode, mode, env, symbol)
for p in self.postFixes:
p.describe_signature(signode, mode, env, symbol)
# Unary expressions
################################################################################
class ASTUnaryOpExpr(ASTExpression):
def __init__(self, op: str, expr: ASTExpression):
self.op = op
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
if self.op[0] in 'cn':
return self.op + " " + transform(self.expr)
else:
return self.op + transform(self.expr)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
if self.op[0] in 'cn':
signode += addnodes.desc_sig_keyword(self.op, self.op)
signode += addnodes.desc_sig_space()
else:
signode += addnodes.desc_sig_operator(self.op, self.op)
self.expr.describe_signature(signode, mode, env, symbol)
class ASTSizeofType(ASTExpression):
def __init__(self, typ):
self.typ = typ
def _stringify(self, transform: StringifyTransform) -> str:
return "sizeof(" + transform(self.typ) + ")"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_keyword('sizeof', 'sizeof')
signode += addnodes.desc_sig_punctuation('(', '(')
self.typ.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
class ASTSizeofExpr(ASTExpression):
def __init__(self, expr: ASTExpression):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return "sizeof " + transform(self.expr)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_keyword('sizeof', 'sizeof')
signode += addnodes.desc_sig_space()
self.expr.describe_signature(signode, mode, env, symbol)
class ASTAlignofExpr(ASTExpression):
def __init__(self, typ: "ASTType"):
self.typ = typ
def _stringify(self, transform: StringifyTransform) -> str:
return "alignof(" + transform(self.typ) + ")"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_keyword('alignof', 'alignof')
signode += addnodes.desc_sig_punctuation('(', '(')
self.typ.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
# Other expressions
################################################################################
class ASTCastExpr(ASTExpression):
def __init__(self, typ: "ASTType", expr: ASTExpression):
self.typ = typ
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
res.append(transform(self.typ))
res.append(')')
res.append(transform(self.expr))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_punctuation('(', '(')
self.typ.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
self.expr.describe_signature(signode, mode, env, symbol)
class ASTBinOpExpr(ASTBase):
def __init__(self, exprs: List[ASTExpression], ops: List[str]):
assert len(exprs) > 0
assert len(exprs) == len(ops) + 1
self.exprs = exprs
self.ops = ops
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
res.append(' ')
res.append(self.ops[i - 1])
res.append(' ')
res.append(transform(self.exprs[i]))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.exprs[0].describe_signature(signode, mode, env, symbol)
for i in range(1, len(self.exprs)):
signode += addnodes.desc_sig_space()
op = self.ops[i - 1]
if ord(op[0]) >= ord('a') and ord(op[0]) <= ord('z'):
signode += addnodes.desc_sig_keyword(op, op)
else:
signode += addnodes.desc_sig_operator(op, op)
signode += addnodes.desc_sig_space()
self.exprs[i].describe_signature(signode, mode, env, symbol)
class ASTAssignmentExpr(ASTExpression):
def __init__(self, exprs: List[ASTExpression], ops: List[str]):
assert len(exprs) > 0
assert len(exprs) == len(ops) + 1
self.exprs = exprs
self.ops = ops
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
res.append(' ')
res.append(self.ops[i - 1])
res.append(' ')
res.append(transform(self.exprs[i]))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.exprs[0].describe_signature(signode, mode, env, symbol)
for i in range(1, len(self.exprs)):
signode += addnodes.desc_sig_space()
op = self.ops[i - 1]
if ord(op[0]) >= ord('a') and ord(op[0]) <= ord('z'):
signode += addnodes.desc_sig_keyword(op, op)
else:
signode += addnodes.desc_sig_operator(op, op)
signode += addnodes.desc_sig_space()
self.exprs[i].describe_signature(signode, mode, env, symbol)
class ASTFallbackExpr(ASTExpression):
def __init__(self, expr: str):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return self.expr
def get_id(self, version: int) -> str:
return str(self.expr)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += nodes.literal(self.expr, self.expr)
################################################################################
# Types
################################################################################
class ASTTrailingTypeSpec(ASTBase):
pass
class ASTTrailingTypeSpecFundamental(ASTTrailingTypeSpec):
def __init__(self, names: List[str]) -> None:
assert len(names) != 0
self.names = names
def _stringify(self, transform: StringifyTransform) -> str:
return ' '.join(self.names)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
first = True
for n in self.names:
if not first:
signode += addnodes.desc_sig_space()
else:
first = False
signode += addnodes.desc_sig_keyword_type(n, n)
class ASTTrailingTypeSpecName(ASTTrailingTypeSpec):
def __init__(self, prefix: str, nestedName: ASTNestedName) -> None:
self.prefix = prefix
self.nestedName = nestedName
@property
def name(self) -> ASTNestedName:
return self.nestedName
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.prefix:
res.append(self.prefix)
res.append(' ')
res.append(transform(self.nestedName))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
if self.prefix:
signode += addnodes.desc_sig_keyword(self.prefix, self.prefix)
signode += addnodes.desc_sig_space()
self.nestedName.describe_signature(signode, mode, env, symbol=symbol)
class ASTFunctionParameter(ASTBase):
def __init__(self, arg: "ASTTypeWithInit", ellipsis: bool = False) -> None:
self.arg = arg
self.ellipsis = ellipsis
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=False)
def _stringify(self, transform: StringifyTransform) -> str:
if self.ellipsis:
return '...'
else:
return transform(self.arg)
def describe_signature(self, signode: Any, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.ellipsis:
signode += addnodes.desc_sig_punctuation('...', '...')
else:
self.arg.describe_signature(signode, mode, env, symbol=symbol)
class ASTParameters(ASTBase):
def __init__(self, args: List[ASTFunctionParameter], attrs: List[ASTAttribute]) -> None:
self.args = args
self.attrs = attrs
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.args
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append('(')
first = True
for a in self.args:
if not first:
res.append(', ')
first = False
res.append(str(a))
res.append(')')
for attr in self.attrs:
res.append(' ')
res.append(transform(attr))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
# only use the desc_parameterlist for the outer list, not for inner lists
if mode == 'lastIsName':
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
param = addnodes.desc_parameter('', '', noemph=True)
arg.describe_signature(param, 'param', env, symbol=symbol)
paramlist += param
signode += paramlist
else:
signode += addnodes.desc_sig_punctuation('(', '(')
first = True
for arg in self.args:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
first = False
arg.describe_signature(signode, 'markType', env, symbol=symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
for attr in self.attrs:
signode += addnodes.desc_sig_space()
attr.describe_signature(signode)
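# For example, parameters parsed from "(int a, int b)" stringify back to
# "(int a, int b)"; in 'lastIsName' mode the whole list becomes a
# desc_parameterlist with one desc_parameter per argument, otherwise the
# punctuation and arguments are emitted inline.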
class ASTDeclSpecsSimple(ASTBaseBase):
def __init__(self, storage: str, threadLocal: str, inline: bool,
restrict: bool, volatile: bool, const: bool, attrs: List[Any]) -> None:
self.storage = storage
self.threadLocal = threadLocal
self.inline = inline
self.restrict = restrict
self.volatile = volatile
self.const = const
self.attrs = attrs
    def mergeWith(self, other: "ASTDeclSpecsSimple") -> "ASTDeclSpecsSimple":
        if not other:
            return self
        # Pass the merged values in the same order as __init__ expects:
        # (storage, threadLocal, inline, restrict, volatile, const, attrs).
        return ASTDeclSpecsSimple(self.storage or other.storage,
                                  self.threadLocal or other.threadLocal,
                                  self.inline or other.inline,
                                  self.restrict or other.restrict,
                                  self.volatile or other.volatile,
                                  self.const or other.const,
                                  self.attrs + other.attrs)
def _stringify(self, transform: StringifyTransform) -> str:
res: List[str] = []
res.extend(transform(attr) for attr in self.attrs)
if self.storage:
res.append(self.storage)
if self.threadLocal:
res.append(self.threadLocal)
if self.inline:
res.append('inline')
if self.restrict:
res.append('restrict')
if self.volatile:
res.append('volatile')
if self.const:
res.append('const')
return ' '.join(res)
def describe_signature(self, modifiers: List[Node]) -> None:
def _add(modifiers: List[Node], text: str) -> None:
if len(modifiers) > 0:
modifiers.append(addnodes.desc_sig_space())
modifiers.append(addnodes.desc_sig_keyword(text, text))
        for attr in self.attrs:
            if len(modifiers) > 0:
                modifiers.append(addnodes.desc_sig_space())
            # describe_signature appends its nodes into the passed container and
            # returns None, so do not append its return value.
            attr.describe_signature(modifiers)
if self.storage:
_add(modifiers, self.storage)
if self.threadLocal:
_add(modifiers, self.threadLocal)
if self.inline:
_add(modifiers, 'inline')
if self.restrict:
_add(modifiers, 'restrict')
if self.volatile:
_add(modifiers, 'volatile')
if self.const:
_add(modifiers, 'const')
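# Note that _stringify above emits the recognised specifiers in a fixed order
# (attributes, storage, thread-local, inline, restrict, volatile, const),
# regardless of the order in which they were written in the source.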
class ASTDeclSpecs(ASTBase):
def __init__(self, outer: str,
leftSpecs: ASTDeclSpecsSimple,
rightSpecs: ASTDeclSpecsSimple,
trailing: ASTTrailingTypeSpec) -> None:
# leftSpecs and rightSpecs are used for output
# allSpecs are used for id generation TODO: remove?
self.outer = outer
self.leftSpecs = leftSpecs
self.rightSpecs = rightSpecs
self.allSpecs = self.leftSpecs.mergeWith(self.rightSpecs)
self.trailingTypeSpec = trailing
def _stringify(self, transform: StringifyTransform) -> str:
res: List[str] = []
l = transform(self.leftSpecs)
if len(l) > 0:
res.append(l)
if self.trailingTypeSpec:
if len(res) > 0:
res.append(" ")
res.append(transform(self.trailingTypeSpec))
r = str(self.rightSpecs)
if len(r) > 0:
if len(res) > 0:
res.append(" ")
res.append(r)
return "".join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
modifiers: List[Node] = []
self.leftSpecs.describe_signature(modifiers)
for m in modifiers:
signode += m
if self.trailingTypeSpec:
if len(modifiers) > 0:
signode += addnodes.desc_sig_space()
self.trailingTypeSpec.describe_signature(signode, mode, env,
symbol=symbol)
modifiers = []
self.rightSpecs.describe_signature(modifiers)
if len(modifiers) > 0:
signode += addnodes.desc_sig_space()
for m in modifiers:
signode += m
# Declarator
################################################################################
class ASTArray(ASTBase):
def __init__(self, static: bool, const: bool, volatile: bool, restrict: bool,
vla: bool, size: ASTExpression):
self.static = static
self.const = const
self.volatile = volatile
self.restrict = restrict
self.vla = vla
self.size = size
if vla:
assert size is None
if size is not None:
assert not vla
def _stringify(self, transform: StringifyTransform) -> str:
el = []
if self.static:
el.append('static')
if self.restrict:
el.append('restrict')
if self.volatile:
el.append('volatile')
if self.const:
el.append('const')
if self.vla:
return '[' + ' '.join(el) + '*]'
elif self.size:
el.append(transform(self.size))
return '[' + ' '.join(el) + ']'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('[', '[')
addSpace = False
def _add(signode: TextElement, text: str) -> bool:
if addSpace:
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_keyword(text, text)
return True
if self.static:
addSpace = _add(signode, 'static')
if self.restrict:
addSpace = _add(signode, 'restrict')
if self.volatile:
addSpace = _add(signode, 'volatile')
if self.const:
addSpace = _add(signode, 'const')
if self.vla:
signode += addnodes.desc_sig_punctuation('*', '*')
elif self.size:
if addSpace:
signode += addnodes.desc_sig_space()
self.size.describe_signature(signode, 'markType', env, symbol)
signode += addnodes.desc_sig_punctuation(']', ']')
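# For example, the array declarators "[static const 10]", "[*]" (a VLA of
# unspecified size), and "[]" all round-trip through _stringify in exactly
# that textual form.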
class ASTDeclarator(ASTBase):
@property
def name(self) -> ASTNestedName:
raise NotImplementedError(repr(self))
@property
def function_params(self) -> List[ASTFunctionParameter]:
raise NotImplementedError(repr(self))
def require_space_after_declSpecs(self) -> bool:
raise NotImplementedError(repr(self))
class ASTDeclaratorNameParam(ASTDeclarator):
def __init__(self, declId: ASTNestedName,
arrayOps: List[ASTArray], param: ASTParameters) -> None:
self.declId = declId
self.arrayOps = arrayOps
self.param = param
@property
def name(self) -> ASTNestedName:
return self.declId
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.param.function_params
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
for op in self.arrayOps:
res.append(transform(op))
if self.param:
res.append(transform(self.param))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
for op in self.arrayOps:
op.describe_signature(signode, mode, env, symbol)
if self.param:
self.param.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorNameBitField(ASTDeclarator):
def __init__(self, declId: ASTNestedName, size: ASTExpression):
self.declId = declId
self.size = size
@property
def name(self) -> ASTNestedName:
return self.declId
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
res.append(" : ")
res.append(transform(self.size))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_punctuation(':', ':')
signode += addnodes.desc_sig_space()
self.size.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorPtr(ASTDeclarator):
def __init__(self, next: ASTDeclarator, restrict: bool, volatile: bool, const: bool,
attrs: Any) -> None:
assert next
self.next = next
self.restrict = restrict
self.volatile = volatile
self.const = const
self.attrs = attrs
@property
def name(self) -> ASTNestedName:
return self.next.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.next.function_params
def require_space_after_declSpecs(self) -> bool:
return self.const or self.volatile or self.restrict or \
len(self.attrs) > 0 or \
self.next.require_space_after_declSpecs()
def _stringify(self, transform: StringifyTransform) -> str:
res = ['*']
for a in self.attrs:
res.append(transform(a))
if len(self.attrs) > 0 and (self.restrict or self.volatile or self.const):
res.append(' ')
if self.restrict:
res.append('restrict')
if self.volatile:
if self.restrict:
res.append(' ')
res.append('volatile')
if self.const:
if self.restrict or self.volatile:
res.append(' ')
res.append('const')
if self.const or self.volatile or self.restrict or len(self.attrs) > 0:
if self.next.require_space_after_declSpecs():
res.append(' ')
res.append(transform(self.next))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('*', '*')
for a in self.attrs:
a.describe_signature(signode)
if len(self.attrs) > 0 and (self.restrict or self.volatile or self.const):
signode += addnodes.desc_sig_space()
def _add_anno(signode: TextElement, text: str) -> None:
signode += addnodes.desc_sig_keyword(text, text)
if self.restrict:
_add_anno(signode, 'restrict')
if self.volatile:
if self.restrict:
signode += addnodes.desc_sig_space()
_add_anno(signode, 'volatile')
if self.const:
if self.restrict or self.volatile:
signode += addnodes.desc_sig_space()
_add_anno(signode, 'const')
if self.const or self.volatile or self.restrict or len(self.attrs) > 0:
if self.next.require_space_after_declSpecs():
signode += addnodes.desc_sig_space()
self.next.describe_signature(signode, mode, env, symbol)
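# For example, the declarator of "int *restrict const p" is an ASTDeclaratorPtr
# (restrict=True, const=True) whose `next` is the ASTDeclaratorNameParam for
# "p"; it stringifies back to "*restrict const p".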
class ASTDeclaratorParen(ASTDeclarator):
def __init__(self, inner: ASTDeclarator, next: ASTDeclarator) -> None:
assert inner
assert next
self.inner = inner
self.next = next
# TODO: we assume the name and params are in inner
@property
def name(self) -> ASTNestedName:
return self.inner.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.inner.function_params
def require_space_after_declSpecs(self) -> bool:
return True
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
res.append(transform(self.inner))
res.append(')')
res.append(transform(self.next))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('(', '(')
self.inner.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
self.next.describe_signature(signode, "noneIsName", env, symbol)
# Initializer
################################################################################
class ASTParenExprList(ASTBaseParenExprList):
def __init__(self, exprs: List[ASTExpression]) -> None:
self.exprs = exprs
def _stringify(self, transform: StringifyTransform) -> str:
exprs = [transform(e) for e in self.exprs]
return '(%s)' % ', '.join(exprs)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('(', '(')
first = True
for e in self.exprs:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
else:
first = False
e.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
class ASTBracedInitList(ASTBase):
def __init__(self, exprs: List[ASTExpression], trailingComma: bool) -> None:
self.exprs = exprs
self.trailingComma = trailingComma
def _stringify(self, transform: StringifyTransform) -> str:
exprs = [transform(e) for e in self.exprs]
trailingComma = ',' if self.trailingComma else ''
return '{%s%s}' % (', '.join(exprs), trailingComma)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('{', '{')
first = True
for e in self.exprs:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
else:
first = False
e.describe_signature(signode, mode, env, symbol)
if self.trailingComma:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_punctuation('}', '}')
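# For example, a braced list parsed from "{1, 2,}" keeps trailingComma=True and
# stringifies back to "{1, 2,}"; without the trailing comma it stringifies as
# "{1, 2}".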
class ASTInitializer(ASTBase):
def __init__(self, value: Union[ASTBracedInitList, ASTExpression],
hasAssign: bool = True) -> None:
self.value = value
self.hasAssign = hasAssign
def _stringify(self, transform: StringifyTransform) -> str:
val = transform(self.value)
if self.hasAssign:
return ' = ' + val
else:
return val
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.hasAssign:
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_punctuation('=', '=')
signode += addnodes.desc_sig_space()
self.value.describe_signature(signode, 'markType', env, symbol)
class ASTType(ASTBase):
def __init__(self, declSpecs: ASTDeclSpecs, decl: ASTDeclarator) -> None:
assert declSpecs
assert decl
self.declSpecs = declSpecs
self.decl = decl
@property
def name(self) -> ASTNestedName:
return self.decl.name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.decl.function_params
def _stringify(self, transform: StringifyTransform) -> str:
res = []
declSpecs = transform(self.declSpecs)
res.append(declSpecs)
if self.decl.require_space_after_declSpecs() and len(declSpecs) > 0:
res.append(' ')
res.append(transform(self.decl))
return ''.join(res)
def get_type_declaration_prefix(self) -> str:
if self.declSpecs.trailingTypeSpec:
return 'typedef'
else:
return 'type'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.declSpecs.describe_signature(signode, 'markType', env, symbol)
if (self.decl.require_space_after_declSpecs() and
len(str(self.declSpecs)) > 0):
signode += addnodes.desc_sig_space()
# for parameters that don't really declare new names we get 'markType',
# this should not be propagated, but be 'noneIsName'.
if mode == 'markType':
mode = 'noneIsName'
self.decl.describe_signature(signode, mode, env, symbol)
class ASTTypeWithInit(ASTBase):
def __init__(self, type: ASTType, init: ASTInitializer) -> None:
self.type = type
self.init = init
@property
def name(self) -> ASTNestedName:
return self.type.name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return self.type.get_id(version, objectType, symbol)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.type))
if self.init:
res.append(transform(self.init))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
self.init.describe_signature(signode, mode, env, symbol)
class ASTMacroParameter(ASTBase):
def __init__(self, arg: ASTNestedName, ellipsis: bool = False,
variadic: bool = False) -> None:
self.arg = arg
self.ellipsis = ellipsis
self.variadic = variadic
def _stringify(self, transform: StringifyTransform) -> str:
if self.ellipsis:
return '...'
elif self.variadic:
return transform(self.arg) + '...'
else:
return transform(self.arg)
def describe_signature(self, signode: Any, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.ellipsis:
signode += addnodes.desc_sig_punctuation('...', '...')
elif self.variadic:
name = str(self)
signode += addnodes.desc_sig_name(name, name)
else:
self.arg.describe_signature(signode, mode, env, symbol=symbol)
class ASTMacro(ASTBase):
def __init__(self, ident: ASTNestedName, args: List[ASTMacroParameter]) -> None:
self.ident = ident
self.args = args
@property
def name(self) -> ASTNestedName:
return self.ident
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.ident))
if self.args is not None:
res.append('(')
first = True
for arg in self.args:
if not first:
res.append(', ')
first = False
res.append(transform(arg))
res.append(')')
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.ident.describe_signature(signode, mode, env, symbol)
if self.args is None:
return
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
param = addnodes.desc_parameter('', '', noemph=True)
arg.describe_signature(param, 'param', env, symbol=symbol)
paramlist += param
signode += paramlist
class ASTStruct(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTUnion(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTEnum(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTEnumerator(ASTBase):
def __init__(self, name: ASTNestedName, init: ASTInitializer) -> None:
self.name = name
self.init = init
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.name))
if self.init:
res.append(transform(self.init))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol)
if self.init:
self.init.describe_signature(signode, 'markType', env, symbol)
class ASTDeclaration(ASTBaseBase):
def __init__(self, objectType: str, directiveType: str,
declaration: Union[DeclarationType, ASTFunctionParameter],
semicolon: bool = False) -> None:
self.objectType = objectType
self.directiveType = directiveType
self.declaration = declaration
self.semicolon = semicolon
self.symbol: Symbol = None
# set by CObject._add_enumerator_to_parent
self.enumeratorScopedSymbol: Symbol = None
def clone(self) -> "ASTDeclaration":
return ASTDeclaration(self.objectType, self.directiveType,
self.declaration.clone(), self.semicolon)
@property
def name(self) -> ASTNestedName:
decl = cast(DeclarationType, self.declaration)
return decl.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
if self.objectType != 'function':
return None
decl = cast(ASTType, self.declaration)
return decl.function_params
def get_id(self, version: int, prefixed: bool = True) -> str:
if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
return self.enumeratorScopedSymbol.declaration.get_id(version, prefixed)
id_ = self.declaration.get_id(version, self.objectType, self.symbol)
if prefixed:
return _id_prefix[version] + id_
else:
return id_
def get_newest_id(self) -> str:
return self.get_id(_max_id, True)
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.declaration)
if self.semicolon:
res += ';'
return res
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", options: Dict) -> None:
verify_description_mode(mode)
assert self.symbol
# The caller of the domain added a desc_signature node.
# Always enable multiline:
signode['is_multiline'] = True
# Put each line in a desc_signature_line node.
mainDeclNode = addnodes.desc_signature_line()
mainDeclNode.sphinx_line_type = 'declarator'
mainDeclNode['add_permalink'] = not self.symbol.isRedeclaration
signode += mainDeclNode
if self.objectType == 'member':
pass
elif self.objectType == 'function':
pass
elif self.objectType == 'macro':
pass
elif self.objectType == 'struct':
mainDeclNode += addnodes.desc_sig_keyword('struct', 'struct')
mainDeclNode += addnodes.desc_sig_space()
elif self.objectType == 'union':
mainDeclNode += addnodes.desc_sig_keyword('union', 'union')
mainDeclNode += addnodes.desc_sig_space()
elif self.objectType == 'enum':
mainDeclNode += addnodes.desc_sig_keyword('enum', 'enum')
mainDeclNode += addnodes.desc_sig_space()
elif self.objectType == 'enumerator':
mainDeclNode += addnodes.desc_sig_keyword('enumerator', 'enumerator')
mainDeclNode += addnodes.desc_sig_space()
elif self.objectType == 'type':
decl = cast(ASTType, self.declaration)
prefix = decl.get_type_declaration_prefix()
mainDeclNode += addnodes.desc_sig_keyword(prefix, prefix)
mainDeclNode += addnodes.desc_sig_space()
else:
assert False
self.declaration.describe_signature(mainDeclNode, mode, env, self.symbol)
if self.semicolon:
mainDeclNode += addnodes.desc_sig_punctuation(';', ';')
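# For example, a ".. c:struct:: Point" declaration is rendered as
# "struct Point" with a leading keyword node, while member, function, and macro
# declarations get no keyword prefix, and type declarations are prefixed with
# "typedef" or "type" depending on ASTType.get_type_declaration_prefix().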
class SymbolLookupResult:
def __init__(self, symbols: Iterator["Symbol"], parentSymbol: "Symbol",
ident: ASTIdentifier) -> None:
self.symbols = symbols
self.parentSymbol = parentSymbol
self.ident = ident
class LookupKey:
def __init__(self, data: List[Tuple[ASTIdentifier, str]]) -> None:
self.data = data
def __str__(self) -> str:
return '[{}]'.format(', '.join("({}, {})".format(
ident, id_) for ident, id_ in self.data))
class Symbol:
debug_indent = 0
debug_indent_string = " "
debug_lookup = False
debug_show_tree = False
def __copy__(self):
assert False # shouldn't happen
def __deepcopy__(self, memo):
if self.parent:
assert False # shouldn't happen
else:
# the domain base class makes a copy of the initial data, which is fine
return Symbol(None, None, None, None, None)
@staticmethod
def debug_print(*args: Any) -> None:
print(Symbol.debug_indent_string * Symbol.debug_indent, end="")
print(*args)
def _assert_invariants(self) -> None:
if not self.parent:
            # parent == None means global scope, which never carries a declaration
assert not self.declaration
assert not self.docname
else:
if self.declaration:
assert self.docname
def __setattr__(self, key: str, value: Any) -> None:
if key == "children":
assert False
else:
return super().__setattr__(key, value)
def __init__(self, parent: "Symbol", ident: ASTIdentifier,
declaration: ASTDeclaration, docname: str, line: int) -> None:
self.parent = parent
# declarations in a single directive are linked together
self.siblingAbove: Symbol = None
self.siblingBelow: Symbol = None
self.ident = ident
self.declaration = declaration
self.docname = docname
self.line = line
self.isRedeclaration = False
self._assert_invariants()
# Remember to modify Symbol.remove if modifications to the parent change.
self._children: List[Symbol] = []
self._anonChildren: List[Symbol] = []
# note: _children includes _anonChildren
if self.parent:
self.parent._children.append(self)
if self.declaration:
self.declaration.symbol = self
# Do symbol addition after self._children has been initialised.
self._add_function_params()
def _fill_empty(self, declaration: ASTDeclaration, docname: str, line: int) -> None:
self._assert_invariants()
assert self.declaration is None
assert self.docname is None
assert self.line is None
assert declaration is not None
assert docname is not None
assert line is not None
self.declaration = declaration
self.declaration.symbol = self
self.docname = docname
self.line = line
self._assert_invariants()
# and symbol addition should be done as well
self._add_function_params()
def _add_function_params(self) -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_function_params:")
# Note: we may be called from _fill_empty, so the symbols we want
# to add may actually already be present (as empty symbols).
# add symbols for function parameters, if any
if self.declaration is not None and self.declaration.function_params is not None:
for p in self.declaration.function_params:
if p.arg is None:
continue
nn = p.arg.name
if nn is None:
continue
# (comparing to the template params: we have checked that we are a declaration)
decl = ASTDeclaration('functionParam', None, p)
assert not nn.rooted
assert len(nn.names) == 1
self._add_symbols(nn, decl, self.docname, self.line)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
def remove(self) -> None:
if self.parent is None:
return
assert self in self.parent._children
self.parent._children.remove(self)
self.parent = None
def clear_doc(self, docname: str) -> None:
for sChild in self._children:
sChild.clear_doc(docname)
if sChild.declaration and sChild.docname == docname:
sChild.declaration = None
sChild.docname = None
sChild.line = None
if sChild.siblingAbove is not None:
sChild.siblingAbove.siblingBelow = sChild.siblingBelow
if sChild.siblingBelow is not None:
sChild.siblingBelow.siblingAbove = sChild.siblingAbove
sChild.siblingAbove = None
sChild.siblingBelow = None
def get_all_symbols(self) -> Iterator["Symbol"]:
yield self
for sChild in self._children:
yield from sChild.get_all_symbols()
@property
def children(self) -> Iterator["Symbol"]:
yield from self._children
@property
def children_recurse_anon(self) -> Iterator["Symbol"]:
for c in self._children:
yield c
if not c.ident.is_anon():
continue
yield from c.children_recurse_anon
def get_lookup_key(self) -> "LookupKey":
# The pickle files for the environment and for each document are distinct.
        # The environment has all the symbols, but the documents have xrefs that
# must know their scope. A lookup key is essentially a specification of
# how to find a specific symbol.
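        # For example, for a symbol nested as "a.b" the key holds one entry per
        # ident on the path from the root, i.e. roughly
        # [(a, <id or None>), (b, <id or None>)], where the id is the newest
        # declaration id when a declaration exists.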
symbols = []
s = self
while s.parent:
symbols.append(s)
s = s.parent
symbols.reverse()
key = []
for s in symbols:
if s.declaration is not None:
# TODO: do we need the ID?
key.append((s.ident, s.declaration.get_newest_id()))
else:
key.append((s.ident, None))
return LookupKey(key)
def get_full_nested_name(self) -> ASTNestedName:
symbols = []
s = self
while s.parent:
symbols.append(s)
s = s.parent
symbols.reverse()
names = []
for s in symbols:
names.append(s.ident)
return ASTNestedName(names, rooted=False)
def _find_first_named_symbol(self, ident: ASTIdentifier,
matchSelf: bool, recurseInAnon: bool) -> "Symbol":
# TODO: further simplification from C++ to C
if Symbol.debug_lookup:
Symbol.debug_print("_find_first_named_symbol ->")
res = self._find_named_symbols(ident, matchSelf, recurseInAnon,
searchInSiblings=False)
try:
return next(res)
except StopIteration:
return None
def _find_named_symbols(self, ident: ASTIdentifier,
matchSelf: bool, recurseInAnon: bool,
searchInSiblings: bool) -> Iterator["Symbol"]:
# TODO: further simplification from C++ to C
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_find_named_symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("self:")
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_print("ident: ", ident)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings: ", searchInSiblings)
def candidates() -> Generator["Symbol", None, None]:
s = self
if Symbol.debug_lookup:
Symbol.debug_print("searching in self:")
print(s.to_string(Symbol.debug_indent + 1), end="")
while True:
if matchSelf:
yield s
if recurseInAnon:
yield from s.children_recurse_anon
else:
yield from s._children
if s.siblingAbove is None:
break
s = s.siblingAbove
if Symbol.debug_lookup:
Symbol.debug_print("searching in sibling:")
print(s.to_string(Symbol.debug_indent + 1), end="")
for s in candidates():
if Symbol.debug_lookup:
Symbol.debug_print("candidate:")
print(s.to_string(Symbol.debug_indent + 1), end="")
if s.ident == ident:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("matches")
Symbol.debug_indent -= 3
yield s
if Symbol.debug_lookup:
Symbol.debug_indent += 2
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
def _symbol_lookup(self, nestedName: ASTNestedName,
onMissingQualifiedSymbol: Callable[["Symbol", ASTIdentifier], "Symbol"], # NOQA
ancestorLookupType: str, matchSelf: bool,
recurseInAnon: bool, searchInSiblings: bool) -> SymbolLookupResult:
# TODO: further simplification from C++ to C
# ancestorLookupType: if not None, specifies the target type of the lookup
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_symbol_lookup:")
Symbol.debug_indent += 1
Symbol.debug_print("self:")
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_print("nestedName: ", nestedName)
Symbol.debug_print("ancestorLookupType:", ancestorLookupType)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings: ", searchInSiblings)
names = nestedName.names
# find the right starting point for lookup
parentSymbol = self
if nestedName.rooted:
while parentSymbol.parent:
parentSymbol = parentSymbol.parent
if ancestorLookupType is not None:
# walk up until we find the first identifier
firstName = names[0]
while parentSymbol.parent:
if parentSymbol.find_identifier(firstName,
matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
searchInSiblings=searchInSiblings):
break
parentSymbol = parentSymbol.parent
if Symbol.debug_lookup:
Symbol.debug_print("starting point:")
print(parentSymbol.to_string(Symbol.debug_indent + 1), end="")
# and now the actual lookup
for ident in names[:-1]:
symbol = parentSymbol._find_first_named_symbol(
ident, matchSelf=matchSelf, recurseInAnon=recurseInAnon)
if symbol is None:
symbol = onMissingQualifiedSymbol(parentSymbol, ident)
if symbol is None:
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return None
            # We have now matched part of a nested name, and need to match more,
            # so even if matchSelf was allowed before, it shouldn't be anymore.
            # (see also issue #2666)
matchSelf = False
parentSymbol = symbol
if Symbol.debug_lookup:
Symbol.debug_print("handle last name from:")
print(parentSymbol.to_string(Symbol.debug_indent + 1), end="")
# handle the last name
ident = names[-1]
symbols = parentSymbol._find_named_symbols(
ident, matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
searchInSiblings=searchInSiblings)
if Symbol.debug_lookup:
symbols = list(symbols) # type: ignore
Symbol.debug_indent -= 2
return SymbolLookupResult(symbols, parentSymbol, ident)
def _add_symbols(self, nestedName: ASTNestedName,
declaration: ASTDeclaration, docname: str, line: int) -> "Symbol":
# TODO: further simplification from C++ to C
# Used for adding a whole path of symbols, where the last may or may not
# be an actual declaration.
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("nn: ", nestedName)
Symbol.debug_print("decl: ", declaration)
Symbol.debug_print("location: {}:{}".format(docname, line))
def onMissingQualifiedSymbol(parentSymbol: "Symbol", ident: ASTIdentifier) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_symbols, onMissingQualifiedSymbol:")
Symbol.debug_indent += 1
Symbol.debug_print("ident: ", ident)
Symbol.debug_indent -= 2
return Symbol(parent=parentSymbol, ident=ident,
declaration=None, docname=None, line=None)
lookupResult = self._symbol_lookup(nestedName,
onMissingQualifiedSymbol,
ancestorLookupType=None,
matchSelf=False,
recurseInAnon=False,
searchInSiblings=False)
assert lookupResult is not None # we create symbols all the way, so that can't happen
symbols = list(lookupResult.symbols)
if len(symbols) == 0:
if Symbol.debug_lookup:
Symbol.debug_print("_add_symbols, result, no symbol:")
Symbol.debug_indent += 1
Symbol.debug_print("ident: ", lookupResult.ident)
Symbol.debug_print("declaration: ", declaration)
Symbol.debug_print("location: {}:{}".format(docname, line))
Symbol.debug_indent -= 1
symbol = Symbol(parent=lookupResult.parentSymbol,
ident=lookupResult.ident,
declaration=declaration,
docname=docname, line=line)
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return symbol
if Symbol.debug_lookup:
Symbol.debug_print("_add_symbols, result, symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("number symbols:", len(symbols))
Symbol.debug_indent -= 1
if not declaration:
if Symbol.debug_lookup:
Symbol.debug_print("no declaration")
Symbol.debug_indent -= 2
# good, just a scope creation
# TODO: what if we have more than one symbol?
return symbols[0]
noDecl = []
withDecl = []
dupDecl = []
for s in symbols:
if s.declaration is None:
noDecl.append(s)
elif s.isRedeclaration:
dupDecl.append(s)
else:
withDecl.append(s)
if Symbol.debug_lookup:
Symbol.debug_print("#noDecl: ", len(noDecl))
Symbol.debug_print("#withDecl:", len(withDecl))
Symbol.debug_print("#dupDecl: ", len(dupDecl))
# With partial builds we may start with a large symbol tree stripped of declarations.
# Essentially any combination of noDecl, withDecl, and dupDecls seems possible.
# TODO: make partial builds fully work. What should happen when the primary symbol gets
        # deleted, and other duplicates exist? The full document should probably be rebuilt.
# First check if one of those with a declaration matches.
# If it's a function, we need to compare IDs,
# otherwise there should be only one symbol with a declaration.
def makeCandSymbol() -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_print("begin: creating candidate symbol")
symbol = Symbol(parent=lookupResult.parentSymbol,
ident=lookupResult.ident,
declaration=declaration,
docname=docname, line=line)
if Symbol.debug_lookup:
Symbol.debug_print("end: creating candidate symbol")
return symbol
if len(withDecl) == 0:
candSymbol = None
else:
candSymbol = makeCandSymbol()
def handleDuplicateDeclaration(symbol: "Symbol", candSymbol: "Symbol") -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("redeclaration")
Symbol.debug_indent -= 1
Symbol.debug_indent -= 2
# Redeclaration of the same symbol.
# Let the new one be there, but raise an error to the client
# so it can use the real symbol as subscope.
# This will probably result in a duplicate id warning.
candSymbol.isRedeclaration = True
raise _DuplicateSymbolError(symbol, declaration)
if declaration.objectType != "function":
assert len(withDecl) <= 1
handleDuplicateDeclaration(withDecl[0], candSymbol)
# (not reachable)
# a function, so compare IDs
candId = declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print("candId:", candId)
for symbol in withDecl:
oldId = symbol.declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print("oldId: ", oldId)
if candId == oldId:
handleDuplicateDeclaration(symbol, candSymbol)
# (not reachable)
# no candidate symbol found with matching ID
# if there is an empty symbol, fill that one
if len(noDecl) == 0:
if Symbol.debug_lookup:
Symbol.debug_print("no match, no empty, candSybmol is not None?:", candSymbol is not None) # NOQA
Symbol.debug_indent -= 2
if candSymbol is not None:
return candSymbol
else:
return makeCandSymbol()
else:
if Symbol.debug_lookup:
Symbol.debug_print(
"no match, but fill an empty declaration, candSybmol is not None?:",
candSymbol is not None) # NOQA
Symbol.debug_indent -= 2
if candSymbol is not None:
candSymbol.remove()
# assert len(noDecl) == 1
# TODO: enable assertion when we at some point find out how to do cleanup
# for now, just take the first one, it should work fine ... right?
symbol = noDecl[0]
# If someone first opened the scope, and then later
            # declares it, e.g.,
# .. namespace:: Test
# .. namespace:: nullptr
# .. class:: Test
symbol._fill_empty(declaration, docname, line)
return symbol
def merge_with(self, other: "Symbol", docnames: List[str],
env: "BuildEnvironment") -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("merge_with:")
assert other is not None
for otherChild in other._children:
ourChild = self._find_first_named_symbol(
ident=otherChild.ident, matchSelf=False,
recurseInAnon=False)
if ourChild is None:
# TODO: hmm, should we prune by docnames?
self._children.append(otherChild)
otherChild.parent = self
otherChild._assert_invariants()
continue
if otherChild.declaration and otherChild.docname in docnames:
if not ourChild.declaration:
ourChild._fill_empty(otherChild.declaration,
otherChild.docname, otherChild.line)
elif ourChild.docname != otherChild.docname:
name = str(ourChild.declaration)
msg = __("Duplicate C declaration, also defined at %s:%s.\n"
"Declaration is '.. c:%s:: %s'.")
msg = msg % (ourChild.docname, ourChild.line,
ourChild.declaration.directiveType, name)
logger.warning(msg, location=(otherChild.docname, otherChild.line))
else:
# Both have declarations, and in the same docname.
# This can apparently happen, it should be safe to
# just ignore it, right?
pass
ourChild.merge_with(otherChild, docnames, env)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
def add_name(self, nestedName: ASTNestedName) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("add_name:")
res = self._add_symbols(nestedName, declaration=None, docname=None, line=None)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
return res
def add_declaration(self, declaration: ASTDeclaration,
docname: str, line: int) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("add_declaration:")
assert declaration is not None
assert docname is not None
assert line is not None
nestedName = declaration.name
res = self._add_symbols(nestedName, declaration, docname, line)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
return res
def find_identifier(self, ident: ASTIdentifier,
matchSelf: bool, recurseInAnon: bool, searchInSiblings: bool
) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("find_identifier:")
Symbol.debug_indent += 1
Symbol.debug_print("ident: ", ident)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings:", searchInSiblings)
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_indent -= 2
current = self
while current is not None:
if Symbol.debug_lookup:
Symbol.debug_indent += 2
Symbol.debug_print("trying:")
print(current.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_indent -= 2
if matchSelf and current.ident == ident:
return current
children = current.children_recurse_anon if recurseInAnon else current._children
for s in children:
if s.ident == ident:
return s
if not searchInSiblings:
break
current = current.siblingAbove
return None
def direct_lookup(self, key: "LookupKey") -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("direct_lookup:")
Symbol.debug_indent += 1
s = self
for name, id_ in key.data:
res = None
for cand in s._children:
if cand.ident == name:
res = cand
break
s = res
if Symbol.debug_lookup:
Symbol.debug_print("name: ", name)
Symbol.debug_print("id: ", id_)
if s is not None:
print(s.to_string(Symbol.debug_indent + 1), end="")
else:
Symbol.debug_print("not found")
if s is None:
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return None
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return s
def find_declaration(self, nestedName: ASTNestedName, typ: str,
matchSelf: bool, recurseInAnon: bool) -> "Symbol":
# templateShorthand: missing template parameter lists for templates is ok
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("find_declaration:")
def onMissingQualifiedSymbol(parentSymbol: "Symbol",
ident: ASTIdentifier) -> "Symbol":
return None
lookupResult = self._symbol_lookup(nestedName,
onMissingQualifiedSymbol,
ancestorLookupType=typ,
matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
searchInSiblings=False)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
if lookupResult is None:
return None
symbols = list(lookupResult.symbols)
if len(symbols) == 0:
return None
return symbols[0]
def to_string(self, indent: int) -> str:
res = [Symbol.debug_indent_string * indent]
if not self.parent:
res.append('::')
else:
if self.ident:
res.append(str(self.ident))
else:
res.append(str(self.declaration))
if self.declaration:
res.append(": ")
if self.isRedeclaration:
res.append('!!duplicate!! ')
res.append(str(self.declaration))
if self.docname:
res.append('\t(')
res.append(self.docname)
res.append(')')
res.append('\n')
return ''.join(res)
def dump(self, indent: int) -> str:
res = [self.to_string(indent)]
for c in self._children:
res.append(c.dump(indent + 1))
return ''.join(res)
class DefinitionParser(BaseParser):
@property
def language(self) -> str:
return 'C'
@property
def id_attributes(self):
return self.config.c_id_attributes
@property
def paren_attributes(self):
return self.config.c_paren_attributes
def _parse_string(self) -> str:
if self.current_char != '"':
return None
startPos = self.pos
self.pos += 1
escape = False
while True:
if self.eof:
self.fail("Unexpected end during inside string.")
elif self.current_char == '"' and not escape:
self.pos += 1
break
elif self.current_char == '\\':
escape = True
else:
escape = False
self.pos += 1
return self.definition[startPos:self.pos]
def _parse_literal(self) -> ASTLiteral:
# -> integer-literal
# | character-literal
# | floating-literal
# | string-literal
# | boolean-literal -> "false" | "true"
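        # Illustrative examples: "true"/"false" yield ASTBooleanLiteral; "0x1F",
        # "42u", and "1.5e3" yield ASTNumberLiteral; a double-quoted string
        # yields ASTStringLiteral; and a (possibly prefixed) character literal
        # like 'a' yields ASTCharLiteral.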
self.skip_ws()
if self.skip_word('true'):
return ASTBooleanLiteral(True)
if self.skip_word('false'):
return ASTBooleanLiteral(False)
pos = self.pos
if self.match(float_literal_re):
self.match(float_literal_suffix_re)
return ASTNumberLiteral(self.definition[pos:self.pos])
for regex in [binary_literal_re, hex_literal_re,
integer_literal_re, octal_literal_re]:
if self.match(regex):
self.match(integers_literal_suffix_re)
return ASTNumberLiteral(self.definition[pos:self.pos])
string = self._parse_string()
if string is not None:
return ASTStringLiteral(string)
# character-literal
if self.match(char_literal_re):
prefix = self.last_match.group(1) # may be None when no prefix
data = self.last_match.group(2)
try:
return ASTCharLiteral(prefix, data)
except UnicodeDecodeError as e:
self.fail("Can not handle character literal. Internal error was: %s" % e)
except UnsupportedMultiCharacterCharLiteral:
self.fail("Can not handle character literal"
" resulting in multiple decoded characters.")
return None
def _parse_paren_expression(self) -> ASTExpression:
# "(" expression ")"
if self.current_char != '(':
return None
self.pos += 1
res = self._parse_expression()
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expected ')' in end of parenthesized expression.")
return ASTParenExpr(res)
def _parse_primary_expression(self) -> ASTExpression:
# literal
# "(" expression ")"
# id-expression -> we parse this with _parse_nested_name
self.skip_ws()
res: ASTExpression = self._parse_literal()
if res is not None:
return res
res = self._parse_paren_expression()
if res is not None:
return res
nn = self._parse_nested_name()
if nn is not None:
return ASTIdExpression(nn)
return None
def _parse_initializer_list(self, name: str, open: str, close: str
) -> Tuple[List[ASTExpression], bool]:
# Parse open and close with the actual initializer-list in between
# -> initializer-clause '...'[opt]
# | initializer-list ',' initializer-clause '...'[opt]
# TODO: designators
self.skip_ws()
if not self.skip_string_and_ws(open):
return None, None
if self.skip_string(close):
return [], False
exprs = []
trailingComma = False
while True:
self.skip_ws()
expr = self._parse_expression()
self.skip_ws()
exprs.append(expr)
self.skip_ws()
if self.skip_string(close):
break
if not self.skip_string_and_ws(','):
self.fail("Error in %s, expected ',' or '%s'." % (name, close))
if self.current_char == close and close == '}':
self.pos += 1
trailingComma = True
break
return exprs, trailingComma
def _parse_paren_expression_list(self) -> ASTParenExprList:
# -> '(' expression-list ')'
# though, we relax it to also allow empty parens
# as it's needed in some cases
#
# expression-list
# -> initializer-list
exprs, trailingComma = self._parse_initializer_list("parenthesized expression-list",
'(', ')')
if exprs is None:
return None
return ASTParenExprList(exprs)
def _parse_braced_init_list(self) -> ASTBracedInitList:
# -> '{' initializer-list ','[opt] '}'
# | '{' '}'
exprs, trailingComma = self._parse_initializer_list("braced-init-list", '{', '}')
if exprs is None:
return None
return ASTBracedInitList(exprs, trailingComma)
def _parse_postfix_expression(self) -> ASTPostfixExpr:
# -> primary
# | postfix "[" expression "]"
# | postfix "[" braced-init-list [opt] "]"
# | postfix "(" expression-list [opt] ")"
# | postfix "." id-expression // taken care of in primary by nested name
# | postfix "->" id-expression
# | postfix "++"
# | postfix "--"
prefix = self._parse_primary_expression()
# and now parse postfixes
postFixes: List[ASTPostfixOp] = []
while True:
self.skip_ws()
if self.skip_string_and_ws('['):
expr = self._parse_expression()
self.skip_ws()
if not self.skip_string(']'):
self.fail("Expected ']' in end of postfix expression.")
postFixes.append(ASTPostfixArray(expr))
continue
if self.skip_string('->'):
if self.skip_string('*'):
# don't steal the arrow
self.pos -= 3
else:
name = self._parse_nested_name()
postFixes.append(ASTPostfixMemberOfPointer(name))
continue
if self.skip_string('++'):
postFixes.append(ASTPostfixInc())
continue
if self.skip_string('--'):
postFixes.append(ASTPostfixDec())
continue
lst = self._parse_paren_expression_list()
if lst is not None:
postFixes.append(ASTPostfixCallExpr(lst))
continue
break
return ASTPostfixExpr(prefix, postFixes)
def _parse_unary_expression(self) -> ASTExpression:
# -> postfix
# | "++" cast
# | "--" cast
# | unary-operator cast -> (* | & | + | - | ! | ~) cast
# The rest:
# | "sizeof" unary
# | "sizeof" "(" type-id ")"
# | "alignof" "(" type-id ")"
self.skip_ws()
for op in _expression_unary_ops:
# TODO: hmm, should we be able to backtrack here?
if op[0] in 'cn':
res = self.skip_word(op)
else:
res = self.skip_string(op)
if res:
expr = self._parse_cast_expression()
return ASTUnaryOpExpr(op, expr)
if self.skip_word_and_ws('sizeof'):
if self.skip_string_and_ws('('):
typ = self._parse_type(named=False)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'sizeof'.")
return ASTSizeofType(typ)
expr = self._parse_unary_expression()
return ASTSizeofExpr(expr)
if self.skip_word_and_ws('alignof'):
if not self.skip_string_and_ws('('):
self.fail("Expecting '(' after 'alignof'.")
typ = self._parse_type(named=False)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'alignof'.")
return ASTAlignofExpr(typ)
return self._parse_postfix_expression()
def _parse_cast_expression(self) -> ASTExpression:
# -> unary | "(" type-id ")" cast
pos = self.pos
self.skip_ws()
if self.skip_string('('):
try:
typ = self._parse_type(False)
if not self.skip_string(')'):
self.fail("Expected ')' in cast expression.")
expr = self._parse_cast_expression()
return ASTCastExpr(typ, expr)
except DefinitionError as exCast:
self.pos = pos
try:
return self._parse_unary_expression()
except DefinitionError as exUnary:
errs = []
errs.append((exCast, "If type cast expression"))
errs.append((exUnary, "If unary expression"))
raise self._make_multi_error(errs,
"Error in cast expression.") from exUnary
else:
return self._parse_unary_expression()
def _parse_logical_or_expression(self) -> ASTExpression:
# logical-or = logical-and ||
# logical-and = inclusive-or &&
# inclusive-or = exclusive-or |
# exclusive-or = and ^
# and = equality &
# equality = relational ==, !=
# relational = shift <, >, <=, >=
# shift = additive <<, >>
# additive = multiplicative +, -
# multiplicative = pm *, /, %
# pm = cast .*, ->*
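        # The table above is handled generically: _expression_bin_ops lists the
        # operator groups from lowest to highest precedence, and
        # _parse_bin_op_expr(opId) parses level opId by folding operands parsed
        # at level opId + 1 (the last level delegating to cast expressions), so
        # e.g. "a + b * c" keeps "b * c" together as a single operand.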
def _parse_bin_op_expr(self, opId):
if opId + 1 == len(_expression_bin_ops):
def parser() -> ASTExpression:
return self._parse_cast_expression()
else:
def parser() -> ASTExpression:
return _parse_bin_op_expr(self, opId + 1)
exprs = []
ops = []
exprs.append(parser())
while True:
self.skip_ws()
pos = self.pos
oneMore = False
for op in _expression_bin_ops[opId]:
if op[0] in 'abcnox':
if not self.skip_word(op):
continue
else:
if not self.skip_string(op):
continue
if op == '&' and self.current_char == '&':
# don't split the && 'token'
self.pos -= 1
# and btw. && has lower precedence, so we are done
break
try:
expr = parser()
exprs.append(expr)
ops.append(op)
oneMore = True
break
except DefinitionError:
self.pos = pos
if not oneMore:
break
return ASTBinOpExpr(exprs, ops)
return _parse_bin_op_expr(self, 0)
def _parse_conditional_expression_tail(self, orExprHead: Any) -> ASTExpression:
# -> "?" expression ":" assignment-expression
return None
def _parse_assignment_expression(self) -> ASTExpression:
# -> conditional-expression
# | logical-or-expression assignment-operator initializer-clause
# -> conditional-expression ->
# logical-or-expression
# | logical-or-expression "?" expression ":" assignment-expression
# | logical-or-expression assignment-operator initializer-clause
exprs = []
ops = []
orExpr = self._parse_logical_or_expression()
exprs.append(orExpr)
# TODO: handle ternary with _parse_conditional_expression_tail
while True:
oneMore = False
self.skip_ws()
for op in _expression_assignment_ops:
if op[0] in 'abcnox':
if not self.skip_word(op):
continue
else:
if not self.skip_string(op):
continue
expr = self._parse_logical_or_expression()
exprs.append(expr)
ops.append(op)
oneMore = True
if not oneMore:
break
return ASTAssignmentExpr(exprs, ops)
def _parse_constant_expression(self) -> ASTExpression:
# -> conditional-expression
orExpr = self._parse_logical_or_expression()
# TODO: use _parse_conditional_expression_tail
return orExpr
def _parse_expression(self) -> ASTExpression:
# -> assignment-expression
# | expression "," assignment-expression
# TODO: actually parse the second production
return self._parse_assignment_expression()
def _parse_expression_fallback(
self, end: List[str],
parser: Callable[[], ASTExpression],
allow: bool = True) -> ASTExpression:
# Stupidly "parse" an expression.
# 'end' should be a list of characters which ends the expression.
# first try to use the provided parser
prevPos = self.pos
try:
return parser()
except DefinitionError as e:
# some places (e.g., template parameters) we really don't want to use fallback,
# and for testing we may want to globally disable it
if not allow or not self.allowFallbackExpressionParsing:
raise
self.warn("Parsing of expression failed. Using fallback parser."
" Error was:\n%s" % e)
self.pos = prevPos
# and then the fallback scanning
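        # For example, with end=[','] this scanner would accept something like
        # "sizeof(struct foo){0}" verbatim as an ASTFallbackExpr, only balancing
        # the (), [], and {} brackets it encounters rather than parsing them.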
assert end is not None
self.skip_ws()
startPos = self.pos
if self.match(_string_re):
value = self.matched_text
else:
# TODO: add handling of more bracket-like things, and quote handling
brackets = {'(': ')', '{': '}', '[': ']'}
symbols: List[str] = []
while not self.eof:
if (len(symbols) == 0 and self.current_char in end):
break
if self.current_char in brackets.keys():
symbols.append(brackets[self.current_char])
elif len(symbols) > 0 and self.current_char == symbols[-1]:
symbols.pop()
self.pos += 1
if len(end) > 0 and self.eof:
self.fail("Could not find end of expression starting at %d."
% startPos)
value = self.definition[startPos:self.pos].strip()
return ASTFallbackExpr(value.strip())
def _parse_nested_name(self) -> ASTNestedName:
names: List[Any] = []
self.skip_ws()
rooted = False
if self.skip_string('.'):
rooted = True
while 1:
self.skip_ws()
if not self.match(identifier_re):
self.fail("Expected identifier in nested name.")
identifier = self.matched_text
# make sure there isn't a keyword
if identifier in _keywords:
self.fail("Expected identifier in nested name, "
"got keyword: %s" % identifier)
if self.matched_text in self.config.c_extra_keywords:
msg = "Expected identifier, got user-defined keyword: %s." \
+ " Remove it from c_extra_keywords to allow it as identifier.\n" \
+ "Currently c_extra_keywords is %s."
self.fail(msg % (self.matched_text,
str(self.config.c_extra_keywords)))
ident = ASTIdentifier(identifier)
names.append(ident)
self.skip_ws()
if not self.skip_string('.'):
break
return ASTNestedName(names, rooted)
def _parse_simple_type_specifier(self) -> Optional[str]:
if self.match(_simple_type_specifiers_re):
return self.matched_text
for t in ('bool', 'complex', 'imaginary'):
if t in self.config.c_extra_keywords:
if self.skip_word(t):
return t
return None
def _parse_simple_type_specifiers(self) -> ASTTrailingTypeSpecFundamental:
names: List[str] = []
self.skip_ws()
while True:
t = self._parse_simple_type_specifier()
if t is None:
break
names.append(t)
self.skip_ws()
if len(names) == 0:
return None
return ASTTrailingTypeSpecFundamental(names)
def _parse_trailing_type_spec(self) -> ASTTrailingTypeSpec:
# fundamental types, https://en.cppreference.com/w/c/language/type
# and extensions
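        # For example, "unsigned long" is returned as an
        # ASTTrailingTypeSpecFundamental, while "struct point" and (typically) a
        # plain typedef name such as "size_t" are returned as
        # ASTTrailingTypeSpecName (with prefix 'struct' and None respectively).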
self.skip_ws()
res = self._parse_simple_type_specifiers()
if res is not None:
return res
# prefixed
prefix = None
self.skip_ws()
for k in ('struct', 'enum', 'union'):
if self.skip_word_and_ws(k):
prefix = k
break
nestedName = self._parse_nested_name()
return ASTTrailingTypeSpecName(prefix, nestedName)
def _parse_parameters(self, paramMode: str) -> ASTParameters:
self.skip_ws()
if not self.skip_string('('):
if paramMode == 'function':
self.fail('Expecting "(" in parameters.')
else:
return None
args = []
self.skip_ws()
if not self.skip_string(')'):
while 1:
self.skip_ws()
if self.skip_string('...'):
args.append(ASTFunctionParameter(None, True))
self.skip_ws()
if not self.skip_string(')'):
self.fail('Expected ")" after "..." in parameters.')
break
# note: it seems that function arguments can always be named,
# even in function pointers and similar.
arg = self._parse_type_with_init(outer=None, named='single')
# TODO: parse default parameters # TODO: didn't we just do that?
args.append(ASTFunctionParameter(arg))
self.skip_ws()
if self.skip_string(','):
continue
elif self.skip_string(')'):
break
else:
self.fail(
'Expecting "," or ")" in parameters, '
'got "%s".' % self.current_char)
attrs = []
while True:
attr = self._parse_attribute()
if attr is None:
break
attrs.append(attr)
return ASTParameters(args, attrs)
def _parse_decl_specs_simple(self, outer: str, typed: bool) -> ASTDeclSpecsSimple:
"""Just parse the simple ones."""
storage = None
threadLocal = None
inline = None
restrict = None
volatile = None
const = None
attrs = []
while 1: # accept any permutation of a subset of some decl-specs
self.skip_ws()
if not storage:
if outer == 'member':
if self.skip_word('auto'):
storage = 'auto'
continue
if self.skip_word('register'):
storage = 'register'
continue
if outer in ('member', 'function'):
if self.skip_word('static'):
storage = 'static'
continue
if self.skip_word('extern'):
storage = 'extern'
continue
if outer == 'member' and not threadLocal:
if self.skip_word('thread_local'):
threadLocal = 'thread_local'
continue
if self.skip_word('_Thread_local'):
threadLocal = '_Thread_local'
continue
if outer == 'function' and not inline:
inline = self.skip_word('inline')
if inline:
continue
if not restrict and typed:
restrict = self.skip_word('restrict')
if restrict:
continue
if not volatile and typed:
volatile = self.skip_word('volatile')
if volatile:
continue
if not const and typed:
const = self.skip_word('const')
if const:
continue
attr = self._parse_attribute()
if attr:
attrs.append(attr)
continue
break
return ASTDeclSpecsSimple(storage, threadLocal, inline,
restrict, volatile, const, attrs)
def _parse_decl_specs(self, outer: str, typed: bool = True) -> ASTDeclSpecs:
if outer:
if outer not in ('type', 'member', 'function'):
raise Exception('Internal error, unknown outer "%s".' % outer)
leftSpecs = self._parse_decl_specs_simple(outer, typed)
rightSpecs = None
if typed:
trailing = self._parse_trailing_type_spec()
rightSpecs = self._parse_decl_specs_simple(outer, typed)
else:
trailing = None
return ASTDeclSpecs(outer, leftSpecs, rightSpecs, trailing)
def _parse_declarator_name_suffix(
self, named: Union[bool, str], paramMode: str, typed: bool
) -> ASTDeclarator:
assert named in (True, False, 'single')
# now we should parse the name, and then suffixes
if named == 'single':
if self.match(identifier_re):
if self.matched_text in _keywords:
self.fail("Expected identifier, "
"got keyword: %s" % self.matched_text)
if self.matched_text in self.config.c_extra_keywords:
msg = "Expected identifier, got user-defined keyword: %s." \
+ " Remove it from c_extra_keywords to allow it as identifier.\n" \
+ "Currently c_extra_keywords is %s."
self.fail(msg % (self.matched_text,
str(self.config.c_extra_keywords)))
identifier = ASTIdentifier(self.matched_text)
declId = ASTNestedName([identifier], rooted=False)
else:
declId = None
elif named:
declId = self._parse_nested_name()
else:
declId = None
arrayOps = []
while 1:
self.skip_ws()
if typed and self.skip_string('['):
self.skip_ws()
static = False
const = False
volatile = False
restrict = False
while True:
if not static:
if self.skip_word_and_ws('static'):
static = True
continue
if not const:
if self.skip_word_and_ws('const'):
const = True
continue
if not volatile:
if self.skip_word_and_ws('volatile'):
volatile = True
continue
if not restrict:
if self.skip_word_and_ws('restrict'):
restrict = True
continue
break
vla = False if static else self.skip_string_and_ws('*')
if vla:
if not self.skip_string(']'):
self.fail("Expected ']' in end of array operator.")
size = None
else:
if self.skip_string(']'):
size = None
else:
def parser():
return self._parse_expression()
size = self._parse_expression_fallback([']'], parser)
self.skip_ws()
if not self.skip_string(']'):
self.fail("Expected ']' in end of array operator.")
arrayOps.append(ASTArray(static, const, volatile, restrict, vla, size))
else:
break
param = self._parse_parameters(paramMode)
if param is None and len(arrayOps) == 0:
# perhaps a bit-field
if named and paramMode == 'type' and typed:
self.skip_ws()
if self.skip_string(':'):
size = self._parse_constant_expression()
return ASTDeclaratorNameBitField(declId=declId, size=size)
return ASTDeclaratorNameParam(declId=declId, arrayOps=arrayOps,
param=param)
def _parse_declarator(self, named: Union[bool, str], paramMode: str,
typed: bool = True) -> ASTDeclarator:
# 'typed' here means 'parse return type stuff'
if paramMode not in ('type', 'function'):
raise Exception(
"Internal error, unknown paramMode '%s'." % paramMode)
prevErrors = []
self.skip_ws()
if typed and self.skip_string('*'):
self.skip_ws()
restrict = False
volatile = False
const = False
attrs = []
while 1:
if not restrict:
restrict = self.skip_word_and_ws('restrict')
if restrict:
continue
if not volatile:
volatile = self.skip_word_and_ws('volatile')
if volatile:
continue
if not const:
const = self.skip_word_and_ws('const')
if const:
continue
attr = self._parse_attribute()
if attr is not None:
attrs.append(attr)
continue
break
next = self._parse_declarator(named, paramMode, typed)
return ASTDeclaratorPtr(next=next,
restrict=restrict, volatile=volatile, const=const,
attrs=attrs)
if typed and self.current_char == '(': # note: peeking, not skipping
# maybe this is the beginning of params, try that first,
            # otherwise assume it's a noptr-declarator of the form "( ptr-declarator )"
pos = self.pos
try:
# assume this is params
res = self._parse_declarator_name_suffix(named, paramMode,
typed)
return res
except DefinitionError as exParamQual:
msg = "If declarator-id with parameters"
if paramMode == 'function':
msg += " (e.g., 'void f(int arg)')"
prevErrors.append((exParamQual, msg))
self.pos = pos
try:
assert self.current_char == '('
self.skip_string('(')
# TODO: hmm, if there is a name, it must be in inner, right?
                # TODO: hmm, if there must be parameters, they must be
# inside, right?
inner = self._parse_declarator(named, paramMode, typed)
if not self.skip_string(')'):
self.fail("Expected ')' in \"( ptr-declarator )\"")
next = self._parse_declarator(named=False,
paramMode="type",
typed=typed)
return ASTDeclaratorParen(inner=inner, next=next)
except DefinitionError as exNoPtrParen:
self.pos = pos
msg = "If parenthesis in noptr-declarator"
if paramMode == 'function':
msg += " (e.g., 'void (*f(int arg))(double)')"
prevErrors.append((exNoPtrParen, msg))
header = "Error in declarator"
raise self._make_multi_error(prevErrors, header) from exNoPtrParen
pos = self.pos
try:
return self._parse_declarator_name_suffix(named, paramMode, typed)
except DefinitionError as e:
self.pos = pos
prevErrors.append((e, "If declarator-id"))
header = "Error in declarator or parameters"
raise self._make_multi_error(prevErrors, header) from e
def _parse_initializer(self, outer: str = None, allowFallback: bool = True
) -> ASTInitializer:
self.skip_ws()
if outer == 'member' and False: # TODO
bracedInit = self._parse_braced_init_list()
if bracedInit is not None:
return ASTInitializer(bracedInit, hasAssign=False)
if not self.skip_string('='):
return None
bracedInit = self._parse_braced_init_list()
if bracedInit is not None:
return ASTInitializer(bracedInit)
if outer == 'member':
fallbackEnd: List[str] = []
elif outer is None: # function parameter
fallbackEnd = [',', ')']
else:
self.fail("Internal error, initializer for outer '%s' not "
"implemented." % outer)
def parser():
return self._parse_assignment_expression()
value = self._parse_expression_fallback(fallbackEnd, parser, allow=allowFallback)
return ASTInitializer(value)
def _parse_type(self, named: Union[bool, str], outer: str = None) -> ASTType:
"""
        named=False|'single'|True: 'single' is e.g., for function objects,
        which don't need to name the arguments, but otherwise is a single name
"""
if outer: # always named
if outer not in ('type', 'member', 'function'):
raise Exception('Internal error, unknown outer "%s".' % outer)
assert named
if outer == 'type':
# We allow type objects to just be a name.
prevErrors = []
startPos = self.pos
# first try without the type
try:
declSpecs = self._parse_decl_specs(outer=outer, typed=False)
decl = self._parse_declarator(named=True, paramMode=outer,
typed=False)
self.assert_end(allowSemicolon=True)
except DefinitionError as exUntyped:
desc = "If just a name"
prevErrors.append((exUntyped, desc))
self.pos = startPos
try:
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=True, paramMode=outer)
except DefinitionError as exTyped:
self.pos = startPos
desc = "If typedef-like declaration"
prevErrors.append((exTyped, desc))
# Retain the else branch for easier debugging.
# TODO: it would be nice to save the previous stacktrace
# and output it here.
if True:
header = "Type must be either just a name or a "
header += "typedef-like declaration."
raise self._make_multi_error(prevErrors, header) from exTyped
else:
# For testing purposes.
# do it again to get the proper traceback (how do you
# reliably save a traceback when an exception is
# constructed?)
self.pos = startPos
typed = True
declSpecs = self._parse_decl_specs(outer=outer, typed=typed)
decl = self._parse_declarator(named=True, paramMode=outer,
typed=typed)
elif outer == 'function':
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=True, paramMode=outer)
else:
paramMode = 'type'
if outer == 'member': # i.e., member
named = True
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=named, paramMode=paramMode)
return ASTType(declSpecs, decl)
def _parse_type_with_init(self, named: Union[bool, str], outer: str) -> ASTTypeWithInit:
if outer:
assert outer in ('type', 'member', 'function')
type = self._parse_type(outer=outer, named=named)
init = self._parse_initializer(outer=outer)
return ASTTypeWithInit(type, init)
def _parse_macro(self) -> ASTMacro:
self.skip_ws()
ident = self._parse_nested_name()
if ident is None:
self.fail("Expected identifier in macro definition.")
self.skip_ws()
if not self.skip_string_and_ws('('):
return ASTMacro(ident, None)
if self.skip_string(')'):
return ASTMacro(ident, [])
args = []
while 1:
self.skip_ws()
if self.skip_string('...'):
args.append(ASTMacroParameter(None, True))
self.skip_ws()
if not self.skip_string(')'):
self.fail('Expected ")" after "..." in macro parameters.')
break
if not self.match(identifier_re):
self.fail("Expected identifier in macro parameters.")
nn = ASTNestedName([ASTIdentifier(self.matched_text)], rooted=False)
# Allow named variadic args:
# https://gcc.gnu.org/onlinedocs/cpp/Variadic-Macros.html
self.skip_ws()
if self.skip_string_and_ws('...'):
args.append(ASTMacroParameter(nn, False, True))
self.skip_ws()
if not self.skip_string(')'):
self.fail('Expected ")" after "..." in macro parameters.')
break
args.append(ASTMacroParameter(nn))
if self.skip_string_and_ws(','):
continue
elif self.skip_string_and_ws(')'):
break
else:
self.fail("Expected identifier, ')', or ',' in macro parameter list.")
return ASTMacro(ident, args)
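    # For reference, signatures accepted by _parse_macro include (illustrative
    # examples, not taken from real code):
    #   SOME_CONSTANT
    #   MIN(a, b)
    #   LOG(fmt, ...)
    #   LOG_NAMED(fmt, args...)
    # i.e., an object-like macro, a function-like macro, and the two variadic
    # forms handled above.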
def _parse_struct(self) -> ASTStruct:
name = self._parse_nested_name()
return ASTStruct(name)
def _parse_union(self) -> ASTUnion:
name = self._parse_nested_name()
return ASTUnion(name)
def _parse_enum(self) -> ASTEnum:
name = self._parse_nested_name()
return ASTEnum(name)
def _parse_enumerator(self) -> ASTEnumerator:
name = self._parse_nested_name()
self.skip_ws()
init = None
if self.skip_string('='):
self.skip_ws()
def parser() -> ASTExpression:
return self._parse_constant_expression()
initVal = self._parse_expression_fallback([], parser)
init = ASTInitializer(initVal)
return ASTEnumerator(name, init)
def parse_pre_v3_type_definition(self) -> ASTDeclaration:
self.skip_ws()
declaration: DeclarationType = None
if self.skip_word('struct'):
typ = 'struct'
declaration = self._parse_struct()
elif self.skip_word('union'):
typ = 'union'
declaration = self._parse_union()
elif self.skip_word('enum'):
typ = 'enum'
declaration = self._parse_enum()
else:
self.fail("Could not parse pre-v3 type directive."
" Must start with 'struct', 'union', or 'enum'.")
return ASTDeclaration(typ, typ, declaration, False)
def parse_declaration(self, objectType: str, directiveType: str) -> ASTDeclaration:
if objectType not in ('function', 'member',
'macro', 'struct', 'union', 'enum', 'enumerator', 'type'):
raise Exception('Internal error, unknown objectType "%s".' % objectType)
if directiveType not in ('function', 'member', 'var',
'macro', 'struct', 'union', 'enum', 'enumerator', 'type'):
raise Exception('Internal error, unknown directiveType "%s".' % directiveType)
declaration: DeclarationType = None
if objectType == 'member':
declaration = self._parse_type_with_init(named=True, outer='member')
elif objectType == 'function':
declaration = self._parse_type(named=True, outer='function')
elif objectType == 'macro':
declaration = self._parse_macro()
elif objectType == 'struct':
declaration = self._parse_struct()
elif objectType == 'union':
declaration = self._parse_union()
elif objectType == 'enum':
declaration = self._parse_enum()
elif objectType == 'enumerator':
declaration = self._parse_enumerator()
elif objectType == 'type':
declaration = self._parse_type(named=True, outer='type')
else:
assert False
if objectType != 'macro':
self.skip_ws()
semicolon = self.skip_string(';')
else:
semicolon = False
return ASTDeclaration(objectType, directiveType, declaration, semicolon)
def parse_namespace_object(self) -> ASTNestedName:
return self._parse_nested_name()
def parse_xref_object(self) -> ASTNestedName:
name = self._parse_nested_name()
# if there are '()' left, just skip them
self.skip_ws()
self.skip_string('()')
self.assert_end()
return name
def parse_expression(self) -> Union[ASTExpression, ASTType]:
pos = self.pos
res: Union[ASTExpression, ASTType] = None
try:
res = self._parse_expression()
self.skip_ws()
self.assert_end()
except DefinitionError as exExpr:
self.pos = pos
try:
res = self._parse_type(False)
self.skip_ws()
self.assert_end()
except DefinitionError as exType:
header = "Error when parsing (type) expression."
errs = []
errs.append((exExpr, "If expression"))
errs.append((exType, "If type"))
raise self._make_multi_error(errs, header) from exType
return res
def _make_phony_error_name() -> ASTNestedName:
return ASTNestedName([ASTIdentifier("PhonyNameDueToError")], rooted=False)
class CObject(ObjectDescription[ASTDeclaration]):
"""
Description of a C language object.
"""
option_spec: OptionSpec = {
'noindexentry': directives.flag,
}
def _add_enumerator_to_parent(self, ast: ASTDeclaration) -> None:
assert ast.objectType == 'enumerator'
# find the parent, if it exists && is an enum
# then add the name to the parent scope
symbol = ast.symbol
assert symbol
assert symbol.ident is not None
parentSymbol = symbol.parent
assert parentSymbol
if parentSymbol.parent is None:
# TODO: we could warn, but it is somewhat equivalent to
            # enumerators, without the enum
return # no parent
parentDecl = parentSymbol.declaration
if parentDecl is None:
# the parent is not explicitly declared
# TODO: we could warn, but?
return
if parentDecl.objectType != 'enum':
# TODO: maybe issue a warning, enumerators in non-enums is weird,
            # but it is somewhat equivalent to enumerators, without the enum
return
if parentDecl.directiveType != 'enum':
return
targetSymbol = parentSymbol.parent
s = targetSymbol.find_identifier(symbol.ident, matchSelf=False, recurseInAnon=True,
searchInSiblings=False)
if s is not None:
# something is already declared with that name
return
declClone = symbol.declaration.clone()
declClone.enumeratorScopedSymbol = symbol
Symbol(parent=targetSymbol, ident=symbol.ident,
declaration=declClone,
docname=self.env.docname, line=self.get_source_info()[1])
def add_target_and_index(self, ast: ASTDeclaration, sig: str,
signode: TextElement) -> None:
ids = []
for i in range(1, _max_id + 1):
try:
id = ast.get_id(version=i)
ids.append(id)
except NoOldIdError:
assert i < _max_id
# let's keep the newest first
ids = list(reversed(ids))
newestId = ids[0]
assert newestId # shouldn't be None
name = ast.symbol.get_full_nested_name().get_display_string().lstrip('.')
if newestId not in self.state.document.ids:
# always add the newest id
assert newestId
signode['ids'].append(newestId)
# only add compatibility ids when there are no conflicts
for id in ids[1:]:
if not id: # is None when the element didn't exist in that version
continue
if id not in self.state.document.ids:
signode['ids'].append(id)
self.state.document.note_explicit_target(signode)
if 'noindexentry' not in self.options:
indexText = self.get_index_text(name)
self.indexnode['entries'].append(('single', indexText, newestId, '', None))
@property
def object_type(self) -> str:
raise NotImplementedError()
@property
def display_object_type(self) -> str:
return self.object_type
def get_index_text(self, name: str) -> str:
return _('%s (C %s)') % (name, self.display_object_type)
def parse_definition(self, parser: DefinitionParser) -> ASTDeclaration:
return parser.parse_declaration(self.object_type, self.objtype)
def parse_pre_v3_type_definition(self, parser: DefinitionParser) -> ASTDeclaration:
return parser.parse_pre_v3_type_definition()
def describe_signature(self, signode: TextElement, ast: ASTDeclaration,
options: Dict) -> None:
ast.describe_signature(signode, 'lastIsName', self.env, options)
def run(self) -> List[Node]:
env = self.state.document.settings.env # from ObjectDescription.run
if 'c:parent_symbol' not in env.temp_data:
root = env.domaindata['c']['root_symbol']
env.temp_data['c:parent_symbol'] = root
env.ref_context['c:parent_key'] = root.get_lookup_key()
# When multiple declarations are made in the same directive
# they need to know about each other to provide symbol lookup for function parameters.
# We use last_symbol to store the latest added declaration in a directive.
env.temp_data['c:last_symbol'] = None
return super().run()
def handle_signature(self, sig: str, signode: TextElement) -> ASTDeclaration:
parentSymbol: Symbol = self.env.temp_data['c:parent_symbol']
parser = DefinitionParser(sig, location=signode, config=self.env.config)
try:
try:
ast = self.parse_definition(parser)
parser.assert_end()
except DefinitionError as eOrig:
if not self.env.config['c_allow_pre_v3']:
raise
if self.objtype != 'type':
raise
try:
ast = self.parse_pre_v3_type_definition(parser)
parser.assert_end()
except DefinitionError:
raise eOrig
self.object_type = ast.objectType # type: ignore
if self.env.config['c_warn_on_allowed_pre_v3']:
msg = "{}: Pre-v3 C type directive '.. c:type:: {}' converted to " \
"'.. c:{}:: {}'." \
"\nThe original parsing error was:\n{}"
msg = msg.format(RemovedInSphinx60Warning.__name__,
sig, ast.objectType, ast, eOrig)
logger.warning(msg, location=signode)
except DefinitionError as e:
logger.warning(e, location=signode)
# It is easier to assume some phony name than handling the error in
# the possibly inner declarations.
name = _make_phony_error_name()
symbol = parentSymbol.add_name(name)
self.env.temp_data['c:last_symbol'] = symbol
raise ValueError from e
try:
symbol = parentSymbol.add_declaration(
ast, docname=self.env.docname, line=self.get_source_info()[1])
# append the new declaration to the sibling list
assert symbol.siblingAbove is None
assert symbol.siblingBelow is None
symbol.siblingAbove = self.env.temp_data['c:last_symbol']
if symbol.siblingAbove is not None:
assert symbol.siblingAbove.siblingBelow is None
symbol.siblingAbove.siblingBelow = symbol
self.env.temp_data['c:last_symbol'] = symbol
except _DuplicateSymbolError as e:
# Assume we are actually in the old symbol,
# instead of the newly created duplicate.
self.env.temp_data['c:last_symbol'] = e.symbol
msg = __("Duplicate C declaration, also defined at %s:%s.\n"
"Declaration is '.. c:%s:: %s'.")
msg = msg % (e.symbol.docname, e.symbol.line, self.display_object_type, sig)
logger.warning(msg, location=signode)
if ast.objectType == 'enumerator':
self._add_enumerator_to_parent(ast)
        # note: handle_signature may be called multiple times per directive,
# if it has multiple signatures, so don't mess with the original options.
options = dict(self.options)
self.describe_signature(signode, ast, options)
return ast
def before_content(self) -> None:
lastSymbol: Symbol = self.env.temp_data['c:last_symbol']
assert lastSymbol
self.oldParentSymbol = self.env.temp_data['c:parent_symbol']
self.oldParentKey: LookupKey = self.env.ref_context['c:parent_key']
self.env.temp_data['c:parent_symbol'] = lastSymbol
self.env.ref_context['c:parent_key'] = lastSymbol.get_lookup_key()
def after_content(self) -> None:
self.env.temp_data['c:parent_symbol'] = self.oldParentSymbol
self.env.ref_context['c:parent_key'] = self.oldParentKey
def make_old_id(self, name: str) -> str:
"""Generate old styled node_id for C objects.
.. note:: Old Styled node_id was used until Sphinx-3.0.
This will be removed in Sphinx-5.0.
"""
return 'c.' + name
class CMemberObject(CObject):
object_type = 'member'
@property
def display_object_type(self) -> str:
# the distinction between var and member is only cosmetic
assert self.objtype in ('member', 'var')
return self.objtype
_function_doc_field_types = [
TypedField('parameter', label=_('Parameters'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='expr', typenames=('type',)),
GroupedField('retval', label=_('Return values'),
names=('retvals', 'retval'),
can_collapse=True),
Field('returnvalue', label=_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('returntype', label=_('Return type'), has_arg=False,
names=('rtype',)),
]
class CFunctionObject(CObject):
object_type = 'function'
doc_field_types = _function_doc_field_types.copy()
class CMacroObject(CObject):
object_type = 'macro'
doc_field_types = _function_doc_field_types.copy()
class CStructObject(CObject):
object_type = 'struct'
class CUnionObject(CObject):
object_type = 'union'
class CEnumObject(CObject):
object_type = 'enum'
class CEnumeratorObject(CObject):
object_type = 'enumerator'
class CTypeObject(CObject):
object_type = 'type'
class CNamespaceObject(SphinxDirective):
"""
This directive is just to tell Sphinx that we're documenting stuff in
namespace foo.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: OptionSpec = {}
def run(self) -> List[Node]:
rootSymbol = self.env.domaindata['c']['root_symbol']
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
symbol = rootSymbol
stack: List[Symbol] = []
else:
parser = DefinitionParser(self.arguments[0],
location=self.get_location(),
config=self.env.config)
try:
name = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=self.get_location())
name = _make_phony_error_name()
symbol = rootSymbol.add_name(name)
stack = [symbol]
self.env.temp_data['c:parent_symbol'] = symbol
self.env.temp_data['c:namespace_stack'] = stack
self.env.ref_context['c:parent_key'] = symbol.get_lookup_key()
return []
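# Usage sketch in reStructuredText (illustrative; the names are made up):
#
#   .. c:namespace:: my_library
#
#   .. c:var:: int counter
#
# documents ``counter`` as ``my_library.counter``. Passing ``NULL``, ``0``, or
# ``nullptr`` as the argument (see ``run`` above) resets to the global scope.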
class CNamespacePushObject(SphinxDirective):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: OptionSpec = {}
def run(self) -> List[Node]:
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
return []
parser = DefinitionParser(self.arguments[0],
location=self.get_location(),
config=self.env.config)
try:
name = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=self.get_location())
name = _make_phony_error_name()
oldParent = self.env.temp_data.get('c:parent_symbol', None)
if not oldParent:
oldParent = self.env.domaindata['c']['root_symbol']
symbol = oldParent.add_name(name)
stack = self.env.temp_data.get('c:namespace_stack', [])
stack.append(symbol)
self.env.temp_data['c:parent_symbol'] = symbol
self.env.temp_data['c:namespace_stack'] = stack
self.env.ref_context['c:parent_key'] = symbol.get_lookup_key()
return []
class CNamespacePopObject(SphinxDirective):
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec: OptionSpec = {}
def run(self) -> List[Node]:
stack = self.env.temp_data.get('c:namespace_stack', None)
if not stack or len(stack) == 0:
logger.warning("C namespace pop on empty stack. Defaulting to global scope.",
location=self.get_location())
stack = []
else:
stack.pop()
if len(stack) > 0:
symbol = stack[-1]
else:
symbol = self.env.domaindata['c']['root_symbol']
self.env.temp_data['c:parent_symbol'] = symbol
self.env.temp_data['c:namespace_stack'] = stack
        self.env.ref_context['c:parent_key'] = symbol.get_lookup_key()
return []
class AliasNode(nodes.Element):
def __init__(self, sig: str, aliasOptions: dict,
document: Any, env: "BuildEnvironment" = None,
parentKey: LookupKey = None) -> None:
super().__init__()
self.sig = sig
self.aliasOptions = aliasOptions
self.document = document
if env is not None:
if 'c:parent_symbol' not in env.temp_data:
root = env.domaindata['c']['root_symbol']
env.temp_data['c:parent_symbol'] = root
env.ref_context['c:parent_key'] = root.get_lookup_key()
self.parentKey = env.ref_context['c:parent_key']
else:
assert parentKey is not None
self.parentKey = parentKey
def copy(self) -> 'AliasNode':
return self.__class__(self.sig, self.aliasOptions, self.document,
env=None, parentKey=self.parentKey)
class AliasTransform(SphinxTransform):
default_priority = ReferencesResolver.default_priority - 1
def _render_symbol(self, s: Symbol, maxdepth: int, skipThis: bool,
aliasOptions: dict, renderOptions: dict,
document: Any) -> List[Node]:
if maxdepth == 0:
recurse = True
elif maxdepth == 1:
recurse = False
else:
maxdepth -= 1
recurse = True
nodes: List[Node] = []
if not skipThis:
signode = addnodes.desc_signature('', '')
nodes.append(signode)
s.declaration.describe_signature(signode, 'markName', self.env, renderOptions)
if recurse:
if skipThis:
childContainer: Union[List[Node], addnodes.desc] = nodes
else:
content = addnodes.desc_content()
desc = addnodes.desc()
content.append(desc)
desc.document = document
desc['domain'] = 'c'
# 'desctype' is a backwards compatible attribute
desc['objtype'] = desc['desctype'] = 'alias'
desc['noindex'] = True
childContainer = desc
for sChild in s.children:
if sChild.declaration is None:
continue
childNodes = self._render_symbol(
sChild, maxdepth=maxdepth, skipThis=False,
aliasOptions=aliasOptions, renderOptions=renderOptions,
document=document)
childContainer.extend(childNodes)
if not skipThis and len(desc.children) != 0:
nodes.append(content)
return nodes
def apply(self, **kwargs: Any) -> None:
for node in self.document.traverse(AliasNode):
node = cast(AliasNode, node)
sig = node.sig
parentKey = node.parentKey
try:
parser = DefinitionParser(sig, location=node,
config=self.env.config)
name = parser.parse_xref_object()
except DefinitionError as e:
logger.warning(e, location=node)
name = None
if name is None:
# could not be parsed, so stop here
signode = addnodes.desc_signature(sig, '')
signode.clear()
signode += addnodes.desc_name(sig, sig)
node.replace_self(signode)
continue
rootSymbol: Symbol = self.env.domains['c'].data['root_symbol']
parentSymbol: Symbol = rootSymbol.direct_lookup(parentKey)
if not parentSymbol:
print("Target: ", sig)
print("ParentKey: ", parentKey)
print(rootSymbol.dump(1))
assert parentSymbol # should be there
s = parentSymbol.find_declaration(
name, 'any',
matchSelf=True, recurseInAnon=True)
if s is None:
signode = addnodes.desc_signature(sig, '')
node.append(signode)
signode.clear()
signode += addnodes.desc_name(sig, sig)
logger.warning("Could not find C declaration for alias '%s'." % name,
location=node)
node.replace_self(signode)
continue
# Declarations like .. var:: int Missing::var
# may introduce symbols without declarations.
# But if we skip the root then it is ok to start recursion from it.
if not node.aliasOptions['noroot'] and s.declaration is None:
signode = addnodes.desc_signature(sig, '')
node.append(signode)
signode.clear()
signode += addnodes.desc_name(sig, sig)
logger.warning(
"Can not render C declaration for alias '%s'. No such declaration." % name,
location=node)
node.replace_self(signode)
continue
nodes = self._render_symbol(s, maxdepth=node.aliasOptions['maxdepth'],
skipThis=node.aliasOptions['noroot'],
aliasOptions=node.aliasOptions,
renderOptions=dict(), document=node.document)
node.replace_self(nodes)
class CAliasObject(ObjectDescription):
option_spec: OptionSpec = {
'maxdepth': directives.nonnegative_int,
'noroot': directives.flag,
}
def run(self) -> List[Node]:
"""
        On purpose this doesn't call the ObjectDescription version, but it is
        based on it: each alias signature may expand into multiple real
        signatures when 'noroot' is given.
"""
if ':' in self.name:
self.domain, self.objtype = self.name.split(':', 1)
else:
self.domain, self.objtype = '', self.name
node = addnodes.desc()
node.document = self.state.document
node['domain'] = self.domain
# 'desctype' is a backwards compatible attribute
node['objtype'] = node['desctype'] = self.objtype
node['noindex'] = True
self.names: List[str] = []
aliasOptions = {
'maxdepth': self.options.get('maxdepth', 1),
'noroot': 'noroot' in self.options,
}
if aliasOptions['noroot'] and aliasOptions['maxdepth'] == 1:
logger.warning("Error in C alias declaration."
" Requested 'noroot' but 'maxdepth' 1."
" When skipping the root declaration,"
" need 'maxdepth' 0 for infinite or at least 2.",
location=self.get_location())
signatures = self.get_signatures()
        for sig in signatures:
node.append(AliasNode(sig, aliasOptions, self.state.document, env=self.env))
return [node]
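# Usage sketch in reStructuredText (illustrative; the name is made up):
#
#   .. c:alias:: my_function
#      :maxdepth: 2
#
# re-renders the existing declaration of ``my_function`` at this point; per the
# options handled above, ``:maxdepth:`` controls how many levels of nested
# declarations are included and ``:noroot:`` skips the aliased root itself.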
class CXRefRole(XRefRole):
def process_link(self, env: BuildEnvironment, refnode: Element,
has_explicit_title: bool, title: str, target: str) -> Tuple[str, str]:
refnode.attributes.update(env.ref_context)
if not has_explicit_title:
# major hax: replace anon names via simple string manipulation.
# Can this actually fail?
title = anon_identifier_re.sub("[anonymous]", str(title))
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot + 1:]
return title, target
def run(self) -> Tuple[List[Node], List[system_message]]:
if not self.env.config['c_allow_pre_v3']:
return super().run()
text = self.text.replace('\n', ' ')
parser = DefinitionParser(text, location=self.get_location(),
config=self.env.config)
try:
parser.parse_xref_object()
# it succeeded, so let it through
return super().run()
except DefinitionError as eOrig:
# try as if it was an c:expr
parser.pos = 0
try:
ast = parser.parse_expression()
except DefinitionError:
# that didn't go well, just default back
return super().run()
classes = ['xref', 'c', 'c-texpr']
            parentSymbol = self.env.temp_data.get('c:parent_symbol', None)
if parentSymbol is None:
parentSymbol = self.env.domaindata['c']['root_symbol']
signode = nodes.inline(classes=classes)
ast.describe_signature(signode, 'markType', self.env, parentSymbol)
if self.env.config['c_warn_on_allowed_pre_v3']:
msg = "{}: Pre-v3 C type role ':c:type:`{}`' converted to ':c:expr:`{}`'."
msg += "\nThe original parsing error was:\n{}"
msg = msg.format(RemovedInSphinx60Warning.__name__, text, text, eOrig)
logger.warning(msg, location=self.get_location())
return [signode], []
class CExprRole(SphinxRole):
def __init__(self, asCode: bool) -> None:
super().__init__()
if asCode:
# render the expression as inline code
self.class_type = 'c-expr'
else:
# render the expression as inline text
self.class_type = 'c-texpr'
def run(self) -> Tuple[List[Node], List[system_message]]:
text = self.text.replace('\n', ' ')
parser = DefinitionParser(text, location=self.get_location(),
config=self.env.config)
# attempt to mimic XRefRole classes, except that...
try:
ast = parser.parse_expression()
except DefinitionError as ex:
logger.warning('Unparseable C expression: %r\n%s', text, ex,
location=self.get_location())
# see below
return [addnodes.desc_inline('c', text, text, classes=[self.class_type])], []
parentSymbol = self.env.temp_data.get('c:parent_symbol', None)
if parentSymbol is None:
parentSymbol = self.env.domaindata['c']['root_symbol']
# ...most if not all of these classes should really apply to the individual references,
# not the container node
signode = addnodes.desc_inline('c', classes=[self.class_type])
ast.describe_signature(signode, 'markType', self.env, parentSymbol)
return [signode], []
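# Usage sketch (illustrative): ``:c:expr:`sizeof(int) * 2``` renders the
# expression as inline code, while ``:c:texpr:`` renders it as inline text;
# both are parsed with ``parse_expression`` above and cross-reference
# identifiers that resolve against the current scope.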
class CDomain(Domain):
"""C language domain."""
name = 'c'
label = 'C'
object_types = {
# 'identifier' is the one used for xrefs generated in signatures, not in roles
'member': ObjType(_('member'), 'var', 'member', 'data', 'identifier'),
'var': ObjType(_('variable'), 'var', 'member', 'data', 'identifier'),
'function': ObjType(_('function'), 'func', 'identifier', 'type'),
'macro': ObjType(_('macro'), 'macro', 'identifier'),
'struct': ObjType(_('struct'), 'struct', 'identifier', 'type'),
'union': ObjType(_('union'), 'union', 'identifier', 'type'),
'enum': ObjType(_('enum'), 'enum', 'identifier', 'type'),
'enumerator': ObjType(_('enumerator'), 'enumerator', 'identifier'),
'type': ObjType(_('type'), 'identifier', 'type'),
# generated object types
'functionParam': ObjType(_('function parameter'), 'identifier', 'var', 'member', 'data'), # noqa
}
directives = {
'member': CMemberObject,
'var': CMemberObject,
'function': CFunctionObject,
'macro': CMacroObject,
'struct': CStructObject,
'union': CUnionObject,
'enum': CEnumObject,
'enumerator': CEnumeratorObject,
'type': CTypeObject,
# scope control
'namespace': CNamespaceObject,
'namespace-push': CNamespacePushObject,
'namespace-pop': CNamespacePopObject,
# other
'alias': CAliasObject
}
roles = {
'member': CXRefRole(),
'data': CXRefRole(),
'var': CXRefRole(),
'func': CXRefRole(fix_parens=True),
'macro': CXRefRole(),
'struct': CXRefRole(),
'union': CXRefRole(),
'enum': CXRefRole(),
'enumerator': CXRefRole(),
'type': CXRefRole(),
'expr': CExprRole(asCode=True),
'texpr': CExprRole(asCode=False)
}
initial_data: Dict[str, Union[Symbol, Dict[str, Tuple[str, str, str]]]] = {
'root_symbol': Symbol(None, None, None, None, None),
'objects': {}, # fullname -> docname, node_id, objtype
}
def clear_doc(self, docname: str) -> None:
if Symbol.debug_show_tree:
print("clear_doc:", docname)
print("\tbefore:")
print(self.data['root_symbol'].dump(1))
print("\tbefore end")
rootSymbol = self.data['root_symbol']
rootSymbol.clear_doc(docname)
if Symbol.debug_show_tree:
print("\tafter:")
print(self.data['root_symbol'].dump(1))
print("\tafter end")
print("clear_doc end:", docname)
def process_doc(self, env: BuildEnvironment, docname: str,
document: nodes.document) -> None:
if Symbol.debug_show_tree:
print("process_doc:", docname)
print(self.data['root_symbol'].dump(0))
print("process_doc end:", docname)
def process_field_xref(self, pnode: pending_xref) -> None:
pnode.attributes.update(self.env.ref_context)
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
if Symbol.debug_show_tree:
print("merge_domaindata:")
print("\tself:")
print(self.data['root_symbol'].dump(1))
print("\tself end")
print("\tother:")
print(otherdata['root_symbol'].dump(1))
print("\tother end")
print("merge_domaindata end")
self.data['root_symbol'].merge_with(otherdata['root_symbol'],
docnames, self.env)
ourObjects = self.data['objects']
for fullname, (fn, id_, objtype) in otherdata['objects'].items():
if fn in docnames:
if fullname not in ourObjects:
ourObjects[fullname] = (fn, id_, objtype)
# no need to warn on duplicates, the symbol merge already does that
def _resolve_xref_inner(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
typ: str, target: str, node: pending_xref,
contnode: Element) -> Tuple[Optional[Element], Optional[str]]:
parser = DefinitionParser(target, location=node, config=env.config)
try:
name = parser.parse_xref_object()
except DefinitionError as e:
logger.warning('Unparseable C cross-reference: %r\n%s', target, e,
location=node)
return None, None
parentKey: LookupKey = node.get("c:parent_key", None)
rootSymbol = self.data['root_symbol']
if parentKey:
parentSymbol: Symbol = rootSymbol.direct_lookup(parentKey)
if not parentSymbol:
print("Target: ", target)
print("ParentKey: ", parentKey)
print(rootSymbol.dump(1))
assert parentSymbol # should be there
else:
parentSymbol = rootSymbol
s = parentSymbol.find_declaration(name, typ,
matchSelf=True, recurseInAnon=True)
if s is None or s.declaration is None:
return None, None
# TODO: check role type vs. object type
declaration = s.declaration
displayName = name.get_display_string()
docname = s.docname
assert docname
return make_refnode(builder, fromdocname, docname,
declaration.get_newest_id(), contnode, displayName
), declaration.objectType
def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
typ: str, target: str, node: pending_xref,
contnode: Element) -> Optional[Element]:
return self._resolve_xref_inner(env, fromdocname, builder, typ,
target, node, contnode)[0]
def resolve_any_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
target: str, node: pending_xref, contnode: Element
) -> List[Tuple[str, Element]]:
with logging.suppress_logging():
retnode, objtype = self._resolve_xref_inner(env, fromdocname, builder,
'any', target, node, contnode)
if retnode:
return [('c:' + self.role_for_objtype(objtype), retnode)]
return []
def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]:
rootSymbol = self.data['root_symbol']
for symbol in rootSymbol.get_all_symbols():
if symbol.declaration is None:
continue
assert symbol.docname
fullNestedName = symbol.get_full_nested_name()
name = str(fullNestedName).lstrip('.')
dispname = fullNestedName.get_display_string().lstrip('.')
objectType = symbol.declaration.objectType
docname = symbol.docname
newestId = symbol.declaration.get_newest_id()
yield (name, dispname, objectType, docname, newestId, 1)
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_domain(CDomain)
app.add_config_value("c_id_attributes", [], 'env')
app.add_config_value("c_paren_attributes", [], 'env')
app.add_config_value("c_extra_keywords", _macroKeywords, 'env')
app.add_post_transform(AliasTransform)
app.add_config_value("c_allow_pre_v3", False, 'env')
app.add_config_value("c_warn_on_allowed_pre_v3", True, 'env')
return {
'version': 'builtin',
'env_version': 2,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
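# Configuration sketch for a project's ``conf.py`` (illustrative values only;
# the option names are the ones registered in ``setup`` above):
#
#   c_id_attributes = ["my_attribute_macro"]
#   c_paren_attributes = ["my_paren_attribute_macro"]
#   c_extra_keywords = ["bool", "complex"]
#   c_allow_pre_v3 = True
#   c_warn_on_allowed_pre_v3 = False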
|
the-stack_0_6337 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Gibbs sampling inference for (a special case of) STS models.
These methods implement Gibbs sampling steps for STS models that combine a
single LocalLevel component with a linear regression component, with conjugate
InverseGamma priors on the scale and a Gaussian prior on the weights. This model
class is somewhat general, in that we assume that any seasonal/holiday variation
can be encoded in the design matrix of the linear regression. The intent is to
support deployment of STS inference in latency-sensitive applications.
This Gibbs sampler tends to reach acceptable answers much more quickly than
fitting the same models by gradient-based methods (VI or HMC). Because it does
not marginalize out the linear Gaussian latents analytically, it may be more
prone to getting stuck at a single (perhaps suboptimal) posterior explanation;
however, in practice it often finds good solutions.
The speed advantage of Gibbs sampling in this model likely arises from a
combination of:
- Analytically sampling the regression weights once per sampling cycle, instead
of requiring a quadratically-expensive update at each timestep of Kalman
filtering (as in DynamicLinearRegression), or relying on gradient-based
approximate inference (as in LinearRegression).
- Exploiting conjugacy to sample the scale parameters directly.
- Specializing the Gibbs step for the latent level to the case of a
scalar process with identity transitions.
It would be possible to expand this sampler to support additional STS models,
potentially at a cost with respect to some of these performance advantages (and
additional code):
- To support general latent state-space models, one would augment the sampler
state to track all parameters in the model. Each component would need to
register Gibbs sampling steps for its parameters (assuming conjugate priors),
as a function of the sampled latent trajectory. The resampling steps for the
observation_noise_scale and level_scale parameters would then be replaced with
a generic loop over all parameters in the model.
- To support regression with non-Gaussian (e.g., spike-and-slab) priors, one
would need to write a custom prior-specific sampler, analogous to the current
`resample_weights` function.
- For specific models it may be possible to implement an efficient prior
  sampling algorithm, analogous to `LocalLevelStateSpaceModel._joint_sample_n`.
This may be significantly faster than the generic sampler and can speed up
the posterior sampling step for the latent trajectory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import sts
from tensorflow_probability.python import util as tfp_util
from tensorflow_probability.python.distributions import normal_conjugate_posteriors
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.sts.internal import util as sts_util
# The sampler state stores current values for each model parameter,
# and auxiliary quantities such as the latent level. It should have the property
# that `model.make_state_space_model(num_timesteps, GibbsSamplerState(...))`
# behaves properly -- i.e., that the state contains all model
# parameters *in the same order* as they are listed in `model.parameters`. This
# is currently enforced by construction in `build_gibbs_fittable_model`.
GibbsSamplerState = collections.namedtuple('GibbsSamplerState', [
'observation_noise_scale', 'level_scale', 'weights', 'level', 'seed'])
def build_model_for_gibbs_fitting(observed_time_series,
design_matrix,
weights_prior,
level_variance_prior,
observation_noise_variance_prior):
"""Builds a StructuralTimeSeries model instance that supports Gibbs sampling.
  To support Gibbs sampling, a model must have conjugate priors on all
scale and weight parameters, and must be constructed so that
`model.parameters` matches the parameters and ordering specified by the
  `GibbsSamplerState` namedtuple. Currently, this includes (only) models
consisting of the sum of a LocalLevel and a LinearRegression component.
Args:
    observed_time_series: optional `float` `Tensor` of shape `[..., T, 1]`
(omitting the trailing unit dimension is also supported when `T > 1`),
specifying an observed time series. May optionally be an instance of
`tfp.sts.MaskedTimeSeries`, which includes a mask `Tensor` to specify
timesteps with missing observations.
design_matrix: float `Tensor` of shape `concat([batch_shape,
[num_timesteps, num_features]])`. This may also optionally be
an instance of `tf.linalg.LinearOperator`.
weights_prior: An instance of `tfd.Normal` representing a scalar prior on
each regression weight. May have batch shape broadcastable to the batch
shape of `observed_time_series`.
level_variance_prior: An instance of `tfd.InverseGamma` representing a prior
on the level variance (`level_scale**2`) of a local level model. May have
batch shape broadcastable to the batch shape of `observed_time_series`.
observation_noise_variance_prior: An instance of `tfd.InverseGamma`
representing a prior on the observation noise variance (
`observation_noise_scale**2`). May have batch shape broadcastable to the
batch shape of `observed_time_series`.
Returns:
model: A `tfp.sts.StructuralTimeSeries` model instance.
"""
if not isinstance(weights_prior, tfd.Normal):
raise ValueError('Weights prior must be a univariate normal distribution.')
if not isinstance(level_variance_prior, tfd.InverseGamma):
raise ValueError(
'Level variance prior must be an inverse gamma distribution.')
if not isinstance(observation_noise_variance_prior, tfd.InverseGamma):
raise ValueError('Observation noise variance prior must be an inverse '
'gamma distribution.')
sqrt = tfb.Invert(tfb.Square()) # Converts variance priors to scale priors.
local_level = sts.LocalLevel(observed_time_series=observed_time_series,
level_scale_prior=sqrt(level_variance_prior),
name='local_level')
regression = sts.LinearRegression(design_matrix=design_matrix,
weights_prior=weights_prior,
name='regression')
model = sts.Sum([local_level, regression],
observed_time_series=observed_time_series,
observation_noise_scale_prior=sqrt(
observation_noise_variance_prior),
# The Gibbs sampling steps in this file do not account for an
# offset to the observed series. Instead, we assume the
# observed series has already been centered and
# scale-normalized.
constant_offset=0.)
model.supports_gibbs_sampling = True
return model
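# Usage sketch (illustrative; kept as a comment so importing this module has no
# side effects, and the data below is made up):
#
#   import numpy as np
#   import tensorflow_probability as tfp
#   tfd = tfp.distributions
#
#   observed_time_series = np.random.randn(100).astype(np.float32)
#   design_matrix = np.random.randn(100, 2).astype(np.float32)
#   model = build_model_for_gibbs_fitting(
#       observed_time_series=observed_time_series,
#       design_matrix=design_matrix,
#       weights_prior=tfd.Normal(loc=0., scale=1.),
#       level_variance_prior=tfd.InverseGamma(concentration=0.01, scale=0.01),
#       observation_noise_variance_prior=tfd.InverseGamma(
#           concentration=0.01, scale=0.01))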
def _get_design_matrix(model):
"""Returns the design matrix for an STS model with a regression component."""
design_matrices = [component.design_matrix for component in model.components
if hasattr(component, 'design_matrix')]
if not design_matrices:
raise ValueError('Model does not contain a regression component.')
if len(design_matrices) > 1:
raise ValueError('Model contains multiple regression components.')
return design_matrices[0]
def fit_with_gibbs_sampling(model,
observed_time_series,
num_results=2000,
num_warmup_steps=200,
compile_steps_with_xla=False,
initial_state=None,
seed=None):
"""Fits parameters for an STS model using Gibbs sampling."""
if not hasattr(model, 'supports_gibbs_sampling'):
raise ValueError('This STS model does not support Gibbs sampling. Models '
'for Gibbs sampling must be created using the '
'method `build_model_for_gibbs_fitting`.')
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
dtype = observed_time_series.dtype
# The canonicalized time series always has trailing dimension `1`,
# because although LinearGaussianSSMs support vector observations, STS models
# describe scalar time series only. For our purposes it'll be cleaner to
# remove this dimension.
observed_time_series = observed_time_series[..., 0]
batch_shape = prefer_static.shape(observed_time_series)[:-1]
if initial_state is None:
initial_state = GibbsSamplerState(
observation_noise_scale=tf.ones(batch_shape, dtype=dtype),
level_scale=tf.ones(batch_shape, dtype=dtype),
weights=tf.zeros(prefer_static.concat([
batch_shape,
_get_design_matrix(model).shape[-1:]], axis=0), dtype=dtype),
level=tf.zeros_like(observed_time_series),
seed=None) # Set below.
if seed and isinstance(seed, six.integer_types):
tf.random.set_seed(seed)
# Always use the passed-in `seed` arg, ignoring any seed in the initial state.
seeded_state = initial_state._asdict()
seeded_state['seed'] = samplers.sanitize_seed(
seed, salt='initial_GibbsSamplerState')
initial_state = GibbsSamplerState(**seeded_state)
sampler_loop_body = _build_sampler_loop_body(
model, observed_time_series, is_missing,
compile_steps_with_xla=compile_steps_with_xla,
seed=seed) # This is still an `int` seed, because the InverseGamma
# sampler currently requires stateful semantics.
samples = tf.scan(sampler_loop_body,
np.arange(num_warmup_steps + num_results),
initial_state)
return tf.nest.map_structure(lambda x: x[num_warmup_steps:], samples)
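# Usage sketch (illustrative, continuing the commented sketch above and again
# kept as a comment to avoid import-time side effects):
#
#   samples = fit_with_gibbs_sampling(model, observed_time_series,
#                                     num_results=500, num_warmup_steps=100,
#                                     seed=42)
#   # `samples` is a `GibbsSamplerState` whose fields have a leading dimension
#   # of `num_results`, e.g. `samples.weights` has shape `[500, 2]` for the
#   # two-feature design matrix in the sketch above.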
def one_step_predictive(model,
posterior_samples,
num_forecast_steps=0,
original_mean=0.,
original_scale=1.,
thin_every=10):
"""Constructs a one-step-ahead predictive distribution at every timestep.
Unlike the generic `tfp.sts.one_step_predictive`, this method uses the
latent levels from Gibbs sampling to efficiently construct a predictive
distribution that mixes over posterior samples. The predictive distribution
may also include additional forecast steps.
This method returns the predictive distributions for each timestep given
previous timesteps and sampled model parameters, `p(observed_time_series[t] |
observed_time_series[:t], weights, observation_noise_scale)`. Note that the
posterior values of the weights and noise scale will in general be informed
by observations from all timesteps *including the step being predicted*, so
this is not a strictly kosher probabilistic quantity, but in general we assume
that it's close, i.e., that the step being predicted had very small individual
impact on the overall parameter posterior.
Args:
    model: A `tfp.sts.StructuralTimeSeries` model instance. This must be of the
      form constructed by `build_model_for_gibbs_fitting`.
posterior_samples: A `GibbsSamplerState` instance in which each element is a
`Tensor` with initial dimension of size `num_samples`.
num_forecast_steps: Python `int` number of additional forecast steps to
append.
Default value: `0`.
original_mean: Optional scalar float `Tensor`, added to the predictive
distribution to undo the effect of input normalization.
Default value: `0.`
original_scale: Optional scalar float `Tensor`, used to rescale the
predictive distribution to undo the effect of input normalization.
Default value: `1.`
thin_every: Optional Python `int` factor by which to thin the posterior
samples, to reduce complexity of the predictive distribution. For example,
if `thin_every=10`, every `10`th sample will be used.
Default value: `10`.
Returns:
predictive_dist: A `tfd.MixtureSameFamily` instance of event shape
`[num_timesteps + num_forecast_steps]` representing the predictive
distribution of each timestep given previous timesteps.
"""
dtype = dtype_util.common_dtype([
posterior_samples.level_scale.dtype,
posterior_samples.observation_noise_scale.dtype,
posterior_samples.level.dtype,
original_mean,
original_scale], dtype_hint=tf.float32)
num_observed_steps = prefer_static.shape(posterior_samples.level)[-1]
original_mean = tf.convert_to_tensor(original_mean, dtype=dtype)
original_scale = tf.convert_to_tensor(original_scale, dtype=dtype)
thinned_samples = tf.nest.map_structure(lambda x: x[::thin_every],
posterior_samples)
# The local level model expects that the level at step t+1 is equal
# to the level at step t (plus transition noise of scale 'level_scale', which
# we account for below).
if num_forecast_steps > 0:
num_batch_dims = prefer_static.rank_from_shape(
prefer_static.shape(thinned_samples.level)) - 2
forecast_level = tf.tile(thinned_samples.level[..., -1:],
tf.concat([tf.ones([num_batch_dims + 1],
dtype=tf.int32),
[num_forecast_steps]], axis=0))
level_pred = tf.concat([thinned_samples.level[..., :1], # t == 0
thinned_samples.level[..., :-1] # 1 <= t < T
] + ([forecast_level] if num_forecast_steps > 0
else []),
axis=-1)
design_matrix = _get_design_matrix(
model).to_dense()[:num_observed_steps + num_forecast_steps]
regression_effect = tf.linalg.matvec(design_matrix, thinned_samples.weights)
y_mean = ((level_pred + regression_effect) *
original_scale[..., tf.newaxis] + original_mean[..., tf.newaxis])
num_steps_from_last_observation = tf.concat([
tf.ones([num_observed_steps], dtype=dtype),
tf.range(1, num_forecast_steps + 1, dtype=dtype)], axis=0)
y_scale = (original_scale * tf.sqrt(
thinned_samples.observation_noise_scale[..., tf.newaxis]**2 +
thinned_samples.level_scale[..., tf.newaxis]**2 *
num_steps_from_last_observation))
num_posterior_draws = prefer_static.shape(y_mean)[0]
return tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
logits=tf.zeros([num_posterior_draws], dtype=y_mean.dtype)),
components_distribution=tfd.Normal(
loc=dist_util.move_dimension(y_mean, 0, -1),
scale=dist_util.move_dimension(y_scale, 0, -1)))
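# Usage sketch (illustrative, continuing the commented sketches above):
#
#   predictive_dist = one_step_predictive(model, samples, num_forecast_steps=10)
#   predictive_mean = predictive_dist.mean()      # 100 observed + 10 forecast steps
#   predictive_stddev = predictive_dist.stddev()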
def _resample_weights(design_matrix, target_residuals, observation_noise_scale,
weights_prior_scale, is_missing=None, seed=None):
"""Samples regression weights from their conditional posterior.
This assumes a conjugate normal regression model,
```
weights ~ Normal(loc=0., covariance_matrix=weights_prior_scale**2 * I)
target_residuals ~ Normal(loc=matvec(design_matrix, weights),
covariance_matrix=observation_noise_scale**2 * I)
```
and returns a sample from `p(weights | target_residuals,
observation_noise_scale, design_matrix)`.
Args:
design_matrix: Float `Tensor` design matrix of shape
`[..., num_timesteps, num_features]`.
target_residuals: Float `Tensor` of shape `[..., num_observations]`
observation_noise_scale: Scalar float `Tensor` (with optional batch shape)
standard deviation of the iid observation noise.
weights_prior_scale: Scalar float `Tensor` (with optional batch shape)
specifying the standard deviation of the Normal prior on regression
weights.
is_missing: Optional `bool` `Tensor` of shape `[..., num_timesteps]`. A
`True` value indicates that the observation for that timestep is missing.
seed: Optional `Python` `int` seed controlling the sampled values.
Returns:
weights: Float `Tensor` of shape `[..., num_features]`, sampled from
the conditional posterior `p(weights | target_residuals,
observation_noise_scale, weights_prior_scale)`.
"""
if is_missing is not None:
# Replace design matrix with zeros at unobserved timesteps. This ensures
# they will not affect the posterior on weights.
design_matrix = tf.where(is_missing[..., tf.newaxis],
tf.zeros_like(design_matrix),
design_matrix)
design_shape = prefer_static.shape(design_matrix)
num_outputs = design_shape[-2]
num_features = design_shape[-1]
iid_prior_scale = tf.linalg.LinearOperatorScaledIdentity(
num_rows=num_features, multiplier=weights_prior_scale)
iid_likelihood_scale = tf.linalg.LinearOperatorScaledIdentity(
num_rows=num_outputs, multiplier=observation_noise_scale)
weights_mean, weights_prec = (
normal_conjugate_posteriors.mvn_conjugate_linear_update(
linear_transformation=design_matrix,
observation=target_residuals,
prior_scale=iid_prior_scale,
likelihood_scale=iid_likelihood_scale))
sampled_weights = weights_prec.cholesky().solvevec(
samplers.normal(
shape=prefer_static.shape(weights_mean),
dtype=design_matrix.dtype, seed=seed), adjoint=True)
return weights_mean + sampled_weights
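# For reference, with the scaled-identity prior and likelihood operators above,
# the conjugate update computed by `mvn_conjugate_linear_update` reduces to
#   posterior precision: P = I / weights_prior_scale**2
#                            + X^T X / observation_noise_scale**2
#   posterior mean:      m = P^{-1} X^T target_residuals
#                            / observation_noise_scale**2
# where X = design_matrix, and the returned sample is m + chol(P)^{-T} z with
# z ~ Normal(0, I).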
# `resample_level` requires an explicit builder function because the compiled
# code can only accept `Tensor` arguments, but we need to pass in the
# initial state prior which is a tfd.Distribution.
def _build_resample_level_fn(initial_state_prior,
is_missing=None,
compile_with_xla=False):
"""Builds a method to sample the latent level from its Gibbs posterior."""
@tf.function(autograph=False, experimental_compile=compile_with_xla)
def resample_level(observed_residuals,
level_scale,
observation_noise_scale,
sample_shape=(),
seed=None):
"""Uses Durbin-Koopman sampling to resample the latent level.
Durbin-Koopman sampling [1] is an efficient algorithm to sample from the
posterior latents of a linear Gaussian state space model. This method
implements the algorithm, specialized to the case of a one-dimensional
latent local level model.
[1] Durbin, J. and Koopman, S.J. (2002) A simple and efficient simulation
smoother for state space time series analysis.
Args:
observed_residuals: Float `Tensor` of shape `[..., num_observations]`,
specifying the centered observations `(x - loc)`.
level_scale: Float scalar `Tensor` (may contain batch dimensions)
specifying the standard deviation of the level random walk steps.
observation_noise_scale: Float scalar `Tensor` (may contain batch
dimensions) specifying the standard deviation of the observation noise.
sample_shape: Optional `int` `Tensor` shape of samples to draw.
seed: `int` `Tensor` of shape `[2]` controlling stateless sampling.
Returns:
level: Float `Tensor` resampled latent level, of shape
`[..., num_timesteps]`, where `...` concatenates the sample shape
with any batch shape from `observed_time_series`.
"""
num_timesteps = prefer_static.shape(observed_residuals)[-1]
ssm = sts.LocalLevelStateSpaceModel(
num_timesteps=num_timesteps,
initial_state_prior=initial_state_prior,
observation_noise_scale=observation_noise_scale,
level_scale=level_scale)
return ssm.posterior_sample(observed_residuals[..., tf.newaxis],
sample_shape=sample_shape,
mask=is_missing,
seed=seed)[..., 0]
return resample_level
def _resample_scale(prior_concentration, prior_scale,
observed_residuals, is_missing=None, seed=None):
"""Samples a scale parameter from its conditional posterior.
We assume the conjugate InverseGamma->Normal model:
```
scale ~ Sqrt(InverseGamma(prior_concentration, prior_scale))
for i in [1, ..., num_observations]:
x[i] ~ Normal(loc, scale)
```
in which `loc` is known, and return a sample from `p(scale | x)`.
Args:
prior_concentration: Float `Tensor` concentration parameter of the
InverseGamma prior distribution.
prior_scale: Float `Tensor` scale parameter of the InverseGamma prior
distribution.
observed_residuals: Float `Tensor` of shape `[..., num_observations]`,
specifying the centered observations `(x - loc)`.
is_missing: Optional `bool` `Tensor` of shape `[..., num_observations]`. A
`True` value indicates that the corresponding observation is missing.
seed: Optional `Python` `int` seed controlling the sampled value.
Returns:
sampled_scale: A `Tensor` sample from the posterior `p(scale | x)`.
"""
if is_missing is not None:
num_missing = tf.reduce_sum(tf.cast(is_missing, observed_residuals.dtype),
axis=-1)
num_observations = prefer_static.shape(observed_residuals)[-1]
if is_missing is not None:
observed_residuals = tf.where(is_missing,
tf.zeros_like(observed_residuals),
observed_residuals)
num_observations -= num_missing
variance_posterior = tfd.InverseGamma(
concentration=prior_concentration + num_observations / 2.,
scale=prior_scale + tf.reduce_sum(
tf.square(observed_residuals), axis=-1) / 2.)
return tf.sqrt(variance_posterior.sample(seed=seed))
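# Worked example (hypothetical numbers): with prior_concentration=0.01,
# prior_scale=0.01, observed_residuals=[1., -2., 1.] and no missing values,
# num_observations = 3 and the sum of squared residuals is 6., so the
# conditional posterior on the variance is InverseGamma(concentration=1.51,
# scale=3.01) and the returned value is the square root of one draw from it.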
def _build_sampler_loop_body(model,
observed_time_series,
is_missing=None,
compile_steps_with_xla=False,
seed=None):
"""Builds a Gibbs sampler for the given model and observed data.
Args:
    model: A `tfp.sts.StructuralTimeSeries` model instance. This must be of the
      form constructed by `build_model_for_gibbs_fitting`.
observed_time_series: Float `Tensor` time series of shape
`[..., num_timesteps]`.
is_missing: Optional `bool` `Tensor` of shape `[..., num_timesteps]`. A
`True` value indicates that the observation for that timestep is missing.
compile_steps_with_xla: Optional Python `bool`. If `True`, XLA compilation
is used to accelerate sampling steps when supported.
seed: Optional `Python` `int` seed controlling the sampled values.
Returns:
sampler_loop_body: Python callable that performs a single cycle of Gibbs
sampling. Its first argument is a `GibbsSamplerState`, and it returns a
new `GibbsSamplerState`. The second argument (passed by `tf.scan`) is
ignored.
"""
# Require that the model has exactly the parameters expected by
# `GibbsSamplerState`.
observation_noise_param, level_scale_param, weights_param = model.parameters
if (('observation_noise' not in observation_noise_param.name) or
('level_scale' not in level_scale_param.name) or
('weights' not in weights_param.name)):
raise ValueError('Model parameters {} do not match the expected sampler '
'state.'.format(model.parameters))
level_component = model.components[0]
if not isinstance(level_component, sts.LocalLevel):
raise ValueError('Expected the first model component to be an instance of '
'`tfp.sts.LocalLevel`; instead saw {}'.format(
level_component))
if is_missing is not None: # Ensure series does not contain NaNs.
observed_time_series = tf.where(is_missing,
tf.zeros_like(observed_time_series),
observed_time_series)
num_observed_steps = prefer_static.shape(observed_time_series)[-1]
design_matrix = _get_design_matrix(model).to_dense()[:num_observed_steps]
# Compile the functions that sample from Gibbs conditional posteriors.
# In principle, we should XLA-compile the entire loop body or even the entire
# `fit_with_gibbs_sampling` loop. However, XLA can't currently compile the
# gamma sampling op inside `_resample_scale` (b/141253568), so for now we
# leave that method uncompiled but compile the other two sampling steps.
# Empirically, the vast majority of sampling time is spent in
# `resample_level`, so compiling it gives us most of the wins.
# TODO(davmre): Wrap the entire sampling loop in `tf.function` while still
# XLA-compiling these pieces as appropriate.
# TODO(b/141253568): XLA-compile the entire sampling loop.
compiled_resample_level = _build_resample_level_fn(
initial_state_prior=level_component.initial_state_prior,
is_missing=is_missing,
compile_with_xla=compile_steps_with_xla)
compiled_resample_weights = tf.function(
_resample_weights, autograph=False,
experimental_compile=compile_steps_with_xla)
compiled_resample_scale = tf.function(
_resample_scale, autograph=False,
experimental_compile=False)
# Untransform scale priors -> variance priors by reaching thru Sqrt bijector.
level_scale_variance_prior = level_scale_param.prior.distribution
observation_noise_variance_prior = observation_noise_param.prior.distribution
# InverseGamma samplers are currently stateful, so we only need (and want)
# a single seed for each, shared across loop iterations.
strm = tfp_util.SeedStream(seed, salt='_sampler_loop_body')
observation_noise_scale_seed = strm()
level_scale_seed = strm()
def sampler_loop_body(previous_sample, _):
"""Runs one sampler iteration, resampling all model variables."""
(weights_seed,
level_seed,
loop_seed) = samplers.split_seed(
previous_sample.seed, n=3, salt='sampler_loop_body')
# We encourage a reasonable initialization by sampling the weights first,
# so at the first step they are regressed directly against the observed
# time series. If we instead sampled the level first it might 'explain away'
# some observed variation that we would ultimately prefer to explain through
# the regression weights, because the level can represent arbitrary
# variation, while the weights are limited to representing variation in the
# subspace given by the design matrix.
weights = compiled_resample_weights(
design_matrix=design_matrix,
target_residuals=(observed_time_series - previous_sample.level),
observation_noise_scale=previous_sample.observation_noise_scale,
weights_prior_scale=weights_param.prior.distribution.scale,
is_missing=is_missing,
seed=weights_seed)
regression_residuals = observed_time_series - tf.linalg.matvec(
design_matrix, weights)
level = compiled_resample_level(
observed_residuals=regression_residuals,
level_scale=previous_sample.level_scale,
observation_noise_scale=previous_sample.observation_noise_scale,
seed=level_seed)
# Estimate level scale from the empirical changes in level.
level_scale = compiled_resample_scale(
prior_scale=level_scale_variance_prior.scale,
prior_concentration=level_scale_variance_prior.concentration,
observed_residuals=level[..., 1:] - level[..., :-1],
is_missing=None, seed=level_scale_seed)
# Estimate noise scale from the residuals.
observation_noise_scale = compiled_resample_scale(
prior_scale=observation_noise_variance_prior.scale,
prior_concentration=observation_noise_variance_prior.concentration,
observed_residuals=regression_residuals - level,
is_missing=is_missing, seed=observation_noise_scale_seed)
return GibbsSamplerState(
observation_noise_scale=observation_noise_scale,
level_scale=level_scale,
weights=weights,
level=level,
seed=loop_seed)
return sampler_loop_body
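# Illustrative sketch (kept as comments so importing this module has no side
# effects): the returned `sampler_loop_body` is designed to be driven by
# `tf.scan`, with a `GibbsSamplerState` initializer and a dummy `elems` tensor
# whose length sets the number of Gibbs iterations. The names `initial_state`
# and `num_results` are assumptions for the example, not part of this module.
#
#   loop_body = _build_sampler_loop_body(model, observed_time_series, is_missing)
#   samples = tf.scan(loop_body,
#                     elems=tf.range(num_results),  # one element per iteration
#                     initializer=initial_state)    # a GibbsSamplerState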
|
the-stack_0_6338 | # *******************************************************************************************
# *******************************************************************************************
#
# File: gentest.py
# Date: 18th November 2020
# Purpose: Generates test code.
# Author: Paul Robson ([email protected])
#
# *******************************************************************************************
# *******************************************************************************************
import random
class GenerateTestCode(object):
def __init__(self,isFast,seed = 42,varCount = 10,fileName = "test.amo"):
if seed is None:
seed = random.randint(0,99999)
random.seed(seed)
print("Test using "+str(seed))
self.h = open(fileName,"w")
self.createAssert()
self.h.write("fast\n" if isFast else "slow\n")
self.h.write("proc main() {\n")
self.variables = {}
for i in range(0,varCount):
self.createVariable()
#
# Create variable, add to hash, output initialisation code.
#
def createVariable(self):
vName = ""
while vName == "" or vName in self.variables:
vName = "".join([chr(random.randint(0,25)+65) for x in range(0,random.randint(1,5))]).upper()
value = self.getRandom()
self.variables[vName] = value
self.h.write("\tvar {0} {1} !{0}\n".format(vName,value))
#
# Get one constant or variable
#
def pick(self):
if random.randint(0,3) == 0:
varNameList = [x for x in self.variables.keys()]
varName = varNameList[random.randint(0,len(varNameList)-1)]
return [varName,self.variables[varName]]
n = self.getRandom()
return [str(n),n]
#
# Get randomly ranged number
#
def getRandom(self):
return random.randint(0,255) if random.randint(0,1) == 0 else random.randint(0,65535)
#
# End the test code - check the variables and quit
#
def close(self):
for v in self.variables.keys():
self.createTest(v,str(self.variables[v]))
self.h.write("\texit.emulator()\n")
self.h.write("}\n")
self.h.close()
self.h = None
#
# Create assert procedure
#
def createAssert(self):
self.h.write("proc assert(n1,n2,s) {\n")
self.h.write("\tif (n1-n2 <> 0) { print.string(s);print.crlf();halt.program(); }\n")
self.h.write("}\n\n")
#
# Create one test
#
def createTest(self,n1,n2):
self.h.write('\tassert({0},{1},"{2}")\n'.format(n1,n2,n1+"="+n2))
#
# Check that assignments work.
#
def checkAssignment(self,n = 20):
for i in range(0,n):
varNameList = [x for x in self.variables.keys()]
varName = varNameList[random.randint(0,len(varNameList)-1)]
#
newValue = self.pick()
self.h.write("\t{0} !{1}\n".format(newValue[0],varName))
self.variables[varName] = newValue[1]
#
# Check Binary Arithmetic
#
def checkBinary(self,n = 20,opList = None):
allOps = "+-*/%&|^"
opList = opList if opList is not None else allOps
for i in range(0,n):
n1 = self.pick()
n2 = self.pick()
op = opList[random.randint(0,len(opList)-1)]
if (op != "/" and op != "%") or n2[1] != 0:
if op == "+":
r = (n1[1] + n2[1]) & 0xFFFF
elif op == "-":
r = (n1[1] - n2[1]) & 0xFFFF
elif op == "&":
r = (n1[1] & n2[1]) & 0xFFFF
elif op == "|":
r = (n1[1] | n2[1]) & 0xFFFF
elif op == "^":
r = (n1[1] ^ n2[1]) & 0xFFFF
elif op == "*":
r = (n1[1] * n2[1]) & 0xFFFF
elif op == "/":
r = int(n1[1] / n2[1]) & 0xFFFF
elif op == "%":
r = int(n1[1] % n2[1]) & 0xFFFF
else:
assert False
self.createTest(n1[0]+" "+op+" "+n2[0],str(r))
#
# Check unary arithmetic
#
def checkUnary(self,n = 20,opList = None):
allOps = "+-<>"
opList = opList if opList is not None else allOps
for i in range(0,n):
n1 = self.pick()
op = opList[random.randint(0,len(opList)-1)]
if op == "+":
r = (n1[1] + 1) & 0xFFFF
elif op == "-":
r = (n1[1] - 1) & 0xFFFF
elif op == "<":
r = (n1[1] << 1) & 0xFFFF
elif op == ">":
r = (n1[1] >> 1) & 0xFFFF
else:
assert False
self.createTest(n1[0]+" "+op+op,str(r))
if __name__ == "__main__":
gen = GenerateTestCode(True,None,20)
gen.checkAssignment(20)
gen.checkBinary(200)
gen.checkUnary(200)
gen.close() |
the-stack_0_6345 | #!/usr/bin/env python
import sys
from setuptools import setup
try:
from setuptools_rust import RustExtension
except ImportError:
import subprocess
errno = subprocess.call([sys.executable, "-m", "pip", "install", "setuptools-rust"])
if errno:
print("Please install setuptools-rust package")
raise SystemExit(errno)
else:
from setuptools_rust import RustExtension
setup_requires = ["setuptools-rust>=0.10.1", "wheel"]
install_requires = []
setup(
name="py-rustlib",
version="0.1.0",
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Rust",
"Operating System :: POSIX",
"Operating System :: MacOS :: MacOS X",
],
packages=["src"],
rust_extensions=[RustExtension("py_rustlib.py_rustlib")],
install_requires=install_requires,
setup_requires=setup_requires,
include_package_data=True,
zip_safe=False,
)
|
the-stack_0_6347 | import datetime
import os
import random
import re
import string
import sys
import unittest2
from mock import patch, Mock
import stripe
NOW = datetime.datetime.now()
DUMMY_CARD = {
'number': '4242424242424242',
'exp_month': NOW.month,
'exp_year': NOW.year + 4
}
DUMMY_DEBIT_CARD = {
'number': '4000056655665556',
'exp_month': NOW.month,
'exp_year': NOW.year + 4
}
DUMMY_CHARGE = {
'amount': 100,
'currency': 'usd',
'card': DUMMY_CARD
}
DUMMY_DISPUTE = {
'status': 'needs_response',
'currency': 'usd',
'metadata': {}
}
DUMMY_PLAN = {
'amount': 2000,
'interval': 'month',
'name': 'Amazing Gold Plan',
'currency': 'usd',
'id': ('stripe-test-gold-' +
''.join(random.choice(string.ascii_lowercase) for x in range(10)))
}
DUMMY_COUPON = {
'percent_off': 25,
'duration': 'repeating',
'duration_in_months': 5,
'metadata': {}
}
DUMMY_RECIPIENT = {
'name': 'John Doe',
'type': 'individual'
}
DUMMY_TRANSFER = {
'amount': 400,
'currency': 'usd',
'recipient': 'self'
}
DUMMY_APPLE_PAY_DOMAIN = {
'domain_name': 'test.com',
}
DUMMY_INVOICE_ITEM = {
'amount': 456,
'currency': 'usd',
}
SAMPLE_INVOICE = stripe.util.json.loads("""
{
"amount_due": 1305,
"attempt_count": 0,
"attempted": true,
"charge": "ch_wajkQ5aDTzFs5v",
"closed": true,
"customer": "cus_osllUe2f1BzrRT",
"date": 1338238728,
"discount": null,
"ending_balance": 0,
"id": "in_t9mHb2hpK7mml1",
"livemode": false,
"next_payment_attempt": null,
"object": "invoice",
"paid": true,
"period_end": 1338238728,
"period_start": 1338238716,
"starting_balance": -8695,
"subtotal": 10000,
"total": 10000,
"lines": {
"invoiceitems": [],
"prorations": [],
"subscriptions": [
{
"plan": {
"interval": "month",
"object": "plan",
"identifier": "expensive",
"currency": "usd",
"livemode": false,
"amount": 10000,
"name": "Expensive Plan",
"trial_period_days": null,
"id": "expensive"
},
"period": {
"end": 1340917128,
"start": 1338238728
},
"amount": 10000
}
]
}
}
""")
class StripeTestCase(unittest2.TestCase):
RESTORE_ATTRIBUTES = ('api_version', 'api_key')
def setUp(self):
super(StripeTestCase, self).setUp()
self._stripe_original_attributes = {}
for attr in self.RESTORE_ATTRIBUTES:
self._stripe_original_attributes[attr] = getattr(stripe, attr)
api_base = os.environ.get('STRIPE_API_BASE')
if api_base:
stripe.api_base = api_base
stripe.api_key = os.environ.get(
'STRIPE_API_KEY', 'tGN0bIwXnHdwOa85VABjPdSn8nWY7G7I')
def tearDown(self):
super(StripeTestCase, self).tearDown()
for attr in self.RESTORE_ATTRIBUTES:
setattr(stripe, attr, self._stripe_original_attributes[attr])
# Python < 2.7 compatibility
def assertRaisesRegexp(self, exception, regexp, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except exception as err:
if regexp is None:
return True
if isinstance(regexp, basestring):
regexp = re.compile(regexp)
if not regexp.search(str(err)):
raise self.failureException('"%s" does not match "%s"' %
(regexp.pattern, str(err)))
else:
raise self.failureException(
'%s was not raised' % (exception.__name__,))
class StripeUnitTestCase(StripeTestCase):
REQUEST_LIBRARIES = ['urlfetch', 'requests', 'pycurl']
if sys.version_info >= (3, 0):
REQUEST_LIBRARIES.append('urllib.request')
else:
REQUEST_LIBRARIES.append('urllib2')
def setUp(self):
super(StripeUnitTestCase, self).setUp()
self.request_patchers = {}
self.request_mocks = {}
for lib in self.REQUEST_LIBRARIES:
patcher = patch("stripe.http_client.%s" % (lib,))
self.request_mocks[lib] = patcher.start()
self.request_patchers[lib] = patcher
def tearDown(self):
super(StripeUnitTestCase, self).tearDown()
for patcher in self.request_patchers.itervalues():
patcher.stop()
class StripeApiTestCase(StripeTestCase):
def setUp(self):
super(StripeApiTestCase, self).setUp()
self.requestor_patcher = patch('stripe.api_requestor.APIRequestor')
requestor_class_mock = self.requestor_patcher.start()
self.requestor_mock = requestor_class_mock.return_value
def tearDown(self):
super(StripeApiTestCase, self).tearDown()
self.requestor_patcher.stop()
def mock_response(self, res):
self.requestor_mock.request = Mock(return_value=(res, 'reskey'))
class StripeResourceTest(StripeApiTestCase):
def setUp(self):
super(StripeResourceTest, self).setUp()
self.mock_response({})
class MyResource(stripe.resource.APIResource):
pass
class MySingleton(stripe.resource.SingletonAPIResource):
pass
class MyListable(stripe.resource.ListableAPIResource):
pass
class MyCreatable(stripe.resource.CreateableAPIResource):
pass
class MyUpdateable(stripe.resource.UpdateableAPIResource):
pass
class MyDeletable(stripe.resource.DeletableAPIResource):
pass
class MyComposite(stripe.resource.ListableAPIResource,
stripe.resource.CreateableAPIResource,
stripe.resource.UpdateableAPIResource,
stripe.resource.DeletableAPIResource):
pass
|
the-stack_0_6349 | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='scatterpolar.textfont',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'style'),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
the-stack_0_6351 | import argparse, os, pathlib
parser = argparse.ArgumentParser(description='Convert training data to PEPREC')
parser.add_argument('-f', '--files', nargs='+', help='files containing peptides')
parser.add_argument('-s', '--suffix', default='peprec', help='suffix for the output file names')
parser.add_argument('-o', '--output', default='.', help='where to save the output files')
if __name__ == '__main__':
args = parser.parse_args()
for infile in args.files:
dirname, basename = os.path.split(infile)
fname, ext = os.path.splitext(basename)
outfname = f'{fname}_{args.suffix}{ext}'
outdir = args.output if os.path.isabs(args.output) else os.path.abspath(args.output)
pathlib.Path(outdir).mkdir(parents=True, exist_ok=True)
outfile = os.path.join(outdir, outfname)
print(f'Printing PEPREC to {outfile}')
with open(infile, 'r') as inf, open(outfile, 'w') as outf:
pepid = 0
outf.write('spec_id modifications peptide charge\n')
for line in inf:
try:
tokens = line.rstrip('\r\n').split('\t')
charge, seq, score, y_ions, y_ints, b_ions, b_ints, y_frac = tokens
pepid = '_'.join([seq, charge])
outf.write(f'{pepid} - {seq} {charge}\n')
except ValueError as e:
print("Unexpected number of tokens found on line!")
e.args += (line,)
raise |
the-stack_0_6353 | def async_migrations_ok() -> bool:
from posthog.async_migrations.runner import is_posthog_version_compatible
from posthog.models.async_migration import AsyncMigration, MigrationStatus
for migration in AsyncMigration.objects.all():
migration_completed_or_running = migration.status in [
MigrationStatus.CompletedSuccessfully,
MigrationStatus.Running,
]
migration_in_range = is_posthog_version_compatible(migration.posthog_min_version, migration.posthog_max_version)
if not migration_completed_or_running and migration_in_range:
return False
return True
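# Minimal usage sketch (the response below is an assumption, not part of this
# module): the check is intended to gate health/readiness style endpoints, e.g.
#
#   if not async_migrations_ok():
#       return JsonResponse({"error": "async migrations pending"}, status=503)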
|
the-stack_0_6354 | from experiment import Experiment
import logging
import time
from traitlets import Enum, Float, Int, Unicode
try:
from tqdm import trange
except ImportError:
trange = range
class Main(Experiment):
#
# Description of the experiment. Used in the help message.
#
description = Unicode("Basic experiment.")
#
# Overwrite results path format. Supported vars: base_path, script_name, git, date, time
#
results_path_format = Unicode("{base_path}/{script_name}/{date}_{time}")
#
# Parameters of experiment
#
epochs = Int(100, config=True, help="Number of epochs")
lr = Float(0.1, config=True, help="Learning rate of training")
loss_type = Enum(("mse", "l1"), config=True, default_value="mse", help="Loss type.")
def run(self):
"""Running the experiment"""
logging.info("Starting experiment")
logging.info("Using {} loss".format(self.loss_type))
loss = 100
for i in trange(self.epochs):
loss = loss * self.lr
time.sleep(.5)
logging.info("Experiment finished")
if __name__ == "__main__":
main = Main()
main.initialize()
main.start()
|
the-stack_0_6356 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test a node with the -disablewallet option.
- Test that validateaddress RPC works when running with -disablewallet
- Test that it is not possible to mine to an invalid address.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class DisableWalletTest (BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-disablewallet"]]
def run_test (self):
# Make sure wallet is really disabled
assert_raises_rpc_error(-32601, 'Method not found', self.nodes[0].getwalletinfo)
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mbTYaNZm7TaPt5Du65aPsL8FNTktufYydC')
assert(x['isvalid'] == True)
if __name__ == '__main__':
DisableWalletTest ().main ()
|
the-stack_0_6357 | import discord
from discord.ext import commands
class Pingmodule():
def __init__(self, bot):
self.bot = bot
async def on_message(self, message):
if self.bot.user in message.mentions:
await message.add_reaction(':ping:456793379808870401')
def setup(bot):
bot.add_cog(Pingmodule(bot))
|
the-stack_0_6358 | # SPDX-License-Identifier: BSD-3-Clause
from typing import ClassVar, Mapping, cast
from softfab.ControlPage import ControlPage
from softfab.Page import InvalidRequest, PageProcessor
from softfab.configlib import ConfigDB
from softfab.joblib import JobDB
from softfab.pageargs import DictArg, PageArgs, StrArg
from softfab.request import Request
from softfab.response import Response
from softfab.users import User, checkPrivilege
from softfab.xmlgen import xml
class LoadExecuteDefault_POST(ControlPage['LoadExecuteDefault_POST.Arguments',
'LoadExecuteDefault_POST.Processor']):
class Arguments(PageArgs):
config = StrArg()
prod = DictArg(StrArg())
local = DictArg(StrArg())
param = DictArg(StrArg())
comment = StrArg('')
class Processor(PageProcessor['LoadExecuteDefault_POST.Arguments']):
configDB: ClassVar[ConfigDB]
jobDB: ClassVar[JobDB]
async def process(self,
req: Request['LoadExecuteDefault_POST.Arguments'],
user: User
) -> None:
args = req.args
products = cast(Mapping[str, str], args.prod)
localAt = cast(Mapping[str, str], args.local)
params = cast(Mapping[str, str], args.param)
if 'notify' in params and ':' not in params['notify']:
raise InvalidRequest('Invalid value of \'notify\' parameter')
try:
jobConfig = self.configDB[args.config]
except KeyError:
raise InvalidRequest(
f'Configuration "{args.config}" does not exist'
)
else:
jobDB = self.jobDB
for job in jobConfig.createJobs(
user.name, None, products, params, localAt
):
job.comment += '\n' + args.comment
jobDB.add(job)
def checkAccess(self, user: User) -> None:
checkPrivilege(user, 'j/c', 'start jobs')
async def writeReply(self, response: Response, proc: Processor) -> None:
response.writeXML(xml.ok)
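# Example of the form fields this page accepts (a sketch inferred from the
# `Arguments` class above; the product/parameter names and the exact wire
# encoding of `DictArg` keys are assumptions):
#
#   config=nightly-build
#   prod.binary=REV-1234                   # entries of the `prod` DictArg
#   local.binary=factory-pc-3              # entries of the `local` DictArg
#   param.notify=mailto:user@example.com   # must contain ':'
#   comment=Started via API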
|
the-stack_0_6359 | #!/usr/bin/env python3
from pathlib import Path
from textwrap import indent
import hashlib
import json
import urllib.request
CMAKE_SHA256_URL_TEMPLATE = "https://cmake.org/files/v{minor}/cmake-{full}-SHA-256.txt"
CMAKE_URL_TEMPLATE = "https://github.com/Kitware/CMake/releases/download/v{full}/{file}"
CMAKE_VERSIONS = [
"3.19.6",
"3.19.5",
"3.18.6",
"3.17.5",
"3.16.9",
"3.15.7",
"3.14.7",
]
CMAKE_TARGETS = {
"Darwin-x86_64": [
"@platforms//cpu:x86_64",
"@platforms//os:macos",
],
"Linux-aarch64": [
"@platforms//cpu:aarch64",
"@platforms//os:linux",
],
"Linux-x86_64": [
"@platforms//cpu:x86_64",
"@platforms//os:linux",
],
"macos-universal": [
"@platforms//os:macos",
],
"win32-x86": [
"@platforms//cpu:x86_32",
"@platforms//os:windows",
],
"win64-x64": [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
],
}
NINJA_URL_TEMPLATE = "https://github.com/ninja-build/ninja/releases/download/v{full}/ninja-{target}.zip"
NINJA_TARGETS = {
"linux": [
"@platforms//cpu:x86_64",
"@platforms//os:linux",
],
"mac": [
"@platforms//cpu:x86_64",
"@platforms//os:macos",
],
"win": [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
],
}
NINJA_VERSIONS = (
"1.10.2",
"1.10.1",
"1.10.0",
"1.9.0",
"1.8.2",
)
REPO_DEFINITION = """\
maybe(
http_archive,
name = "{name}",
urls = [
"{url}",
],
sha256 = "{sha256}",
strip_prefix = "{prefix}",
build_file_content = {template}.format(
bin = "{bin}",
),
)
"""
TOOLCHAIN_REPO_DEFINITION = """\
# buildifier: leave-alone
maybe(
prebuilt_toolchains_repository,
name = "{name}",
repos = {repos},
tool = "{tool}",
)
"""
REGISTER_TOOLCHAINS = """\
native.register_toolchains(
{toolchains}
)
"""
BZL_FILE_TEMPLATE = """\
\"\"\" A U T O G E N E R A T E D -- D O N O T M O D I F Y
@generated
This file is generated by prebuilt_toolchains.py
\"\"\"
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@rules_foreign_cc//toolchains:prebuilt_toolchains_repository.bzl", "prebuilt_toolchains_repository")
_CMAKE_BUILD_FILE = \"\"\"\\
load("@rules_foreign_cc//toolchains/native_tools:native_tools_toolchain.bzl", "native_tool_toolchain")
package(default_visibility = ["//visibility:public"])
filegroup(
name = "cmake_data",
srcs = glob(
[
"**",
],
exclude = [
"WORKSPACE",
"WORKSPACE.bazel",
"BUILD",
"BUILD.bazel",
],
),
)
native_tool_toolchain(
name = "cmake_tool",
path = "bin/{bin}",
target = ":cmake_data",
)
\"\"\"
_NINJA_BUILD_FILE = \"\"\"\\
load("@rules_foreign_cc//toolchains/native_tools:native_tools_toolchain.bzl", "native_tool_toolchain")
package(default_visibility = ["//visibility:public"])
filegroup(
name = "ninja_bin",
srcs = ["{{bin}}"],
)
native_tool_toolchain(
name = "ninja_tool",
path = "$(execpath :ninja_bin)",
target = ":ninja_bin",
)
\"\"\"
# buildifier: disable=unnamed-macro
def prebuilt_toolchains(cmake_version, ninja_version):
\"\"\"Register toolchains for pre-built cmake and ninja binaries
Args:
cmake_version (string): The target cmake version
ninja_version (string): The target ninja-build version
\"\"\"
_cmake_toolchains(cmake_version)
_ninja_toolchains(ninja_version)
_make_toolchains()
def _cmake_toolchains(version):
{cmake_definitions}
def _ninja_toolchains(version):
{ninja_definitions}
def _make_toolchains():
{make_definitions}
"""
def get_cmake_definitions() -> str:
"""Define a set of repositories and calls for registering `cmake` toolchains
Returns:
str: The Implementation of `_cmake_toolchains`
"""
archives = []
for version in CMAKE_VERSIONS:
major, minor, _patch = version.split(".")
version_archives = []
version_toolchains = {}
minor_version = "{}.{}".format(major, minor)
for line in urllib.request.urlopen(CMAKE_SHA256_URL_TEMPLATE.format(minor=minor_version, full=version)).readlines():
line = line.decode("utf-8").strip("\n ")
# Only take tar and zip files. The rest can't be easily decompressed.
if not line.endswith(".tar.gz") and not line.endswith(".zip"):
continue
# Only include the targets we care about.
plat_target = None
for target in CMAKE_TARGETS.keys():
if target in line:
plat_target = target
break
if not plat_target:
continue
sha256, file = line.split()
name = file.replace(".tar.gz", "").replace(".zip", "")
bin = "cmake.exe" if "win" in file.lower() else "cmake"
if "Darwin" in file or "macos" in file:
prefix = name + "/CMake.app/Contents"
else:
prefix = name
version_archives.append(
REPO_DEFINITION.format(
name=name,
sha256=sha256,
prefix=prefix,
url=CMAKE_URL_TEMPLATE.format(
full=version,
file=file
),
build="cmake",
template="_CMAKE_BUILD_FILE",
bin=bin,
)
)
version_toolchains.update({plat_target: name})
archives.append("\n".join(
[
" if \"{}\" == version:".format(version),
] + [indent(archive, " " * 8) for archive in version_archives])
)
toolchains_repos = {}
for target, name in version_toolchains.items():
toolchains_repos.update({name: CMAKE_TARGETS[target]})
archives.append(indent(TOOLCHAIN_REPO_DEFINITION.format(
name="cmake_{}_toolchains".format(version),
repos=indent(json.dumps(toolchains_repos, indent=4), " " * 4).lstrip(),
tool="cmake",
), " " * 8))
archives.append(indent(REGISTER_TOOLCHAINS.format(
toolchains="\n".join(
[indent("\"@cmake_{}_toolchains//:{}_toolchain\",".format(
version,
repo
), " " * 4) for repo in toolchains_repos])
), " " * 8))
archives.extend([
indent("return", " " * 8),
"",
])
archives.append(
indent("fail(\"Unsupported version: \" + str(version))", " " * 4))
return "\n".join([archive.rstrip(" ") for archive in archives])
def get_ninja_definitions() -> str:
"""Define a set of repositories and calls for registering `ninja` toolchains
Returns:
str: The Implementation of `_ninja_toolchains`
"""
archives = []
for version in NINJA_VERSIONS:
version_archives = []
version_toolchains = {}
for target in NINJA_TARGETS.keys():
url = NINJA_URL_TEMPLATE.format(
full=version,
target=target,
)
# Get sha256 (can be slow)
remote = urllib.request.urlopen(url)
total_read = 0
max_file_size = 100*1024*1024
hash = hashlib.sha256()
while True:
data = remote.read(4096)
total_read += 4096
if not data or total_read > max_file_size:
break
hash.update(data)
sha256 = hash.hexdigest()
name = "ninja_{}_{}".format(version, target)
version_archives.append(
REPO_DEFINITION.format(
name=name,
url=url,
sha256=sha256,
prefix="",
build="ninja",
template="_NINJA_BUILD_FILE",
bin="ninja.exe" if "win" in target else "ninja",
)
)
version_toolchains.update({target: name})
archives.append("\n".join(
[
" if \"{}\" == version:".format(version),
] + [indent(archive, " " * 8) for archive in version_archives])
)
toolchains_repos = {}
for target, name in version_toolchains.items():
toolchains_repos.update({name: NINJA_TARGETS[target]})
archives.append(indent(TOOLCHAIN_REPO_DEFINITION.format(
name="ninja_{}_toolchains".format(version),
repos=indent(json.dumps(toolchains_repos, indent=4), " " * 4).lstrip(),
tool="ninja",
), " " * 8))
archives.append(indent(REGISTER_TOOLCHAINS.format(
toolchains="\n".join(
[indent("\"@ninja_{}_toolchains//:{}_toolchain\",".format(
version,
repo
), " " * 4) for repo in toolchains_repos])
), " " * 8))
archives.extend([
indent("return", " " * 8),
"",
])
archives.append(
indent("fail(\"Unsupported version: \" + str(version))", " " * 4))
return "\n".join(archives)
def get_make_definitions() -> str:
"""Define a set of repositories and calls for registering `make` toolchains
Returns:
str: The Implementation of `_make_toolchains`
"""
return indent(
"# There are currently no prebuilt make binaries\npass",
" " * 4)
def main():
"""The main entrypoint of the toolchains generator"""
repos_bzl_file = Path(__file__).parent.absolute() / \
"prebuilt_toolchains.bzl"
repos_bzl_file.write_text(BZL_FILE_TEMPLATE.format(
cmake_definitions=get_cmake_definitions(),
ninja_definitions=get_ninja_definitions(),
make_definitions=get_make_definitions(),
))
if __name__ == "__main__":
main()
|
the-stack_0_6362 | import random
MOZNOSTI_Z = 'ABCDEFGV'
MOZNOSTI_NA = 'ABCDEFGWXYZ'
NAPOVEDA = """
Příkazy:
? - Vypíše tuto nápovědu.
U - Otočí kartu balíčku (z U do V).
Nebo doplní balíček U, pokud je prázdný.
EC - Přemístí karty z E na C.
Za E dosaď odkud karty vzít: A-G nebo V.
Za C dosaď kam chceš karty dát: A-G nebo W-Z.
E2G - Přemístí 2 karty z E na C
Za E dosaď odkud kartu vzít: A-G nebo V.
Za 2 dosaď počet karet.
Za C dosaď kam chceš kartu dát: A-G nebo W-Z.
Ctrl+C - Ukončí hru
"""
def popis_karty(karta):
hodnota, barva, licem_nahoru = karta
if not licem_nahoru:
return '[???]'
if hodnota == 1:
znak_hodnoty = 'A'
elif hodnota == 10:
znak_hodnoty = 'X'
elif hodnota == 11:
znak_hodnoty = 'J'
elif hodnota == 12:
znak_hodnoty = 'Q'
elif hodnota == 13:
znak_hodnoty = 'K'
else:
znak_hodnoty = str(hodnota)
if barva == 'Pi':
znak_barvy = '♠ '
elif barva == 'Sr':
znak_barvy = ' ♥'
elif barva == 'Ka':
znak_barvy = ' ♦'
elif barva == 'Kr':
znak_barvy = '♣ '
return '[{}{}]'.format(znak_hodnoty, znak_barvy)
def popis_balicku(balicek):
if balicek:
return popis_karty(balicek[-1])
else:
return '[ ]'
def vypis_hru(hra):
balicky, cile, sloupce = hra
print()
print(' U V W X Y Z')
print('{} {} {} {} {} {}'.format(
popis_balicku(balicky[0]),
popis_balicku(balicky[1]),
popis_balicku(cile[0]),
popis_balicku(cile[1]),
popis_balicku(cile[2]),
popis_balicku(cile[3]),
))
print()
print(' A B C D E F G')
max_delka = 0
for sloupec in sloupce:
if max_delka < len(sloupec):
max_delka = len(sloupec)
for i in range(max_delka):
for sloupec in sloupce:
if i < len(sloupec):
print(popis_karty(sloupec[i]), end=' ')
else:
print(' ', end=' ')
print()
print()
def otoc_kartu(karta, nove_otoceni):
hodnota, barva, licem_nahoru = karta
return hodnota, barva, nove_otoceni
def udelej_hru():
balicek = []
for hodnota in range(1, 14):
for barva in 'Pi', 'Sr', 'Ka', 'Kr':
balicek.append((hodnota, barva, False))
random.shuffle(balicek)
sloupce = []
for cislo_sloupce in range(7):
novy_sloupec = []
sloupce.append(novy_sloupec)
for i in range(cislo_sloupce):
karta = balicek.pop()
novy_sloupec.append(karta)
karta = balicek.pop()
novy_sloupec.append(otoc_kartu(karta, True))
balicky = balicek, []
cile = [], [], [], []
sloupce = tuple(sloupce)
return balicky, cile, sloupce
def hrac_vyhral(hra):
balicky, cile, sloupce = hra
for balicek in balicky:
if balicek:
return False
for sloupec in sloupce:
if sloupec:
return False
return True
def nacti_tah():
"""Zeptá se uživatele, co dělat
Stará se o výpis nápovědy.
Může vrátit buď řetězec 'U' ("lízni z balíčku"), nebo trojici
(z, pocet, na), kde:
- `z` je číslo místa, ze kterého karty vezmou (A-G: 0-6; V: 7)
- `pocet` je počet karet, které se přemisťují
- `na` je číslo místa, kam se karty mají dát (A-G: 0-6, W-Z: 7-10)
Zadá-li uživatel špatný vstup, zeptá se znova.
"""
while True:
retezec = input('Zadej tah: ')
retezec = retezec.upper()
if retezec.startswith('?'):
print(NAPOVEDA)
elif retezec == 'U':
return 'U'
elif len(retezec) < 2:
print('Nerozumím tahu')
elif retezec[0] in MOZNOSTI_Z and retezec[-1] in MOZNOSTI_NA:
if len(retezec) == 2:
pocet = 1
else:
try:
pocet = int(retezec[1:-1])
except ValueError:
print('"{}" není číslo'.format(retezec[1:-1]))
continue
tah = (MOZNOSTI_Z.index(retezec[0]), pocet,
MOZNOSTI_NA.index(retezec[-1]))
print(popis_tahu(tah))
return tah
else:
print('Nerozumím tahu')
def popis_tahu(tah):
if tah == 'U':
return 'Balíček'
else:
z, pocet, na = tah
return '{} karet z {} na {}'.format(
pocet, MOZNOSTI_Z[z], MOZNOSTI_NA[na])
def priprav_tah(hra, tah):
"""Zkontroluje, že je tah podle pravidel
Jako argument bere hru, a tah získaný z funkce `nacti_tah`.
Vrací buď řetězec 'U' ("lízni z balíčku"), nebo trojici
(zdrojovy_balicek, pocet, cilovy_balicek), kde `*_balicek` jsou přímo
seznamy, ze kterých/na které se budou karty přemisťovat, a `pocet` je počet
karet k přemístění.
Není-li tah podle pravidel, vynkce vyvolá výjimku `ValueError` s nějakou
rozumnou chybovou hláškou.
"""
balicky, cile, sloupce = hra
if tah == 'U':
return 'U'
else:
z, pocet, na = tah
if z == 7:
if pocet != 1:
raise ValueError('Z balíčku se nedá brát víc karet najednou')
zdrojovy_balicek = balicky[1]
else:
zdrojovy_balicek = sloupce[z]
if len(zdrojovy_balicek) < pocet:
raise ValueError('Na to není v {} dost karet!'.format(MOZNOSTI_Z[z]))
karty = zdrojovy_balicek[-pocet:]
for hodnota, barva, licem_nahoru in karty:
if not licem_nahoru:
raise ValueError('Nemůžeš přesouvat karty, které jsou rubem nahoru!')
if na < 7:
cilovy_balicek = sloupce[na]
if cilovy_balicek:
zkontroluj_postupku([cilovy_balicek[-1]] + karty)
else:
if karty[0][0] != 13:
raise ValueError('Do prázdného sloupečku smí jen král, {} nesedí!'.format(
popis_karty(karty[0])))
zkontroluj_postupku(karty)
else:
if pocet != 1:
raise ValueError('Do cíle se nedá dávat víc karet najednou')
hodnota, barva, otoceni = karty[0]
cilovy_balicek = cile[na - 7]
if cilovy_balicek:
hodnota_p, barva_p, otoceni_p = cilovy_balicek[-1]
if barva != barva_p:
raise ValueError('Cílová hromádka musí mít jednu barvu; {} na {} nesedí'.format(
popis_karty(karty[0]), popis_karty(cilovy_balicek[-1])))
if hodnota != hodnota_p + 1:
raise ValueError('Do cíle musíš skládat karty postupně od nejnižších; {} na {} nejde'.format(
popis_karty(karty[0]), popis_karty(cilovy_balicek[-1])))
else:
if hodnota != 1:
raise ValueError('Do prázdného cíle smí jen eso!')
return zdrojovy_balicek, pocet, cilovy_balicek
def udelej_tah(hra, info):
balicky, cile, sloupce = hra
if info == 'U':
if balicky[0]:
karta = balicky[0].pop()
karta = otoc_kartu(karta, True)
print('Karta z balíčku:', popis_karty(karta))
balicky[1].append(karta)
else:
print('Otáčím balíček')
while balicky[1]:
karta = balicky[1].pop()
karta = otoc_kartu(karta, False)
balicky[0].append(karta)
else:
zdrojovy_balicek, pocet, cilovy_balicek = info
karty = zdrojovy_balicek[-pocet:]
print('Přesouvám:', end=' ')
for karta in karty:
print(popis_karty(karta), end=' ')
print()
del zdrojovy_balicek[-len(karty):]
cilovy_balicek.extend(karty)
if zdrojovy_balicek and not zdrojovy_balicek[-1][2]:
karta = zdrojovy_balicek.pop()
karta = otoc_kartu(karta, True)
print('Otočená karta:', popis_karty(karta))
zdrojovy_balicek.append(karta)
def druh_barvy(barva):
if barva == 'Pi':
return 'černá'
elif barva == 'Sr':
return 'červená'
elif barva == 'Ka':
return 'červená'
elif barva == 'Kr':
return 'černá'
def zkontroluj_postupku(karty):
for karta_a, karta_b in zip(karty[1:], karty):
hodnota_a, barva_a, lic_a = karta_a
hodnota_b, barva_b, lic_b = karta_b
if hodnota_a != hodnota_b - 1:
raise ValueError('Musíš dělat sestupné postupky; {} a {} nesedí'.format(
popis_karty(karta_a), popis_karty(karta_b)))
if druh_barvy(barva_a) == druh_barvy(barva_b):
raise ValueError('Musíš střídat barvy; {} je {} a {} taky'.format(
popis_karty(karta_a), druh_barvy(barva_a), popis_karty(karta_b)))
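# A minimal game-loop driver using the pieces above (not part of this module;
# it only shows how the functions fit together):
#
#   hra = udelej_hru()
#   while not hrac_vyhral(hra):
#       vypis_hru(hra)
#       tah = nacti_tah()
#       try:
#           info = priprav_tah(hra, tah)
#       except ValueError as chyba:
#           print(chyba)
#           continue
#       udelej_tah(hra, info)
#   print('You win!')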
|
the-stack_0_6363 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from cinderclient import client
from cinderclient import api_versions
from cinderclient.v2 import availability_zones
from cinderclient.v2 import cgsnapshots
from cinderclient.v2 import consistencygroups
from cinderclient.v2 import capabilities
from cinderclient.v2 import limits
from cinderclient.v2 import pools
from cinderclient.v2 import qos_specs
from cinderclient.v2 import quota_classes
from cinderclient.v2 import quotas
from cinderclient.v2 import services
from cinderclient.v2 import volumes
from cinderclient.v2 import volume_snapshots
from cinderclient.v2 import volume_types
from cinderclient.v2 import volume_type_access
from cinderclient.v2 import volume_encryption_types
from cinderclient.v2 import volume_backups
from cinderclient.v2 import volume_backups_restore
from cinderclient.v2 import volume_transfers
class Client(object):
"""Top-level object to access the OpenStack Volume API.
Create an instance with your creds::
>>> client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL)
Then call methods on its managers::
>>> client.volumes.list()
...
"""
def __init__(self, username=None, api_key=None, project_id=None,
auth_url='', insecure=False, timeout=None, tenant_id=None,
proxy_tenant_id=None, proxy_token=None, region_name=None,
endpoint_type='publicURL', extensions=None,
service_type='volumev2', service_name=None,
volume_service_name=None, bypass_url=None, retries=0,
http_log_debug=False, cacert=None, auth_system='keystone',
auth_plugin=None, session=None, api_version=None,
logger=None, **kwargs):
# FIXME(comstud): Rename the api_key argument above when we
# know it's not being used as keyword argument
password = api_key
self.version = '2.0'
self.limits = limits.LimitsManager(self)
# extensions
self.volumes = volumes.VolumeManager(self)
self.volume_snapshots = volume_snapshots.SnapshotManager(self)
self.volume_types = volume_types.VolumeTypeManager(self)
self.volume_type_access = \
volume_type_access.VolumeTypeAccessManager(self)
self.volume_encryption_types = \
volume_encryption_types.VolumeEncryptionTypeManager(self)
self.qos_specs = qos_specs.QoSSpecsManager(self)
self.quota_classes = quota_classes.QuotaClassSetManager(self)
self.quotas = quotas.QuotaSetManager(self)
self.backups = volume_backups.VolumeBackupManager(self)
self.restores = volume_backups_restore.VolumeBackupRestoreManager(self)
self.transfers = volume_transfers.VolumeTransferManager(self)
self.services = services.ServiceManager(self)
self.consistencygroups = consistencygroups.\
ConsistencygroupManager(self)
self.cgsnapshots = cgsnapshots.CgsnapshotManager(self)
self.availability_zones = \
availability_zones.AvailabilityZoneManager(self)
self.pools = pools.PoolManager(self)
self.capabilities = capabilities.CapabilitiesManager(self)
self.api_version = api_version or api_versions.APIVersion(self.version)
# Add in any extensions...
if extensions:
for extension in extensions:
if extension.manager_class:
setattr(self, extension.name,
extension.manager_class(self))
if not logger:
logger = logging.getLogger(__name__)
self.client = client._construct_http_client(
username=username,
password=password,
project_id=project_id,
auth_url=auth_url,
insecure=insecure,
timeout=timeout,
tenant_id=tenant_id,
proxy_tenant_id=tenant_id,
proxy_token=proxy_token,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
service_name=service_name,
volume_service_name=volume_service_name,
bypass_url=bypass_url,
retries=retries,
http_log_debug=http_log_debug,
cacert=cacert,
auth_system=auth_system,
auth_plugin=auth_plugin,
session=session,
api_version=self.api_version,
logger=logger,
**kwargs)
def authenticate(self):
"""Authenticate against the server.
Normally this is called automatically when you first access the API,
but you can call this method to force authentication right now.
Returns on success; raises :exc:`exceptions.Unauthorized` if the
credentials are wrong.
"""
self.client.authenticate()
def get_volume_api_version_from_endpoint(self):
return self.client.get_volume_api_version_from_endpoint()
|
the-stack_0_6365 | from typing import List
import networkx as nx
from pycid.analyze.requisite_graph import requisite_graph
from pycid.core.cid import CID
def admits_voi(cid: CID, decision: str, node: str) -> bool:
r"""Return True if cid admits value of information for node.
- A CID admits value of information for a node X if:
i) X is not a descendant of the decision node, D.
ii) X is d-connected to U given Fa_D \ {X}, where U ∈ U ∩ Desc(D)
("Agent Incentives: a Causal Perspective" by Everitt, Carey, Langlois, Ortega, and Legg, 2020)
"""
if len(cid.agents) > 1:
raise ValueError(
f"This CID has {len(cid.agents)} agents. This incentive is currently only valid for CIDs with one agent."
)
if node not in cid.nodes:
raise KeyError(f"{node} is not present in the cid")
if decision not in cid.nodes:
raise KeyError(f"{decision} is not present in the cid")
if not cid.sufficient_recall():
raise ValueError("Voi only implemented graphs with sufficient recall")
if node in nx.descendants(cid, decision) or node == decision:
return False
cid2 = cid.copy_without_cpds()
cid2.add_edge(node, decision)
req_graph = requisite_graph(cid2)
return node in req_graph.get_parents(decision)
def admits_voi_list(cid: CID, decision: str) -> List[str]:
"""
Return the list of nodes with possible value of information for decision.
"""
non_descendants = set(cid.nodes) - set(nx.descendants(cid, decision)) - {decision}
return [x for x in non_descendants if admits_voi(cid, decision, x)]
def quantitative_voi(cid: CID, decision: str, node: str) -> float:
r"""
Returns the quantitative value of information (voi) of a variable corresponding to a node in a parameterised CID.
A node X ∈ V \ Desc(D) in a single-decision CID has quantitative voi equal to
EU_max[M(X->D)] - EU_max[M(X \ ->D)]
ie the maximum utility attainable in M(X->D) minus the maximum utility attainable in M(X \ ->D) where
- M(X->D) is the CID that contains the directed edge X -> D
- M(X \ ->D) is the CID without the directed edge X -> D.
("Agent Incentives: a Causal Perspective" by Everitt, Carey, Langlois, Ortega, and Legg, 2020)
"""
if node not in cid.nodes:
raise KeyError(f"{node} is not present in the cid")
if node in {decision}.union(set(nx.descendants(cid, decision))):
raise ValueError(
f"{node} is a decision node or is a descendent of the decision node. \
VOI only applies to nodes which are not descendents of the decision node."
)
new_cid = cid.copy()
new_cid.add_edge(node, decision)
new_cid.impute_optimal_policy()
ev1: float = new_cid.expected_utility({})
new_cid.remove_all_decision_rules()
new_cid.remove_edge(node, decision)
new_cid.impute_optimal_policy()
ev2: float = new_cid.expected_utility({})
return ev1 - ev2
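# Usage sketch (kept as comments to avoid import-time work). The constructor
# call assumes pycid's `CID(edges, decisions=..., utilities=...)` signature;
# adjust if the actual API differs.
#
#   import pycid
#   cid = pycid.CID([("S", "D"), ("D", "U"), ("S", "U")],
#                   decisions=["D"], utilities=["U"])
#   admits_voi(cid, "D", "S")     # True: S is a non-descendant d-connected to U
#   admits_voi_list(cid, "D")     # ["S"]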
|
the-stack_0_6366 | # Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
import tempfile
import shutil
import zipfile
import io
import itertools
import struct
from abc import ABC, abstractmethod
from contextlib import closing
import av
import numpy as np
from pyunpack import Archive
from PIL import Image, ImageFile
import open3d as o3d
import pydicom
from cvat.apps.engine.utils import rotate_image
from cvat.apps.engine.models import DimensionType
# fixes: "OSError:broken data stream" when executing line 72 while loading images downloaded from the web
# see: https://stackoverflow.com/questions/42462431/oserror-broken-data-stream-when-reading-image-file
ImageFile.LOAD_TRUNCATED_IMAGES = True
from cvat.apps.engine.mime_types import mimetypes
from utils.dataset_manifest import VideoManifestManager, ImageManifestManager
def get_mime(name):
for type_name, type_def in MEDIA_TYPES.items():
if type_def['has_mime_type'](name):
return type_name
return 'unknown'
def create_tmp_dir():
return tempfile.mkdtemp(prefix='cvat-', suffix='.data')
def delete_tmp_dir(tmp_dir):
if tmp_dir:
shutil.rmtree(tmp_dir)
def files_to_ignore(directory):
ignore_files = ('__MSOSX', '._.DS_Store', '__MACOSX', '.DS_Store')
if not any(ignore_file in directory for ignore_file in ignore_files):
return True
return False
class IMediaReader(ABC):
def __init__(self, source_path, step, start, stop, dimension):
self._source_path = sorted(source_path)
self._step = step
self._start = start
self._stop = stop
self._dimension = dimension
@abstractmethod
def __iter__(self):
pass
@abstractmethod
def get_preview(self):
pass
@abstractmethod
def get_progress(self, pos):
pass
@staticmethod
def _get_preview(obj):
PREVIEW_SIZE = (256, 256)
if isinstance(obj, io.IOBase):
preview = Image.open(obj)
else:
preview = obj
preview.thumbnail(PREVIEW_SIZE)
return preview.convert('RGB')
@abstractmethod
def get_image_size(self, i):
pass
def __len__(self):
return len(self.frame_range)
@property
def frame_range(self):
return range(self._start, self._stop, self._step)
class ImageListReader(IMediaReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
if not source_path:
raise Exception('No image found')
if stop is None:
stop = len(source_path)
else:
stop = min(len(source_path), stop + 1)
step = max(step, 1)
assert stop > start
super().__init__(
source_path=source_path,
step=step,
start=start,
stop=stop,
dimension=dimension
)
def __iter__(self):
for i in range(self._start, self._stop, self._step):
yield (self.get_image(i), self.get_path(i), i)
def filter(self, callback):
source_path = list(filter(callback, self._source_path))
ImageListReader.__init__(
self,
source_path,
step=self._step,
start=self._start,
stop=self._stop,
dimension=self._dimension
)
def get_path(self, i):
return self._source_path[i]
def get_image(self, i):
return self._source_path[i]
def get_progress(self, pos):
return (pos - self._start + 1) / (self._stop - self._start)
def get_preview(self):
if self._dimension == DimensionType.DIM_3D:
fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb")
else:
fp = open(self._source_path[0], "rb")
return self._get_preview(fp)
def get_image_size(self, i):
if self._dimension == DimensionType.DIM_3D:
with open(self.get_path(i), 'rb') as f:
properties = ValidateDimension.get_pcd_properties(f)
return int(properties["WIDTH"]), int(properties["HEIGHT"])
img = Image.open(self._source_path[i])
return img.width, img.height
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
# FIXME
ImageListReader.__init__(self,
source_path=source_files,
step=step,
start=start,
stop=stop
)
self._dimension = dimension
@property
def absolute_source_paths(self):
return [self.get_path(idx) for idx, _ in enumerate(self._source_path)]
class DirectoryReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
image_paths = []
for source in source_path:
for root, _, files in os.walk(source):
paths = [os.path.join(root, f) for f in files]
paths = filter(lambda x: get_mime(x) == 'image', paths)
image_paths.extend(paths)
super().__init__(
source_path=image_paths,
step=step,
start=start,
stop=stop,
dimension=dimension,
)
class ArchiveReader(DirectoryReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
self._archive_source = source_path[0]
extract_dir = source_path[1] if len(source_path) > 1 else os.path.dirname(source_path[0])
Archive(self._archive_source).extractall(extract_dir)
if extract_dir == os.path.dirname(source_path[0]):
os.remove(self._archive_source)
super().__init__(
source_path=[extract_dir],
step=step,
start=start,
stop=stop,
dimension=dimension
)
class PdfReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
if not source_path:
raise Exception('No PDF found')
self._pdf_source = source_path[0]
_basename = os.path.splitext(os.path.basename(self._pdf_source))[0]
_counter = itertools.count()
def _make_name():
for page_num in _counter:
yield '{}{:09d}.jpeg'.format(_basename, page_num)
from pdf2image import convert_from_path
self._tmp_dir = os.path.dirname(source_path[0])
os.makedirs(self._tmp_dir, exist_ok=True)
# Avoid OOM: https://github.com/openvinotoolkit/cvat/issues/940
paths = convert_from_path(self._pdf_source,
last_page=stop, paths_only=True,
output_folder=self._tmp_dir, fmt="jpeg", output_file=_make_name())
os.remove(source_path[0])
super().__init__(
source_path=paths,
step=step,
start=start,
stop=stop,
dimension=dimension,
)
class ZipReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
self._zip_source = zipfile.ZipFile(source_path[0], mode='r')
self.extract_dir = source_path[1] if len(source_path) > 1 else None
file_list = [f for f in self._zip_source.namelist() if files_to_ignore(f) and get_mime(f) == 'image']
super().__init__(file_list, step=step, start=start, stop=stop, dimension=dimension)
def __del__(self):
self._zip_source.close()
def get_preview(self):
if self._dimension == DimensionType.DIM_3D:
# TODO
fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb")
return self._get_preview(fp)
io_image = io.BytesIO(self._zip_source.read(self._source_path[0]))
return self._get_preview(io_image)
def get_image_size(self, i):
if self._dimension == DimensionType.DIM_3D:
with open(self.get_path(i), 'rb') as f:
properties = ValidateDimension.get_pcd_properties(f)
return int(properties["WIDTH"]), int(properties["HEIGHT"])
img = Image.open(io.BytesIO(self._zip_source.read(self._source_path[i])))
return img.width, img.height
def get_image(self, i):
if self._dimension == DimensionType.DIM_3D:
return self.get_path(i)
return io.BytesIO(self._zip_source.read(self._source_path[i]))
def get_zip_filename(self):
return self._zip_source.filename
def get_path(self, i):
if self._zip_source.filename:
return os.path.join(os.path.dirname(self._zip_source.filename), self._source_path[i]) \
if not self.extract_dir else os.path.join(self.extract_dir, self._source_path[i])
else: # necessary for mime_type definition
return self._source_path[i]
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
super().reconcile(
source_files=source_files,
step=step,
start=start,
stop=stop,
dimension=dimension,
)
def extract(self):
self._zip_source.extractall(self.extract_dir if self.extract_dir else os.path.dirname(self._zip_source.filename))
if not self.extract_dir:
os.remove(self._zip_source.filename)
class VideoReader(IMediaReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
super().__init__(
source_path=source_path,
step=step,
start=start,
stop=stop + 1 if stop is not None else stop,
dimension=dimension,
)
def _has_frame(self, i):
if i >= self._start:
if (i - self._start) % self._step == 0:
if self._stop is None or i < self._stop:
return True
return False
def _decode(self, container):
frame_num = 0
for packet in container.demux():
if packet.stream.type == 'video':
for image in packet.decode():
frame_num += 1
if self._has_frame(frame_num - 1):
if packet.stream.metadata.get('rotate'):
old_image = image
image = av.VideoFrame().from_ndarray(
rotate_image(
image.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
)
image.pts = old_image.pts
yield (image, self._source_path[0], image.pts)
def __iter__(self):
container = self._get_av_container()
source_video_stream = container.streams.video[0]
source_video_stream.thread_type = 'AUTO'
return self._decode(container)
def get_progress(self, pos):
duration = self._get_duration()
return pos / duration if duration else None
def _get_av_container(self):
if isinstance(self._source_path[0], io.BytesIO):
self._source_path[0].seek(0) # required for re-reading
return av.open(self._source_path[0])
def _get_duration(self):
container = self._get_av_container()
stream = container.streams.video[0]
duration = None
if stream.duration:
duration = stream.duration
else:
# may have a DURATION in format like "01:16:45.935000000"
duration_str = stream.metadata.get("DURATION", None)
tb_denominator = stream.time_base.denominator
if duration_str and tb_denominator:
_hour, _min, _sec = duration_str.split(':')
duration_sec = 60*60*float(_hour) + 60*float(_min) + float(_sec)
duration = duration_sec * tb_denominator
return duration
def get_preview(self):
container = self._get_av_container()
stream = container.streams.video[0]
preview = next(container.decode(stream))
return self._get_preview(preview.to_image() if not stream.metadata.get('rotate') \
else av.VideoFrame().from_ndarray(
rotate_image(
preview.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
).to_image()
)
def get_image_size(self, i):
image = (next(iter(self)))[0]
return image.width, image.height
class FragmentMediaReader:
def __init__(self, chunk_number, chunk_size, start, stop, step=1):
self._start = start
self._stop = stop + 1 # up to the last inclusive
self._step = step
self._chunk_number = chunk_number
self._chunk_size = chunk_size
self._start_chunk_frame_number = \
self._start + self._chunk_number * self._chunk_size * self._step
self._end_chunk_frame_number = min(self._start_chunk_frame_number \
+ (self._chunk_size - 1) * self._step + 1, self._stop)
self._frame_range = self._get_frame_range()
@property
def frame_range(self):
return self._frame_range
def _get_frame_range(self):
frame_range = []
for idx in range(self._start, self._stop, self._step):
if idx < self._start_chunk_frame_number:
continue
elif idx < self._end_chunk_frame_number and \
not ((idx - self._start_chunk_frame_number) % self._step):
frame_range.append(idx)
elif (idx - self._start_chunk_frame_number) % self._step:
continue
else:
break
return frame_range
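    # Worked example: with start=0, stop=20, step=2, chunk_size=3 and
    # chunk_number=1, the chunk starts at frame 0 + 1*3*2 = 6, ends before
    # min(6 + (3-1)*2 + 1, 21) = 11, and frame_range comes out as [6, 8, 10].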
class ImageDatasetManifestReader(FragmentMediaReader):
def __init__(self, manifest_path, **kwargs):
super().__init__(**kwargs)
self._manifest = ImageManifestManager(manifest_path)
self._manifest.init_index()
def __iter__(self):
for idx in self._frame_range:
yield self._manifest[idx]
class VideoDatasetManifestReader(FragmentMediaReader):
def __init__(self, manifest_path, **kwargs):
self.source_path = kwargs.pop('source_path')
super().__init__(**kwargs)
self._manifest = VideoManifestManager(manifest_path)
self._manifest.init_index()
def _get_nearest_left_key_frame(self):
if self._start_chunk_frame_number >= \
self._manifest[len(self._manifest) - 1].get('number'):
left_border = len(self._manifest) - 1
else:
left_border = 0
delta = len(self._manifest)
while delta:
step = delta // 2
cur_position = left_border + step
if self._manifest[cur_position].get('number') < self._start_chunk_frame_number:
cur_position += 1
left_border = cur_position
delta -= step + 1
else:
delta = step
if self._manifest[cur_position].get('number') > self._start_chunk_frame_number:
left_border -= 1
frame_number = self._manifest[left_border].get('number')
timestamp = self._manifest[left_border].get('pts')
return frame_number, timestamp
def __iter__(self):
start_decode_frame_number, start_decode_timestamp = self._get_nearest_left_key_frame()
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = next(stream for stream in container.streams if stream.type == 'video')
video_stream.thread_type = 'AUTO'
container.seek(offset=start_decode_timestamp, stream=video_stream)
frame_number = start_decode_frame_number - 1
for packet in container.demux(video_stream):
for frame in packet.decode():
frame_number += 1
if frame_number in self._frame_range:
if video_stream.metadata.get('rotate'):
frame = av.VideoFrame().from_ndarray(
rotate_image(
frame.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
)
yield frame
elif frame_number < self._frame_range[-1]:
continue
else:
return
class IChunkWriter(ABC):
def __init__(self, quality, dimension=DimensionType.DIM_2D):
self._image_quality = quality
self._dimension = dimension
@staticmethod
def _compress_image(image_path, quality):
image = image_path.to_image() if isinstance(image_path, av.VideoFrame) else Image.open(image_path)
# Ensure image data fits into 8bit per pixel before RGB conversion as PIL clips values on conversion
if image.mode == "I":
# Image mode is 32bit integer pixels.
# Autoscale pixels by factor 2**8 / im_data.max() to fit into 8bit
im_data = np.array(image)
im_data = im_data * (2**8 / im_data.max())
image = Image.fromarray(im_data.astype(np.int32))
converted_image = image.convert('RGB')
image.close()
buf = io.BytesIO()
converted_image.save(buf, format='JPEG', quality=quality, optimize=True)
buf.seek(0)
width, height = converted_image.size
converted_image.close()
return width, height, buf
@abstractmethod
def save_as_chunk(self, images, chunk_path):
pass
class ZipChunkWriter(IChunkWriter):
def save_as_chunk(self, images, chunk_path):
with zipfile.ZipFile(chunk_path, 'x') as zip_chunk:
for idx, (image, path, _) in enumerate(images):
arcname = '{:06d}{}'.format(idx, os.path.splitext(path)[1])
if isinstance(image, io.BytesIO):
zip_chunk.writestr(arcname, image.getvalue())
else:
zip_chunk.write(filename=image, arcname=arcname)
        # return an empty list because ZipChunkWriter writes files as-is
        # and does not decode them to get image sizes.
return []
class ZipCompressedChunkWriter(IChunkWriter):
def save_as_chunk(self, images, chunk_path):
image_sizes = []
with zipfile.ZipFile(chunk_path, 'x') as zip_chunk:
for idx, (image, _, _) in enumerate(images):
if self._dimension == DimensionType.DIM_2D:
w, h, image_buf = self._compress_image(image, self._image_quality)
extension = "jpeg"
else:
image_buf = open(image, "rb") if isinstance(image, str) else image
properties = ValidateDimension.get_pcd_properties(image_buf)
w, h = int(properties["WIDTH"]), int(properties["HEIGHT"])
extension = "pcd"
image_buf.seek(0, 0)
image_buf = io.BytesIO(image_buf.read())
image_sizes.append((w, h))
arcname = '{:06d}.{}'.format(idx, extension)
zip_chunk.writestr(arcname, image_buf.getvalue())
return image_sizes
class Mpeg4ChunkWriter(IChunkWriter):
def __init__(self, quality=67):
        # map the quality range [1:100] (higher is better) onto the inverted codec scale [0:51]
quality = round(51 * (100 - quality) / 99)
super().__init__(quality)
self._output_fps = 25
try:
codec = av.codec.Codec('libopenh264', 'w')
self._codec_name = codec.name
self._codec_opts = {
'profile': 'constrained_baseline',
'qmin': str(self._image_quality),
'qmax': str(self._image_quality),
'rc_mode': 'buffer',
}
except av.codec.codec.UnknownCodecError:
codec = av.codec.Codec('libx264', 'w')
self._codec_name = codec.name
self._codec_opts = {
"crf": str(self._image_quality),
"preset": "ultrafast",
}
def _create_av_container(self, path, w, h, rate, options, f='mp4'):
        # x264 requires width and height to be divisible by 2 for yuv420p
if h % 2:
h += 1
if w % 2:
w += 1
container = av.open(path, 'w',format=f)
video_stream = container.add_stream(self._codec_name, rate=rate)
video_stream.pix_fmt = "yuv420p"
video_stream.width = w
video_stream.height = h
video_stream.options = options
return container, video_stream
def save_as_chunk(self, images, chunk_path):
if not images:
raise Exception('no images to save')
input_w = images[0][0].width
input_h = images[0][0].height
output_container, output_v_stream = self._create_av_container(
path=chunk_path,
w=input_w,
h=input_h,
rate=self._output_fps,
options=self._codec_opts,
)
self._encode_images(images, output_container, output_v_stream)
output_container.close()
return [(input_w, input_h)]
@staticmethod
def _encode_images(images, container, stream):
for frame, _, _ in images:
# let libav set the correct pts and time_base
frame.pts = None
frame.time_base = None
for packet in stream.encode(frame):
container.mux(packet)
# Flush streams
for packet in stream.encode():
container.mux(packet)
class Mpeg4CompressedChunkWriter(Mpeg4ChunkWriter):
def __init__(self, quality):
super().__init__(quality)
if self._codec_name == 'libx264':
self._codec_opts = {
'profile': 'baseline',
'coder': '0',
'crf': str(self._image_quality),
'wpredp': '0',
'flags': '-loop',
}
def save_as_chunk(self, images, chunk_path):
if not images:
raise Exception('no images to save')
input_w = images[0][0].width
input_h = images[0][0].height
downscale_factor = 1
while input_h / downscale_factor >= 1080:
downscale_factor *= 2
output_h = input_h // downscale_factor
output_w = input_w // downscale_factor
output_container, output_v_stream = self._create_av_container(
path=chunk_path,
w=output_w,
h=output_h,
rate=self._output_fps,
options=self._codec_opts,
)
self._encode_images(images, output_container, output_v_stream)
output_container.close()
return [(input_w, input_h)]
class DicomListExtractor(ImageListReader):
def __init__(self, source_path, dest_path, image_quality, step=1, start=0, stop=0):
if not source_path:
raise Exception('No Dicom found')
import pydicom
super().__init__(
source_path=sorted(source_path),
dest_path=dest_path,
image_quality=image_quality,
step=1,
start=0,
stop=0,
)
self._dimensions = []
series = dict()
self._jpeg_source_paths = []
for i, source in enumerate(self._source_path):
dcm = pydicom.read_file(source)
series_time = dcm.get("SeriesTime", "")
if series_time not in series:
series[series_time] = Series(i, dcm.get("SeriesDescription", ""))
else:
series[series_time].stop_frame = i
img = _normalize_image(dcm.pixel_array)
pilImg = Image.fromarray(img)
self._dimensions.append(pilImg.size)
jpeg_source_path = os.path.splitext(source)[0] + '.jpg'
pilImg.save(jpeg_source_path, 'JPEG')
self._jpeg_source_paths.append(jpeg_source_path)
        # Build the list of Series sorted in ascending order by SeriesTime
self._series = [v for _, v in sorted(series.items())]
...
def _normalize_image(img, min_percent = 0, max_percent = 99, gamma = 1.2):
vmin = np.percentile(img, min_percent)
vmax = np.percentile(img, max_percent)
img = ((img - vmin) / (vmax - vmin))
img[img < 0] = 0
img = pow(img, gamma) * 255
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def _is_archive(path):
mime = mimetypes.guess_type(path)
mime_type = mime[0]
encoding = mime[1]
supportedArchives = ['application/x-rar-compressed',
'application/x-tar', 'application/x-7z-compressed', 'application/x-cpio',
'gzip', 'bzip2']
return mime_type in supportedArchives or encoding in supportedArchives
def _is_video(path):
mime = mimetypes.guess_type(path)
return mime[0] is not None and mime[0].startswith('video')
def _is_image(path):
mime = mimetypes.guess_type(path)
# Exclude vector graphic images because Pillow cannot work with them
return mime[0] is not None and mime[0].startswith('image') and \
not mime[0].startswith('image/svg')
def _is_dir(path):
return os.path.isdir(path)
def _is_pdf(path):
mime = mimetypes.guess_type(path)
return mime[0] == 'application/pdf'
def _is_zip(path):
mime = mimetypes.guess_type(path)
mime_type = mime[0]
encoding = mime[1]
supportedArchives = ['application/zip']
return mime_type in supportedArchives or encoding in supportedArchives
# 'has_mime_type': function that receives one argument - a path to a file.
#                  Should return True if the file has the specified media type.
# 'extractor': class that extracts images from the specified media.
# 'mode': 'annotation' or 'interpolation' - mode of the task that should be created.
# 'unique': True or False - describes how the type can be combined with others.
#           True - only one item of this type is allowed, and it cannot be combined with others
#           False - this media type can be combined with others that have unique == False
MEDIA_TYPES = {
'image': {
'has_mime_type': _is_image,
'extractor': ImageListReader,
'mode': 'annotation',
'unique': False,
},
'video': {
'has_mime_type': _is_video,
'extractor': VideoReader,
'mode': 'interpolation',
'unique': True,
},
'archive': {
'has_mime_type': _is_archive,
'extractor': ArchiveReader,
'mode': 'annotation',
'unique': True,
},
'directory': {
'has_mime_type': _is_dir,
'extractor': DirectoryReader,
'mode': 'annotation',
'unique': False,
},
'pdf': {
'has_mime_type': _is_pdf,
'extractor': PdfReader,
'mode': 'annotation',
'unique': True,
},
'zip': {
'has_mime_type': _is_zip,
'extractor': ZipReader,
'mode': 'annotation',
'unique': True,
}
}
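# Illustrative sketch (not part of the original module): one way a caller could use the
# MEDIA_TYPES registry above to choose an extractor class for a given path. The helper
# name `select_media_extractor` is hypothetical.
def select_media_extractor(path):
    for media_type, properties in MEDIA_TYPES.items():
        if properties['has_mime_type'](path):
            return media_type, properties['extractor']
    raise Exception('Unsupported media type: {}'.format(path))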
class ValidateDimension:
def __init__(self, path=None):
self.dimension = DimensionType.DIM_2D
self.path = path
self.related_files = {}
self.image_files = {}
self.converted_files = []
@staticmethod
def get_pcd_properties(fp, verify_version=False):
kv = {}
pcd_version = ["0.7", "0.6", "0.5", "0.4", "0.3", "0.2", "0.1",
".7", ".6", ".5", ".4", ".3", ".2", ".1"]
try:
for line in fp:
line = line.decode("utf-8")
if line.startswith("#"):
continue
k, v = line.split(" ", maxsplit=1)
kv[k] = v.strip()
if "DATA" in line:
break
if verify_version:
if "VERSION" in kv and kv["VERSION"] in pcd_version:
return True
return None
return kv
except AttributeError:
return None
@staticmethod
def convert_bin_to_pcd(path, delete_source=True):
list_pcd = []
with open(path, "rb") as f:
size_float = 4
byte = f.read(size_float * 4)
while byte:
x, y, z, _ = struct.unpack("ffff", byte)
list_pcd.append([x, y, z])
byte = f.read(size_float * 4)
np_pcd = np.asarray(list_pcd)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np_pcd)
pcd_filename = path.replace(".bin", ".pcd")
o3d.io.write_point_cloud(pcd_filename, pcd)
if delete_source:
os.remove(path)
return pcd_filename
def set_path(self, path):
self.path = path
def bin_operation(self, file_path, actual_path):
pcd_path = ValidateDimension.convert_bin_to_pcd(file_path)
self.converted_files.append(pcd_path)
return pcd_path.split(actual_path)[-1][1:]
@staticmethod
def pcd_operation(file_path, actual_path):
with open(file_path, "rb") as file:
is_pcd = ValidateDimension.get_pcd_properties(file, verify_version=True)
return file_path.split(actual_path)[-1][1:] if is_pcd else file_path
def process_files(self, root, actual_path, files):
pcd_files = {}
for file in files:
file_name, file_extension = os.path.splitext(file)
file_path = os.path.abspath(os.path.join(root, file))
if file_extension == ".bin":
path = self.bin_operation(file_path, actual_path)
pcd_files[file_name] = path
self.related_files[path] = []
elif file_extension == ".pcd":
path = ValidateDimension.pcd_operation(file_path, actual_path)
if path == file_path:
self.image_files[file_name] = file_path
else:
pcd_files[file_name] = path
self.related_files[path] = []
else:
if _is_image(file_path):
self.image_files[file_name] = file_path
return pcd_files
def validate(self):
"""
        Validate the directory structure for the KITTI and point cloud formats.
"""
if not self.path:
return
actual_path = self.path
for root, _, files in os.walk(actual_path):
if not files_to_ignore(root):
continue
self.process_files(root, actual_path, files)
if len(self.related_files.keys()):
self.dimension = DimensionType.DIM_3D
|
the-stack_0_6370 | from favourites_list import *
from positional_list import *
class FavouritesListMTF(FavouritesList):
"""List of elements odered with move-to-front heuristic."""
# we override _move_up provide move to front semantics.
def _move_up(self,p):
"""Move accesses item at Position p to frony of the list."""
if p != self._data.first():
self._data.add_first(self._data.delete(p)) # delete/insert
    # we override top because the list is no longer sorted
def top(self,k):
"""Generate sequence of top k elements in terms of access count."""
if not 1<=k<=len(self):
raise ValueError("Illegal value for k")
        # we begin by making a copy of the original list
temp = PositionalList()
for item in self._data: # positional list supports iteration
temp.add_last(item)
# we repeatedly find, report and remove element with largest count
for j in range(k):
# find and report next highest from temp
highPos = temp.first()
walk = temp.after(highPos)
while walk is not None:
if walk.element()._count > highPos.element()._count:
highPos = walk
walk = temp.after(walk)
# we have found the element with highest count
yield highPos.element()._value # report element to user
            temp.delete(highPos) # remove from temp list
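# Illustrative usage sketch (not part of the original module). It assumes the base
# FavouritesList class exposes an access(e) method that records one access to element e.
if __name__ == '__main__':
    favourites = FavouritesListMTF()
    for page in ('a', 'b', 'c', 'b', 'b', 'a'):
        favourites.access(page)
    print(list(favourites.top(2)))  # the two most frequently accessed elements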
|
the-stack_0_6371 | from setuptools import setup, find_packages
import os
version = '0.2.1'
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = (
read('README.txt')
+ '\n' +
read('js', 'gridster', 'test_gridster.txt')
+ '\n' +
read('CHANGES.txt'))
setup(
name='js.gridster',
version=version,
description="Fanstatic packaging of gridster",
long_description=long_description,
classifiers=[],
keywords='fanstatic jquery gridster',
author='Marco Scheidhuber',
author_email='[email protected]',
url='https://github.com/j23d/js.gridster',
license='BSD',
packages=find_packages(),
namespace_packages=['js'],
include_package_data=True,
zip_safe=False,
install_requires=[
'fanstatic',
'setuptools',
],
entry_points={
'fanstatic.libraries': [
'gridster = js.gridster:library',
],
},
)
|
the-stack_0_6372 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import abstractmethod
from collections import Hashable
from functools import wraps
from aif360.datasets import Dataset
from aif360.decorating_metaclass import ApplyDecorator
def _make_key(args, kwargs, unhashable, kwd_mark=(object(),)):
"""Simplified version of functools."""
key = args
if kwargs:
key += kwd_mark
for item in kwargs.items():
if not isinstance(item[1], Hashable):
return unhashable
key += item
return key
def memoize(func):
"""Based off functools.lru_cache (not available in Python 2).
A little inefficient but we're just storing floats.
"""
sentinal = object()
unhashable = object()
cache = {}
@wraps(func)
def wrapper(*args, **kwargs):
key = _make_key(args, kwargs, unhashable)
if key is unhashable:
return func(*args, **kwargs)
result = cache.get(key, sentinal)
if result is not sentinal:
return result
result = func(*args, **kwargs)
cache[key] = result
return result
return wrapper
BaseClass = ApplyDecorator(memoize)
class Metric(BaseClass):
"""Base class for metrics."""
@abstractmethod
def __init__(self, dataset):
"""Initialize a `Metrics` object.
Args:
dataset (Dataset): Dataset on which to evaluate metrics.
"""
if isinstance(dataset, Dataset):
self.dataset = dataset
else:
raise TypeError("dataset must be of Dataset class")
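# Illustrative sketch (not part of the original module): memoize caches results keyed by the
# call arguments, so repeating a computation with the same hashable arguments is free.
# The function below is hypothetical and exists only to demonstrate the decorator.
@memoize
def _expensive_ratio(num, denom):
    print("computing")  # printed only on the first call for a given (num, denom) pair
    return num / denom
# _expensive_ratio(1.0, 4.0)  # computes and caches 0.25
# _expensive_ratio(1.0, 4.0)  # served from the cache; nothing is printed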
|
the-stack_0_6374 | from flask import Flask, jsonify, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Connect to Database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///cafes.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# Cafe TABLE Configuration
class Cafe(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), unique=True, nullable=False)
map_url = db.Column(db.String(500), nullable=False)
img_url = db.Column(db.String(500), nullable=False)
location = db.Column(db.String(250), nullable=False)
seats = db.Column(db.String(250), nullable=False)
has_toilet = db.Column(db.Boolean, nullable=False)
has_wifi = db.Column(db.Boolean, nullable=False)
has_sockets = db.Column(db.Boolean, nullable=False)
can_take_calls = db.Column(db.Boolean, nullable=False)
coffee_price = db.Column(db.String(250), nullable=True)
def to_dict(self):
"""Convert a database table to a dictionary."""
return {column.name: getattr(self, column.name) for column in self.__table__.columns}
@app.route("/")
def home():
return render_template("index.html")
# HTTP GET - Read Records
@app.route("/random", methods=["GET"])
def get_random():
"""Return a random cafe from the database."""
random_cafe = db.session.query(Cafe).order_by(db.func.random()).first()
return jsonify(random_cafe.to_dict())
@app.route("/all", methods=["GET"])
def get_all():
"""Return all cafes from the database."""
return jsonify(cafes=[cafe.to_dict() for cafe in db.session.query(Cafe).all()])
@app.route("/search", methods=["GET"])
def search():
"""Search for a cafe by location."""
try:
location = request.args.get("loc").capitalize()
except AttributeError:
return jsonify(error={"No Search Terms": "Please provide a valid search term."}), 400
else:
results = db.session.query(Cafe).filter_by(location=location).all()
if results:
return jsonify(cafes=[cafe.to_dict() for cafe in results])
else:
return jsonify(error={"Not Found": "Sorry, we don't have a cafe at that location."})
# HTTP POST - Create Record
@app.route("/add", methods=["POST"])
def add_cafe():
"""Add a new cafe to the database."""
new_cafe = Cafe(
name=request.form.get("name"),
map_url=request.form.get("map_url"),
img_url=request.form.get("img_url"),
location=request.form.get("location"),
has_sockets=bool(request.form.get("sockets")),
has_toilet=bool(request.form.get("toilet")),
has_wifi=bool(request.form.get("wifi")),
can_take_calls=bool(request.form.get("calls")),
seats=request.form.get("seats"),
coffee_price=request.form.get("coffee_price"),
)
db.session.add(new_cafe)
db.session.commit()
return jsonify(response={"success": "Successfully added the new cafe."})
# HTTP PUT/PATCH - Update Record
@app.route("/update-price/<int:cafe_id>", methods=["PATCH"])
def update_price(cafe_id):
"""Update the coffee price of a cafe."""
try:
price = request.args.get("new_price")
except AttributeError:
return jsonify(error={"No Data": "Please provide a valid price."}), 400
else:
cafe = db.session.query(Cafe).filter_by(id=cafe_id).first()
if not cafe:
return jsonify(error={"Not Found": "Sorry, a cafe with that id does not exist."}), 404
else:
cafe.coffee_price = price
db.session.commit()
return jsonify(response={"success": "Successfully updated the price of the cafe."})
# HTTP DELETE - Delete Record
@app.route("/report-closed/<cafe_id>", methods=["DELETE"])
def report_closed(cafe_id):
"""Report a closed cafe and delete it from the database."""
try:
api_key = request.args.get("api_key")
except AttributeError:
return jsonify(error={"No Data": "Please provide a valid API key."}), 400
else:
if api_key != "secretkey":
return jsonify(error={"Invalid API Key": "Please provide a valid API Key."}), 403
else:
cafe = db.session.query(Cafe).filter_by(id=cafe_id).first()
if not cafe:
return jsonify(error={"Not Found": "Sorry, a cafe with that id does not exist."}), 404
else:
db.session.delete(cafe)
db.session.commit()
return jsonify(response={"success": "Successfully reported and deleted the cafe."})
if __name__ == '__main__':
app.run(debug=True)
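# Example requests against this API, assuming the development server runs on
# http://127.0.0.1:5000 (the location and id values below are illustrative):
#   GET    /random                              -> one random cafe as JSON
#   GET    /all                                 -> every cafe as JSON
#   GET    /search?loc=Peckham                  -> cafes at a location, or a "Not Found" error
#   POST   /add                                 -> form fields: name, map_url, img_url, location, ...
#   PATCH  /update-price/1?new_price=2.50       -> update the coffee price of cafe 1
#   DELETE /report-closed/1?api_key=secretkey   -> delete cafe 1 (requires the API key)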
|
the-stack_0_6377 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Optional
import numpy as np
from parameterized import parameterized
from monai.apps.pathology.transforms import TileOnGrid
from tests.utils import TEST_NDARRAYS, assert_allclose
TEST_CASES = []
for tile_count in [16, 64]:
for tile_size in [8, 32]:
for filter_mode in ["min", "max", "random"]:
for background_val in [255, 0]:
TEST_CASES.append(
[
{
"tile_count": tile_count,
"tile_size": tile_size,
"filter_mode": filter_mode,
"random_offset": False,
"background_val": background_val,
}
]
)
for tile_size in [8, 16]:
for step in [4, 8]:
TEST_CASES.append([{"tile_count": 16, "step": step, "tile_size": tile_size}])
TESTS = []
for p in TEST_NDARRAYS:
for tc in TEST_CASES:
TESTS.append([p, *tc])
TEST_CASES2 = []
for tile_count in [16, 64]:
for tile_size in [8, 32]:
for filter_mode in ["min", "max", "random"]:
for background_val in [255, 0]:
TEST_CASES2.append(
[
{
"tile_count": tile_count,
"tile_size": tile_size,
"filter_mode": filter_mode,
"random_offset": True,
"background_val": background_val,
}
]
)
TESTS2 = []
for p in TEST_NDARRAYS:
for tc in TEST_CASES2:
TESTS2.append([p, *tc])
def make_image(
tile_count: int,
tile_size: int,
step: int = 0,
random_offset: bool = False,
filter_mode: Optional[str] = None,
seed=123,
**kwargs,
):
tile_count = int(np.sqrt(tile_count))
pad = 0
if random_offset:
pad = 3
if step == 0:
step = tile_size
image = np.random.randint(
200,
size=[3, (tile_count - 1) * step + tile_size + pad, (tile_count - 1) * step + tile_size + pad],
dtype=np.uint8,
)
imlarge = image
random_state = np.random.RandomState(seed)
if random_offset:
pad_h = image.shape[1] % tile_size
pad_w = image.shape[2] % tile_size
offset = (random_state.randint(pad_h) if pad_h > 0 else 0, random_state.randint(pad_w) if pad_w > 0 else 0)
image = image[:, offset[0] :, offset[1] :]
tiles_list = []
for x in range(tile_count):
for y in range(tile_count):
tiles_list.append(image[:, x * step : x * step + tile_size, y * step : y * step + tile_size])
tiles = np.stack(tiles_list, axis=0) # type: ignore
if (filter_mode == "min" or filter_mode == "max") and len(tiles) > tile_count ** 2:
tiles = tiles[np.argsort(tiles.sum(axis=(1, 2, 3)))]
return imlarge, tiles
class TestTileOnGrid(unittest.TestCase):
@parameterized.expand(TESTS)
def test_tile_patch_single_call(self, in_type, input_parameters):
img, tiles = make_image(**input_parameters)
input_img = in_type(img)
tiler = TileOnGrid(**input_parameters)
output = tiler(input_img)
assert_allclose(output, tiles, type_test=False)
@parameterized.expand(TESTS2)
def test_tile_patch_random_call(self, in_type, input_parameters):
img, tiles = make_image(**input_parameters, seed=123)
input_img = in_type(img)
tiler = TileOnGrid(**input_parameters)
tiler.set_random_state(seed=123)
output = tiler(input_img)
assert_allclose(output, tiles, type_test=False)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_6378 | from keras import activations, layers, models
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.tf_utils import shape_type_conversion
from tfreplknet.drop import DropPath
@register_keras_serializable(package='TFRepLKNet')
class FFN(layers.Layer):
def __init__(self, ratio, dropout, **kwargs):
super().__init__(**kwargs)
self.input_spec = layers.InputSpec(ndim=4)
self.ratio = ratio
self.dropout = dropout
@shape_type_conversion
def build(self, input_shape):
channels = input_shape[-1]
if channels is None:
raise ValueError('Channel dimension of the inputs should be defined. Found `None`.')
self.input_spec = layers.InputSpec(ndim=4, axes={-1: channels})
# noinspection PyAttributeOutsideInit
self.bn = layers.BatchNormalization(momentum=0.1, epsilon=1.001e-5, name='preffn_bn')
# noinspection PyAttributeOutsideInit
self.pw1 = models.Sequential([
layers.Conv2D(int(channels * self.ratio), 1, use_bias=False, name=f'{self.name}/pw1/conv'),
layers.BatchNormalization(momentum=0.1, epsilon=1.001e-5, name=f'{self.name}/pw1/bn')
], name='pw1')
# noinspection PyAttributeOutsideInit
self.pw2 = models.Sequential([
layers.Conv2D(channels, 1, use_bias=False, name=f'{self.name}/pw2/conv'),
layers.BatchNormalization(momentum=0.1, epsilon=1.001e-5, name=f'{self.name}/pw2/bn')
], name='pw2')
# noinspection PyAttributeOutsideInit
self.drop = DropPath(self.dropout)
super().build(input_shape)
def call(self, inputs, *args, **kwargs):
outputs = self.bn(inputs)
outputs = self.pw1(outputs)
outputs = activations.gelu(outputs)
outputs = self.pw2(outputs)
outputs = inputs + self.drop(outputs)
return outputs
@shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({
'ratio': self.ratio,
'dropout': self.dropout
})
return config
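# Illustrative usage sketch (not part of the original module): applying the FFN block to a
# random NHWC feature map. The channel count is inferred in build(), so any last dimension works.
if __name__ == '__main__':
    import tensorflow as tf
    ffn = FFN(ratio=4, dropout=0.1)
    features = tf.random.normal((2, 14, 14, 64))  # (batch, height, width, channels)
    print(ffn(features).shape)  # same shape as the input: (2, 14, 14, 64)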
|
the-stack_0_6379 | add_library('video')
add_library('opencv_processing')
video = None
opencv = None
def setup():
    global video, opencv  # assign to the module-level handles used in draw()
    size(720, 480, P2D)
video = Movie(this, "street.mov")
opencv = OpenCV(this, 720, 480)
opencv.startBackgroundSubtraction(5, 3, 0.5)
video.loop()
video.play()
def draw():
image(video, 0, 0)
opencv.loadImage(video)
opencv.updateBackground()
opencv.dilate()
opencv.erode()
noFill()
stroke(255, 0, 0)
strokeWeight(3)
for contour in opencv.findContours():
contour.draw()
def movieEvent(m):
m.read()
|
the-stack_0_6383 | # This file is a demo for the 'Isothermal_Monolith_Simulator' object
import sys
sys.path.append('../..')
from catalyst.isothermal_monolith_catalysis import *
# Read in the data (data is now a dictionary containing the data we want)
data = naively_read_data_file("inputfiles/SCR_all-ages_300C.txt",factor=5)
# Testing
sim = Isothermal_Monolith_Simulator()
sim.add_axial_dim(0,5)
sim.add_axial_dataset(5) # Location of observations (in cm)
sim.add_temporal_dim(0,137)
sim.add_temporal_dataset(data["time"]) #Temporal observations (in s)
sim.add_age_set(["Unaged"])
sim.add_data_age_set(["Unaged"]) # Data observations can be a sub-set
sim.add_temperature_set(["300C"])
sim.add_data_temperature_set(["300C"]) # Data observations can be a sub-set
sim.add_gas_species(["NH3","H2O","O2","NO","NO2","N2O","N2"])
sim.add_data_gas_species(["NH3","NO","NO2","N2O"]) # Data observations can be a sub-set
sim.set_data_values_for("NH3","Unaged","300C",5,data["time"],data["NH3_Unaged"])
sim.set_data_values_for("NO","Unaged","300C",5,data["time"],data["NO_Unaged"])
sim.set_data_values_for("NO2","Unaged","300C",5,data["time"],data["NO2_Unaged"])
sim.set_data_values_for("N2O","Unaged","300C",5,data["time"],data["N2O_Unaged"])
# Clear the dictionary to free memory now that it is no longer needed
data.clear()
sim.add_surface_species(["q1","q2a","q2b","q3a","q3b","q3c","q4a","q4b"])
sim.add_surface_sites(["S1","S2","S3a","S3b","S3c"])
sim.add_reactions({"r1": ReactionType.EquilibriumArrhenius,
"r2a": ReactionType.EquilibriumArrhenius,
"r2b": ReactionType.EquilibriumArrhenius,
"r3a": ReactionType.EquilibriumArrhenius,
"r3b": ReactionType.EquilibriumArrhenius,
"r3c": ReactionType.EquilibriumArrhenius,
"r4a": ReactionType.EquilibriumArrhenius,
"r4b": ReactionType.EquilibriumArrhenius,
"r5": ReactionType.Arrhenius,
"r6": ReactionType.Arrhenius,
"r7f": ReactionType.Arrhenius,
"r7r": ReactionType.Arrhenius,
"r8": ReactionType.Arrhenius,
"r9": ReactionType.Arrhenius,
"r10": ReactionType.Arrhenius,
"r11": ReactionType.Arrhenius,
"r12": ReactionType.Arrhenius,
"r13a": ReactionType.Arrhenius,
"r14a": ReactionType.Arrhenius,
"r15af": ReactionType.Arrhenius,
"r15ar": ReactionType.Arrhenius,
"r16a": ReactionType.Arrhenius,
"r17a": ReactionType.Arrhenius,
"r18a": ReactionType.Arrhenius,
"r19a": ReactionType.Arrhenius,
"r20a": ReactionType.Arrhenius,
"r13b": ReactionType.Arrhenius,
"r14b": ReactionType.Arrhenius,
"r15bf": ReactionType.Arrhenius,
"r15br": ReactionType.Arrhenius,
"r16b": ReactionType.Arrhenius,
"r17b": ReactionType.Arrhenius,
"r18b": ReactionType.Arrhenius,
"r19b": ReactionType.Arrhenius,
"r20b": ReactionType.Arrhenius,
"r21": ReactionType.Arrhenius,
"r22": ReactionType.Arrhenius,
"r23f": ReactionType.Arrhenius,
"r23r": ReactionType.Arrhenius,
"r24": ReactionType.Arrhenius,
"r25": ReactionType.Arrhenius,
"r26": ReactionType.Arrhenius,
"r27": ReactionType.Arrhenius,
"r28": ReactionType.Arrhenius,
"r29": ReactionType.Arrhenius,
"r30": ReactionType.Arrhenius,
"r31f": ReactionType.Arrhenius,
"r31r": ReactionType.Arrhenius,
"r32": ReactionType.Arrhenius,
"r33": ReactionType.Arrhenius,
"r34": ReactionType.Arrhenius,
"r35": ReactionType.Arrhenius,
"r36": ReactionType.Arrhenius,
"r37": ReactionType.Arrhenius,
"r38": ReactionType.Arrhenius,
"r39f": ReactionType.Arrhenius,
"r39r": ReactionType.Arrhenius,
"r40": ReactionType.Arrhenius,
"r41": ReactionType.Arrhenius,
"r42": ReactionType.Arrhenius,
"r43": ReactionType.Arrhenius,
"r44": ReactionType.Arrhenius
})
sim.set_bulk_porosity(0.3309)
sim.set_washcoat_porosity(0.4)
sim.set_reactor_radius(1)
sim.set_space_velocity_all_runs(1000) #volumes/min
sim.set_cell_density(62) # 62 cells per cm^2 (~400 cpsi)
# Setting up site balances using dicts
s1_data = {"mol_occupancy": {"q1": 1, "q4a": 1}}
s2_data = {"mol_occupancy": {"q2a": 1, "q2b": 1, "q4b": 1}}
s3a_data = {"mol_occupancy": {"q3a": 1}}
s3b_data = {"mol_occupancy": {"q3b": 1}}
s3c_data = {"mol_occupancy": {"q3c": 1}}
sim.set_site_balance("S1",s1_data)
sim.set_site_balance("S2",s2_data)
sim.set_site_balance("S3a",s3a_data)
sim.set_site_balance("S3b",s3b_data)
sim.set_site_balance("S3c",s3c_data)
# Reaction specification information (must correspond to correct reaction type)
# EquilibriumArrhenius
r1_equ = {"parameters": {"A": 250000, "E": 0, "dH": -54547.9, "dS": -29.9943},
"mol_reactants": {"S1": 1, "NH3": 1},
"mol_products": {"q1": 1},
"rxn_orders": {"S1": 1, "NH3": 1, "q1": 1}
}
r2a_equ = {"parameters": {"A": 300000, "E": 0, "dH": -78073.843, "dS": -35.311574},
"mol_reactants": {"S2": 1, "NH3": 1},
"mol_products": {"q2a": 1},
"rxn_orders": {"S2": 1, "NH3": 1, "q2a": 1}
}
r2b_equ = {"parameters": {"A": 150000, "E": 0, "dH": -78064.167, "dS": -46.821878},
"mol_reactants": {"q2a": 1, "NH3": 1},
"mol_products": {"q2b": 1},
"rxn_orders": {"q2a": 1, "NH3": 1, "q2b": 1}
}
r3a_equ = {"parameters": {"A": 2500000, "E": 0, "dH": -91860.8, "dS": -28.9292},
"mol_reactants": {"S3a": 1, "NH3": 1},
"mol_products": {"q3a": 1},
"rxn_orders": {"S3a": 1, "NH3": 1, "q3a": 1}
}
r3b_equ = {"parameters": {"A": 2500000, "E": 0, "dH": -91860.8, "dS": -28.9292},
"mol_reactants": {"S3b": 1, "NH3": 1},
"mol_products": {"q3b": 1},
"rxn_orders": {"S3b": 1, "NH3": 1, "q3b": 1}
}
r3c_equ = {"parameters": {"A": 2500000, "E": 0, "dH": -91860.8, "dS": -28.9292},
"mol_reactants": {"S3c": 1, "NH3": 1},
"mol_products": {"q3c": 1},
"rxn_orders": {"S3c": 1, "NH3": 1, "q3c": 1}
}
r4a_equ = {"parameters": {"A": 44000, "E": 0, "dH": -32099.1, "dS": -24.2494},
"mol_reactants": {"S1": 1, "H2O": 1},
"mol_products": {"q4a": 1},
"rxn_orders": {"S1": 1, "H2O": 1, "q4a": 1}
}
r4b_equ = {"parameters": {"A": 70000, "E": 0, "dH": -28889.23, "dS": -26.674},
"mol_reactants": {"S2": 1, "H2O": 1},
"mol_products": {"q4b": 1},
"rxn_orders": {"S2": 1, "H2O": 1, "q4b": 1}
}
# Arrhenius Reactions
# ---------- q1 reactions ------------
r5 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "O2": 0.75},
"mol_products": {"S1": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q1": 1, "O2": 1}
}
r6 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "O2": 1.25},
"mol_products": {"S1": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q1": 1, "O2": 1}
}
r7f = {"parameters": {"A": 3122.066, "E": 0},
"mol_reactants": {"S1": 1, "NO": 1, "O2": 0.5},
"mol_products": {"S1": 1, "NO2": 1},
"rxn_orders": {"S1": 1, "NO": 1, "O2": 1}
}
r7r = {"parameters": {"A": 0.328075, "E": 0},
"mol_reactants": {"S1": 1, "NO2": 1},
"mol_products": {"S1": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"S1": 1, "NO2": 1}
}
r8 = {"parameters": {"A": 16782330, "E": 0},
"mol_reactants": {"q1": 1, "NO": 1, "O2": 0.25},
"mol_products": {"S1": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q1": 1, "NO": 1, "O2": 1}
}
r9 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "NO2": 1},
"mol_products": {"S1": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q1": 1, "NO2": 1}
}
r10 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"S1": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q1": 1, "NO2": 1, "O2": 1}
}
r11 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "NO": 1, "O2": 0.75},
"mol_products": {"S1": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q1": 1, "NO": 1, "O2": 1}
}
r12 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"S1": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q1": 1, "NO": 1, "NO2": 1}
}
# ---------- q2a reactions ------------
r13a = {"parameters": {"A": 17.98625, "E": 0},
"mol_reactants": {"q2a": 1, "O2": 0.75},
"mol_products": {"S2": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "O2": 1}
}
r14a = {"parameters": {"A": 12.1689, "E": 0},
"mol_reactants": {"q2a": 1, "O2": 1.25},
"mol_products": {"S2": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "O2": 1}
}
r15af = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"S2": 1, "NO": 1, "O2": 0.5},
"mol_products": {"S2": 1, "NO2": 1},
"rxn_orders": {"S2": 1, "NO": 1, "O2": 1}
}
r15ar = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"S2": 1, "NO2": 1},
"mol_products": {"S2": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"S2": 1, "NO2": 1}
}
r16a = {"parameters": {"A": 1.33E8, "E": 0},
"mol_reactants": {"q2a": 1, "NO": 1, "O2": 0.25},
"mol_products": {"S2": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "NO": 1, "O2": 1}
}
r17a = {"parameters": {"A": 4465644, "E": 0},
"mol_reactants": {"q2a": 1, "NO2": 1},
"mol_products": {"S2": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q2a": 1, "NO2": 1}
}
r18a = {"parameters": {"A": 1.86E8, "E": 0},
"mol_reactants": {"q2a": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"S2": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "NO2": 1, "O2": 1}
}
r19a = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2a": 1, "NO": 1, "O2": 0.75},
"mol_products": {"S2": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "NO": 1, "O2": 1}
}
r20a = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2a": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"S2": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "NO": 1, "NO2": 1}
}
# ---------- q2b reactions ------------
r13b = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2b": 1, "O2": 0.75},
"mol_products": {"q2a": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "O2": 1}
}
r14b = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2b": 1, "O2": 1.25},
"mol_products": {"q2a": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "O2": 1}
}
r15bf = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2b": 1, "NO": 1, "O2": 0.5},
"mol_products": {"q2a": 1, "NO2": 1},
"rxn_orders": {"q2b": 1, "NO": 1, "O2": 1}
}
r15br = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2a": 1, "NO2": 1},
"mol_products": {"q2b": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"q2b": 1, "NO2": 1}
}
r16b = {"parameters": {"A": 3.27E8, "E": 0},
"mol_reactants": {"q2b": 1, "NO": 1, "O2": 0.25},
"mol_products": {"q2a": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "NO": 1, "O2": 1}
}
r17b = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2b": 1, "NO2": 1},
"mol_products": {"q2a": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q2b": 1, "NO2": 1}
}
r18b = {"parameters": {"A": 4.14E9, "E": 0},
"mol_reactants": {"q2b": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"q2a": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "NO2": 1, "O2": 1}
}
r19b = {"parameters": {"A": 5395255, "E": 0},
"mol_reactants": {"q2b": 1, "NO": 1, "O2": 0.75},
"mol_products": {"q2a": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "NO": 1, "O2": 1}
}
r20b = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2b": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"q2a": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "NO": 1, "NO2": 1}
}
# ---------- q3a reactions ------------
r21 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3a": 1, "O2": 0.75},
"mol_products": {"S3a": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "O2": 1}
}
r22 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3a": 1, "O2": 1.25},
"mol_products": {"S3a": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "O2": 1}
}
r23f = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"S3a": 1, "NO": 1, "O2": 0.5},
"mol_products": {"S3a": 1, "NO2": 1},
"rxn_orders": {"S3a": 1, "NO": 1, "O2": 1}
}
r23r = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"S3a": 1, "NO2": 1},
"mol_products": {"S3a": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"S3a": 1, "NO2": 1}
}
r24 = {"parameters": {"A": 3.26E8, "E": 0},
"mol_reactants": {"q3a": 1, "NO": 1, "O2": 0.25},
"mol_products": {"S3a": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "NO": 1, "O2": 1}
}
r25 = {"parameters": {"A": 2911397, "E": 0},
"mol_reactants": {"q3a": 1, "NO2": 1},
"mol_products": {"S3a": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q3a": 1, "NO2": 1}
}
r26 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3a": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"S3a": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "NO2": 1, "O2": 1}
}
r27 = {"parameters": {"A": 6312962, "E": 0},
"mol_reactants": {"q3a": 1, "NO": 1, "O2": 0.75},
"mol_products": {"S3a": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "NO": 1, "O2": 1}
}
r28 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3a": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"S3a": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "NO": 1, "NO2": 1}
}
# ---------- q3b reactions ------------
r29 = {"parameters": {"A": 105.2508, "E": 0},
"mol_reactants": {"q3b": 1, "O2": 0.75},
"mol_products": {"S3b": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "O2": 1}
}
r30 = {"parameters": {"A": 98.4407, "E": 0},
"mol_reactants": {"q3b": 1, "O2": 1.25},
"mol_products": {"S3b": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "O2": 1}
}
r31f = {"parameters": {"A": 3053293, "E": 0},
"mol_reactants": {"S3b": 1, "NO": 1, "O2": 0.5},
"mol_products": {"S3b": 1, "NO2": 1},
"rxn_orders": {"S3b": 1, "NO": 1, "O2": 1}
}
r31r = {"parameters": {"A": 3825.781, "E": 0},
"mol_reactants": {"S3b": 1, "NO2": 1},
"mol_products": {"S3b": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"S3b": 1, "NO2": 1}
}
r32 = {"parameters": {"A": 6.24E9, "E": 0},
"mol_reactants": {"q3b": 1, "NO": 1, "O2": 0.25},
"mol_products": {"S3b": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "NO": 1, "O2": 1}
}
r33 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3b": 1, "NO2": 1},
"mol_products": {"S3b": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q3b": 1, "NO2": 1}
}
r34 = {"parameters": {"A": 1.22E9, "E": 0},
"mol_reactants": {"q3b": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"S3b": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "NO2": 1, "O2": 1}
}
r35 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3b": 1, "NO": 1, "O2": 0.75},
"mol_products": {"S3b": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "NO": 1, "O2": 1}
}
r36 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3b": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"S3b": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "NO": 1, "NO2": 1}
}
# ---------- q3c reactions ------------
r37 = {"parameters": {"A": 0.238904073, "E": 0},
"mol_reactants": {"q3c": 1, "O2": 0.75},
"mol_products": {"S3c": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "O2": 1}
}
r38 = {"parameters": {"A": 0.54633, "E": 0},
"mol_reactants": {"q3c": 1, "O2": 1.25},
"mol_products": {"S3c": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "O2": 1}
}
r39f = {"parameters": {"A": 3670639, "E": 0},
"mol_reactants": {"S3c": 1, "NO": 1, "O2": 0.5},
"mol_products": {"S3c": 1, "NO2": 1},
"rxn_orders": {"S3c": 1, "NO": 1, "O2": 1}
}
r39r = {"parameters": {"A": 2244.256, "E": 0},
"mol_reactants": {"S3c": 1, "NO2": 1},
"mol_products": {"S3c": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"S3c": 1, "NO2": 1}
}
r40 = {"parameters": {"A": 8.82E8, "E": 0},
"mol_reactants": {"q3c": 1, "NO": 1, "O2": 0.25},
"mol_products": {"S3c": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "NO": 1, "O2": 1}
}
r41 = {"parameters": {"A": 2548900, "E": 0},
"mol_reactants": {"q3c": 1, "NO2": 1},
"mol_products": {"S3c": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q3c": 1, "NO2": 1}
}
r42 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3c": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"S3c": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "NO2": 1, "O2": 1}
}
r43 = {"parameters": {"A": 17096289, "E": 0},
"mol_reactants": {"q3c": 1, "NO": 1, "O2": 0.75},
"mol_products": {"S3c": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "NO": 1, "O2": 1}
}
r44 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3c": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"S3c": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "NO": 1, "NO2": 1}
}
sim.set_reaction_info("r1", r1_equ)
sim.set_reaction_info("r2a", r2a_equ)
sim.set_reaction_info("r2b", r2b_equ)
sim.set_reaction_info("r3a", r3a_equ)
sim.set_reaction_info("r3b", r3b_equ)
sim.set_reaction_info("r3c", r3c_equ)
sim.set_reaction_info("r4a", r4a_equ)
sim.set_reaction_info("r4b", r4b_equ)
sim.set_reaction_info("r5", r5)
sim.set_reaction_info("r6", r6)
sim.set_reaction_info("r7f", r7f)
sim.set_reaction_info("r7r", r7r)
sim.set_reaction_info("r8", r8)
sim.set_reaction_info("r9", r9)
sim.set_reaction_info("r10", r10)
sim.set_reaction_info("r11", r11)
sim.set_reaction_info("r12", r12)
sim.set_reaction_info("r13a", r13a)
sim.set_reaction_info("r14a", r14a)
sim.set_reaction_info("r15af", r15af)
sim.set_reaction_info("r15ar", r15ar)
sim.set_reaction_info("r16a", r16a)
sim.set_reaction_info("r17a", r17a)
sim.set_reaction_info("r18a", r18a)
sim.set_reaction_info("r19a", r19a)
sim.set_reaction_info("r20a", r20a)
sim.set_reaction_info("r13b", r13b)
sim.set_reaction_info("r14b", r14b)
sim.set_reaction_info("r15bf", r15bf)
sim.set_reaction_info("r15br", r15br)
sim.set_reaction_info("r16b", r16b)
sim.set_reaction_info("r17b", r17b)
sim.set_reaction_info("r18b", r18b)
sim.set_reaction_info("r19b", r19b)
sim.set_reaction_info("r20b", r20b)
sim.set_reaction_info("r21", r21)
sim.set_reaction_info("r22", r22)
sim.set_reaction_info("r23f", r23f)
sim.set_reaction_info("r23r", r23r)
sim.set_reaction_info("r24", r24)
sim.set_reaction_info("r25", r25)
sim.set_reaction_info("r26", r26)
sim.set_reaction_info("r27", r27)
sim.set_reaction_info("r28", r28)
sim.set_reaction_info("r29", r29)
sim.set_reaction_info("r30", r30)
sim.set_reaction_info("r31f", r31f)
sim.set_reaction_info("r31r", r31r)
sim.set_reaction_info("r32", r32)
sim.set_reaction_info("r33", r33)
sim.set_reaction_info("r34", r34)
sim.set_reaction_info("r35", r35)
sim.set_reaction_info("r36", r36)
sim.set_reaction_info("r37", r37)
sim.set_reaction_info("r38", r38)
sim.set_reaction_info("r39f", r39f)
sim.set_reaction_info("r39r", r39r)
sim.set_reaction_info("r40", r40)
sim.set_reaction_info("r41", r41)
sim.set_reaction_info("r42", r42)
sim.set_reaction_info("r43", r43)
sim.set_reaction_info("r44", r44)
# ----------------- Unaged Site Densities -----------
sim.set_site_density("S1","Unaged",0.052619016)
sim.set_site_density("S2","Unaged",0.023125746)
sim.set_site_density("S3a","Unaged",0.01632)
sim.set_site_density("S3b","Unaged",0.003233)
sim.set_site_density("S3c","Unaged",0.006699)
sim.set_isothermal_temp("Unaged","300C",300+273.15)
# Build the constraints then discretize
sim.build_constraints()
sim.discretize_model(method=DiscretizationMethod.FiniteDifference,
tstep=137,elems=5,colpoints=2)
# Initial conditions and Boundary Conditions should be set AFTER discretization
# ---------------- Unaged ICs ------------------
sim.set_const_IC("O2","Unaged","300C",0.002126764)
sim.set_const_IC("H2O","Unaged","300C",0.001074836)
sim.set_const_IC("NH3","Unaged","300C",0)
sim.set_const_IC("NO","Unaged","300C",0)
sim.set_const_IC("NO2","Unaged","300C",0)
sim.set_const_IC("N2O","Unaged","300C",0)
sim.set_const_IC("N2","Unaged","300C",0.0184)
sim.set_const_IC("q1","Unaged","300C",0)
sim.set_const_IC("q2a","Unaged","300C",0)
sim.set_const_IC("q2b","Unaged","300C",0)
sim.set_const_IC("q3a","Unaged","300C",0)
sim.set_const_IC("q3b","Unaged","300C",0)
sim.set_const_IC("q3c","Unaged","300C",0)
sim.set_const_IC("q4a","Unaged","300C",0)
sim.set_const_IC("q4b","Unaged","300C",0)
# ---------------- Unaged BCs ------------------
sim.set_time_dependent_BC("O2","Unaged","300C",
time_value_pairs=[(2.258,4.253E-5),
(20.925,0.002126764)],
initial_value=0.002126764)
sim.set_time_dependent_BC("H2O","Unaged","300C",
time_value_pairs=[(4.25,0.001056024),
(21.758,0.001044021)],
initial_value=0.001074836)
sim.set_time_dependent_BC("NH3","Unaged","300C",
time_value_pairs=[(2.258,6.33114E-6),
(37.591,0),
(49.758,6.33114E-6),
(76.925,0),
(99.258,6.33114E-6),
(120.258,0)],
initial_value=0)
sim.set_time_dependent_BC("NO","Unaged","300C",
time_value_pairs=[(37.591, 6.33114E-6),
(86.758,3.1426E-6),
(129.425,6.33114E-6)],
initial_value=0)
sim.set_time_dependent_BC("NO2","Unaged","300C",
time_value_pairs=[(86.758,3.1426E-6),
(129.425,0)],
initial_value=0)
sim.set_const_BC("N2O","Unaged","300C",0)
sim.set_const_BC("N2","Unaged","300C",0.0184)
# Fix the kinetics to only run a simulation
sim.fix_reaction("r1")
sim.fix_reaction("r2a")
sim.fix_reaction("r2b")
sim.fix_reaction("r3a")
sim.fix_reaction("r3b")
sim.fix_reaction("r3c")
sim.fix_reaction("r4a")
sim.fix_reaction("r4b")
# Fix all reactions for simulation mode only
sim.fix_all_reactions()
sim.unfix_reaction("r13a")
sim.unfix_reaction("r14a")
sim.unfix_reaction("r29")
sim.unfix_reaction("r30")
sim.unfix_reaction("r37")
sim.unfix_reaction("r38")
sim.unfix_reaction("r1")
sim.unfix_reaction("r2a")
sim.unfix_reaction("r2b")
sim.unfix_reaction("r3a")
sim.unfix_reaction("r3b")
sim.unfix_reaction("r3c")
sim.unfix_reaction("r4a")
sim.unfix_reaction("r4b")
'''
sim.set_reaction_param_bounds("r1","dH",factor=0)
sim.set_reaction_param_bounds("r1","dS",factor=0)
sim.set_reaction_param_bounds("r2a","dH",factor=0)
sim.set_reaction_param_bounds("r2a","dS",factor=0)
sim.set_reaction_param_bounds("r2b","dH",factor=0)
sim.set_reaction_param_bounds("r2b","dS",factor=0)
sim.set_reaction_param_bounds("r3a","dH",factor=0)
sim.set_reaction_param_bounds("r3a","dS",factor=0)
sim.set_reaction_param_bounds("r3b","dH",factor=0)
sim.set_reaction_param_bounds("r3b","dS",factor=0)
sim.set_reaction_param_bounds("r3c","dH",factor=0)
sim.set_reaction_param_bounds("r3c","dS",factor=0)
sim.set_reaction_param_bounds("r4a","dH",factor=0)
sim.set_reaction_param_bounds("r4a","dS",factor=0)
sim.set_reaction_param_bounds("r4b","dH",factor=0)
sim.set_reaction_param_bounds("r4b","dS",factor=0)
'''
#sim.set_reaction_param_bounds("r13a","A",factor=5)
#sim.set_reaction_param_bounds("r14a","A",factor=5)
#sim.set_reaction_param_bounds("r29","A",factor=5)
#sim.set_reaction_param_bounds("r30","A",factor=5)
#sim.set_reaction_param_bounds("r37","A",factor=5)
#sim.set_reaction_param_bounds("r38","A",factor=5)
sim.initialize_auto_scaling()
sim.initialize_simulator()
sim.finalize_auto_scaling()
sim.run_solver()
sim.print_results_of_breakthrough(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"Unaged", "300C", file_name="Unaged_SCR_300C_breakthrough.txt")
sim.print_results_of_location(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"Unaged", "300C", 0, file_name="Unaged_SCR_300C_bypass.txt")
sim.print_results_of_integral_average(["q1","q2a","q2b","q3a","q3b","q3c"],
"Unaged", "300C", file_name="Unaged_SCR_300C_average_ads.txt")
sim.print_kinetic_parameter_info(file_name="300C_opt_params.txt")
sim.save_model_state(file_name="300C_model.json")
|
the-stack_0_6387 | """
Train the ESIM model on the preprocessed Quora dataset.
"""
# Aurelien Coet, 2018.
from utils.utils_top_transformer import train, validate
from vaa.droped import TransformerESIM as ESIM
# from vaa.model_esim import ESIM
from vaa.model_transformer_top import TOP
# from vaa.model_bert_transformer import ESIM
import torch.nn as nn
import matplotlib.pyplot as plt
import os
import sys
import argparse
import json
import numpy as np
import pickle
import torch
import matplotlib
import itertools
matplotlib.use('Agg')
def transform_batch_data(data, batch_size=64, shuffle=True):
data_batch = dict()
data_batch['premises'] = dict()
data_batch['hypotheses'] = dict()
data_batch['labels'] = dict()
index = np.arange(len(data['labels']))
if shuffle:
np.random.shuffle(index)
idx = -1
for i in range(len(index)):
if i % batch_size == 0:
idx += 1
data_batch['premises'][idx] = []
data_batch['hypotheses'][idx] = []
data_batch['labels'][idx] = []
data_batch['premises'][idx].append(data['premises'][index[i]])
data_batch['hypotheses'][idx].append(data['hypotheses'][index[i]])
data_batch['labels'][idx].append(int(data['labels'][index[i]]))
return data_batch
def main(train_file,
valid_file,
test_file,
target_dir,
embedding_size=512,
hidden_size=512,
dropout=0.5,
num_classes=3,
epochs=64,
batch_size=32,
lr=0.0004,
patience=5,
max_grad_norm=10.0,
checkpoint_model0=None,
checkpoint_model1=None,
finetuning=False):
"""
Train the ESIM model on the Quora dataset.
Args:
train_file: A path to some preprocessed data that must be used
to train the model.
valid_file: A path to some preprocessed data that must be used
to validate the model.
        test_file: A path to some preprocessed data that must be used
            to test the model.
        target_dir: The path to a directory where the trained model must
            be saved.
        embedding_size: The size of the input embeddings. Defaults to 512.
        hidden_size: The size of the hidden layers in the model. Defaults
            to 512.
dropout: The dropout rate to use in the model. Defaults to 0.5.
num_classes: The number of classes in the output of the model.
Defaults to 3.
epochs: The maximum number of epochs for training. Defaults to 64.
batch_size: The size of the batches for training. Defaults to 32.
lr: The learning rate for the optimizer. Defaults to 0.0004.
patience: The patience to use for early stopping. Defaults to 5.
checkpoint: A checkpoint from which to continue training. If None,
training starts from scratch. Defaults to None.
"""
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(20 * "=", " Preparing for training ", 20 * "=")
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# -------------------- Data loading ------------------- #
print("\t* Loading training data...")
with open(train_file, "rb") as pkl:
train_data = pickle.load(pkl)
print("\t* Loading validation data...")
with open(valid_file, "rb") as pkl:
valid_data = pickle.load(pkl)
valid_dataloader = transform_batch_data(valid_data, batch_size=batch_size, shuffle=False)
print("\t* Loading test data...")
with open(test_file, "rb") as pkl:
test_data = pickle.load(pkl)
test_dataloader = transform_batch_data(test_data, batch_size=batch_size, shuffle=False)
# -------------------- Model definition ------------------- #
print("\t* Building model...")
model = []
model1 = ESIM(embedding_size,
hidden_size,
dropout=0,
num_classes=num_classes,
device=device).to(device)
model2 = TOP(embedding_size,
hidden_size,
dropout=dropout,
num_classes=num_classes,
device=device).to(device)
model.append(model1)
model.append(model2)
# -------------------- Preparation for training ------------------- #
criterion = nn.CrossEntropyLoss()
if finetuning:
optimizer = torch.optim.Adam(itertools.chain(model[0].parameters(), model[1].parameters()), lr=lr)
else:
optimizer = torch.optim.Adam(model[1].parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode="max",
factor=0.5,
patience=0)
best_score = 0.0
start_epoch = 1
# Data for loss curves plot.
epochs_count = []
train_losses = []
valid_losses = []
# Continuing training from a checkpoint if one was given as argument.
if checkpoint_model0:
checkpoint = torch.load(checkpoint_model0)
start_epoch = checkpoint["epoch"] + 1
best_score = checkpoint["best_score"]
print("\t* Training will continue on existing model from epoch {}..."
.format(start_epoch))
model[0].load_state_dict(checkpoint["model"])
# optimizer.load_state_dict(checkpoint["optimizer"])
# epochs_count = checkpoint["epochs_count"]
# train_losses = checkpoint["train_losses"]
# valid_losses = checkpoint["valid_losses"]
if checkpoint_model1:
checkpoint = torch.load(checkpoint_model1)
start_epoch = checkpoint["epoch"] + 1
best_score = checkpoint["best_score"]
print("\t* Training will continue on existing model from epoch {}..."
.format(start_epoch))
model[1].load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
epochs_count = checkpoint["epochs_count"]
train_losses = checkpoint["train_losses"]
valid_losses = checkpoint["valid_losses"]
# Compute loss and accuracy before starting (or resuming) training.
_, valid_loss, valid_accuracy = validate(model,
valid_dataloader,
criterion)
print("\t* Validation loss before training: {:.4f}, accuracy: {:.4f}%"
.format(valid_loss, (valid_accuracy*100)))
_, test_loss, test_accuracy = validate(model,
test_dataloader,
criterion)
print("\t* test loss before training: {:.4f}, accuracy: {:.4f}%"
.format(test_loss, (test_accuracy*100)))
# -------------------- Training epochs ------------------- #
print("\n",
20 * "=",
"Training ESIM model on device: {}".format(device),
20 * "=")
patience_counter = 0
for epoch in range(start_epoch, epochs+1):
train_dataloader = transform_batch_data(train_data, batch_size=batch_size, shuffle=True)
epochs_count.append(epoch)
print("* Training epoch {}:".format(epoch))
epoch_time, epoch_loss, epoch_accuracy = train(model,
train_dataloader,
optimizer,
criterion,
epoch,
max_grad_norm)
train_losses.append(epoch_loss)
print("-> Training time: {:.4f}s, loss = {:.4f}, accuracy: {:.4f}%"
.format(epoch_time, epoch_loss, (epoch_accuracy*100)))
print("* Validation for epoch {}:".format(epoch))
epoch_time, epoch_loss, epoch_accuracy = validate(model,
valid_dataloader,
criterion)
valid_losses.append(epoch_loss)
print("-> Valid. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\n"
.format(epoch_time, epoch_loss, (epoch_accuracy*100)))
print("* Test for epoch {}:".format(epoch))
epoch_time, epoch_loss, epoch_accuracy = validate(model,
test_dataloader,
criterion)
print("-> Test. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\n"
.format(epoch_time, epoch_loss, (epoch_accuracy*100)))
        sys.stdout.flush()  # flush output so progress is printed immediately
# Update the optimizer's learning rate with the scheduler.
scheduler.step(epoch_accuracy)
# Early stopping on validation accuracy.
if epoch_accuracy < best_score:
patience_counter += 1
else:
best_score = epoch_accuracy
patience_counter = 0
# Save the best model. The optimizer is not saved to avoid having
# a checkpoint file that is too heavy to be shared. To resume
# training from the best model, use the 'esim_*.pth.tar'
# checkpoints instead.
torch.save({"epoch": epoch,
"model": model[0].state_dict(),
"best_score": best_score,
"epochs_count": epochs_count,
"train_losses": train_losses,
"valid_losses": valid_losses},
os.path.join(target_dir, "best_model0.pth.tar"))
torch.save({"epoch": epoch,
"model": model[1].state_dict(),
"best_score": best_score,
"epochs_count": epochs_count,
"train_losses": train_losses,
"valid_losses": valid_losses},
os.path.join(target_dir, "best_model1.pth.tar"))
# Save the model at each epoch.
torch.save({"epoch": epoch,
"model": model[0].state_dict(),
"best_score": best_score,
"optimizer": optimizer.state_dict(),
"epochs_count": epochs_count,
"train_losses": train_losses,
"valid_losses": valid_losses},
os.path.join(target_dir, "esim_model0{}.pth.tar".format(epoch)))
torch.save({"epoch": epoch,
"model": model[1].state_dict(),
"best_score": best_score,
"optimizer": optimizer.state_dict(),
"epochs_count": epochs_count,
"train_losses": train_losses,
"valid_losses": valid_losses},
os.path.join(target_dir, "esim_model1{}.pth.tar".format(epoch)))
if patience_counter >= patience:
print("-> Early stopping: patience limit reached, stopping...")
break
# Plotting of the loss curves for the train and validation sets.
fig = plt.figure()
plt.plot(epochs_count, train_losses, "-r")
plt.plot(epochs_count, valid_losses, "-b")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend(["Training loss", "Validation loss"])
plt.title("Cross entropy loss")
fig.savefig('quora_loss.png')
if __name__ == "__main__":
default_config = "../../config/training/quora_training_transformer.json"
parser = argparse.ArgumentParser(
description="Train the ESIM model on quora")
parser.add_argument("--config",
default=default_config,
help="Path to a json configuration file")
script_dir = os.path.dirname(os.path.realpath(__file__))
script_dir = script_dir + '/scripts/training'
parser.add_argument("--checkpoint_model0",
default=os.path.dirname(os.path.realpath(__file__)) + '/data/checkpoints/quora/transformer/' +"best.pth.tar",
help="Path to a checkpoint file to resume training")
parser.add_argument("--checkpoint_model1",
default=None,#os.path.dirname(os.path.realpath(__file__)) + '/data/checkpoints/quora/bert/' +"esim_model1{}.pth.tar".format(2),
help="Path to a checkpoint file to resume training")
args = parser.parse_args()
if args.config == default_config:
config_path = os.path.join(script_dir, args.config)
else:
config_path = args.config
with open(os.path.normpath(config_path), 'r') as config_file:
config = json.load(config_file)
main(os.path.normpath(os.path.join(script_dir, config["train_data"])),
os.path.normpath(os.path.join(script_dir, config["valid_data"])),
os.path.normpath(os.path.join(script_dir, config["test_data"])),
os.path.normpath(os.path.join(script_dir, config["target_dir"])),
config["embedding_size"],
config["hidden_size"],
config["dropout"],
config["num_classes"],
config["epochs"],
config["batch_size"],
config["lr"],
config["patience"],
config["max_gradient_norm"],
args.checkpoint_model0,
args.checkpoint_model1,
finetuning=False)
|
the-stack_0_6390 | #!/usr/bin/python3
import pandas as pd
from os.path import join as oj
import os
def load_google_mobility(data_dir='.'):
''' Load in Google Community Mobility Reports
Parameters
----------
data_dir : str; path to the data directory containing 'google_mobility.csv'
Returns
-------
data frame
'''
# download directly from source to get daily updates
cur_dir = os.getcwd()
os.chdir(data_dir)
os.system("wget https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv -O google_mobility.csv")
raw = pd.read_csv('google_mobility.csv')
os.chdir(cur_dir)
return raw
if __name__ == '__main__':
raw = load_google_mobility()
print('loaded google_mobility successfully.')
|
the-stack_0_6391 | # Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import torch
from condensa.util import EventTimer
class DC(object):
"""Condensa direct compression optimizer."""
def compress(self,
w,
pi,
delta,
trainloader,
testloader,
valloader,
criterion):
"""
Performs model compression using direct optimization.
:param w: PyTorch model.
:type w: `torch.nn.Module`
:param pi: Compression function.
:param delta: Decompression function.
:param trainloader: Training dataloader.
:param testloader: Test dataloader.
:param valloader: Validation dataloader.
:param criterion: Loss criterion.
"""
statistics = dict()
timer_dc = EventTimer()
with torch.no_grad():
compressed = deepcopy(w)
pi(compressed)
statistics['total_elapsed'] = timer_dc.elapsed_seconds
return compressed, statistics
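# Hedged usage sketch: `model` is a torch.nn.Module and `pi`/`delta` are
# user-supplied compression/decompression callables (for example taken from a
# condensa scheme object); none of these names are defined in this module.
def _example_direct_compression(model, pi, delta,
                                trainloader, testloader, valloader, criterion):
    dc = DC()
    compressed_model, stats = dc.compress(model, pi, delta,
                                          trainloader, testloader, valloader,
                                          criterion)
    return compressed_model, stats['total_elapsed']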
|
the-stack_0_6394 | from .exception import *
from .bitoperations import *
from copy import deepcopy
# Finding range sum [i,j] in a flat array
class FenwickTree():
"""Fenwick Tree is Binary Indexed tree.
"""
def __init__(self, values):
self.size = len(values)
self.values = values
self.tree = [0]
self.tree.extend(deepcopy(values))
        # one-based tree array: push each node's value up to its parent
        for i in range(1, self.size + 1):
parent = i + least_significan_bit(i)
if parent <= self.size:
self.tree[parent] += self.tree[i]
def prefix_sum(self, i):
        total = 0
        while i > 0:
            total += self.tree[i]
            i &= ~least_significan_bit(i)
        return total
def sum(self, i, j):
if j < i:
raise ValueError("Make sure j >= i")
return self.prefix_sum(j) - self.prefix_sum(i-1)
def point_update(self, i, x):
while i <= self.size:
self.tree[i] = self.tree[i] + x
i += least_significan_bit(i)
return self.tree
def get(self, i):
return self.sum(i,i)
def set(self, i, val):
return self.point_update(i , (val - self.sum(i,i))) |
the-stack_0_6395 | #!/usr/bin/env python
# Simple checker for whether valgrind found errors
import sys
import xml.etree.ElementTree as ElementTree
e = ElementTree.parse(sys.argv[1])
states = [x.find('state').text for x in e.findall('status')]
errors = [x.find('kind').text for x in e.findall('error')]
if "RUNNING" not in states or "FINISHED" not in states:
raise Exception("Valgrind didn't run successfully, states seen: %s" % str(states))
if errors:
raise Exception("Valgrind found some errors: %s" % str(errors))
sys.exit(0)
|
the-stack_0_6397 | # coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trax.fastmath.ops."""
import collections
from absl.testing import parameterized
import gin
import jax.numpy as jnp
import numpy as onp
from tensorflow import test
from trax import fastmath
_TestNamedtuple = collections.namedtuple('_TestNamedtuple', ['x'])
class BackendTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
gin.clear_config()
def override_gin(self, bindings):
gin.parse_config_files_and_bindings(None, bindings)
def test_backend_imports_correctly(self):
backend = fastmath.backend()
self.assertEqual(jnp, backend['np'])
self.assertNotEqual(onp, backend['np'])
self.override_gin("backend.name = 'numpy'")
backend = fastmath.backend()
self.assertNotEqual(jnp, backend['np'])
self.assertEqual(onp, backend['np'])
def test_backend_can_be_set(self):
self.assertEqual(fastmath.backend_name(), 'jax')
fastmath.set_backend('tensorflow-numpy')
self.assertEqual(fastmath.backend_name(), 'tensorflow-numpy')
fastmath.set_backend(None)
self.assertEqual(fastmath.backend_name(), 'jax')
def test_numpy_backend_delegation(self):
# Assert that we are getting JAX's numpy backend.
backend = fastmath.backend()
numpy = fastmath.numpy
self.assertEqual(jnp, backend['np'])
# Assert that `numpy` calls the appropriate gin configured functions and
# properties.
self.assertTrue(numpy.isinf(numpy.inf))
self.assertEqual(jnp.isinf, numpy.isinf)
self.assertEqual(jnp.inf, numpy.inf)
# Assert that we will now get the pure numpy backend.
self.override_gin("backend.name = 'numpy'")
backend = fastmath.backend()
numpy = fastmath.numpy
self.assertEqual(onp, backend['np'])
# Assert that `numpy` calls the appropriate gin configured functions and
# properties.
self.assertTrue(numpy.isinf(numpy.inf))
self.assertEqual(onp.isinf, numpy.isinf)
self.assertEqual(onp.inf, numpy.inf)
@parameterized.named_parameters(
('_' + b.value, b) for b in (fastmath.Backend.JAX, fastmath.Backend.TFNP))
def test_fori_loop(self, backend):
with fastmath.use_backend(backend):
res = fastmath.fori_loop(2, 5, lambda i, x: x + i, 1)
self.assertEqual(res, 1 + 2 + 3 + 4)
def test_nested_map(self):
inp = {'a': ([0, 1], 2), 'b': _TestNamedtuple(3)}
out = {'a': ([1, 2], 3), 'b': _TestNamedtuple(4)}
self.assertEqual(fastmath.nested_map(lambda x: x + 1, inp), out)
def test_nested_stack(self):
inp = [
{'a': ([0, 1], 2), 'b': _TestNamedtuple(3)},
{'a': ([1, 2], 3), 'b': _TestNamedtuple(4)},
]
out = {'a': ([[0, 1], [1, 2]], [2, 3]), 'b': _TestNamedtuple([3, 4])}
onp.testing.assert_equal(fastmath.nested_stack(inp), out)
def test_names_match(self):
# Names match up.
for backend_enum, backend_obj in fastmath.ops._backend_dict.items():
self.assertEqual(backend_enum.value, backend_obj['name'])
# Every backend appears in the dictionary.
for backend_enum in fastmath.ops.Backend:
self.assertIn(backend_enum, fastmath.ops._backend_dict)
def test_use_backend_str(self):
with fastmath.use_backend('tensorflow-numpy'):
self.assertEqual(fastmath.backend_name(), 'tensorflow-numpy')
def test_use_backend_enum(self):
with fastmath.use_backend(fastmath.Backend.NUMPY):
self.assertEqual(fastmath.backend_name(), 'numpy')
if __name__ == '__main__':
test.main()
|
the-stack_0_6398 | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0001_squashed_0012_auto_20170921_1332'),
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('slug', models.SlugField()),
('title', models.CharField(max_length=255)),
('text', models.TextField()),
('url', models.URLField(max_length=255)),
('sponsor', models.ForeignKey(to='sponsors.Sponsor', on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
the-stack_0_6399 | #!/usr/bin/env python3
header = '''
file {
name="/opt/rtcds/userapps/release/vis/common/medm/steppingmotor/OVERVIEW/STANDALONE_STEPPER_OVERVIEW.adl"
version=030107
}
display {
object {
x=1996
y=56
width=512
height=400
}
clr=14
bclr=11
cmap=""
gridSpacing=5
gridOn=0
snapToGrid=0
}
"color map" {
ncolors=65
colors {
ffffff,
ececec,
dadada,
c8c8c8,
bbbbbb,
aeaeae,
9e9e9e,
919191,
858585,
787878,
696969,
5a5a5a,
464646,
2d2d2d,
000000,
00d800,
1ebb00,
339900,
2d7f00,
216c00,
fd0000,
de1309,
be190b,
a01207,
820400,
5893ff,
597ee1,
4b6ec7,
3a5eab,
27548d,
fbf34a,
f9da3c,
eeb62b,
e19015,
cd6100,
ffb0ff,
d67fe2,
ae4ebc,
8b1a96,
610a75,
a4aaff,
8793e2,
6a73c1,
4d52a4,
343386,
c7bb6d,
b79d5c,
a47e3c,
7d5627,
58340f,
99ffff,
73dfff,
4ea5f9,
2a63e4,
0a00b8,
ebf1b5,
d4db9d,
bbc187,
a6a462,
8b8239,
73ff6b,
52da3b,
3cb420,
289315,
1a7309,
}
}
'''
channel_dict = {
'TEST_P0_GAS': 0,
'TEST_P1_GAS': 1,
'TEST_P2_GAS': 2,
'TEST_P3_GAS': 3,
'TEST_P4_GAS': 4,
'TEST_P5_GAS': 5
}
#common = '/opt/rtcds/userapps/release/vis/common'
common = './'
def top(x,y):
width = 300
height = 100
txt = '''
composite {{
object {{
x={x}
y={y}
width=300
height=30
}}
"composite name"=""
"composite file"="./OVERVIEW_TOP.adl"
}}
'''.format(common=common,x=x,y=y)
return txt,width,height
def mini(x,y,system,stage,dof,damp,bio,stepname,stepid,motor,label,mode='ERR'):
width = 480
height = 25
txt = '''
composite {{
object {{
x={x}
y={y}
width=550
height=30
}}
"composite name"=""
"composite file"="./OVERVIEW_MINI.adl;IFO=$(IFO),ifo=$(ifo),SYSTEM={system},STAGE={stage},DOF={dof},DAMP={damp},BIO={bio},STEPNAME={stepname},STEPID={stepid},MOTOR={motor},LABEL={label}"
}}
'''.format(common=common,x=x,y=y,system=system,stage=stage,dof=dof,damp=damp,bio=bio,stepname=stepname,stepid=stepid,label=label,motor=motor)
return txt,width,height
def head(x,y,system,mtype):
width = 300
height = 55
txt = '''
composite {{
object {{
x={x}
y={y}
width=300
height=55
}}
"composite name"=""
"composite file"="./HEAD_MINI.adl;IFO=$(IFO),ifo=$(ifo),SYSTEM={system},TYPE={mtype}"
}}
'''.format(common=common,x=x,y=y,system=system,mtype=mtype)
return txt,width,height
def foot(x,y,stepperid):
width = 300
height = 50
txt = '''
composite {{
object {{
x={x}
y={y}
width=300
height=30
}}
"composite name"=""
"composite file"="./FOOT_MINI.adl;IFO=$(IFO),ifo=$(ifo),STEPPERID={stepperid}"
}}
'''.format(common=common,x=x,y=y,stepperid=stepperid)
return txt,width,height
def mtype_is(system):
if 'TM' in system:
mtype = 'TM'
elif 'BS' == system:
mtype = 'BS'
elif 'SR' in system:
mtype = 'SR'
else:
mtype = None
return mtype
def damp_is(system,mode='ERR'):
if system in ['BS','SR2','SR3','SRM']:
damp = 'DCCTRL'
else:
damp = 'DAMP'
return damp
def bio_is(system):
if system in ['BS','SR2','SR3','SRM']:
bio = 'BIO'
else:
bio = 'BO'
return bio
def stepname_is(dof):
if dof == 'GAS':
return 'STEP_GAS'
else:
return 'STEP_IP'
def stepperid_is(system):
if system == 'PRM' or system == 'PR3':
return 'PR0'
else:
return system
def stepid_is(system,stage):
if stage == 'IP':
return system+'_IP'
else:
return stepperid_is(system)+'_GAS'
def motor_is(system,stage,dof):
if stage == 'IP':
return dof
else:
return channel_dict[system+'_'+stage+'_'+dof]
def label_is(stage,dof):
if stage == 'IP':
if dof == 'F0Y':
return 'F0_Y'
if dof == 'A':
return stage + '_H1'
if dof == 'B':
return stage + '_H2'
if dof == 'C':
return stage + '_H3'
return stage + '_' + dof
if __name__=='__main__':
systems = ['TEST'] # TEST
# ERROR mode
# TypeA
# K1:VIS-ITMY_IP_DAMP_L_INMON
# K1:VIS-ITMY_F0_DAMP_GAS_INMON
# TypeB
# K1:VIS-BS_IP_DCCTRL_L_INMON
# K1:VIS-BS_F0_DCCTRL_GAS_INMON
# TypeBp
# K1:VIS-PR2_BF_DAMP_GAS_INMON
#
# FB mode
# TypeA
# K1:VIS-ETMY_IP_SUMOUT_L_OUTMON
# K1:VIS-ETMY_F0_SUMOUT_GAS_OUTMON
# TypeB
# K1:VIS-BS_IP_DCCTRL_L_OUTMON
# K1:VIS-BS_F0_COILOUTF_GAS_OUTMON
# TypeBp
# K1:VIS-PR2_SF_DAMP_GAS_OUTMON
stages = {'TEST':['P0','P1','P2','P3','P4','P5']}
dofs = {'P0':['GAS'],
'P1':['GAS'],
'P2':['GAS'],
'P3':['GAS'],
'P4':['GAS'],
'P5':['GAS'],}
mode = 'ERR'
height = 10
width = 0
_h0 = height
_w0 = width
contents = header
_h = 0
_w = 0
with open('./STANDALONE_STEPPER_OVERVIEW.adl','w') as f:
txt,w0,h0 = top(width,height)
contents += txt
height += h0
_h0 = height
for num,system in enumerate(systems):
print('{0}'.format(system))
mtype = mtype_is(system)
stepperid = stepperid_is(system)
txt,w0,h0 = head(width,height,system,mtype)
contents += txt
_h = h0
for stage in stages[system]:
print(' - ',stage,dofs[stage])
for dof in dofs[stage]:
damp = damp_is(system)
bio = bio_is(system)
stepname = stepname_is(dof)
stepid = stepid_is(system,stage)
motor = motor_is(system,stage,dof)
label = label_is(stage, dof)
txt,w1,h1 = mini(width,height+_h,system,stage,dof,damp,bio,stepname,stepid,motor,label,mode=mode)
_h += h1
contents += txt
txt,w2,h2 = foot(width,height+_h,stepperid)
contents += txt
_h += h2
_w = max(w0,w1,w2) + 2
q,mod = divmod(num+1,4)
height = q*320 + _h0
width = mod*_w + _w0
f.write(contents)
|
the-stack_0_6400 | # Copyright (c) 2016 Ryan Rossiter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from tempest import exceptions
def read_role_sets_yaml(path):
# Reads in the role sets to use
try:
with open(path, 'r') as yaml_file:
role_sets = yaml.safe_load(yaml_file)
except IOError:
raise exceptions.InvalidConfiguration(
('The path for the role sets file: %s '
'could not be found.') % path)
return role_sets
class RoleSetProvider(object):
"""A class used to provide the role sets to be used."""
def __init__(self, role_sets_file):
super(RoleSetProvider, self).__init__()
role_sets = read_role_sets_yaml(role_sets_file)
        self.role_sets = [RoleSet(name, roles)
                          for name, roles in role_sets.items()]
def get_role_sets(self):
"""Gets the role sets to be used."""
return self.role_sets
class RoleSet(object):
"""An object used to hold the group of roles under a classificiation.
This associates a name to a group of OpenStack-defined roles. These
users are used to map to successes or failures in the test listing
file.
"""
def __init__(self, set_name, roles):
self._name = set_name
self._roles = roles
@property
def name(self):
return self._name
@property
def roles(self):
return self._roles
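# Hedged usage sketch: 'role_sets.yaml' is a hypothetical file mapping role-set
# names to lists of OpenStack roles, e.g. {"admin": ["admin"], "member": ["member"]}.
def _example_role_set_usage(path='role_sets.yaml'):
    provider = RoleSetProvider(path)
    return [(role_set.name, role_set.roles)
            for role_set in provider.get_role_sets()]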
|
the-stack_0_6403 | from viadot.flows import DuckDBTransform
from viadot.tasks import DuckDBQuery, DuckDBToDF
import pytest
import pandas as pd
from unittest import mock
from viadot.sources import DuckDB
import os
TABLE = "test_table"
SCHEMA = "test_schema"
TABLE_MULTIPLE_PARQUETS = "test_multiple_parquets"
DATABASE_PATH = "test_db_123.duckdb"
@pytest.fixture(scope="session")
def duckdb():
duckdb = DuckDB(credentials=dict(database=DATABASE_PATH))
yield duckdb
os.remove(DATABASE_PATH)
def test_create_table_from_parquet(duckdb, TEST_PARQUET_FILE_PATH):
duckdb.create_table_from_parquet(
schema=SCHEMA, table=TABLE, path=TEST_PARQUET_FILE_PATH
)
def test_duckdb_query():
db_query = DuckDBQuery(credentials=dict(database=DATABASE_PATH))
result = db_query.run(f"select * from {SCHEMA}.{TABLE}")
assert type(result) == list
assert len(result) > 1
def test_duckdb_to_df():
instance = DuckDBToDF(
schema=SCHEMA, table=TABLE, credentials=dict(database=DATABASE_PATH)
)
test_df = instance.run()
assert test_df.shape > (1, 1)
assert type(test_df) == pd.core.frame.DataFrame
def test_duckdb_transform_init():
instance = DuckDBTransform("test_duckdb_transform", query="select * from test")
assert instance
def test_duckdb_transform_flow_run():
instance = DuckDBTransform(
"test_duckdb_transform",
query=f"select * from {SCHEMA}.{TABLE}",
credentials=dict(database=DATABASE_PATH),
)
result = instance.run()
assert result.is_successful()
|
the-stack_0_6404 | from unittest import TestCase, mock
from unittest.mock import MagicMock
from sklearn.ensemble import RandomForestClassifier
from source.analysis.classification.classifier_service import ClassifierService
from source.analysis.setup.data_split import DataSplit
from source.analysis.performance.raw_performance import RawPerformance
import numpy as np
from test.test_helper import TestHelper
class TestClassifierService(TestCase):
@mock.patch('source.analysis.classification.classifier_service.Pool')
@mock.patch('source.analysis.classification.classifier_service.cpu_count')
@mock.patch('source.analysis.classification.classifier_service.partial')
def test_runs_training_and_testing_in_parallel(self, mock_partial, mock_cpu_count, mock_pool_constructor):
expected_partial = "I am a partial"
mock_partial.return_value = expected_partial
mock_pool = MagicMock()
mock_pool_constructor.return_value = mock_pool
data_splits = [DataSplit(training_set=["subjectA", "subjectB", "subjectC"], testing_set=["subjectD"]),
DataSplit(training_set=["subjectA", "subjectB", "subjectD"], testing_set=["subjectC"])]
classifier = RandomForestClassifier()
subject_dictionary = {}
feature_set = {}
mock_pool.map.return_value = expected_pool_return = [3, 4]
expected_number_of_cpus = 32
mock_cpu_count.return_value = expected_number_of_cpus
results = ClassifierService.run_sw(data_splits, classifier, subject_dictionary, feature_set)
mock_partial.assert_called_once_with(ClassifierService.run_single_data_split_sw,
attributed_classifier=classifier,
subject_dictionary=subject_dictionary, feature_set=feature_set)
mock_pool_constructor.assert_called_once_with(expected_number_of_cpus)
mock_pool.map.assert_called_once_with(expected_partial, data_splits)
self.assertEqual(expected_pool_return, results)
@mock.patch.object(ClassifierService, 'get_class_weights')
@mock.patch('source.analysis.classification.classifier_service.ParameterSearch')
@mock.patch('source.analysis.classification.classifier_service.ClassifierInputBuilder.get_sleep_wake_inputs')
def test_run_sleep_wake(self, mock_get_sleep_wake_inputs, mock_parameter_search, mock_class_weights):
mock_classifier = MagicMock()
mock_classifier.classifier.predict_proba.return_value = class_probabilities = np.array([[0.1, 0.9], [0, 1]])
training_x = np.array([1, 2, 3, 4])
training_y = np.array([0, 0, 0, 0])
testing_x = np.array([5, 6, 7, 8])
testing_y = np.array([0, 1, 0, 1])
mock_get_sleep_wake_inputs.side_effect = [(training_x, training_y), (testing_x, testing_y)]
mock_parameter_search.run_search.return_value = {}
mock_class_weights.return_value = {0: 0.2, 1: 0.8}
subject_dictionary = {}
feature_set = {}
data_split = DataSplit(training_set=["subjectA", "subjectB", "subjectC"],
testing_set=["subject1"])
raw_performance = ClassifierService.run_single_data_split_sw(data_split, mock_classifier, subject_dictionary,
feature_set)
self.assertListEqual(testing_y.tolist(), raw_performance.true_labels.tolist())
self.assertListEqual(class_probabilities.tolist(), raw_performance.class_probabilities.tolist())
mock_class_weights.assert_called_once_with(training_y)
mock_parameter_search.run_search.assert_called_once_with(mock_classifier, training_x, training_y,
scoring='roc_auc')
mock_classifier.classifier.fit.assert_called_once_with(training_x, training_y)
mock_classifier.classifier.predict_proba.assert_called_once_with(testing_x)
|
the-stack_0_6405 | import contextlib
import time
from math import ceil, log
from mock import mock, MagicMock, Mock
from pyqryptonight.pyqryptonight import StringToUInt256
from qrl.core import config
from qrl.core.Block import Block
from qrl.core.ChainManager import ChainManager
from qrl.core.DifficultyTracker import DifficultyTracker
from qrl.core.GenesisBlock import GenesisBlock
from qrl.core.PoWValidator import PoWValidator
from qrl.core.State import State
from qrl.core.Transaction import SlaveTransaction
from qrl.core.qrlnode import QRLNode
from tests.misc.helper import get_alice_xmss, get_bob_xmss, set_qrl_dir
class MockedBlockchain(object):
MAXNUMBLOCKS = 1000
def __init__(self, qrlnode, time_mock, ntp_mock):
required_height = ceil(log(self.MAXNUMBLOCKS, 2))
required_height = int(required_height + required_height % 2)
self.qrlnode = qrlnode
self.time_mock = time_mock
self.ntp_mock = ntp_mock
self.alice_xmss = get_alice_xmss(xmss_height=required_height)
self.bob_xmss = get_bob_xmss()
def create_block(self, prev_hash, mining_address=None):
if not mining_address:
mining_address = self.alice_xmss.address
transactions = []
block_prev = self.qrlnode.get_block_from_hash(prev_hash)
block_idx = block_prev.block_number + 1
if block_idx == 1:
slave_tx = SlaveTransaction.create(slave_pks=[self.bob_xmss.pk],
access_types=[0],
fee=0,
xmss_pk=self.alice_xmss.pk)
slave_tx.sign(self.alice_xmss)
slave_tx._data.nonce = 1
transactions = [slave_tx]
self.time_mock.return_value = self.time_mock.return_value + 60
self.ntp_mock.return_value = self.ntp_mock.return_value + 60
block_new = Block.create(block_number=block_idx,
prevblock_headerhash=block_prev.headerhash,
transactions=transactions,
miner_address=mining_address)
while not PoWValidator().validate_mining_nonce(state=self.qrlnode._chain_manager.state,
blockheader=block_new.blockheader,
enable_logging=False):
block_new.set_nonces(block_new.mining_nonce + 1, 0)
return block_new
def add_block(self, block):
return self.qrlnode._chain_manager.add_block(block)
def add_new_block(self, mining_address=None):
block_prev = self.qrlnode.get_block_last()
block_new = self.create_block(prev_hash=block_prev.headerhash, mining_address=mining_address)
self.qrlnode._chain_manager.add_block(block_new)
@staticmethod
@contextlib.contextmanager
def create(num_blocks, mining_address=None):
start_time = time.time()
with mock.patch('qrl.core.misc.ntp.getTime') as ntp_mock, \
set_qrl_dir('no_data'), \
State() as state, \
mock.patch('time.time') as time_mock: # noqa
time_mock.return_value = start_time
ntp_mock.return_value = start_time
state.get_measurement = MagicMock(return_value=10000000)
genesis_difficulty = config.dev.genesis_difficulty
try:
config.dev.genesis_difficulty = 10
genesis_block = GenesisBlock()
chain_manager = ChainManager(state)
chain_manager.load(genesis_block)
chain_manager._difficulty_tracker = Mock()
dt = DifficultyTracker()
tmp_difficulty = StringToUInt256('2')
tmp_target = dt.get_target(tmp_difficulty)
chain_manager._difficulty_tracker.get = MagicMock(return_value=(tmp_difficulty, tmp_target))
qrlnode = QRLNode(state, mining_address=b'')
qrlnode.set_chain_manager(chain_manager)
mock_blockchain = MockedBlockchain(qrlnode, time_mock, ntp_mock)
for block_idx in range(1, num_blocks + 1):
mock_blockchain.add_new_block(mining_address)
yield mock_blockchain
finally:
config.dev.genesis_difficulty = genesis_difficulty
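# Hedged usage sketch: MockedBlockchain.create is a context manager, so test code
# typically drives it with a `with` block; the pre-built alice XMSS tree is sized
# for at most MAXNUMBLOCKS blocks.
def _example_mocked_chain_usage(num_blocks=5):
    with MockedBlockchain.create(num_blocks) as mock_blockchain:
        mock_blockchain.add_new_block()
        return mock_blockchain.qrlnode.get_block_last().block_number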
|
the-stack_0_6406 | """Unit tests for JWTAuthenticator"""
import datetime
from pathlib import Path
import pytest
import jwt
from karp.errors import ClientErrorCodes
from karp.domain.errors import AuthError
from karp.infrastructure.jwt.jwt_auth_service import JWTAuthenticator
from . import adapters
with open(Path(__file__).parent / ".." / "data/private_key.pem") as fp:
jwt_private_key = fp.read()
@pytest.fixture
def jwt_authenticator():
return JWTAuthenticator(
pubkey_path=Path("karp/tests/data/pubkey.pem"),
resource_uow=adapters.FakeResourceUnitOfWork(),
)
def test_authenticate_invalid_token(jwt_authenticator):
with pytest.raises(AuthError) as exc_info:
jwt_authenticator.authenticate("scheme", "invalid")
assert exc_info.value.code == ClientErrorCodes.AUTH_GENERAL_ERROR
def test_authenticate_expired_token(jwt_authenticator):
token = jwt.encode(
{"exp": datetime.datetime(2000, 1, 1)}, jwt_private_key, algorithm="RS256"
)
with pytest.raises(AuthError) as exc_info:
jwt_authenticator.authenticate("scheme", token)
assert exc_info.value.code == ClientErrorCodes.EXPIRED_JWT
|
the-stack_0_6407 |
from .engine import Engine
import pyglet
from pyglet import gl
from gem import vector
import ctypes as ct
import random
import math
class Rect(object):
def __init__(self, minVec, maxVec):
self.min = minVec
self.max = maxVec
def clone(self):
return Rect(self.min.clone(), self.max.clone())
def check_aabb(self, rect2):
return (self.max.x >= rect2.min.x and
rect2.max.x >= self.min.x and
self.max.y >= rect2.min.y and
rect2.max.y >= self.min.y)
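# Quick AABB sanity check (illustrative values, using the gem vector API as above):
#   a = Rect(vector.Vector(2, data=[0, 0]), vector.Vector(2, data=[10, 10]))
#   b = Rect(vector.Vector(2, data=[5, 5]), vector.Vector(2, data=[15, 15]))
#   a.check_aabb(b)  # True: the boxes overlap on both axes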
class BoundingBoxMixin(object):
def __init__(self):
self.ctPoints = None
self.ctPointT = (gl.GLfloat * 16)
self.bbColor = (1.0, 1.0, 1.0)
def set_bb_color(self, r, g, b):
self.bbColor = (r, g, b)
def render_bounding_box(self):
gl.glLineWidth(1.0)
self.ctPoints = self.ctPointT(
self.rect.min.x, self.rect.min.y,
self.rect.max.x, self.rect.min.y,
self.rect.max.x, self.rect.min.y,
self.rect.max.x, self.rect.max.y,
self.rect.max.x, self.rect.max.y,
self.rect.min.x, self.rect.max.y,
self.rect.min.x, self.rect.max.y,
self.rect.min.x, self.rect.min.y,
)
point_ptr = ct.cast(self.ctPoints, ct.c_void_p)
gl.glColor3f(*self.bbColor)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glVertexPointer(2, gl.GL_FLOAT, 0, point_ptr)
gl.glDrawArrays(gl.GL_LINES, 0, 8)
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
class SelectionBox(BoundingBoxMixin):
def __init__(self):
super(SelectionBox, self).__init__()
self.rect = Rect(vector.Vector(2), vector.Vector(2))
def set_start(self, vec):
self.rect.min = vec.clone()
def set_end(self, vec):
self.rect.max = vec.clone()
def get_selected(self, objects):
selected = []
rect = self.rect.clone()
if self.rect.min.x > self.rect.max.x:
rect.min.x = self.rect.max.x
rect.max.x = self.rect.min.x
if self.rect.min.y > self.rect.max.y:
rect.min.y = self.rect.max.y
rect.max.y = self.rect.min.y
for obj in objects:
rec = obj.rect
if rect.check_aabb(rec):
selected.append(obj)
return selected
def render(self):
self.render_bounding_box()
class Unit(BoundingBoxMixin):
def __init__(self, imgPath, name):
super(Unit, self).__init__()
        img = pyglet.image.load(imgPath)
self.sprite = pyglet.sprite.Sprite(img)
self.position = vector.Vector(2)
self.rect = Rect(vector.Vector(2), vector.Vector(2))
self.width = self.sprite.width
self.height = self.sprite.height
self.size = vector.Vector(2, data=[self.width, self.height])
self.lenVelocity = vector.Vector(2, data=[random.random()*10, random.random()*10])
self.mass = 1.0
self.angVelocity = 0.0
self.angle = 0.0
self.momentOfInertia = (self.size.dot(self.size) * self.mass) / 12
        self.torque = 0.0  # scalar torque about the center, updated in Game.simulate_bodies
self.set_bb_color(0.0, 0.0, 0.0)
self.update_rect()
def update_rect(self):
self.rect.min = self.position
self.rect.max.x = self.position.x + self.width
self.rect.max.y = self.position.y + self.height
def set_pos(self, vec):
self.position = vec.clone()
def update(self, dt):
self.sprite.x = self.position.x
self.sprite.y = self.position.y
self.sprite.rotation = math.degrees(self.angle)
self.update_rect()
def render(self):
self.sprite.draw()
class Particle(object):
def __init__(self, x,y):
pos = [x, y]
self.position = vector.Vector(2, data=pos)
self.velocity = vector.Vector(2, data=[random.random()*10, random.random()*10])
self.mass = 1.0 + random.random()
self.rect = Rect(self.position, self.position)
class Game(object):
def __init__(self):
self.engine = Engine()
self.engine.add_listener(self.process_events)
self.engine.register_run(self.do_run)
self.units = []
self.width = 0
self.height = 0
self.screenRect = Rect(vector.Vector(2), vector.Vector(2, data=[self.width, self.height]))
self.selecting = False
self.select = SelectionBox()
self.selected = None
self.unitsSelected = []
self.mousePos = vector.Vector(2)
self.currentClick = vector.Vector(2)
self.mouseButtons = []
self.points = []
#for i in range(10):
# self.points.append(Particle(random.random()*self.width, self.height))
self.ctPoints = None
self.keys = []
def process_events(self, event, data):
if event == 'mouse_move':
x, y = data
self.mousePos.x = x
self.mousePos.y = y
elif event == 'mouse_down':
button, modifiers = data
self.mouseButtons.append(button)
self.currentClick = self.mousePos.clone()
elif event == 'mouse_up':
button, modifiers = data
self.mouseButtons.remove(button)
if self.currentClick.x == self.mousePos.x and self.currentClick.y == self.mousePos.y:
self.unitsSelected = []
elif event == 'key_down':
self.keys.append(data[0])
elif event == 'key_up':
self.keys.remove(data[0])
elif event == 'resize':
width, height = data
self.resize(width, height)
elif event == 'on_close':
self.engine.stop()
def resize(self, width, height):
self.width = width
self.height = height
self.screenRect.max.x = width
self.screenRect.max.y = height
gl.glViewport(0, 0, width, height)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(0, width, 0, height, -1.0, 1.0)
gl.glMatrixMode(gl.GL_MODELVIEW)
def update(self, dt):
#if len(self.points) < 2000:
# for i in range(6):
# self.points.append(Particle(random.random()*self.width, self.height))
if pyglet.window.key.E in self.keys:
unit = Unit('data/player.png', 'unit')
unit.set_pos(self.mousePos)
self.units.append(unit)
elif pyglet.window.key.Q in self.keys:
for i in range(6):
self.points.append(Particle(self.mousePos.x, self.mousePos.y))
elif pyglet.window.key.M in self.keys:
speedPerTick = 100.0 * dt
for obj in self.unitsSelected:
objMin = obj.position
delta = self.mousePos - objMin
distance = delta.magnitude()
if distance > speedPerTick:
ratio = speedPerTick / distance
move = delta * ratio
final = objMin + move
else:
final = self.mousePos
obj.set_pos(final)
elif pyglet.window.key.DELETE in self.keys:
for obj in self.unitsSelected:
self.units.remove(obj)
self.unitsSelected = []
if 1 in self.mouseButtons:
if not self.selecting:
if self.currentClick != self.mousePos:
self.selecting = True
self.select.set_start(self.mousePos)
else:
if self.selecting:
self.selecting = False
if self.selecting:
self.select.set_end(self.mousePos)
self.unitsSelected = self.select.get_selected(self.units)
for unit in self.units:
unit.update(dt)
self.simulate_points(dt)
self.simulate_bodies(dt)
def simulate_points(self, dt):
for point in self.points:
if not self.screenRect.check_aabb(point.rect):
self.points.remove(point)
# point.__init__(random.random()*self.width, self.height)
force = vector.Vector(2, data=[0, point.mass * -9.81])
acceleration = force / point.mass
point.velocity += acceleration * dt
point.position += point.velocity * dt
def simulate_bodies(self, dt):
for unit in self.units:
# calc force
force = vector.Vector(2, data=[0, unit.mass * -9.81])
half = unit.size / 2
unit.torque = half.x * force.y - half.y * force.x
lenAcceleration = force / unit.mass
unit.lenVelocity += lenAcceleration * dt
unit.position += unit.lenVelocity * dt
angAcceleration = unit.torque / unit.momentOfInertia
unit.angVelocity += angAcceleration * dt
unit.angle += unit.angVelocity * dt
def render_points(self):
renderPoints = []
for point in self.points:
renderPoints.extend(point.position.vector)
self.ctPoints = (gl.GLfloat * len(renderPoints))(*renderPoints)
point_ptr = ct.cast(self.ctPoints, ct.c_void_p)
gl.glColor3f(1.0, 1.0, 1.0)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glVertexPointer(2, gl.GL_FLOAT, 0, point_ptr)
gl.glDrawArrays(gl.GL_POINTS, 0, len(renderPoints)//2)
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
def render(self):
self.engine.window.switch_to()
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glClearColor(0.5, 0.5, 0.5, 1.0)
self.render_points()
for unit in self.units:
if unit in self.unitsSelected:
unit.render_bounding_box()
unit.render()
if self.selecting:
self.select.render()
self.engine.window.flip()
def do_run(self, dt):
self.update(dt)
self.render()
def run(self):
self.engine.run()
def main():
game = Game()
game.run()
|
the-stack_0_6408 | #!/usr/bin/env python3
# This file is copied from GCoder.
#
# GCoder is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GCoder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import math
import datetime
import logging
from array import array
gcode_parsed_args = ["x", "y", "e", "f", "z", "i", "j"]
gcode_parsed_nonargs = ["g", "t", "m", "n"]
to_parse = "".join(gcode_parsed_args + gcode_parsed_nonargs)
gcode_exp = re.compile(r"\([^\(\)]*\)|;.*|[/\*].*\n|([%s])([-+]?[0-9]*\.?[0-9]*)" % to_parse)
gcode_strip_comment_exp = re.compile(r"\([^\(\)]*\)|;.*|[/\*].*\n")
m114_exp = re.compile(r"\([^\(\)]*\)|[/\*].*\n|([XYZ]):?([-+]?[0-9]*\.?[0-9]*)")
specific_exp = r"(?:\([^\(\)]*\))|(?:;.*)|(?:[/\*].*\n)|(%s[-+]?[0-9]*\.?[0-9]*)"
move_gcodes = ["G0", "G1", "G2", "G3"]
class PyLine:
__slots__ = ('x', 'y', 'z', 'e', 'f', 'i', 'j',
'raw', 'command', 'is_move',
'relative', 'relative_e',
'current_x', 'current_y', 'current_z', 'extruding',
'current_tool',
'gcview_end_vertex')
def __init__(self, l):
self.raw = l
def __getattr__(self, name):
return None
class PyLightLine:
__slots__ = ('raw', 'command')
def __init__(self, l):
self.raw = l
def __getattr__(self, name):
return None
try:
from . import gcoder_line
Line = gcoder_line.GLine
LightLine = gcoder_line.GLightLine
except Exception as e:
logging.warning("Memory-efficient GCoder implementation unavailable: %s" % e)
Line = PyLine
LightLine = PyLightLine
def find_specific_code(line, code):
exp = specific_exp % code
bits = [bit for bit in re.findall(exp, line.raw) if bit]
if not bits: return None
else: return float(bits[0][1:])
def S(line):
return find_specific_code(line, "S")
def P(line):
return find_specific_code(line, "P")
def split(line):
split_raw = gcode_exp.findall(line.raw.lower())
if split_raw and split_raw[0][0] == "n":
del split_raw[0]
if not split_raw:
line.command = line.raw
line.is_move = False
logging.warning("raw G-Code line \"%s\" could not be parsed" % line.raw)
return [line.raw]
command = split_raw[0]
line.command = command[0].upper() + command[1]
line.is_move = line.command in move_gcodes
return split_raw
def parse_coordinates(line, split_raw, imperial = False, force = False):
# Not a G-line, we don't want to parse its arguments
if not force and line.command[0] != "G":
return
unit_factor = 25.4 if imperial else 1
for bit in split_raw:
code = bit[0]
if code not in gcode_parsed_nonargs and bit[1]:
setattr(line, code, unit_factor * float(bit[1]))
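# Hedged sketch of how a raw move line flows through split()/parse_coordinates();
# the values in the comments assume metric (non-imperial) mode.
def _example_parse_line(raw="G1 X10.5 Y-3 F3000"):
    line = Line(raw)
    bits = split(line)              # line.command == "G1", line.is_move is True
    parse_coordinates(line, bits)   # line.x == 10.5, line.y == -3.0, line.f == 3000.0
    return line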
class Layer(list):
__slots__ = ("duration", "z")
def __init__(self, lines, z = None):
super(Layer, self).__init__(lines)
self.z = z
class GCode:
line_class = Line
lines = None
layers = None
all_layers = None
layer_idxs = None
line_idxs = None
append_layer = None
append_layer_id = None
imperial = False
relative = False
relative_e = False
current_tool = 0
# Home position: current absolute position counted from machine origin
home_x = 0
home_y = 0
home_z = 0
# Current position: current absolute position counted from machine origin
current_x = 0
current_y = 0
current_z = 0
# For E this is the absolute position from machine start
current_e = 0
current_e_multi=[0]
total_e = 0
total_e_multi=[0]
max_e = 0
max_e_multi=[0]
# Current feedrate
current_f = 0
# Offset: current offset between the machine origin and the machine current
# absolute coordinate system (as shifted by G92s)
offset_x = 0
offset_y = 0
offset_z = 0
offset_e = 0
offset_e_multi = [0]
# Expected behavior:
# - G28 X => X axis is homed, offset_x <- 0, current_x <- home_x
# - G92 Xk => X axis does not move, so current_x does not change
# and offset_x <- current_x - k,
# - absolute G1 Xk => X axis moves, current_x <- offset_x + k
# How to get...
# current abs X from machine origin: current_x
# current abs X in machine current coordinate system: current_x - offset_x
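    # Worked example (illustrative numbers): starting from current_x = 50 and
    # offset_x = 0, "G92 X10" sets offset_x = 50 - 10 = 40 without moving; a later
    # absolute "G1 X5" moves to current_x = offset_x + 5 = 45, so abs_x = 5 in the
    # machine's current (G92-shifted) coordinate system.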
filament_length = None
filament_length_multi=[0]
duration = None
xmin = None
xmax = None
ymin = None
ymax = None
zmin = None
zmax = None
width = None
depth = None
height = None
est_layer_height = None
# abs_x is the current absolute X in machine current coordinate system
# (after the various G92 transformations) and can be used to store the
# absolute position of the head at a given time
def _get_abs_x(self):
return self.current_x - self.offset_x
abs_x = property(_get_abs_x)
def _get_abs_y(self):
return self.current_y - self.offset_y
abs_y = property(_get_abs_y)
def _get_abs_z(self):
return self.current_z - self.offset_z
abs_z = property(_get_abs_z)
def _get_abs_e(self):
return self.current_e - self.offset_e
abs_e = property(_get_abs_e)
def _get_abs_e_multi(self,i):
return self.current_e_multi[i] - self.offset_e_multi[i]
abs_e = property(_get_abs_e)
def _get_abs_pos(self):
return (self.abs_x, self.abs_y, self.abs_z)
abs_pos = property(_get_abs_pos)
def _get_current_pos(self):
return (self.current_x, self.current_y, self.current_z)
current_pos = property(_get_current_pos)
def _get_home_pos(self):
return (self.home_x, self.home_y, self.home_z)
def _set_home_pos(self, home_pos):
if home_pos:
self.home_x, self.home_y, self.home_z = home_pos
home_pos = property(_get_home_pos, _set_home_pos)
def _get_layers_count(self):
return len(self.all_zs)
layers_count = property(_get_layers_count)
def __init__(self, data = None, home_pos = None,
layer_callback = None, deferred = False):
if not deferred:
self.prepare(data, home_pos, layer_callback)
def prepare(self, data = None, home_pos = None, layer_callback = None):
self.home_pos = home_pos
if data:
line_class = self.line_class
self.lines = [line_class(l2) for l2 in
(l.strip() for l in data)
if l2]
self._preprocess(build_layers = True,
layer_callback = layer_callback)
else:
self.lines = []
self.append_layer_id = 0
self.append_layer = Layer([])
self.all_layers = [self.append_layer]
self.all_zs = set()
self.layers = {}
self.layer_idxs = array('I', [])
self.line_idxs = array('I', [])
def has_index(self, i):
return i < len(self)
def __len__(self):
return len(self.line_idxs)
def __iter__(self):
return self.lines.__iter__()
def prepend_to_layer(self, commands, layer_idx):
# Prepend commands in reverse order
commands = [c.strip() for c in commands[::-1] if c.strip()]
layer = self.all_layers[layer_idx]
# Find start index to append lines
# and end index to append new indices
start_index = self.layer_idxs.index(layer_idx)
for i in range(start_index, len(self.layer_idxs)):
if self.layer_idxs[i] != layer_idx:
end_index = i
break
else:
end_index = i + 1
end_line = self.line_idxs[end_index - 1]
for i, command in enumerate(commands):
gline = Line(command)
# Split to get command
split(gline)
# Force is_move to False
gline.is_move = False
# Insert gline at beginning of layer
layer.insert(0, gline)
# Insert gline at beginning of list
self.lines.insert(start_index, gline)
# Update indices arrays & global gcodes list
self.layer_idxs.insert(end_index + i, layer_idx)
self.line_idxs.insert(end_index + i, end_line + i + 1)
return commands[::-1]
def rewrite_layer(self, commands, layer_idx):
# Prepend commands in reverse order
commands = [c.strip() for c in commands[::-1] if c.strip()]
layer = self.all_layers[layer_idx]
# Find start index to append lines
# and end index to append new indices
start_index = self.layer_idxs.index(layer_idx)
for i in range(start_index, len(self.layer_idxs)):
if self.layer_idxs[i] != layer_idx:
end_index = i
break
else:
end_index = i + 1
self.layer_idxs = self.layer_idxs[:start_index] + array('I', len(commands) * [layer_idx]) + self.layer_idxs[end_index:]
self.line_idxs = self.line_idxs[:start_index] + array('I', range(len(commands))) + self.line_idxs[end_index:]
del self.lines[start_index:end_index]
del layer[:]
for i, command in enumerate(commands):
gline = Line(command)
# Split to get command
split(gline)
# Force is_move to False
gline.is_move = False
# Insert gline at beginning of layer
layer.insert(0, gline)
# Insert gline at beginning of list
self.lines.insert(start_index, gline)
return commands[::-1]
def append(self, command, store = True):
command = command.strip()
if not command:
return
gline = Line(command)
self._preprocess([gline])
if store:
self.lines.append(gline)
self.append_layer.append(gline)
self.layer_idxs.append(self.append_layer_id)
self.line_idxs.append(len(self.append_layer))
return gline
def _preprocess(self, lines = None, build_layers = False,
layer_callback = None):
"""Checks for imperial/relativeness settings and tool changes"""
if not lines:
lines = self.lines
imperial = self.imperial
relative = self.relative
relative_e = self.relative_e
current_tool = self.current_tool
current_x = self.current_x
current_y = self.current_y
current_z = self.current_z
offset_x = self.offset_x
offset_y = self.offset_y
offset_z = self.offset_z
# Extrusion computation
current_e = self.current_e
offset_e = self.offset_e
total_e = self.total_e
max_e = self.max_e
current_e_multi = self.current_e_multi[current_tool]
offset_e_multi = self.offset_e_multi[current_tool]
total_e_multi = self.total_e_multi[current_tool]
max_e_multi = self.max_e_multi[current_tool]
# Store this one out of the build_layers scope for efficiency
cur_layer_has_extrusion = False
# Initialize layers and other global computations
if build_layers:
# Bounding box computation
xmin = float("inf")
ymin = float("inf")
zmin = 0
xmax = float("-inf")
ymax = float("-inf")
zmax = float("-inf")
# Also compute extrusion-only values
xmin_e = float("inf")
ymin_e = float("inf")
xmax_e = float("-inf")
ymax_e = float("-inf")
# Duration estimation
# TODO:
# get device caps from firmware: max speed, acceleration/axis
# (including extruder)
# calculate the maximum move duration accounting for above ;)
lastx = lasty = lastz = laste = lastf = 0.0
lastdx = 0
lastdy = 0
x = y = e = f = 0.0
currenttravel = 0.0
moveduration = 0.0
totalduration = 0.0
acceleration = 2000.0 # mm/s^2
layerbeginduration = 0.0
# Initialize layers
all_layers = self.all_layers = []
all_zs = self.all_zs = set()
layer_idxs = self.layer_idxs = []
line_idxs = self.line_idxs = []
layer_id = 0
layer_line = 0
last_layer_z = None
prev_z = None
prev_base_z = (None, None)
cur_z = None
cur_lines = []
if self.line_class != Line:
get_line = lambda l: Line(l.raw)
else:
get_line = lambda l: l
for true_line in lines:
# # Parse line
# Use a heavy copy of the light line to preprocess
line = get_line(true_line)
split_raw = split(line)
if line.command:
# Update properties
if line.is_move:
line.relative = relative
line.relative_e = relative_e
line.current_tool = current_tool
elif line.command == "G20":
imperial = True
elif line.command == "G21":
imperial = False
elif line.command == "G90":
relative = False
relative_e = False
elif line.command == "G91":
relative = True
relative_e = True
elif line.command == "M82":
relative_e = False
elif line.command == "M83":
relative_e = True
elif line.command[0] == "T":
try:
current_tool = int(line.command[1:])
except:
pass #handle T? by treating it as no tool change
while(current_tool+1>len(self.current_e_multi)):
self.current_e_multi+=[0]
self.offset_e_multi+=[0]
self.total_e_multi+=[0]
self.max_e_multi+=[0]
current_e_multi = self.current_e_multi[current_tool]
offset_e_multi = self.offset_e_multi[current_tool]
total_e_multi = self.total_e_multi[current_tool]
max_e_multi = self.max_e_multi[current_tool]
if line.command[0] == "G":
parse_coordinates(line, split_raw, imperial)
# Compute current position
if line.is_move:
x = line.x
y = line.y
z = line.z
if line.f is not None:
self.current_f = line.f
if line.relative:
x = current_x + (x or 0)
y = current_y + (y or 0)
z = current_z + (z or 0)
else:
if x is not None: x = x + offset_x
if y is not None: y = y + offset_y
if z is not None: z = z + offset_z
if x is not None: current_x = x
if y is not None: current_y = y
if z is not None: current_z = z
elif line.command == "G28":
home_all = not any([line.x, line.y, line.z])
if home_all or line.x is not None:
offset_x = 0
current_x = self.home_x
if home_all or line.y is not None:
offset_y = 0
current_y = self.home_y
if home_all or line.z is not None:
offset_z = 0
current_z = self.home_z
elif line.command == "G92":
if line.x is not None: offset_x = current_x - line.x
if line.y is not None: offset_y = current_y - line.y
if line.z is not None: offset_z = current_z - line.z
line.current_x = current_x
line.current_y = current_y
line.current_z = current_z
# # Process extrusion
if line.e is not None:
if line.is_move:
if line.relative_e:
line.extruding = line.e > 0
total_e += line.e
current_e += line.e
total_e_multi += line.e
current_e_multi += line.e
else:
new_e = line.e + offset_e
line.extruding = new_e > current_e
total_e += new_e - current_e
current_e = new_e
new_e_multi = line.e + offset_e_multi
total_e_multi += new_e_multi - current_e_multi
current_e_multi = new_e_multi
max_e = max(max_e, total_e)
max_e_multi=max(max_e_multi, total_e_multi)
cur_layer_has_extrusion |= line.extruding
elif line.command == "G92":
offset_e = current_e - line.e
offset_e_multi = current_e_multi - line.e
self.current_e_multi[current_tool]=current_e_multi
self.offset_e_multi[current_tool]=offset_e_multi
self.max_e_multi[current_tool]=max_e_multi
self.total_e_multi[current_tool]=total_e_multi
# # Create layers and perform global computations
if build_layers:
# Update bounding box
if line.is_move:
if line.extruding:
if line.current_x is not None:
xmin_e = min(xmin_e, line.current_x)
xmax_e = max(xmax_e, line.current_x)
if line.current_y is not None:
ymin_e = min(ymin_e, line.current_y)
ymax_e = max(ymax_e, line.current_y)
if max_e <= 0:
if line.current_x is not None:
xmin = min(xmin, line.current_x)
xmax = max(xmax, line.current_x)
if line.current_y is not None:
ymin = min(ymin, line.current_y)
ymax = max(ymax, line.current_y)
# Compute duration
if line.command == "G0" or line.command == "G1":
x = line.x if line.x is not None else lastx
y = line.y if line.y is not None else lasty
z = line.z if line.z is not None else lastz
e = line.e if line.e is not None else laste
# mm/s vs mm/m => divide by 60
f = line.f / 60.0 if line.f is not None else lastf
# given last feedrate and current feedrate calculate the
# distance needed to achieve current feedrate.
# if travel is longer than req'd distance, then subtract
# distance to achieve full speed, and add the time it took
# to get there.
# then calculate the time taken to complete the remaining
# distance
# FIXME: this code has been proven to be super wrong when 2
# subsquent moves are in opposite directions, as requested
# speed is constant but printer has to fully decellerate
# and reaccelerate
# The following code tries to fix it by forcing a full
# reacceleration if this move is in the opposite direction
# of the previous one
dx = x - lastx
dy = y - lasty
if dx * lastdx + dy * lastdy <= 0:
lastf = 0
currenttravel = math.hypot(dx, dy)
if currenttravel == 0:
if line.z is not None:
currenttravel = abs(line.z) if line.relative else abs(line.z - lastz)
elif line.e is not None:
currenttravel = abs(line.e) if line.relative_e else abs(line.e - laste)
# Feedrate hasn't changed, no acceleration/decceleration planned
if f == lastf:
moveduration = currenttravel / f if f != 0 else 0.
else:
# FIXME: review this better
# this looks wrong : there's little chance that the feedrate we'll decelerate to is the previous feedrate
# shouldn't we instead look at three consecutive moves ?
distance = 2 * abs(((lastf + f) * (f - lastf) * 0.5) / acceleration) # multiply by 2 because we have to accelerate and decelerate
if distance <= currenttravel and lastf + f != 0 and f != 0:
moveduration = 2 * distance / (lastf + f) # This is distance / mean(lastf, f)
moveduration += (currenttravel - distance) / f
else:
moveduration = 2 * currenttravel / (lastf + f) # This is currenttravel / mean(lastf, f)
# FIXME: probably a little bit optimistic, but probably a much better estimate than the previous one:
# moveduration = math.sqrt(2 * distance / acceleration) # probably buggy : not taking actual travel into account
lastdx = dx
lastdy = dy
totalduration += moveduration
lastx = x
lasty = y
lastz = z
laste = e
lastf = f
elif line.command == "G4":
moveduration = P(line)
if moveduration:
moveduration /= 1000.0
totalduration += moveduration
# FIXME : looks like this needs to be tested with "lift Z on move"
if line.z is not None:
if line.command == "G92":
cur_z = line.z
elif line.is_move:
if line.relative and cur_z is not None:
cur_z += line.z
else:
cur_z = line.z
# FIXME: the logic behind this code seems to work, but it might be
# broken
if cur_z != prev_z:
if prev_z is not None and last_layer_z is not None:
offset = self.est_layer_height if self.est_layer_height else 0.01
if abs(prev_z - last_layer_z) < offset:
if self.est_layer_height is None:
zs = sorted([l.z for l in all_layers if l.z is not None])
heights = [round(zs[i + 1] - zs[i], 3) for i in range(len(zs) - 1)]
heights = [height for height in heights if height]
if len(heights) >= 2: self.est_layer_height = heights[1]
elif heights: self.est_layer_height = heights[0]
else: self.est_layer_height = 0.1
base_z = round(prev_z - (prev_z % self.est_layer_height), 2)
else:
base_z = round(prev_z, 2)
else:
base_z = prev_z
if base_z != prev_base_z:
new_layer = Layer(cur_lines, base_z)
new_layer.duration = totalduration - layerbeginduration
layerbeginduration = totalduration
all_layers.append(new_layer)
if cur_layer_has_extrusion and prev_z not in all_zs:
all_zs.add(prev_z)
cur_lines = []
cur_layer_has_extrusion = False
layer_id += 1
layer_line = 0
last_layer_z = base_z
if layer_callback is not None:
layer_callback(self, len(all_layers) - 1)
prev_base_z = base_z
if build_layers:
cur_lines.append(true_line)
layer_idxs.append(layer_id)
line_idxs.append(layer_line)
layer_line += 1
prev_z = cur_z
# ## Loop done
# Store current status
self.imperial = imperial
self.relative = relative
self.relative_e = relative_e
self.current_tool = current_tool
self.current_x = current_x
self.current_y = current_y
self.current_z = current_z
self.offset_x = offset_x
self.offset_y = offset_y
self.offset_z = offset_z
self.current_e = current_e
self.offset_e = offset_e
self.max_e = max_e
self.total_e = total_e
self.current_e_multi[current_tool]=current_e_multi
self.offset_e_multi[current_tool]=offset_e_multi
self.max_e_multi[current_tool]=max_e_multi
self.total_e_multi[current_tool]=total_e_multi
# Finalize layers
if build_layers:
if cur_lines:
new_layer = Layer(cur_lines, prev_z)
new_layer.duration = totalduration - layerbeginduration
layerbeginduration = totalduration
all_layers.append(new_layer)
if cur_layer_has_extrusion and prev_z not in all_zs:
all_zs.add(prev_z)
self.append_layer_id = len(all_layers)
self.append_layer = Layer([])
self.append_layer.duration = 0
all_layers.append(self.append_layer)
self.layer_idxs = array('I', layer_idxs)
self.line_idxs = array('I', line_idxs)
# Compute bounding box
all_zs = self.all_zs.union({zmin}).difference({None})
zmin = min(all_zs)
zmax = max(all_zs)
self.filament_length = self.max_e
while len(self.filament_length_multi)<len(self.max_e_multi):
self.filament_length_multi+=[0]
for i in enumerate(self.max_e_multi):
self.filament_length_multi[i[0]]=i[1]
if self.filament_length > 0:
self.xmin = xmin_e if not math.isinf(xmin_e) else 0
self.xmax = xmax_e if not math.isinf(xmax_e) else 0
self.ymin = ymin_e if not math.isinf(ymin_e) else 0
self.ymax = ymax_e if not math.isinf(ymax_e) else 0
else:
self.xmin = xmin if not math.isinf(xmin) else 0
self.xmax = xmax if not math.isinf(xmax) else 0
self.ymin = ymin if not math.isinf(ymin) else 0
self.ymax = ymax if not math.isinf(ymax) else 0
self.zmin = zmin if not math.isinf(zmin) else 0
self.zmax = zmax if not math.isinf(zmax) else 0
self.width = self.xmax - self.xmin
self.depth = self.ymax - self.ymin
self.height = self.zmax - self.zmin
# Finalize duration
totaltime = datetime.timedelta(seconds = int(totalduration))
self.duration = totaltime
def idxs(self, i):
return self.layer_idxs[i], self.line_idxs[i]
def estimate_duration(self):
return self.layers_count, self.duration
class LightGCode(GCode):
line_class = LightLine
def main():
if len(sys.argv) < 2:
print("usage: %s filename.gcode" % sys.argv[0])
return
print("Line object size:", sys.getsizeof(Line("G0 X0")))
print("Light line object size:", sys.getsizeof(LightLine("G0 X0")))
gcode = GCode(open(sys.argv[1], "rU"))
print("Dimensions:")
xdims = (gcode.xmin, gcode.xmax, gcode.width)
print("\tX: %0.02f - %0.02f (%0.02f)" % xdims)
ydims = (gcode.ymin, gcode.ymax, gcode.depth)
print("\tY: %0.02f - %0.02f (%0.02f)" % ydims)
zdims = (gcode.zmin, gcode.zmax, gcode.height)
print("\tZ: %0.02f - %0.02f (%0.02f)" % zdims)
print("Filament used: %0.02fmm" % gcode.filament_length)
for i in enumerate(gcode.filament_length_multi):
print("E%d %0.02fmm" % (i[0],i[1]))
print("Number of layers: %d" % gcode.layers_count)
print("Estimated duration: %s" % gcode.estimate_duration()[1])
if __name__ == '__main__':
main()
|
the-stack_0_6409 | """
Define functions needed for the demos.
"""
import numpy as np
from scipy.fftpack import fft2, ifft2, fftshift, ifftshift
from scipy.signal import fftconvolve
from bm3d import gaussian_kernel
def get_psnr(y_est: np.ndarray, y_ref: np.ndarray) -> float:
"""
Return PSNR value for y_est and y_ref presuming the noise-free maximum is 1.
:param y_est: Estimate array
:param y_ref: Noise-free reference
:return: PSNR value
"""
return 10 * np.log10(1 / np.mean(((y_est - y_ref).ravel()) ** 2))
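# Worked example: a mean squared error of 1e-4 between y_est and y_ref gives
# 10 * log10(1 / 1e-4) = 40 dB (the noise-free maximum is assumed to be 1, as above).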
def get_cropped_psnr(y_est: np.ndarray, y_ref: np.ndarray, crop: tuple) -> float:
"""
Return PSNR value for y_est and y_ref presuming the noise-free maximum is 1.
Crop the images before calculating the value by crop.
:param y_est: Estimate array
:param y_ref: Noise-free reference
:param crop: Tuple of crop-x and crop-y from both stides
:return: PSNR value
"""
return get_psnr(np.atleast_3d(y_est)[crop[0]:-crop[0], crop[1]:-crop[1], :],
np.atleast_3d(y_ref)[crop[0]:-crop[0], crop[1]:-crop[1], :])
def get_experiment_kernel(noise_type: str, noise_var: float, sz: tuple = np.array((101, 101))):
"""
Get kernel for generating noise from specific experiment from the paper.
:param noise_type: Noise type string, g[0-4](w|)
:param noise_var: noise variance
:param sz: size of image, used only for g4 and g4w
:return: experiment kernel with the l2-norm equal to variance
"""
# if noiseType == gw / g0
kernel = np.array([[1]])
noise_types = ['gw', 'g0', 'g1', 'g2', 'g3', 'g4', 'g1w', 'g2w', 'g3w', 'g4w']
if noise_type not in noise_types:
raise ValueError("Noise type must be one of " + str(noise_types))
if noise_type != "g4" and noise_type != "g4w":
# Crop this size of kernel when generating,
        # unless pink noise, in which case we want to use the full image size
sz = np.array([101, 101])
else:
sz = np.array(sz)
# Sizes for meshgrids
sz2 = -(1 - (sz % 2)) * 1 + np.floor(sz / 2)
sz1 = np.floor(sz / 2)
uu, vv = np.meshgrid([i for i in range(-int(sz1[0]), int(sz2[0]) + 1)],
[i for i in range(-int(sz1[1]), int(sz2[1]) + 1)])
beta = 0.8
if noise_type[0:2] == 'g1':
# Horizontal line
kernel = np.atleast_2d(16 - abs(np.linspace(1, 31, 31) - 16))
elif noise_type[0:2] == 'g2':
# Circular repeating pattern
scale = 1
dist = uu ** 2 + vv ** 2
kernel = np.cos(np.sqrt(dist) / scale) * gaussian_kernel((sz[0], sz[1]), 10)
elif noise_type[0:2] == 'g3':
# Diagonal line pattern kernel
scale = 1
kernel = np.cos((uu + vv) / scale) * gaussian_kernel((sz[0], sz[1]), 10)
elif noise_type[0:2] == 'g4':
# Pink noise
dist = uu ** 2 + vv ** 2
n = sz[0] * sz[1]
spec = (np.sqrt((np.sqrt(n) * 1e-2) / (np.sqrt(dist) + np.sqrt(n) * 1e-2)))
kernel = fftshift(ifft2(ifftshift(spec)))
else: # gw and g0 are white
beta = 0
# -- Noise with additional white component --
if len(noise_type) > 2 and noise_type[2] == 'w':
kernel = kernel / np.sqrt(np.sum(kernel ** 2))
kalpha = np.sqrt((1 - beta) + beta * abs(fft2(kernel, (sz[0], sz[1]))) ** 2)
kernel = fftshift(ifft2(kalpha))
kernel = np.real(kernel)
# Correct variance
kernel = kernel / np.sqrt(np.sum(kernel ** 2)) * np.sqrt(noise_var)
return kernel
def get_experiment_noise(noise_type: str, noise_var: float, realization: int, sz: tuple)\
-> (np.ndarray, np.ndarray, np.ndarray):
"""
Generate noise for experiment with specified kernel, variance, seed and size.
Return noise and relevant parameters.
The generated noise is non-circular.
:param noise_type: Noise type, see get_experiment_kernel for list of accepted types.
:param noise_var: Noise variance of the resulting noise
:param realization: Seed for the noise realization
:param sz: image size -> size of resulting noise
:return: noise, PSD, and kernel
"""
np.random.seed(realization)
# Get pre-specified kernel
kernel = get_experiment_kernel(noise_type, noise_var, sz)
# Create noisy image
half_kernel = np.ceil(np.array(kernel.shape) / 2)
if len(sz) == 3 and half_kernel.size == 2:
half_kernel = [half_kernel[0], half_kernel[1], 0]
kernel = np.atleast_3d(kernel)
half_kernel = np.array(half_kernel, dtype=int)
# Crop edges
noise = fftconvolve(np.random.normal(size=(sz + 2 * half_kernel)), kernel, mode='same')
noise = np.atleast_3d(noise)[half_kernel[0]:-half_kernel[0], half_kernel[1]:-half_kernel[1], :]
psd = abs(fft2(kernel, (sz[0], sz[1]), axes=(0, 1))) ** 2 * sz[0] * sz[1]
return noise, psd, kernel
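# Hedged usage sketch: `y` is a hypothetical clean image scaled to [0, 1] that has
# already been passed through np.atleast_3d (shape e.g. (256, 256, 1)).
def _example_noisy_observation(y, noise_type='g2', noise_var=0.01, seed=0):
    noise, psd, kernel = get_experiment_noise(noise_type, noise_var, seed, y.shape)
    z = y + noise
    return z, psd, get_psnr(z, y)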
|
the-stack_0_6410 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-18 Richard Hull and contributors
# See LICENSE.rst for details.
# PYTHON_ARGCOMPLETE_OK
"""
Rotating 3D box wireframe & color dithering.
Adapted from:
http://codentronix.com/2011/05/12/rotating-3d-cube-using-python-and-pygame/
"""
import sys
import math
from operator import itemgetter
from demo_opts import get_device
from luma.core.render import canvas
from luma.core.sprite_system import framerate_regulator
def radians(degrees):
return degrees * math.pi / 180
class point(object):
def __init__(self, x, y, z):
self.coords = (x, y, z)
self.xy = (x, y)
self.z = z
def rotate_x(self, angle):
x, y, z = self.coords
rad = radians(angle)
c = math.cos(rad)
s = math.sin(rad)
return point(x, y * c - z * s, y * s + z * c)
def rotate_y(self, angle):
x, y, z = self.coords
rad = radians(angle)
c = math.cos(rad)
s = math.sin(rad)
return point(z * s + x * c, y, z * c - x * s)
def rotate_z(self, angle):
x, y, z = self.coords
rad = radians(angle)
c = math.cos(rad)
s = math.sin(rad)
return point(x * c - y * s, x * s + y * c, z)
def project(self, size, fov, viewer_distance):
x, y, z = self.coords
factor = fov / (viewer_distance + z)
return point(x * factor + size[0] / 2, -y * factor + size[1] / 2, z)
def sine_wave(min, max, step=1):
angle = 0
diff = max - min
diff2 = diff / 2
offset = min + diff2
while True:
yield angle, offset + math.sin(radians(angle)) * diff2
angle += step
def main(num_iterations=sys.maxsize):
regulator = framerate_regulator(fps=30)
vertices = [
point(-1, 1, -1),
point(1, 1, -1),
point(1, -1, -1),
point(-1, -1, -1),
point(-1, 1, 1),
point(1, 1, 1),
point(1, -1, 1),
point(-1, -1, 1)
]
faces = [
((0, 1, 2, 3), "red"),
((1, 5, 6, 2), "green"),
((0, 4, 5, 1), "blue"),
((5, 4, 7, 6), "magenta"),
((4, 0, 3, 7), "yellow"),
((3, 2, 6, 7), "cyan")
]
a, b, c = 0, 0, 0
for angle, dist in sine_wave(8, 40, 1.5):
with regulator:
num_iterations -= 1
if num_iterations == 0:
break
t = [v.rotate_x(a).rotate_y(b).rotate_z(c).project(device.size, 256, dist)
for v in vertices]
depth = []
for idx, face in enumerate(faces):
v1, v2, v3, v4 = face[0]
avg_z = (t[v1].z + t[v2].z + t[v3].z + t[v4].z) / 4.0
depth.append((idx, avg_z))
with canvas(device, dither=True) as draw:
for idx, depth in sorted(depth, key=itemgetter(1), reverse=True)[3:]:
(v1, v2, v3, v4), color = faces[idx]
if angle // 720 % 2 == 0:
fill, outline = color, color
else:
fill, outline = "black", "white"
draw.polygon(t[v1].xy + t[v2].xy + t[v3].xy + t[v4].xy, fill, outline)
a += 0.3
b -= 1.1
c += 0.85
if __name__ == "__main__":
try:
device = get_device()
main()
except KeyboardInterrupt:
pass
|
the-stack_0_6411 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 Matteo Ingrosso
In combination with the top_3 script, this one plots the top 3 patches with their values.
"""
from get_top_3 import *
import matplotlib.pyplot as plt
import os
from PIL import Image
Image.MAX_IMAGE_PIXELS = 1000000000
from matplotlib import rcParams
rcParams['axes.titlesize'] = 35
rcParams['font.size'] = 40
# from the other file
#folder = input('gimme the folder: ')
region = input('Gimme the region: ')
rows = 2
cols = 3
def display_multiple_img(images, rows, cols):
figure, ax = plt.subplots(nrows=rows,ncols=cols )
figure.set_figheight(15)
figure.set_figwidth(20)
figure.set_dpi(300)
figure.subplots_adjust(hspace=0.2)
figure.subplots_adjust(wspace=0.4)
for ind,key in enumerate(images):
ax.ravel()[ind].imshow(Image.open(images[key], mode='r'))
ax.ravel()[ind].set_axis_off()
plt.figtext(0.128, 0.5, ssim_1, va='center')
plt.figtext(0.5, 0.5, ssim_2, va='center', ha='center')
plt.figtext(0.775, 0.5, ssim_3, va='center')
plt.figtext(-0.02, 0.5, region, va='center', ha="left", rotation=90, fontweight='bold')
# plt.figtext(0.5, 0.98, 'SSIM values', ha="center")
figure.suptitle('SSIM values', fontsize=40, fontweight='bold')
plt.tight_layout()
plt.show()
images = {'Image0': os.path.join(folder, 'validation', 'fake','save'+str(ssim_ind_1)+'.jpg')
, 'Image1': os.path.join(folder, 'validation', 'fake','save'+str(ssim_ind_2)+'.jpg')
, 'Image2': os.path.join(folder, 'validation', 'fake','save'+str(ssim_ind_3)+'.jpg')
, 'Image3': os.path.join(folder, 'validation', 'real','save'+str(ssim_ind_1)+'.jpg')
, 'Image4': os.path.join(folder, 'validation', 'real','save'+str(ssim_ind_2)+'.jpg')
, 'Image5': os.path.join(folder, 'validation', 'real','save'+str(ssim_ind_3)+'.jpg')}
display_multiple_img(images, rows, cols)
|
the-stack_0_6413 | #!/usr/bin/env python
import metadata.io
import phylodist.io
import phylodist.histogram
DATA_ROOT = '/dacb/globus'
metadataDF = metadata.io.loadFile(
DATA_ROOT + '/metadata.tab',
indexCols=['origin_O2', 'O2', 'week', 'replicate', 'sample', 'date', 'type'],
verbose=True
)
phylodistSampleDict = phylodist.io.sweepFiles(
DATA_ROOT,
sampleNameExtractionFunction=metadata.io.defaultSampleNameExtractionFunction
)
sampleDictTaxHistDict = phylodist.histogram.computeAllForSamples(
phylodistSampleDict
)
taxonomyDictTaxHist = phylodist.histogram.mergeAcrossSamplesTaxLevels(
sampleDictTaxHistDict,
metadata=metadataDF
)
# filter at 2.5% abundance
# (TAXONOMY_HIERARCHY is assumed to be provided by the phylodist package)
for taxonomyLevel in TAXONOMY_HIERARCHY:
dF = taxonomyDictTaxHist[taxonomyLevel]
taxonomyDictTaxHist[taxonomyLevel] = dF.where(dF >= 2.5)
taxonomyDictTaxHist[taxonomyLevel].dropna(how='all', inplace=True)
phylodist.io.writeExcelTaxonomyDictTaxHist(
DATA_ROOT + '/phylodist.xlsx',
taxonomyDictTaxHist
)
|
the-stack_0_6415 | import os
from Crypto.Cipher import Blowfish
from Crypto.Random import get_random_bytes
import codecs
import tempfile
import kbr.file_utils as file_utils
import re
import sys
import requests
import time
id_cipher = None
# Directory for temporary key files written by get_ssh_credential();
# None falls back to the system default temp directory.
tmp_dir = None
def init( id_secret:str) -> None:
global id_cipher
id_cipher = Blowfish.new(id_secret.encode('utf-8'), mode=Blowfish.MODE_ECB)
def decrypt_value(value:str) -> str:
value = str(value)
value_hex = codecs.decode(value, 'hex')
decrypted_value = id_cipher.decrypt( value_hex ).decode("utf-8").lstrip("!")
return decrypted_value
def encrypt_value(value:str) -> str:
value = str(value)
value = value.encode('utf-8')
s = (b"!" * (8 - len(value) % 8)) + value
# Encrypt
return codecs.encode(id_cipher.encrypt(s), 'hex').decode("utf-8")
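# Round-trip sketch (illustrative, not part of the original module): the module
# must be initialised with a shared secret before the helpers can be used.
#
#   init("a-shared-secret")
#   token = encrypt_value(42)            # hex string safe to expose externally
#   assert decrypt_value(token) == "42"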
def directory_hash_id(id):
s = str(id)
l = len(s)
# Shortcut -- ids 0-999 go under ../000/
if l < 4:
return ["000"]
# Pad with zeros until a multiple of three
padded = ((3 - len(s) % 3) * "0") + s
# Drop the last three digits -- 1000 files per directory
padded = padded[:-3]
# Break into chunks of three
return [padded[i * 3:(i + 1) * 3] for i in range(len(padded) // 3)]
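# Worked examples of the hashing scheme above (1000 datasets per directory):
#   directory_hash_id(42)      -> ["000"]
#   directory_hash_id(10000)   -> ["010"]
#   directory_hash_id(1234567) -> ["001", "234"]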
def construct_file_path(obj_id, file_dir=None):
"""
Taken and adjusted from the galaxy code base.
Construct the absolute path for accessing the object identified by `obj_id`.
:type file_dir: string
    :param file_dir: The base directory in which this object should be looked
                     up (or created), or None to use the default directory.
                     For backward compatibility both layouts are checked: the
                     new hashed layout (e.g., /files/000/dataset_10.dat) and
                     the old flat layout (e.g., /files/dataset_10.dat).
"""
# base = os.path.abspath(file_dir, self.file_path))
base = file_dir
# extra_dir should never be constructed from provided data but just
# make sure there are no shenannigans afoot
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj_id))
# Create a subdirectory for the object ID
path = os.path.join(base, rel_path)
path = os.path.join(path, "dataset_%s.dat" % obj_id)
print( f"Trying new style path {path} ")
if os.path.isfile(path):
return path
#Try old style dir names:
path = base
path = os.path.join(path, "dataset_%s.dat" % obj_id)
if os.path.isfile( path ):
return path
path = file_utils.find_first("dataset_%s.dat" % obj_id, file_dir)
if path is not None:
return path
raise RuntimeError(f"Cannot find dataset: 'dataset_{obj_id}.dat'")
def create_uuid(length=16):
# Generate a unique, high entropy random number.
# Length 16 --> 128 bit
long_uuid = codecs.encode(get_random_bytes(length), 'hex').decode("utf-8")
return long_uuid[:32]
def encrypt_ids(entry: any) -> []:
if isinstance(entry, list):
return list_encrypt_ids(entry)
if entry == [] or entry == {}:
return entry
if isinstance(entry, dict):
for key in entry.keys():
if key == 'nels_id':
continue
if key == 'id' or key.find('_id') > -1 and isinstance(entry[key], int):
entry[f"{key}"] = encrypt_value(entry[key])
else:
raise RuntimeError(f"Cannot change ids in {entry}")
return entry
def list_encrypt_ids(entries: []) -> []:
for entry in entries:
entry = encrypt_ids(entry)
return entries
def readable_date(timestamp:str) -> str:
if timestamp is None:
return None
timestamp = timestamp.replace('T', ' ')
timestamp = re.sub(r'\.\d+', '', timestamp)
return timestamp
def timedelta_to_epoc(timerange) -> int:
''' 3h, 2d, 1w --> now - delta as epoc secs '''
if timerange == '' or timerange is None:
return 0
ts = time.time()
time_delta = ts - timedelta_to_sec( timerange)
return time_delta
def timedelta_to_sec(timerange) -> int:
    ''' 1m, 3h, 2d, 1w --> length of the range in seconds '''
    if timerange == '' or timerange is None:
        return 0
    time_delta = 0
    try:
        g = re.match(r'(\d+)([mhdwMY])', timerange)
        num, unit = g.groups(0)
        if unit == 'm':
            time_delta = 60*int(num)
        elif unit == 'h':
            time_delta = 3600*int(num)
        elif unit == 'd':
            time_delta = 24*3600*int(num)
        elif unit == 'w':
            time_delta = 7*24*3600*int(num)
        elif unit == 'M':
            time_delta = 30*24*3600*int(num)
        elif unit == 'Y':
            time_delta = 365*24*3600*int(num)
    except Exception as e:
        print(f"timerange {timerange} is invalid, valid examples: 5m, 2h, 1d, 1w, 1M, 1Y")
        sys.exit(1)
return time_delta
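# Quick sanity checks for the unit handling above (illustrative):
#   timedelta_to_sec("5m") == 5 * 60
#   timedelta_to_sec("2h") == 2 * 3600
#   timedelta_to_sec("1w") == 7 * 24 * 3600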
def get_ssh_credential(config, nels_id: int, tmpfile=True):
nels_storage_client_key = config['nels_storage_client_key']
nels_storage_client_secret = config['nels_storage_client_secret']
nels_storage_url = config['nels_storage_url'].rstrip("/")
# make sure the id is a string
# nels_id = str(nels_id)
# api_url = 'https://nels.bioinfo.no/'
# api_url = 'https://test-fe.cbu.uib.no/nels-'
api_url = f"{nels_storage_url}/users/{nels_id}"
# logger.debug(f"API URL: {api_url}")
response = requests.get(api_url, auth=(nels_storage_client_key, nels_storage_client_secret))
if (response.status_code == requests.codes.ok):
json_response = response.json()
if tmpfile:
tmp = tempfile.NamedTemporaryFile(mode='w+t', suffix=".txt", dir=tmp_dir, delete=False)
tmp.write(json_response['key-rsa'])
tmp.close()
json_response['key_file'] = tmp.name
else:
outfile = f"{nels_id}.rsa"
file_utils.write(outfile, json_response['key-rsa'])
os.chmod(outfile, 0o600)
json_response['key_file'] = outfile
return json_response
else:
raise Exception("HTTP response code=%s" % str(response.status_code))
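# Example call (illustrative sketch; the config values are placeholders for the
# NeLS storage settings read above):
#
#   config = {"nels_storage_client_key": "<key>",
#             "nels_storage_client_secret": "<secret>",
#             "nels_storage_url": "https://example.org/nels-storage/"}
#   creds = get_ssh_credential(config, nels_id=123, tmpfile=False)
#   # creds["key_file"] then points to the RSA key written to "123.rsa"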
|
the-stack_0_6416 | import numpy as np
from numpy.core.umath_tests import inner1d
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
def image_histogram_equalization(image, number_bins=256):
    '''Histogram-equalize the image.
'''
# from http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html
# get image histogram
image_histogram, bins = np.histogram(
image.flatten(), number_bins, density=True)
cdf = image_histogram.cumsum() # cumulative distribution function
cdf = 255 * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
image_equalized = np.interp(image.flatten(), bins[:-1], cdf)
return image_equalized.reshape(image.shape) # , cdf
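# Example usage (illustrative; `img` is a hypothetical uint8 grayscale array):
#
#   img_eq = image_histogram_equalization(img, number_bins=256).astype(np.uint8)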
def elastic_transform(image, alpha=512, sigma=20, spline_order=1, mode='nearest', random_state=np.random):
"""Elastic deformation of image as described in [Simard2003]_.
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
image = image.reshape((256, 512, 1))
assert image.ndim == 3
shape = image.shape[:2]
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1),
sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1),
sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = [np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))]
result = np.empty_like(image)
for i in range(image.shape[2]):
result[:, :, i] = map_coordinates(
image[:, :, i], indices, order=spline_order, mode=mode).reshape(shape)
return result
def center_crop(layer, target_size, target_size2):
_, _, layer_width, layer_height = layer.size()
xy1 = (layer_width - target_size) // 2
xy2 = (layer_height - target_size2) // 2
return layer[:, :, xy1:(xy1 + target_size), xy2:(xy2 + target_size2)]
def pixel_list(im):
ret = []
i = 0
for x in im:
j = 0
for y in x:
if y > 0:
ret.append([i, j])
j += 1
i += 1
return np.array(ret)
def HausdorffDist(A, B):
# Hausdorf Distance: Compute the Hausdorff distance between two point
# clouds.
# Let A and B be subsets of metric space (Z,dZ),
# The Hausdorff distance between A and B, denoted by dH(A,B),
# is defined by:
# dH(A,B) = max(h(A,B),h(B,A)),
# where h(A,B) = max(min(d(a,b))
# and d(a,b) is a L2 norm
# dist_H = hausdorff(A,B)
# A: First point sets (MxN, with M observations in N dimension)
# B: Second point sets (MxN, with M observations in N dimension)
# ** A and B may have different number of rows, but must have the same
# number of columns.
#
# Edward DongBo Cui; Stanford University; 06/17/2014
# Find pairwise distance
    D_mat = np.sqrt(np.einsum('ij,ij->i', A, A)[np.newaxis].T +
                    np.einsum('ij,ij->i', B, B)-2*(np.dot(A, B.T)))
# Find DH
dH = np.max(
np.array([np.max(np.min(D_mat, axis=0)), np.max(np.min(D_mat, axis=1))]))
return(dH)
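# Small worked example (illustrative): with A = [[0, 0]] and B = [[3, 4]] the only
# pairwise distance is 5, so the Hausdorff distance is 5.
#
#   HausdorffDist(np.array([[0.0, 0.0]]), np.array([[3.0, 4.0]]))  # -> 5.0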
def get_n_fold(total, fold, idx):
if len(total) % fold != 0 or idx < 0 or idx >= fold:
raise ValueError
fd = total[idx::fold]
for f in fd:
total.remove(f)
return fd
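# Example (illustrative): splitting 10 items into 5 folds and taking fold 0
# returns every 5th item and removes it from `total` in place.
#
#   total = list(range(10))
#   fold0 = get_n_fold(total, 5, 0)   # fold0 == [0, 5], total == [1, 2, 3, 4, 6, 7, 8, 9]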
if __name__ == "__main__":
from PIL import Image
from matplotlib import pyplot as plt
prev_mask = Image.open('./data/ultrasound/ground truth/G0/01/0000.png')
prev_mask = elastic_transform(
np.array(prev_mask)).reshape(256, 512)
prev_mask = Image.fromarray(prev_mask)
plt.imshow(prev_mask, cmap='gray')
plt.show()
|
the-stack_0_6417 | import numpy
from panda3d.core import Point3, TransformState, Vec3
from panda3d.bullet import BulletSphereShape, BulletRigidBodyNode
from panda3d.ode import OdeBody, OdeMass, OdeSphereGeom
from .Ingredient import Ingredient
import cellpack.autopack as autopack
helper = autopack.helper
class SingleSphereIngr(Ingredient):
"""
This Ingredient is represented by a single sphere
and either a single radius, or a list of radii and offset vectors
for each sphere representing the ingredient
"""
def __init__(
self,
molarity=0.0,
radius=None,
position=None,
sphereFile=None,
packingPriority=0,
name=None,
pdb=None,
color=None,
nbJitter=5,
jitterMax=(1, 1, 1),
perturbAxisAmplitude=0.1,
principalVector=(1, 0, 0),
meshFile=None,
packingMode="random",
placeType="jitter",
Type="SingleSphere",
meshObject=None,
nbMol=0,
**kw
):
Ingredient.__init__(
self,
molarity=molarity,
radii=[[radius]],
positions=[[position]], # positions2=None,
sphereFile=sphereFile,
packingPriority=packingPriority,
name=name,
pdb=pdb,
color=color,
nbJitter=nbJitter,
jitterMax=jitterMax,
perturbAxisAmplitude=perturbAxisAmplitude,
principalVector=principalVector,
meshFile=meshFile,
packingMode=packingMode,
placeType=placeType,
meshObject=meshObject,
nbMol=nbMol,
Type=Type,
**kw
)
self.modelType = "Spheres"
if name is None:
name = "%5.2f_%f" % (radius, molarity)
self.name = name
self.singleSphere = True
# min and max radius for a single sphere should be the same
self.minRadius = radius
self.encapsulatingRadius = radius
# make a sphere ?->rapid ?
if self.mesh is None and autopack.helper is not None:
if not autopack.helper.nogui:
# if not autopack.helper.nogui :
                # build a sphere of radius radii[0];
                # this mesh is used by RAPID for collision
p = autopack.helper.getObject("autopackHider")
if p is None:
p = autopack.helper.newEmpty("autopackHider")
if autopack.helper.host.find("blender") == -1:
autopack.helper.toggleDisplay(p, False)
self.mesh = autopack.helper.Sphere(
self.name + "_basic",
radius=self.radii[0][0],
color=self.color,
parent=p,
res=24,
)[0]
else:
self.mesh = autopack.helper.unitSphere(
self.name + "_basic", 5, radius=self.radii[0][0]
)[0]
self.getData()
# should do that for all ingredient type
if self.representation is None and not hasattr(
self.mesh, "getFaces"
): # this is not working with dejavu
# and should go in the graphics.
if not autopack.helper.nogui:
self.representation = autopack.helper.Sphere(
self.name + "_rep",
radius=self.radii[0][0],
color=self.color,
parent=self.mesh,
res=24,
)[0]
else:
self.representation = autopack.helper.Icosahedron(
self.name + "_rep", radius=self.radii[0][0]
)[0]
def collides_with_compartment(
self,
jtrans,
rotMat,
level,
gridPointsCoords,
histoVol,
):
"""
Check spheres for collision
        TODO improve the test when grid stepSize is larger than the size of the ingredient
"""
centers = self.positions[level]
radii = (self.radii[level],)
centT = self.transformPoints(jtrans, rotMat, centers) # this should be jtrans
for radc, posc in zip(radii, centT):
ptsInSphere = histoVol.grid.getPointsInSphere(posc, radc[0]) # indices
compIdsSphere = numpy.take(histoVol.grid.gridPtId, ptsInSphere, 0)
if self.compNum <= 0:
wrongPt = [cid for cid in compIdsSphere if cid != self.compNum]
if len(wrongPt):
print("OK false compartment", len(wrongPt))
return True
return False
def get_new_distance_values(
self, jtrans, rotMatj, gridPointsCoords, distance, dpad, level=0
):
self.centT = centT = self.transformPoints(
jtrans, rotMatj, self.positions[level]
)
centT = self.centT # self.transformPoints(jtrans, rotMatj, self.positions[-1])
insidePoints = {}
newDistPoints = {}
for radc, posc in zip(self.radii[-1], centT):
rad = radc + dpad
ptsInSphere = self.env.grid.getPointsInSphere(posc, rad)
delta = numpy.take(gridPointsCoords, ptsInSphere, 0) - posc
delta *= delta
distA = numpy.sqrt(delta.sum(1))
for pti in range(len(ptsInSphere)):
pt = ptsInSphere[pti]
dist = distA[pti]
d = dist - radc
if d <= 0: # point is inside dropped sphere
if pt in insidePoints:
if abs(d) < abs(insidePoints[pt]):
insidePoints[pt] = d
else:
insidePoints[pt] = d
elif d < distance[pt]: # point in region of influence
if pt in newDistPoints:
if d < newDistPoints[pt]:
newDistPoints[pt] = d
else:
newDistPoints[pt] = d
return insidePoints, newDistPoints
def add_rb_node(self, worldNP):
shape = BulletSphereShape(self.encapsulatingRadius)
inodenp = worldNP.attachNewNode(BulletRigidBodyNode(self.name))
inodenp.node().setMass(1.0)
# inodenp.node().addShape(shape)
inodenp.node().addShape(
shape, TransformState.makePos(Point3(0, 0, 0))
) # rotation ?
# spherenp.setPos(-2, 0, 4)
return inodenp
def add_rb_node_ode(self, world, jtrans, pMat):
body = OdeBody(world)
M = OdeMass()
M.setSphereTotal(1.0, self.encapsulatingRadius)
body.setMass(M)
body.setPosition(Vec3(jtrans[0], jtrans[1], jtrans[2]))
body.setRotation(pMat)
# the geometry for the collision ?
geom = OdeSphereGeom(self.ode_space, self.encapsulatingRadius)
geom.setBody(body)
return geom
|
the-stack_0_6418 | import argparse
import json
import sys
import time
import uuid
import os
import sh
from sh import docker
parentdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
os.sys.path.insert(0, parentdir)
from configfinder import config_settings
def build_and_commit(package: str, fuzzer_image: str, json_output_path: str = None, qemu=False, timeout=None) -> str:
"""
This builds a package inside a docker container and then commits the container to an image.
:return:
"""
start = time.time()
docker_image_name = package + "_" + str(uuid.uuid4())[:8]
docker_container_name = str(uuid.uuid4())
try:
if not qemu:
build_process = docker.run('--cpus=0.90', "--privileged", "--name", docker_container_name, "--entrypoint",
"python", fuzzer_image, "/inputinferer/configfinder/builder_wrapper.py", "-p",
package, _out=sys.stdout, _ok_code=[config_settings.BUILDER_BUILD_NORMAL,
config_settings.BUILDER_BUILD_FAILED,
config_settings.BUILDER_BUILD_QEMU],
_timeout=timeout) # type: sh.RunningCommand
else:
build_process = docker.run('--cpus=0.90', "--privileged", "--name", docker_container_name, "--entrypoint",
"python", fuzzer_image, "/inputinferer/configfinder/builder_wrapper.py",
"-p", package, "-Q",
_out=sys.stdout,
_ok_code=[config_settings.BUILDER_BUILD_NORMAL,
config_settings.BUILDER_BUILD_FAILED,
config_settings.BUILDER_BUILD_QEMU],
_timeout=timeout) # type: sh.RunningCommand
except sh.TimeoutException as e:
print("Building {0} timed out!".format(package))
return None
exit_code = build_process.exit_code
if exit_code == -1:
print("Failed to build image for package {0}, not commiting".format(package))
return None
docker.commit(docker_container_name, docker_image_name, _out=sys.stdout)
end = time.time()
if json_output_path is not None:
json_dict = {}
json_dict["docker_image_name"] = docker_image_name
if exit_code == config_settings.BUILDER_BUILD_NORMAL:
json_dict["qemu"] = False
elif exit_code == config_settings.BUILDER_BUILD_QEMU:
json_dict["qemu"] = True
json_dict["time"] = end - start
with open(json_output_path, "w") as json_output_fp:
json.dump(json_dict, json_output_fp)
    docker.rm(docker_container_name)  # Remove the container now that it has been committed
return docker_image_name
def return_current_package_image(package: str, fuzzer_image: str, package_image: str, json_output_path: str = None,
qemu=False, timeout=None) -> str:
"""
Checks if the current package_image still exists and if not creates a new one.
"""
output = str(docker.images(package_image))
print(output.split("\n"))
if len(output.split("\n")) > 2:
return package_image
else:
return build_and_commit(package, fuzzer_image=fuzzer_image, json_output_path=json_output_path, qemu=qemu,
timeout=timeout)
def get_image_or_store_in_buildfile(package: str, fuzzer_image, buildfile_path: str, qemu=False):
if not os.path.exists(buildfile_path):
return build_and_commit(package, fuzzer_image=fuzzer_image, json_output_path=buildfile_path, qemu=qemu)
else:
with open(buildfile_path, "r") as fp:
build_dict = json.load(fp)
return return_current_package_image(package, fuzzer_image, build_dict["docker_image_name"],
json_output_path=buildfile_path, qemu=qemu)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Start the building Process')
parser.add_argument("-di", "--base_image", required=True, type=str, help="Fuzzer image.")
parser.add_argument("-p", "--package", required=True, type=str,
help="The package to build")
parser.add_argument("-out", "--output_path", required=False, type=str, default=None,
help="Where to store the json configuration?")
arguments = parser.parse_args()
    build_and_commit(package=arguments.package, fuzzer_image=arguments.base_image,
                     json_output_path=arguments.output_path)
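# Example invocation (illustrative; the script and image names are hypothetical):
#   python builder.py -di my-fuzzer-image -p binutils -out /tmp/binutils_build.json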
|
the-stack_0_6419 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 11 10:08:27 2018
@author: rflamary
"""
import numpy as np
import pylab as pl
import scipy
import scipy.optimize
import stdgrb
import time
t_start=time.perf_counter()
def tic():
    global t_start
    t_start=time.perf_counter()
def toc():
    global t_start
    t=time.perf_counter()-t_start
print('Elapsed time: {:1.3f}s'.format(t))
return t
#%%
n=2000
d=200
np.random.seed(0)
c=-np.random.rand(d)
A=np.random.rand(n,d)
b=np.random.rand(n)
lb=np.zeros(d)
ub=np.ones(d)
#%%
print('Scipy simplex solver')
tic()
sol=scipy.optimize.linprog(c,A,b)
x0=sol.x
v0=sol.fun
toc()
print('Scipy interior point solver')
tic()
sol=scipy.optimize.linprog(c,A,b,method='interior-point')
x00=sol.x
v00=sol.fun
toc()
print('Default method')
tic()
x1,v1=stdgrb.lp_solve(c,A,b,lb,ub,logtoconsole=0)
toc()
print('Simplex method')
tic()
x2,v2=stdgrb.lp_solve(c,A,b,lb,ub,1,logtoconsole=0)
toc()
print('Interior point method')
tic()
x3,v3=stdgrb.lp_solve(c,A,b,lb,ub,2,logtoconsole=0,crossover=0)
toc()
#%%
|
the-stack_0_6422 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines test inputs and invocations for JAX primitives.
Used to test various implementations of JAX primitives, e.g., against
NumPy (lax_reference) or TensorFlow.
"""
import operator
from typing import Any, Callable, Dict, Iterable, Optional, NamedTuple, Sequence, Tuple, Union
from functools import partial
from absl import testing
import jax
from jax import config
from jax import dtypes
from jax import test_util as jtu
from jax import lax
from jax import lax_linalg
from jax import numpy as jnp
from jaxlib import xla_client
import numpy as np
FLAGS = config.FLAGS
Rng = Any # A random number generator
class RandArg(NamedTuple):
"""Descriptor for a randomly generated argument.
See description of `Harness`.
"""
shape: Tuple[int, ...]
dtype: np.dtype
class StaticArg(NamedTuple):
"""Descriptor for a static argument.
See description of `Harness`.
"""
value: Any
class Harness:
"""Specifies inputs and callable for a primitive.
A harness is conceptually a callable and a list of arguments, that together
exercise a use case. The harness can optionally have additional parameters
that can be used by the test.
The arguments are specified through argument descriptors. An argument
descriptor can be:
* a numeric value or ndarray, or
* an instance of ``RandArg(shape, dtype)`` to be used with a PRNG to generate
random tensor of the given shape and type, or
* an instance of ``StaticArg(value)``. These are values that specialize the
callable, but are not exposed as external arguments.
For example, a harness for ``lax.take(arr, indices, axis=None)`` may want
to expose as external (dynamic) argument the array and the indices, and
keep the axis as a static argument (technically specializing the `take` to
  an axis):
Harness(f"take_axis={axis}",
lax.take,
[RandArg((2, 4), np.float32), np.array([-1, 0, 1]), StaticArg(axis)],
axis=axis)
"""
# Descriptive name of the harness, used as a testcase_name. Unique in a group.
name: str
# The function taking all arguments (static and dynamic).
fun: Callable
arg_descriptors: Sequence[Union[RandArg, StaticArg, Any]]
rng_factory: Callable
params: Dict[str, Any]
def __init__(self, name, fun, arg_descriptors, *,
rng_factory=jtu.rand_default, **params):
self.name = name
self.fun = fun
self.arg_descriptors = arg_descriptors
self.rng_factory = rng_factory
self.params = params
def __str__(self):
return self.name
def _arg_maker(self, arg_descriptor, rng: Rng):
if isinstance(arg_descriptor, StaticArg):
return arg_descriptor.value
if isinstance(arg_descriptor, RandArg):
return self.rng_factory(rng)(arg_descriptor.shape, arg_descriptor.dtype)
return arg_descriptor
def args_maker(self, rng: Rng) -> Sequence:
"""All-argument maker, including the static ones."""
return [self._arg_maker(ad, rng) for ad in self.arg_descriptors]
def dyn_args_maker(self, rng: Rng) -> Sequence:
"""A dynamic-argument maker, for use with `dyn_fun`."""
return [self._arg_maker(ad, rng) for ad in self.arg_descriptors
if not isinstance(ad, StaticArg)]
def dyn_fun(self, *dyn_args):
"""Invokes `fun` given just the dynamic arguments."""
all_args = self._args_from_dynargs(dyn_args)
return self.fun(*all_args)
def _args_from_dynargs(self, dyn_args: Sequence) -> Sequence:
"""All arguments, including the static ones."""
next_dynamic_argnum = 0
all_args = []
for ad in self.arg_descriptors:
if isinstance(ad, StaticArg):
all_args.append(ad.value)
else:
all_args.append(dyn_args[next_dynamic_argnum])
next_dynamic_argnum += 1
return all_args
def parameterized(harness_group: Iterable[Harness],
one_containing : Optional[str] = None):
"""Decorator for tests.
The tests receive a `harness` argument.
The `one_containing` parameter is useful for debugging. If given, then
picks only one harness whose name contains the string. The whole set of
parameterized tests is reduced to one test, whose name is not decorated
to make it easier to pick for running.
"""
cases = tuple(
dict(testcase_name=harness.name if one_containing is None else "",
harness=harness)
for harness in harness_group
if one_containing is None or one_containing in harness.name)
if one_containing is not None:
if not cases:
      raise ValueError(f"Cannot find test case with name containing {one_containing}."
                       " Names are:\n" +
                       "\n".join([harness.name for harness in harness_group]))
cases = cases[0:1]
return testing.parameterized.named_parameters(*cases)
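# Usage sketch (illustrative, not part of the original file): a test module can
# consume one of the harness groups defined below through the decorator above.
#
#   class LaxPrimitiveTest(jtu.JaxTestCase):
#     @parameterized(lax_unary_elementwise)
#     def test_unary(self, harness: Harness):
#       args = harness.dyn_args_maker(self.rng())
#       harness.dyn_fun(*args)  # run the op; compare against a reference here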
### Harness definitions ###
###
_LAX_UNARY_ELEMENTWISE = (
lax.abs, lax.acosh, lax.asinh, lax.atanh, lax.bessel_i0e, lax.bessel_i1e,
lax.ceil, lax.cos, lax.cosh, lax.digamma, lax.erf, lax.erf_inv, lax.erfc,
lax.exp, lax.expm1, lax.floor, lax.is_finite, lax.lgamma, lax.log,
lax.log1p, lax.neg, lax.round, lax.rsqrt, lax.sign, lax.sin, lax.sinh,
lax.sqrt, lax.tan, lax.tanh)
lax_unary_elementwise = tuple(
Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
f_lax,
[arg],
lax_name=f_lax.__name__,
dtype=dtype)
for f_lax in _LAX_UNARY_ELEMENTWISE
for dtype in jtu.dtypes.all_floating
for arg in [
np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.2, 1., 1.4, 1.6], dtype=dtype)
]
)
lax_bitwise_not = tuple(
[Harness(f"{jtu.dtype_str(dtype)}",
lax.bitwise_not,
[arg],
dtype=dtype)
for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
for arg in [
np.array([-1, -3, -2, 0, 0, 2, 1, 3], dtype=dtype),
]] +
[Harness("bool",
f_lax,
[arg],
lax_name=f_lax.__name__,
dtype=np.bool_)
for f_lax in [lax.bitwise_not]
for arg in [
np.array([True, False])
]]
)
lax_population_count = tuple(
Harness(f"{jtu.dtype_str(dtype)}",
lax.population_count,
[arg],
dtype=dtype)
for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
for arg in [
np.array([-1, -2, 0, 1], dtype=dtype)
]
)
def _get_max_identity(dtype):
if dtypes.issubdtype(dtype, np.inexact):
return np.array(-np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).min, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(False, np.bool_)
def _get_min_identity(dtype):
if dtypes.issubdtype(dtype, np.inexact):
return np.array(np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).max, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(True, np.bool_)
lax_add_mul = tuple(
Harness(f"fun={f_jax.__name__}_{jtu.dtype_str(dtype)}",
f_jax,
[lhs, rhs],
f_jax=f_jax,
dtype=dtype)
for f_jax in [lax.add, lax.mul]
for dtype in filter(lambda t: t != np.bool_, jtu.dtypes.all)
for lhs, rhs in [
(np.array([1, 2], dtype=dtype), np.array([3, 4], dtype=dtype))
]
) + tuple(
Harness(f"fun={f_jax.__name__}_bounds_{jtu.dtype_str(dtype)}",
f_jax,
[StaticArg(lhs), StaticArg(rhs)],
f_jax=f_jax,
dtype=dtype)
for f_jax in [lax.add, lax.mul]
for dtype in filter(lambda t: t != np.bool_, jtu.dtypes.all)
for lhs, rhs in [
(np.array([3, 3], dtype=dtype),
np.array([_get_max_identity(dtype), _get_min_identity(dtype)], dtype=dtype))
]
)
lax_min_max = tuple(
Harness(f"fun={f_jax.__name__}_{jtu.dtype_str(dtype)}",
f_jax,
[lhs, rhs],
f_jax=f_jax,
dtype=dtype)
for f_jax in [lax.min, lax.max]
for dtype in jtu.dtypes.all
for lhs, rhs in [
(np.array([1, 2], dtype=dtype), np.array([3, 4], dtype=dtype))
]
) + tuple(
Harness(f"fun={f_jax.__name__}_inf_nan_{jtu.dtype_str(dtype)}_{lhs[0]}_{rhs[0]}",
f_jax,
[StaticArg(lhs), StaticArg(rhs)],
f_jax=f_jax,
dtype=dtype)
for f_jax in [lax.min, lax.max]
for dtype in jtu.dtypes.all_floating + jtu.dtypes.complex
for lhs, rhs in [
(np.array([np.inf], dtype=dtype), np.array([np.nan], dtype=dtype)),
(np.array([-np.inf], dtype=dtype), np.array([np.nan], dtype=dtype))
]
)
_LAX_BINARY_ELEMENTWISE = (
lax.add, lax.atan2, lax.div, lax.igamma, lax.igammac, lax.max, lax.min,
lax.nextafter, lax.rem, lax.sub)
lax_binary_elementwise = tuple(
Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
f_lax,
[arg1, arg2],
lax_name=f_lax.__name__,
dtype=dtype
)
for f_lax in _LAX_BINARY_ELEMENTWISE
for dtype in jtu.dtypes.all_floating
for arg1, arg2 in [
(np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.2, 1., 1.4, 1.6], dtype=dtype),
np.array([-1.6, 1.4, 1.0, 0.0, 0.1, 0.2, 1., 1.4, -1.6], dtype=dtype))
]
)
_LAX_BINARY_ELEMENTWISE_LOGICAL = (
lax.bitwise_and, lax.bitwise_or, lax.bitwise_xor, lax.shift_left,
)
lax_binary_elementwise_logical = tuple(
[Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
f_lax,
[arg1, arg2],
lax_name=f_lax.__name__,
dtype=dtype)
for f_lax in _LAX_BINARY_ELEMENTWISE_LOGICAL
for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
for arg1, arg2 in [
(np.array([1, 3, 2, 0, 0, 2, 1, 3], dtype=dtype),
np.array([1, 2, 3, 0, 1, 0, 2, 3], dtype=dtype))
]
] +
[Harness(f"{f_lax.__name__}_bool",
f_lax,
[arg1, arg2],
lax_name=f_lax.__name__,
dtype=np.bool_)
for f_lax in [lax.bitwise_and, lax.bitwise_or, lax.bitwise_xor]
for arg1, arg2 in [
(np.array([True, True, False, False]),
np.array([True, False, True, False])),
]
]
)
lax_betainc = tuple(
Harness(f"_{jtu.dtype_str(dtype)}",
lax.betainc,
[arg1, arg2, arg3],
dtype=dtype)
for dtype in jtu.dtypes.all_floating
for arg1, arg2, arg3 in [
(np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.3, 1, 1.4, 1.6], dtype=dtype),
np.array([-1.6, 1.4, 1.0, 0.0, 0.2, 0.1, 1, 1.4, -1.6], dtype=dtype),
np.array([1.0, -1.0, 2.0, 1.0, 0.3, 0.3, -1.0, 2.4, 1.6], dtype=dtype))
]
)
_gather_input = np.arange(1000, dtype=np.float32).reshape((10, 10, 10))
lax_gather = tuple(
# Construct gather harnesses using take
[Harness(f"from_take_indices_shape={indices.shape}_axis={axis}",
lambda a, i, axis: jnp.take(a, i, axis=axis),
[_gather_input,
indices,
StaticArg(axis)])
for indices in [
# Ensure each set of indices has a distinct shape
np.array(2, dtype=np.int32),
np.array([2], dtype=np.int32),
np.array([2, 4], dtype=np.int32),
np.array([[2, 4], [5, 6]], dtype=np.int32),
np.array([0, 1, 10], dtype=np.int32), # Index out of bounds
np.array([0, 1, 2, -1], dtype=np.int32), # Index out of bounds
]
for axis in [0, 1, 2]] +
# Directly from lax.gather in lax_test.py.
[Harness(
f"_shape={shape}_idxs_shape={idxs.shape}_dnums={dnums}_slice_sizes={slice_sizes}",
lambda op, idxs, dnums, slice_sizes: lax.gather(op, idxs, dimension_numbers=dnums, slice_sizes=slice_sizes),
[RandArg(shape, np.float32),
idxs, StaticArg(dnums), StaticArg(slice_sizes)])
for shape, idxs, dnums, slice_sizes in [
((5,), np.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
((10,), np.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
((10, 5,), np.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
((10, 5), np.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
(1, 3)),
]
]
)
lax_scatter = tuple(
# Directly from lax.scatter in tests/lax_test.py
Harness(
f"fun={f_lax.__name__}_shape={jtu.format_shape_dtype_string(shape, dtype)}_scatterindices={scatter_indices.tolist()}_updateshape={update_shape}_updatewindowdims={dimension_numbers.update_window_dims}_insertedwindowdims={dimension_numbers.inserted_window_dims}_scatterdimstooperanddims={dimension_numbers.scatter_dims_to_operand_dims}_indicesaresorted={indices_are_sorted}_uniqueindices={unique_indices}".replace(' ', ''),
partial(f_lax, indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices),
[RandArg(shape, dtype), StaticArg(scatter_indices),
RandArg(update_shape, dtype), StaticArg(dimension_numbers)],
f_lax=f_lax,
shape=shape,
dtype=dtype,
scatter_indices=scatter_indices,
update_shape=update_shape,
dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
# We explicitly decide against testing lax.scatter, as its reduction function
# is lambda x, y: y, which is not commutative and thus makes results
# non-deterministic when an index into the operand is updated several times.
for f_lax in [lax.scatter_min, lax.scatter_max, lax.scatter_mul,
lax.scatter_add]
for dtype in { lax.scatter_min: jtu.dtypes.all
, lax.scatter_max: jtu.dtypes.all
# lax.scatter_mul and lax.scatter_add are not compatible with
# np.bool_ operands.
, lax.scatter_mul: filter(lambda t: t != np.bool_, jtu.dtypes.all)
, lax.scatter_add: filter(lambda t: t != np.bool_, jtu.dtypes.all)
}[f_lax]
for shape, scatter_indices, update_shape, dimension_numbers in [
((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
]
for indices_are_sorted in [False, True]
# `unique_indices` does not affect correctness, only performance, and thus
# does not need to be tested here. If/when it will make sense to add a test
# with `unique_indices` = True, particular care will have to be taken with
# regards to the choice of parameters, as the results are only predictable
# when all the indices to be updated are pairwise non-overlapping. Identifying
# such cases is non-trivial.
for unique_indices in [False]
)
lax_pad = tuple(
Harness(f"_inshape={jtu.format_shape_dtype_string(arg_shape, dtype)}_pads={pads}",
lax.pad,
[RandArg(arg_shape, dtype), np.array(0, dtype), StaticArg(pads)],
rng_factory=jtu.rand_small,
arg_shape=arg_shape, dtype=dtype, pads=pads)
for arg_shape in [(2, 3)]
for dtype in jtu.dtypes.all
for pads in [
[(0, 0, 0), (0, 0, 0)], # no padding
[(1, 1, 0), (2, 2, 0)], # only positive edge padding
[(1, 2, 1), (0, 1, 0)], # edge padding and interior padding
[(0, 0, 0), (-1, -1, 0)], # negative padding
[(0, 0, 0), (-2, -2, 4)], # add big dilation then remove from edges
[(0, 0, 0), (-2, -3, 1)], # remove everything in one dimension
]
)
lax_top_k = tuple( # random testing
Harness(f"_inshape={jtu.format_shape_dtype_string(shape, dtype)}_k={k}",
lax.top_k,
[RandArg(shape, dtype), StaticArg(k)],
shape=shape,
dtype=dtype,
k=k)
for dtype in jtu.dtypes.all
for shape in [(3,), (5, 3)]
for k in [-1, 1, 3, 4]
for rng_factory in [jtu.rand_default]
) + tuple( # stability test
Harness(f"stability_inshape={jtu.format_shape_dtype_string(arr.shape, arr.dtype)}_k={k}",
lax.top_k,
[arr, StaticArg(k)],
shape=arr.shape,
dtype=arr.dtype,
k=k)
for arr in [
np.array([5, 7, 5, 8, 8, 5], dtype=np.int32)
]
for k in [1, 3, 6]
) + tuple( # nan/inf sorting test
Harness(f"nan_inshape={jtu.format_shape_dtype_string(arr.shape, arr.dtype)}_k={k}",
lax.top_k,
[arr, StaticArg(k)],
shape=arr.shape,
dtype=arr.dtype,
k=k)
for arr in [
np.array([+np.inf, np.nan, -np.nan, np.nan, -np.inf, 3], dtype=np.float32)
]
for k in [1, 3, 6]
)
lax_sort = tuple( # one array, random data, all axes, all dtypes
Harness(f"one_array_shape={jtu.format_shape_dtype_string(shape, dtype)}_axis={dimension}_isstable={is_stable}",
lax.sort,
[RandArg(shape, dtype), StaticArg(dimension), StaticArg(is_stable)],
shape=shape,
dimension=dimension,
dtype=dtype,
is_stable=is_stable)
for dtype in jtu.dtypes.all
for shape in [(5,), (5, 7)]
for dimension in range(len(shape))
for is_stable in [False, True]
) + tuple( # one array, potential edge cases
Harness(f"one_special_array_shape={jtu.format_shape_dtype_string(arr.shape, arr.dtype)}_axis={dimension}_isstable={is_stable}",
lax.sort,
[arr, StaticArg(dimension), StaticArg(is_stable)],
shape=arr.shape,
dimension=dimension,
dtype=arr.dtype,
is_stable=is_stable)
for arr, dimension in [
[np.array([+np.inf, np.nan, -np.nan, -np.inf, 2, 4, 189], dtype=np.float32), -1]
]
for is_stable in [False, True]
) + tuple( # 2 arrays, random data, all axes, all dtypes
Harness(f"two_arrays_shape={jtu.format_shape_dtype_string(shape, dtype)}_axis={dimension}_isstable={is_stable}",
lambda *args: lax.sort_p.bind(*args[:-2], dimension=args[-2], is_stable=args[-1], num_keys=1),
[RandArg(shape, dtype), RandArg(shape, dtype), StaticArg(dimension), StaticArg(is_stable)],
shape=shape,
dimension=dimension,
dtype=dtype,
is_stable=is_stable)
for dtype in jtu.dtypes.all
for shape in [(5,), (5, 7)]
for dimension in range(len(shape))
for is_stable in [False, True]
) + tuple( # 3 arrays, random data, all axes, all dtypes
Harness(f"three_arrays_shape={jtu.format_shape_dtype_string(shape, dtype)}_axis={dimension}_isstable={is_stable}",
lambda *args: lax.sort_p.bind(*args[:-2], dimension=args[-2], is_stable=args[-1], num_keys=1),
[RandArg(shape, dtype), RandArg(shape, dtype), RandArg(shape, dtype),
StaticArg(dimension), StaticArg(is_stable)],
shape=shape,
dimension=dimension,
dtype=dtype,
is_stable=is_stable)
for dtype in jtu.dtypes.all
for shape in [(5,)]
for dimension in (0,)
for is_stable in [False, True]
)
lax_linalg_qr = tuple(
Harness(f"multi_array_shape={jtu.format_shape_dtype_string(shape, dtype)}_fullmatrices={full_matrices}",
lax_linalg.qr,
[RandArg(shape, dtype), StaticArg(full_matrices)],
shape=shape,
dtype=dtype,
full_matrices=full_matrices)
for dtype in jtu.dtypes.all_floating + jtu.dtypes.complex
for shape in [(1, 1), (3, 3), (3, 4), (2, 10, 5), (2, 200, 100)]
for full_matrices in [False, True]
)
def _fft_harness_gen(nb_axes):
def _fft_rng_factory(dtype):
_all_integers = jtu.dtypes.all_integer + jtu.dtypes.all_unsigned + jtu.dtypes.boolean
# For integer types, use small values to keep the errors small
if dtype in _all_integers:
return jtu.rand_small
else:
return jtu.rand_default
return tuple(
Harness(f"{nb_axes}d_shape={jtu.format_shape_dtype_string(shape, dtype)}_ffttype={fft_type}_fftlengths={fft_lengths}",
lax.lax_fft.fft,
[RandArg(shape, dtype), StaticArg(fft_type), StaticArg(fft_lengths)],
rng_factory=_fft_rng_factory(dtype),
shape=shape,
dtype=dtype,
fft_type=fft_type,
fft_lengths=fft_lengths)
for dtype in jtu.dtypes.all
for shape in filter(lambda x: len(x) >= nb_axes,
[(10,), (12, 13), (14, 15, 16), (14, 15, 16, 17)])
for fft_type, fft_lengths in [(xla_client.FftType.FFT, shape[-nb_axes:]),
(xla_client.FftType.IFFT, shape[-nb_axes:]),
(xla_client.FftType.RFFT, shape[-nb_axes:]),
(xla_client.FftType.IRFFT,
shape[-nb_axes:-1] + ((shape[-1] - 1) * 2,))]
if not (dtype in jtu.dtypes.complex and fft_type == xla_client.FftType.RFFT)
)
lax_fft = tuple(_fft_harness_gen(1) + _fft_harness_gen(2) + _fft_harness_gen(3) +
_fft_harness_gen(4))
lax_linalg_svd = tuple(
Harness(f"shape={jtu.format_shape_dtype_string(shape, dtype)}_fullmatrices={full_matrices}_computeuv={compute_uv}",
lambda *args: lax_linalg.svd_p.bind(args[0], full_matrices=args[1],
compute_uv=args[2]),
[RandArg(shape, dtype), StaticArg(full_matrices), StaticArg(compute_uv)],
shape=shape,
dtype=dtype,
full_matrices=full_matrices,
compute_uv=compute_uv)
for dtype in jtu.dtypes.all_floating + jtu.dtypes.complex
for shape in [(2, 2), (2, 7), (29, 29), (2, 3, 53), (2, 3, 29, 7)]
for full_matrices in [False, True]
for compute_uv in [False, True]
)
lax_slice = tuple(
Harness(f"_shape={shape}_start_indices={start_indices}_limit_indices={limit_indices}_strides={strides}", # type: ignore
lax.slice,
[RandArg(shape, dtype), # type: ignore
StaticArg(start_indices), # type: ignore
StaticArg(limit_indices), # type: ignore
StaticArg(strides)], # type: ignore
shape=shape, # type: ignore
start_indices=start_indices, # type: ignore
limit_indices=limit_indices) # type: ignore
for shape, start_indices, limit_indices, strides in [
[(3,), (1,), (2,), None],
[(7,), (4,), (7,), None],
[(5,), (1,), (5,), (2,)],
[(8,), (1,), (6,), (2,)],
[(5, 3), (1, 1), (3, 2), None],
[(5, 3), (1, 1), (3, 1), None],
[(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
[(5, 3), (1, 1), (2, 1), (1, 1)],
[(5, 3), (1, 1), (5, 3), (2, 1)],
# out-of-bounds cases
[(5,), (-1,), (0,), None],
[(5,), (-1,), (1,), None],
[(5,), (-4,), (-2,), None],
[(5,), (-5,), (-2,), None],
[(5,), (-6,), (-5,), None],
[(5,), (-10,), (-9,), None],
[(5,), (-100,), (-99,), None],
[(5,), (5,), (6,), None],
[(5,), (10,), (11,), None],
[(5,), (0,), (100,), None],
[(5,), (3,), (6,), None]
]
for dtype in [np.float32]
)
# Use lax_slice, but (a) make the start_indices dynamic arg, and (b) no strides.
lax_dynamic_slice = [
Harness(harness.name,
lax.dynamic_slice,
[harness.arg_descriptors[0],
np.array(list(start_indices)),
StaticArg(tuple(map(operator.sub, limit_indices, start_indices)))],
**harness.params)
for harness in lax_slice
for start_indices in [harness.params["start_indices"]]
for limit_indices in [harness.params["limit_indices"]]
]
lax_dynamic_update_slice = tuple(
Harness((f"_operand={jtu.format_shape_dtype_string(shape, dtype)}" # type: ignore
f"_update={jtu.format_shape_dtype_string(update_shape, update_dtype)}"
f"_start_indices={start_indices}"),
lax.dynamic_update_slice,
[RandArg(shape, dtype), # type: ignore
RandArg(update_shape, update_dtype), # type: ignore
np.array(start_indices)], # type: ignore
shape=shape, # type: ignore
start_indices=start_indices, # type: ignore
update_shape=update_shape) # type: ignore
for shape, start_indices, update_shape in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
[(3,), (-1,), (1,)], # out-of-bounds
[(3,), (10,), (1,)], # out-of-bounds
[(3,), (10,), (4,)], # out-of-bounds shape too big
[(3,), (10,), (2,)], # out-of-bounds
]
for dtype, update_dtype in [
(np.float32, np.float32),
(np.float64, np.float64)
])
lax_squeeze = tuple(
Harness(f"_inshape={jtu.format_shape_dtype_string(arg_shape, dtype)}_dimensions={dimensions}", # type: ignore
lax.squeeze,
[RandArg(arg_shape, dtype), StaticArg(dimensions)], # type: ignore[has-type]
arg_shape=arg_shape, dtype=dtype, dimensions=dimensions) # type: ignore[has-type]
for arg_shape, dimensions in [
[(1,), (0,)],
[(1,), (-1,)],
[(2, 1, 4), (1,)],
[(2, 1, 4), (-2,)],
[(2, 1, 3, 1), (1,)],
[(2, 1, 3, 1), (1, 3)],
[(2, 1, 3, 1), (3,)],
[(2, 1, 3, 1), (1, -1)],
]
for dtype in [np.float32]
)
shift_inputs = [
(arg, dtype, shift_amount)
for dtype in jtu.dtypes.all_unsigned + jtu.dtypes.all_integer
for arg in [
np.array([-250, -1, 0, 1, 250], dtype=dtype),
]
for shift_amount in [0, 1, 2, 3, 7]
]
lax_shift_left = tuple(
Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}", # type: ignore
lax.shift_left,
[arg, StaticArg(np.array([shift_amount], dtype=dtype))])
for arg, dtype, shift_amount in shift_inputs
)
lax_shift_right_logical = tuple(
Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}", # type: ignore
lax.shift_right_logical,
[arg, StaticArg(np.array([shift_amount], dtype=dtype))],
dtype=dtype)
for arg, dtype, shift_amount in shift_inputs
)
lax_shift_right_arithmetic = tuple(
Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}", # type: ignore
lax.shift_right_arithmetic,
[arg, StaticArg(np.array([shift_amount], dtype=dtype))],
dtype=dtype)
for arg, dtype, shift_amount in shift_inputs
)
lax_select_and_gather_add = tuple(
# Tests with 2d shapes (see tests.lax_autodiff_test.testReduceWindowGrad)
Harness(f"2d_shape={jtu.format_shape_dtype_string(shape, dtype)}_selectprim={select_prim}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}_basedilation={base_dilation}_windowdilation={window_dilation}",
lax._select_and_gather_add,
[RandArg(shape, dtype), RandArg(shape, dtype), StaticArg(select_prim),
StaticArg(window_dimensions), StaticArg(window_strides),
StaticArg(padding), StaticArg(base_dilation),
StaticArg(window_dilation)],
shape=shape,
dtype=dtype,
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation)
for dtype in jtu.dtypes.all_floating
for shape in [(4, 6)]
for select_prim in [lax.le_p, lax.ge_p]
for window_dimensions in [(2, 1), (1, 2)]
for window_strides in [(1, 1), (2, 1), (1, 2)]
for padding in tuple(set([tuple(lax.padtype_to_pads(shape, window_dimensions,
window_strides, p))
for p in ['VALID', 'SAME']] +
[((0, 3), (1, 2))]))
for base_dilation in [(1, 1)]
for window_dilation in [(1, 1)]
) + tuple(
# Tests with 4d shapes (see tests.lax_autodiff_test.testReduceWindowGrad)
Harness(f"4d_shape={jtu.format_shape_dtype_string(shape, dtype)}_selectprim={select_prim}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}_basedilation={base_dilation}_windowdilation={window_dilation}",
lax._select_and_gather_add,
[RandArg(shape, dtype), RandArg(shape, dtype), StaticArg(select_prim),
StaticArg(window_dimensions), StaticArg(window_strides),
StaticArg(padding), StaticArg(base_dilation),
StaticArg(window_dilation)],
shape=shape,
dtype=dtype,
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation)
for dtype in jtu.dtypes.all_floating
for shape in [(3, 2, 4, 6)]
for select_prim in [lax.le_p, lax.ge_p]
for window_dimensions in [(1, 1, 2, 1), (2, 1, 2, 1)]
for window_strides in [(1, 2, 2, 1), (1, 1, 1, 1)]
for padding in tuple(set([tuple(lax.padtype_to_pads(shape, window_dimensions,
window_strides, p))
for p in ['VALID', 'SAME']] +
[((0, 1), (1, 0), (2, 3), (0, 2))]))
for base_dilation in [(1, 1, 1, 1)]
for window_dilation in [(1, 1, 1, 1)]
)
lax_reduce_window = tuple(
# Tests with 2d shapes (see tests.lax_test.testReduceWindow)
Harness(f"2d_shape={jtu.format_shape_dtype_string(shape, dtype)}_initvalue={init_value}_computation={computation.__name__}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}_basedilation={base_dilation}_windowdilation={window_dilation}".replace(' ', ''),
lax.reduce_window,
[RandArg(shape, dtype), StaticArg(init_value), StaticArg(computation),
StaticArg(window_dimensions), StaticArg(window_strides),
StaticArg(padding), StaticArg(base_dilation), StaticArg(window_dilation)],
shape=shape,
dtype=dtype,
init_value=init_value,
computation=computation,
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation)
for computation in [lax.add, lax.max, lax.min, lax.mul]
for dtype in { lax.add: filter(lambda t: t != np.bool_, jtu.dtypes.all)
, lax.mul: filter(lambda t: t != np.bool_, jtu.dtypes.all)
, lax.max: jtu.dtypes.all
, lax.min: jtu.dtypes.all
}[computation]
for init_value in map(
dtype,
(lambda ts: ts[0] if not dtype in jtu.dtypes.all_floating else ts[1])(
{ lax.add: ([0, 1], [0, 1])
, lax.mul: ([1], [1])
, lax.max: ([1], [-np.inf, 1])
, lax.min: ([0], [np.inf, 0])
}[computation]
)
)
for shape in [(4, 6)]
for window_dimensions in [(1, 2)]
for window_strides in [(2, 1)]
for padding in tuple(set([tuple(lax.padtype_to_pads(shape, window_dimensions,
window_strides, p))
for p in ['VALID', 'SAME']] +
[((0, 3), (1, 2))]))
for base_dilation in [(2, 3)]
for window_dilation in [(1, 2)]
) + tuple(
# Tests with 4d shapes (see tests.lax_test.testReduceWindow)
Harness(f"4d_shape={jtu.format_shape_dtype_string(shape, dtype)}_initvalue={init_value}_computation={computation.__name__}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}_basedilation={base_dilation}_windowdilation={window_dilation}".replace(' ', ''),
lax.reduce_window,
[RandArg(shape, dtype), StaticArg(init_value), StaticArg(computation),
StaticArg(window_dimensions), StaticArg(window_strides),
StaticArg(padding), StaticArg(base_dilation), StaticArg(window_dilation)],
shape=shape,
dtype=dtype,
init_value=init_value,
computation=computation,
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation)
for computation in [lax.add, lax.max, lax.min, lax.mul]
for dtype in { lax.add: filter(lambda t: t != np.bool_, jtu.dtypes.all)
, lax.mul: filter(lambda t: t != np.bool_, jtu.dtypes.all)
, lax.max: jtu.dtypes.all
, lax.min: jtu.dtypes.all
}[computation]
for init_value in map(
dtype,
(lambda ts: ts[0] if not dtype in jtu.dtypes.all_floating else ts[1])(
{ lax.add: ([0, 1], [0, 1])
, lax.mul: ([1], [1])
, lax.max: ([1], [-np.inf, 1])
, lax.min: ([0], [np.inf, 0])
}[computation]
)
)
for shape in [(3, 2, 4, 6)]
for window_dimensions in [(1, 1, 2, 1)]
for window_strides in [(1, 2, 2, 1)]
for padding in tuple(set([tuple(lax.padtype_to_pads(shape, window_dimensions,
window_strides, p))
for p in ['VALID', 'SAME']] +
[((0, 1), (1, 0), (2, 3), (0, 2))]))
for base_dilation in [(2, 1, 3, 2)]
for window_dilation in [(1, 2, 2, 1)]
)
random_gamma = tuple(
Harness(f"_shape={jtu.format_shape_dtype_string(shape, dtype)}",
jax.jit(jax.random.gamma),
[np.array([42, 43], dtype=np.uint32), RandArg(shape, dtype)])
for shape in ((), (3,))
for dtype in (np.float32, np.float64)
)
random_split = tuple(
Harness(f"_i={key_i}",
jax.jit(lambda key: jax.random.split(key, 2)),
[key])
for key_i, key in enumerate([np.array([0, 0], dtype=np.uint32),
np.array([42, 43], dtype=np.uint32),
np.array([0xFFFFFFFF, 0], dtype=np.uint32),
np.array([0, 0xFFFFFFFF], dtype=np.uint32),
np.array([0xFFFFFFFF, 0xFFFFFFFF], dtype=np.uint32)])
)
def _make_conv_harness(name, *, lhs_shape=(2, 3, 9, 10), rhs_shape=(3, 3, 4, 5),
dtype=np.float32, window_strides=(1, 1), precision=None,
padding=((0, 0), (0, 0)), lhs_dilation=(1, 1),
rhs_dilation=(1, 1), feature_group_count=1,
dimension_numbers=("NCHW", "OIHW", "NCHW"),
batch_group_count=1):
return Harness(f"_{name}_lhs={jtu.format_shape_dtype_string(lhs_shape, dtype)}_rhs={jtu.format_shape_dtype_string(rhs_shape, dtype)}_windowstrides={window_strides}_padding={padding}_lhsdilation={lhs_dilation}_rhsdilation={rhs_dilation}_dimensionnumbers={dimension_numbers}_featuregroupcount={feature_group_count}_batchgroupcount={batch_group_count}_precision={precision}".replace(' ', ''),
lax.conv_general_dilated,
[RandArg(lhs_shape, dtype), RandArg(rhs_shape, dtype),
StaticArg(window_strides), StaticArg(padding),
StaticArg(lhs_dilation), StaticArg(rhs_dilation),
StaticArg(dimension_numbers), StaticArg(feature_group_count),
StaticArg(batch_group_count), StaticArg(precision)],
lhs_shape=lhs_shape,
rhs_shape=rhs_shape,
dtype=dtype,
window_strides=window_strides,
padding=padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
precision=precision)
lax_conv_general_dilated = tuple( # Validate dtypes and precision
# This first harness runs the tests for all dtypes and precisions using
# default values for all the other parameters. Variations of other parameters
# can thus safely skip testing their corresponding default value.
_make_conv_harness("dtype_precision", dtype=dtype, precision=precision)
for dtype in jtu.dtypes.all_inexact
for precision in [None, lax.Precision.DEFAULT, lax.Precision.HIGH,
lax.Precision.HIGHEST]
) + tuple( # Validate variations of feature_group_count and batch_group_count
_make_conv_harness("group_counts", lhs_shape=lhs_shape, rhs_shape=rhs_shape,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count)
for batch_group_count, feature_group_count in [
(1, 2), # feature_group_count != 1
(2, 1), # batch_group_count != 1
]
for lhs_shape, rhs_shape in [
((2 * batch_group_count, 3 * feature_group_count, 9, 10),
(3 * feature_group_count * batch_group_count, 3, 4, 5))
]
) + tuple( # Validate variations of window_strides
_make_conv_harness("window_strides", window_strides=window_strides)
for window_strides in [
(2, 3) # custom window
]
) + tuple( # Validate variations of padding
_make_conv_harness("padding", padding=padding)
for padding in [
((1, 2), (0, 0)), # padding only one spatial axis
((1, 2), (2, 1)) # padding on both spatial axes
]
) + tuple( # Validate variations of dilations
_make_conv_harness("dilations", lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation)
for lhs_dilation, rhs_dilation in [
((2, 2), (1, 1)), # dilation only on LHS (transposed)
((1, 1), (2, 3)), # dilation only on RHS (atrous)
((2, 3), (3, 2)) # dilation on both LHS and RHS (transposed & atrous)
]
) + tuple(
_make_conv_harness("dimension_numbers", lhs_shape=lhs_shape,
rhs_shape=rhs_shape, dimension_numbers=dimension_numbers)
# Dimension numbers and corresponding permutation
for dimension_numbers, lhs_shape, rhs_shape in [
(("NHWC", "HWIO", "NHWC"), (2, 9, 10, 3), (4, 5, 3, 3)), # TF default
(("NCHW", "HWIO", "NHWC"), (2, 3, 9, 10), (4, 5, 3, 3)), # custom
]
)
|
the-stack_0_6423 | # coding: utf-8
import toml
import logging
import argparse
from laputa.watch import Watcher
from laputa.record import Recorder
from laputa.notify import IFTTTNotifier
def read_config(file_name):
with open(file_name) as config_file:
config = toml.loads(config_file.read())
return config
def parse():
parser = argparse.ArgumentParser(description='Laputa, flying in the sky')
parser.add_argument('-c', metavar='CONFIG_FILE',
required=True, help='config file')
return parser.parse_args()
def main():
args = parse()
config = read_config(args.c)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
handler = logging.FileHandler(config['run']['log_file'])
handler.setLevel(logging.INFO)
logging_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(logging_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
watcher = Watcher(config['laputa']['weibo_uid'],
Recorder(config['run']['record_file']),
IFTTTNotifier(config['laputa']['ifttt_key'],
config['laputa']['ifttt_event']))
watcher.watch()
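# Illustrative sketch (not part of the original module): a minimal TOML config
# covering the keys read in main(); every value is a placeholder.
#
# [run]
# log_file = "laputa.log"
# record_file = "laputa.record"
#
# [laputa]
# weibo_uid = "1234567890"
# ifttt_key = "YOUR_IFTTT_KEY"
# ifttt_event = "weibo_update"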
|
the-stack_0_6425 | import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer, FixedForwardWindowIndexer
from pandas.core.window.indexers import ExpandingIndexer
def test_bad_get_window_bounds_signature():
class BadIndexer(BaseIndexer):
def get_window_bounds(self):
return None
indexer = BadIndexer()
with pytest.raises(ValueError, match="BadIndexer does not implement"):
Series(range(5)).rolling(indexer)
def test_expanding_indexer():
s = Series(range(10))
indexer = ExpandingIndexer()
result = s.rolling(indexer).mean()
expected = s.expanding().mean()
tm.assert_series_equal(result, expected)
def test_indexer_constructor_arg():
# Example found in computation.rst
use_expanding = [True, False, True, False, True]
df = DataFrame({"values": range(5)})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if self.use_expanding[i]:
start[i] = 0
end[i] = i + 1
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
result = df.rolling(indexer).sum()
expected = DataFrame({"values": [0.0, 1.0, 3.0, 3.0, 10.0]})
tm.assert_frame_equal(result, expected)
def test_indexer_accepts_rolling_args():
df = DataFrame({"values": range(5)})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if center and min_periods == 1 and closed == "both" and i == 2:
start[i] = 0
end[i] = num_values
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=1)
result = df.rolling(indexer, center=True, min_periods=1, closed="both").sum()
expected = DataFrame({"values": [0.0, 1.0, 10.0, 3.0, 4.0]})
tm.assert_frame_equal(result, expected)
def test_win_type_not_implemented():
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
return np.array([0, 1]), np.array([1, 2])
df = DataFrame({"values": range(2)})
indexer = CustomIndexer()
with pytest.raises(NotImplementedError, match="BaseIndexer subclasses not"):
df.rolling(indexer, win_type="boxcar")
@pytest.mark.parametrize("func", ["skew", "cov", "corr"])
def test_notimplemented_functions(func):
# GH 32865
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
return np.array([0, 1]), np.array([1, 2])
df = DataFrame({"values": range(2)})
indexer = CustomIndexer()
with pytest.raises(NotImplementedError, match=f"{func} is not supported"):
getattr(df.rolling(indexer), func)()
@pytest.mark.parametrize("constructor", [Series, DataFrame])
@pytest.mark.parametrize(
"func,np_func,expected,np_kwargs",
[
("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {},),
("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {},),
(
"max",
np.max,
[2.0, 3.0, 4.0, 100.0, 100.0, 100.0, 8.0, 9.0, 9.0, np.nan],
{},
),
(
"std",
np.std,
[
1.0,
1.0,
1.0,
55.71654452,
54.85739087,
53.9845657,
1.0,
1.0,
0.70710678,
np.nan,
],
{"ddof": 1},
),
(
"var",
np.var,
[
1.0,
1.0,
1.0,
3104.333333,
3009.333333,
2914.333333,
1.0,
1.0,
0.500000,
np.nan,
],
{"ddof": 1},
),
],
)
def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs):
# GH 32865
values = np.arange(10)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=3)
match = "Forward-looking windows can't have center=True"
with pytest.raises(ValueError, match=match):
rolling = constructor(values).rolling(window=indexer, center=True)
result = getattr(rolling, func)()
match = "Forward-looking windows don't support setting the closed argument"
with pytest.raises(ValueError, match=match):
rolling = constructor(values).rolling(window=indexer, closed="right")
result = getattr(rolling, func)()
rolling = constructor(values).rolling(window=indexer, min_periods=2)
result = getattr(rolling, func)()
expected = constructor(expected)
tm.assert_equal(result, expected)
expected2 = constructor(rolling.apply(lambda x: np_func(x, **np_kwargs)))
tm.assert_equal(result, expected2)
|
the-stack_0_6426 | #@+leo-ver=5-thin
#@+node:ekr.20101110092851.5742: * @file leoOPML.py
#@+<< docstring >>
#@+node:ekr.20060904103412.1: ** << docstring >>
#@@language rest
r'''A plugin to read and write Leo outlines in .opml
(http://en.wikipedia.org/wiki/OPML) format.
The OPML plugin creates two new commands that read and write Leo outlines in
OPML format. The read-opml-file command creates a Leo outline from an .opml
file. The write-opml-file command writes the present Leo outline to an .opml
file.
Various settings control what gets written to .opml files, and in what format.
As usual, you specify settings for the OPML plugin using leoSettings.leo. The
settings for the OPML are found in the node: @settings-->Plugins-->opml plugin.
Here are the settings that control the format of .opml files. The default values
are shown.
- @string opml_namespace = leo:com:leo-opml-version-1
The namespace urn for the xmlns attribute of <opml> elements. This value
typically is not used, but it should refer to Leo in some way.
- @bool opml_use_outline_elements = True
- If True, Leo writes body text to <leo:body> elements nested in <outline>
elements. Otherwise, Leo writes body text to leo:body attributes of <outline>
elements.
- @string opml_version = 2.0
The opml version string written to the <OPML> element. Use 2.0 unless there is a
specific reason to use 1.0.
- @bool opml_write_body_text = True
Leo writes body text to the OPML file only if this is True.
- @bool opml_write_leo_details = True
If True, Leo writes the native attributes of Leo's <v> elements as attributes of
the opml <outline> elements.
The native attributes of <v> elements are a, t, vtag (new), tnodeList,
marks, expanded and descendentTnodeUnknownAttributes.
- @bool opml_write_leo_globals_attributes = True
If True, Leo writes body_outline_ratio and global_window_position attributes to
the <head> element of the .opml file.
- @bool opml_write_ua_attributes
If True, write unknownAttributes. **NOTE**: ua_attributes are not currently read
from opml.
- @bool opml_expand_ua_dictionary
If True, expand an unknownAttribute 'x' of type dict to 'ua_x_key0', 'ua_x_key1'
etc. **WARNING**: using this feature may prevent reading these ua_attributes from
opml, if that feature is implemented in the future.
- @bool opml_skip_ua_dictionary_blanks
If True, when expanding as above, skip blank dict entries.
'''
#@-<< docstring >>
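# Example (illustrative only, inferred from putOPMLHeader/putOPMLNode below):
# with the default settings a written node looks roughly like
#     <outline leo:v="gnx" leo:a="EMV" text="headline text">
#         <leo:body>body text</leo:body>
#     </outline>
# where leo:a collects the E/M/V status flags and leo:v holds the node's gnx.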
# 2014/10/21: support Android outliner by treating _note attributes as body text.
# To do: read/write uA's.
printElements = [] # ['all','outline','head','body',]
# For traces.
#@+<< imports >>
#@+node:ekr.20060904103412.3: ** << imports >>
import leo.core.leoGlobals as g
import leo.core.leoPlugins as leoPlugins
import leo.core.leoNodes as leoNodes
import xml.sax
import xml.sax.saxutils
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
#@-<< imports >>
#@+others
#@+node:ekr.20060904132527.9: ** Module level
#@+node:ekr.20060904103412.4: *3* init
def init():
'''Return True if the plugin has loaded successfully.'''
leoPlugins.registerHandler(('open2', 'new'), onCreate)
g.plugin_signon(__name__)
return True
#@+node:ekr.20060904103412.5: *3* onCreate
def onCreate(tag, keys):
c = keys.get('c')
if c:
c.opmlController = OpmlController(c)
#@+node:ekr.20060904141220: ** class NodeClass
class NodeClass:
'''
A class representing one outline element.
Use getters to access the attributes, properties and rules of this node.
'''
#@+others
#@+node:ekr.20060904141220.1: *3* node.__init__
def __init__(self):
self.attributes = {}
self.bodyString = ''
self.headString = ''
self.children = []
self.gnx = None
#@+node:ekr.20060904141220.2: *3* node.__str__ & __repr__
def __str__(self):
return '<node: %s>' % self.headString
__repr__ = __str__
#@+node:ekr.20060913220507: *3* dump
def dump(self):
print('\nnode: %s: %s' % (self.gnx, self.headString))
if self.children:
print('children:[')
for child in self.children:
print(' node: %s: %s' % (child.gnx, child.headString))
print(']')
else:
print('children:[]')
print('attrs: %s' % self.attributes.values())
#@-others
#@+node:ekr.20060904103412.6: ** class OpmlController
class OpmlController:
'''The controller class for this plugin.'''
#@+others
#@+node:ekr.20060904103412.7: *3* oc.__init__& reloadSettings
def __init__(self, c):
'''Ctor for OpmlController class.'''
self.c = c
c.opmlCommands = self
c.k.registerCommand('read-opml-file', self.readOpmlCommand)
c.k.registerCommand('write-opml-file', self.writeOpmlCommand)
self.currentVnode = None
self.topVnode = None
self.generated_gnxs = {} # Keys are gnx's (strings). Values are vnodes.
self.reloadSettings()
def reloadSettings(self):
c = self.c
c.registerReloadSettings(self)
self.opml_read_derived_files = c.config.getBool('opml-read-derived-files')
self.opml_write_derived_files = c.config.getBool('opml-write-derived-files')
#@+node:ekr.20060914163456: *3* oc.createVnodes & helpers
def createVnodes(self, c, dummyRoot):
'''**Important**: this method and its helpers are low-level code
corresponding to link/unlink methods in leoNodes.py.
Modify this with extreme care.'''
self.generated_gnxs = {}
parent_v = c.hiddenRootNode
parent_v.children = []
children = self.createChildren(c, dummyRoot, parent_v)
assert c.hiddenRootNode.children == children
return children
#@+node:ekr.20060914171659.2: *4* oc.createChildren
# node is a NodeClass object, parent_v is a VNode.
def createChildren(self, c, node, parent_v):
children = []
for child in node.children:
gnx = child.gnx
v = gnx and self.generated_gnxs.get(gnx)
if not v:
v = self.createVnode(c, child, v)
self.createChildren(c, child, v)
children.append(v)
parent_v.children = children
for child in children:
child.parents.append(parent_v)
return children
#@+node:ekr.20060914171659.1: *4* oc.createVnode & helpers
def createVnode(self, c, node, v=None):
if not v:
v = leoNodes.VNode(context=c)
v.b, v.h = node.bodyString, node.headString
if node.gnx:
ni = g.app.nodeIndices
v.fileIndex = ni.tupleToString(ni.scanGnx(node.gnx))
self.generated_gnxs[node.gnx] = v
self.handleVnodeAttributes(node, v)
return v
#@+node:ekr.20060917213611: *5* oc.handleVnodeAttributes
def handleVnodeAttributes(self, node, v):
a = node.attributes.get('leo:a')
if a:
# 'C' (clone) and 'D' bits are not used.
if 'M' in a: v.setMarked()
if 'E' in a: v.expand()
# if 'O' in a: v.setOrphan()
if 'T' in a: self.topVnode = v
if 'V' in a: self.currentVnode = v
if 0: # Leo no longer uses the tnodeList.
s = node.attributes.get('leo:tnodeList')
tnodeList = s and s.split(',')
if tnodeList:
# This tnode list will be resolved later.
v.tempTnodeList = tnodeList
#@+node:ekr.20060913220707: *3* oc.dumpTree
def dumpTree(self, root, dummy=True):
if not dummy:
root.dump()
for child in root.children:
self.dumpTree(child, dummy=False)
#@+node:ekr.20111003220434.15488: *3* oc.parse_opml_file & helper
def parse_opml_file(self, fn):
c = self.c
if not fn or not fn.endswith('.opml'):
return g.trace('bad file name: %s' % repr(fn))
c = self.c
path = g.os_path_normpath(g.os_path_join(g.app.loadDir, fn))
try:
f = open(path, 'rb')
s = f.read() # type(s) is bytes for Python 3.x.
s = self.cleanSaxInputString(s)
except IOError:
return g.trace('can not open %s' % path)
# pylint:disable=catching-non-exception
try:
theFile = BytesIO(s)
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_external_ges, 1)
# Do not include external general entities.
# The actual feature name is "http://xml.org/sax/features/external-general-entities"
parser.setFeature(xml.sax.handler.feature_external_pes, 0)
handler = SaxContentHandler(c, fn)
parser.setContentHandler(handler)
parser.parse(theFile) # expat does not support parseString
sax_node = handler.getNode()
except xml.sax.SAXParseException:
g.error('error parsing', fn)
g.es_exception()
sax_node = None
except Exception:
g.error('unexpected exception parsing', fn)
g.es_exception()
sax_node = None
return sax_node
#@+node:ekr.20111003220434.15490: *4* oc.cleanSaxInputString
def cleanSaxInputString(self, s):
'''Clean control characters from s.
s may be a bytes or a (unicode) string.'''
# Note: form-feed ('\f') is 12 decimal.
badchars = [chr(ch) for ch in range(32)]
badchars.remove('\t')
badchars.remove('\r')
badchars.remove('\n')
flatten = ''.join(badchars)
pad = ' ' * len(flatten)
flatten = bytes(flatten, 'utf-8')
pad = bytes(pad, 'utf-8')
transtable = bytes.maketrans(flatten, pad)
return s.translate(transtable)
#@+node:ekr.20141020112451.18342: *3* oc.putToOPML
def putToOPML(self, owner):
'''
Write the c.p as OPML, using the owner's put method.'''
PutToOPML(owner)
#@+node:ekr.20060904103721: *3* oc.readFile & helpers
def readFile(self, fileName):
'''Read the opml file.'''
dumpTree = False
if not fileName:
g.trace('no fileName')
return None
c = self.c.new()
# Create the new commander *now*
# so that created vnodes will have the proper context.
# Pass one: create the intermediate nodes.
dummyRoot = self.parse_opml_file(fileName)
if not dummyRoot:
return None
if dumpTree:
self.dumpTree(dummyRoot)
# Pass two: create the outline from the sax nodes.
children = self.createVnodes(c, dummyRoot)
p = leoNodes.Position(v=children[0], childIndex=0, stack=None)
# Check the outline.
errors = c.checkOutline()
if errors:
c.dumpOutline()
g.trace('%s errors!' % errors)
return None
# if self.opml_read_derived_files:
# at = c.atFileCommands
# c.fileCommands.tnodesDict = self.createTnodesDict()
# self.resolveTnodeLists(c)
# if self.opml_read_derived_files:
# c.atFileCommands.readAll(c.rootPosition())
c.selectPosition(p)
c.redraw()
return c # for testing.
#@+node:ekr.20060921153603: *4* oc.createTnodesDict
def createTnodesDict(self):
'''
Create c.tnodesDict by from self.generated_gnxs
by converting VNode entries to tnodes.
'''
d = {}
for key in list(self.generated_gnxs.keys()):
v = self.generated_gnxs.get(key)
d[key] = v
return d
#@+node:ekr.20060917214140: *4* oc.setCurrentPosition
def setCurrentPosition(self, c):
v = self.currentVnode
if not v:
return
for p in c.allNodes_iter():
if p.v == v:
c.selectPosition(p)
break
#@+node:ekr.20060918132045: *4* oc.resolveTnodeLists
def resolveTnodeLists(self, c):
for p in c.allNodes_iter():
if hasattr(p.v, 'tempTnodeList'):
result = []
for gnx in p.v.tempTnodeList:
v = self.generated_gnxs.get(gnx)
if v:
result.append(v)
else:
g.trace('No tnode for %s' % gnx)
p.v.tnodeList = result
delattr(p.v, 'tempTnodeList')
#@+node:ekr.20060919201810: *3* oc.readOpmlCommand
def readOpmlCommand(self, event=None):
'''Open a Leo window containing the contents of an .opml file.'''
c = self.c
fileName = g.app.gui.runOpenFileDialog(c,
title="Read OPML",
filetypes=[("OPML files", "*.opml"), ("All files", "*")],
defaultextension=".opml")
c.bringToFront()
if fileName:
self.readFile(fileName)
else:
c.bodyWantsFocus()
#@+node:ekr.20060904103721.1: *3* oc.writeFile
def writeFile(self, fileName):
'''Write fileName as an OPML file.'''
if not fileName:
return
ok = self.c.fileCommands.write_Leo_file(
fileName,
outlineOnlyFlag=not self.opml_write_derived_files,
toString=False, toOPML=True)
if ok:
g.es_print('wrote %s' % fileName)
else:
g.es_print('did not write %s' % fileName)
#@+node:ekr.20060919201330: *3* oc.writeOpmlCommand
def writeOpmlCommand(self, event=None):
'''Save a Leo outline to an OPMLfile.'''
c = self.c
if g.app.disableSave:
g.es("Save commands disabled", color="purple")
return
# Make sure we never pass None to the ctor.
if not c.mFileName:
c.frame.title = ""
initialfile = g.ensure_extension(c.mFileName, ".opml")
# set local fileName, _not_ c.mFileName
fileName = g.app.gui.runSaveFileDialog(c,
initialfile=initialfile,
title="Write OPML",
filetypes=[("OPML files", "*.opml")],
defaultextension=".opml")
c.bringToFront()
if fileName:
fileName = g.ensure_extension(fileName, ".opml")
c.opmlCommands.writeFile(fileName)
#@-others
#@+node:ekr.20060919172012.2: ** class PutToOPML
class PutToOPML:
'''Write c.p's tree as OPML, using the owner's put method.'''
def __init__(self, owner):
self.c = owner.c
self.leo_file_encoding = owner.leo_file_encoding
self.owner = owner # a leoFileCommands.FileCommand instance.
self.initConfig()
self.putAll()
def put(self, s):
return self.owner.put(s)
#@+others
#@+node:ekr.20141020112451.18340: *3* initConfig
def initConfig(self):
'''Init all configuration settings.'''
c = self.c
# These prevent pylint warnings
self.opml_use_outline_elements = True
self.opml_write_derived_files = True
self.opml_write_leo_details = True
self.opml_write_leo_globals_attributes = True
self.opml_write_body_text = True
self.opml_write_ua_attributes = True
self.opml_expand_ua_dictionary = True
self.opml_skip_ua_dictionary_blanks = True
for ivar in (
'opml_use_outline_elements',
'opml_write_derived_files',
'opml_write_leo_details',
'opml_write_leo_globals_attributes',
'opml_write_body_text',
'opml_write_ua_attributes',
'opml_expand_ua_dictionary',
'opml_skip_ua_dictionary_blanks',
):
setattr(self, ivar, c.config.getBool(ivar))
#@+node:ekr.20141020112451.18337: *3* putAll
def putAll(self):
'''
Put the selected outline as OPML.
All elements and attributes prefixed by 'leo:' are leo-specific.
All other elements and attributes are specified by the OPML 1 spec.
'''
self.putXMLLine()
self.putOPMLProlog()
self.putOPMLHeader()
self.putOPMLNodes()
self.putOPMLPostlog()
#@+node:ekr.20060919172012.3: *3* putOPMLProlog
def putOPMLProlog(self):
s = self.c.config.getString('opml-namespace') or 'leo:com:leo-opml'
ver = self.c.config.getString('opml-version') or '2.0'
self.put('<opml version="%s" xmlns:leo="%s">' % (ver, s))
#@+node:ekr.20060919172012.4: *3* putOPMLHeader
def putOPMLHeader(self):
'''Put the OPML header, including attributes for globals, prefs and find settings.'''
c = self.c; indent = ' ' * 4
if self.opml_write_leo_globals_attributes:
self.put('\n<head leo:body_outline_ratio="%s">' % str(c.frame.ratio))
width, height, left, top = c.frame.get_window_info()
self.put('\n%s<leo:global_window_position' % indent)
self.put(' top="%s" left="%s" height="%s" width="%s"/>' % (
str(top), str(left), str(height), str(width)))
self.put('\n</head>')
else:
self.put('\n<head/>')
#@+node:ekr.20060919172012.5: *3* putOPMLNodes
def putOPMLNodes(self):
c = self.c; root = c.rootPosition()
self.put('\n<body>')
for p in root.self_and_siblings_iter():
self.putOPMLNode(p)
self.put('\n</body>')
#@+node:ekr.20060919172012.6: *3* putOPMLNode
def putOPMLNode(self, p):
indent = ' ' * (4 * p.level()) # Always use 4-space indents.
body = p.bodyString() or ''; head = p.headString() or ''
attrFormat = ' %s="%s"'
self.put('\n%s<outline' % indent)
if self.opml_write_leo_details: # Put leo-specific attributes.
for name, val in (
('leo:v', p.v.fileIndex),
('leo:a', self.aAttributes(p)),
# ('leo:tnodeList',self.tnodeListAttributes(p)),
):
if val: self.put(attrFormat % (name, val))
data = self.uAAttributes(p)
if data:
# for name,val in data.iteritems():
for name in list(data.keys()):
val = data.get(name)
self.put(attrFormat % (name, val))
self.put(attrFormat % ('text', self.attributeEscape(head)))
closed = False
if body and self.opml_write_body_text:
if self.opml_use_outline_elements:
self.put('>'); closed = True
self.put('<leo:body>%s</leo:body>' % xml.sax.saxutils.escape(body))
else:
self.put(attrFormat % ('leo:body', self.attributeEscape(body)))
if p.hasChildren():
if not closed:
self.put('>'); closed = True
for p2 in p.children_iter():
self.putOPMLNode(p2)
if closed:
self.put('\n%s</outline>' % indent)
# self.put('</outline>\n')
else:
self.put('/>')
#@+node:ekr.20060919172012.7: *4* attributeEscape
def attributeEscape(self, s):
# Unlike xml.sax.saxutils.escape, replace " by &quot; and replace newlines by a character reference.
s = s or ''
return (
s.replace('&', '&amp;')
.replace('<', '&lt;')
.replace('>', '&gt;')
.replace('"', '&quot;')
.replace('\n', '&#10;\n')
)
#@+node:ekr.20060919172012.8: *4* aAttributes
def aAttributes(self, p):
c = self.c
attr = []
if p.isExpanded(): attr.append('E')
if p.isMarked(): attr.append('M')
if c.isCurrentPosition(p): attr.append('V')
return ''.join(attr)
#@+node:ekr.20060919172012.9: *4* tnodeListAttributes (Not used)
# Based on fileCommands.putTnodeList.
def tnodeListAttributes(self, p):
'''Put the tnodeList attribute of p.v'''
# Remember: entries in the tnodeList correspond to @+node sentinels, _not_ to tnodes!
if not hasattr(p.v, 'tnodeList') or not p.v.tnodeList:
return None
# Assign fileIndices.
for v in p.v.tnodeList:
try: # Will fail for None or any pre 4.1 file index.
theId, time, n = p.v.fileIndex
except Exception:
g.trace("assigning gnx for ", p.v)
gnx = g.app.nodeIndices.getNewIndex()
p.v.setFileIndex(gnx) # Don't convert to string until the actual write.
s = ','.join([g.app.nodeIndices.toString(v.fileIndex) for v in p.v.tnodeList])
return s
#@+node:tbrown.20061004094757: *4* uAAttributes
def uAAttributes(self, p):
"""write unknownAttributes with various levels of expansion"""
data = {}
if self.opml_write_ua_attributes and hasattr(p.v, 'unknownAttributes'):
# for uak, uav in p.v.unknownAttributes.iteritems():
d = p.u
for uak in list(d.keys()):
uav = d.get(uak)
if self.opml_expand_ua_dictionary and isinstance(uav, dict):
# for uakc, uavc in uav.iteritems():
for uakc in list(uav.keys()):
uavc = uav.get(uakc)
if str(uavc) != '' or not self.opml_skip_ua_dictionary_blanks:
data['leo:ua_' + uak + '_' + uakc] = self.attributeEscape(str(uavc))
else:
data['leo:ua_' + uak] = self.attributeEscape(str(uav))
return data
#@+node:ekr.20060919172012.11: *3* putOPMLPostlog
def putOPMLPostlog(self):
self.put('\n</opml>\n')
#@+node:ekr.20141020112451.18339: *3* putXMLLine
def putXMLLine(self):
'''Put the **properly encoded** <?xml> element.'''
self.put('%s"%s"%s\n' % (
g.app.prolog_prefix_string,
self.leo_file_encoding,
g.app.prolog_postfix_string))
#@-others
#@+node:ekr.20060904134958.164: ** class SaxContentHandler (XMLGenerator)
class SaxContentHandler(xml.sax.saxutils.XMLGenerator):
'''A sax content handler class that reads OPML files.'''
#@+others
#@+node:ekr.20060904134958.165: *3* __init__ & helper
def __init__(self, c, inputFileName):
'''Ctor for SaxContentHandler class (OMPL plugin).'''
self.c = c
self.inputFileName = inputFileName
super().__init__()
self.dispatchDict = self.define_dispatch_dict()
# Semantics.
self.content = []
self.elementStack = []
self.errors = 0
self.level = 0
self.node = None
self.nodeStack = []
self.ratio = 0.5 # body-outline ratio.
self.rootNode = None
#@+node:ekr.20060917185525: *4* define_disptatch_dict
def define_dispatch_dict(self):
# There is no need for an 'end' method if all info is carried in attributes.
# Keys are **elements**.
d = {
'body': (None, None),
'head': (self.startHead, None),
'opml': (None, None),
'outline': (self.startOutline, self.endOutline),
'leo:body': (self.startBodyText, self.endBodyText),
'leo:global_window_position': (self.startWinPos, None),
}
return d
#@+node:ekr.20060904134958.166: *3* helpers
#@+node:ekr.20060904134958.167: *4* attrsToList
def attrsToList(self, attrs):
'''
Convert the attributes to a list of g.Bunches.
attrs: an Attributes item passed to startElement.
'''
return [g.Bunch(name=name, val=attrs.getValue(name))
for name in attrs.getNames()]
#@+node:ekr.20060904134958.170: *4* error
def error(self, message):
print('\n\nXML error: %s\n' % (message))
self.errors += 1
#@+node:ekr.20060917185525.1: *4* inElement
def inElement(self, name):
return self.elementStack and name in self.elementStack
#@+node:ekr.20060904134958.171: *4* printStartElement & helpers
def printStartElement(self, name, attrs):
indent = '\t' * self.level or ''
if attrs.getLength() > 0:
print('%s<%s %s>' % (
indent,
self.clean(name).strip(),
self.attrsToString(attrs, sep=' ')))
else:
print('%s<%s>' % (
indent,
self.clean(name).strip()))
if name.lower() in ['outline', 'head', 'body',]:
print('')
#@+node:ekr.20060904134958.168: *5* attrsToString
def attrsToString(self, attrs, sep='\n'):
'''Convert the attributes to a string.
attrs: an Attributes item passed to startElement.
sep: the separator character between attributes.'''
result = [
'%s="%s"' % (bunch.name, bunch.val)
for bunch in self.attrsToList(attrs)
]
return sep.join(result)
#@+node:ekr.20060904134958.169: *5* clean
def clean(self, s):
return g.toEncodedString(s, "ascii")
#@+node:ekr.20060904134958.174: *3* Do nothing...
#@+node:ekr.20060904134958.175: *4* other methods
def ignorableWhitespace(self, content):
g.trace()
def processingInstruction(self, target, data):
g.trace()
def skippedEntity(self, name):
g.trace(name)
def startElementNS(self, name, qname, attrs):
g.trace(name)
def endElementNS(self, name, qname):
g.trace(name)
#@+node:ekr.20060904134958.176: *4* endDocument
def endDocument(self):
pass
#@+node:ekr.20060904134958.177: *4* startDocument
def startDocument(self):
pass
#@+node:ekr.20060904134958.178: *3* characters
def characters(self, content):
name = self.elementStack[-1].lower() if self.elementStack else '<no element name>'
# Opml elements should not have content: everything is carried in attributes.
if name == 'leo:body':
if self.node:
self.content.append(content)
else:
self.error('No node for %s content' % (name))
else:
if content.strip():
print('content:', name, repr(content))
#@+node:ekr.20060904134958.179: *3* endElement & helpers
def endElement(self, name):
name = name.lower()
if name in printElements or 'all' in printElements:
indent = '\t' * (self.level - 1) or ''
print('%s</%s>' % (indent, self.clean(name).strip()))
data = self.dispatchDict.get(name)
if data is None:
g.trace('unknown element', name)
else:
junk, func = data
if func:
func()
name2 = self.elementStack.pop()
assert name == name2
#@+node:ekr.20060919193501: *4* endBodyText
def endBodyText(self):
'''End a <leo:body> element.'''
if self.content:
self.node.bodyString = ''.join(self.content)
self.content = []
#@+node:ekr.20060917185948: *4* endOutline
def endOutline(self):
self.level -= 1
self.node = self.nodeStack.pop()
#@+node:ekr.20060904134958.180: *3* startElement & helpers
def startElement(self, name, attrs):
name = name.lower()
if name in printElements or 'all' in printElements:
self.printStartElement(name, attrs)
self.elementStack.append(name)
data = self.dispatchDict.get(name)
if data is None:
g.trace('unknown element', name)
else:
func, junk = data
if func:
func(attrs)
#@+node:ekr.20060919193501.1: *4* startBodyText
def startBodyText(self, attrs):
'''Start a <leo:body> element.'''
self.content = []
#@+node:ekr.20060922072852: *4* startHead
def startHead(self, attrs):
if not self.inElement('opml'):
self.error('<head> outside <opml>')
self.doHeadAttributes(attrs)
#@+node:ekr.20060922072852.1: *5* doHeadAttributes
def doHeadAttributes(self, attrs):
ratio = 0.5
for bunch in self.attrsToList(attrs):
name = bunch.name; val = bunch.val
if name == 'leo:body_outline_ratio':
try:
ratio = float(val)
except ValueError:
pass
self.ratio = ratio
#@+node:ekr.20060917190349: *4* startOutline
def startOutline(self, attrs):
if self.inElement('head'):
self.error('<outline> inside <head>')
if not self.inElement('body'):
self.error('<outline> outside <body>')
self.level += 1
if self.rootNode:
parent = self.node
else:
self.rootNode = parent = NodeClass() # The dummy parent node.
parent.headString = 'dummyNode'
self.node = NodeClass()
parent.children.append(self.node)
self.doOutlineAttributes(attrs)
self.nodeStack.append(parent)
#@+node:ekr.20060904141220.34: *5* doOutlineAttributes
def doOutlineAttributes(self, attrs):
node = self.node
for bunch in self.attrsToList(attrs):
name, val = bunch.name, bunch.val
if name == 'text': # Text is the 'official' opml attribute for headlines.
node.headString = val
elif name in ('_note', 'leo:body'):
# Android outliner uses _note.
node.bodyString = val
elif name == 'leo:v':
node.gnx = val
else:
node.attributes[name] = val
#@+node:ekr.20060922071010: *4* startWinPos
def startWinPos(self, attrs):
if not self.inElement('head'):
self.error('<leo:global_window_position> outside <head>')
self.doGlobalWindowAttributes(attrs)
#@+node:ekr.20060922071010.1: *5* doGlobalWindowAttributes
def doGlobalWindowAttributes(self, attrs):
c = self.c
top = 50; left = 50; height = 500; width = 700 # Reasonable defaults.
try:
for bunch in self.attrsToList(attrs):
name = bunch.name; val = bunch.val
if name == 'top': top = int(val)
elif name == 'left': left = int(val)
elif name == 'height': height = int(val)
elif name == 'width': width = int(val)
except ValueError:
pass
c.frame.setTopGeometry(width, height, left, top)
c.frame.deiconify()
c.frame.lift()
c.frame.update()
#@+node:ekr.20060904134958.183: *3* getNode
def getNode(self):
return self.rootNode
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@@pagewidth 80
#@-leo
|
the-stack_0_6428 | """Class implementation for the scale_y_from_point interfaces.
"""
from typing import Any
from typing import Dict
from apysc._animation.animation_scale_y_from_point_interface import \
AnimationScaleYFromPointInterface
from apysc._type.dictionary import Dictionary
from apysc._type.expression_string import ExpressionString
from apysc._type.int import Int
from apysc._type.number import Number
from apysc._type.revert_interface import RevertInterface
class ScaleYFromPointInterface(
AnimationScaleYFromPointInterface, RevertInterface):
_scale_y_from_point: Dictionary[str, Number]
def _initialize_scale_y_from_point_if_not_initialized(self) -> None:
"""
Initialize the `_scale_y_from_point` attribute if it hasn't been
initialized yet.
"""
if hasattr(self, '_scale_y_from_point'):
return
self._scale_y_from_point = Dictionary({})
def get_scale_y_from_point(self, y: Int) -> Number:
"""
Get a scale-y value from the given y-coordinate.
Parameters
----------
y : Int
Y-coordinate.
Returns
-------
scale_y : ap.Number
Scale-y value from the given y-coordinate.
References
----------
- GraphicsBase scale_from_point interfaces document
- https://bit.ly/3xRBhlw
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.get_scale_y_from_point, locals_=locals(),
module_name=__name__, class_=ScaleYFromPointInterface):
from apysc._display import scale_interface_helper
from apysc._validation import number_validation
number_validation.validate_integer(integer=y)
self._initialize_scale_y_from_point_if_not_initialized()
default_val: ap.Number = ap.Number(1.0)
key_exp_str: ExpressionString = scale_interface_helper.\
get_coordinate_key_for_expression(coordinate=int(y._value))
scale_y: ap.Number = self._scale_y_from_point.get(
key=key_exp_str, default=default_val)
return scale_y
def set_scale_y_from_point(self, scale_y: Number, y: Int) -> None:
"""
Update a scale-y value from the given y-coordinate.
Parameters
----------
scale_y : Number
Scale-y value to set.
y : Int
Y-coordinate.
References
----------
- GraphicsBase scale_from_point interfaces document
- https://bit.ly/3xRBhlw
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.set_scale_y_from_point, locals_=locals(),
module_name=__name__, class_=ScaleYFromPointInterface):
from apysc._display import scale_interface_helper
from apysc._validation import number_validation
number_validation.validate_num(num=scale_y)
number_validation.validate_integer(integer=y)
self._initialize_scale_y_from_point_if_not_initialized()
key_exp_str: ExpressionString = scale_interface_helper.\
get_coordinate_key_for_expression(coordinate=int(y._value))
self._scale_y_from_point._value[key_exp_str.value] = scale_y
self._append_scale_y_from_point_update_expression(y=y)
def _append_scale_y_from_point_update_expression(
self, *, y: Int) -> None:
"""
Append the scale-y from the specified y-coordinate updating
expression.
Parameters
----------
y : Int
Y-coordinate.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.set_scale_y_from_point, locals_=locals(),
module_name=__name__, class_=ScaleYFromPointInterface):
from apysc._display import scale_interface_helper
expression: str
expression = scale_interface_helper.get_scale_updating_expression(
coordinate=y,
scale_dict=self._scale_y_from_point,
interface_variable_name=self.variable_name,
coordinate_type=scale_interface_helper.CoordinateType.Y)
ap.append_js_expression(expression=expression)
_scale_y_from_point_snapshots: Dict[str, Dict[str, Any]]
def _make_snapshot(self, *, snapshot_name: str) -> None:
"""
Make a value's snapshot.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
self._initialize_scale_y_from_point_if_not_initialized()
self._set_single_snapshot_val_to_dict(
dict_name='_scale_y_from_point_snapshots',
value={**self._scale_y_from_point._value},
snapshot_name=snapshot_name)
def _revert(self, *, snapshot_name: str) -> None:
"""
Revert a value if snapshot exists.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
if not self._snapshot_exists(snapshot_name=snapshot_name):
return
self._scale_y_from_point._value = self._scale_y_from_point_snapshots[
snapshot_name]
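# Illustrative sketch (not part of the original module): `instance` stands for
# any apysc display object that mixes in this interface; the variable names
# below are hypothetical.
#
#     scale: Number = instance.get_scale_y_from_point(y=Int(50))
#     instance.set_scale_y_from_point(scale_y=Number(2.0), y=Int(50))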
|
the-stack_0_6430 | import datetime
def get_pages(posts):
""" Groups blog posts into 'pages' of five posts """
pages = []
for i in range(4, len(posts), 5):
pages.append(posts[i-4: i+1])
r = len(posts) % 5
if r > 0:
pages.append(posts[len(posts) - r:])
return pages
def gen_tags(posts):
""" Returns a list of dictionaries indicating tag name and tag count
sorted by tag count. """
tag_list = {}
for post in posts:
for tag in post['tags']:
if tag in tag_list:
tag_list[tag] += 1
else:
tag_list[tag] = 1
tags = [{'tag': x, 'count': tag_list[x]} for x in tag_list]
tags.sort(key = lambda x: x['count'], reverse = True)
return tags
class Blog():
def __init__(self, flatpages, post_dir, draft_dir):
self.flatpages = flatpages
self.posts = [page for page in self.flatpages
if page.path.startswith(post_dir)]
self.posts.sort(key = lambda i:
datetime.datetime.strptime(i['date'], '%d %B %Y'),
reverse = True)
self.drafts = [page for page in self.flatpages
if page.path.startswith(draft_dir)]
self.pages = get_pages(self.posts)
self.tags = gen_tags(self.posts)
for post in self.posts:
post.slug = post.path.split('/')[1]
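# Illustrative sketch (not part of the original module): exercising the two
# module-level helpers with plain dicts standing in for flatpages posts.
#
#     posts = [{'tags': ['python'], 'date': '01 January 2020'} for _ in range(7)]
#     get_pages(posts) # -> two 'pages': the first five posts, then the last two
#     gen_tags(posts) # -> [{'tag': 'python', 'count': 7}]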
|
the-stack_0_6433 | import pytest
from seedwork.domain.exceptions import BusinessRuleValidationException
from seedwork.domain.value_objects import Money
from modules.catalog.domain.entities import Seller, Listing
from modules.catalog.domain.value_objects import ListingStatus
def test_seller_publishes_listing_happy_path():
seller = Seller(id=Seller.next_id())
listing = Listing(
id=Listing.next_id(),
title="Tiny dragon",
description="Tiny dragon for sale",
price=Money(1),
seller_id=seller.id,
)
seller.publish_listing(listing)
assert listing.status == ListingStatus.PUBLISHED
def test_seller_fails_to_publish_listing_with_zero_price():
seller = Seller(id=Seller.next_id())
listing = Listing(
id=Listing.next_id(),
title="Tiny dragon",
description="Tiny dragon for sale",
price=Money(0),
seller_id=seller.id,
)
with pytest.raises(BusinessRuleValidationException):
seller.publish_listing(listing)
|
the-stack_0_6434 | # Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for classifier models"""
import copy
from core.domain import classifier_registry
from core.platform import models
import feconf
import utils
(classifier_models,) = models.Registry.import_models([models.NAMES.classifier])
class Classifier(object):
"""Domain object for a classifier.
A classifier is a machine learning model created using a particular
classification algorithm which is used for answer classification
task.
Attributes:
id: str. The unique id of the classifier.
exp_id: str. The exploration id to which this classifier belongs.
exp_version_when_created: int. The version of the exploration when
this classification model was created.
state_name: str. The name of the state to which the classifier belongs.
algorithm_id: str. The id of the algorithm used for generating
classifier.
cached_classifier_data: dict. The actual classifier model used for
classification purpose.
data_schema_version: int. Schema version of the data used by the
classifier. This depends on the algorithm ID.
"""
def __init__(self, classifier_id, exp_id, exp_version_when_created,
state_name, algorithm_id, cached_classifier_data,
data_schema_version):
"""Constructs an Classifier domain object.
Args:
classifier_id: str. The unique id of the classifier.
exp_id: str. The exploration id to which the classifier belongs.
exp_version_when_created: int. The version of the exploration when
this classification model was created.
state_name: str. The name of the state to which the classifier
belongs.
algorithm_id: str. The id of the algorithm used for generating
classifier.
cached_classifier_data: dict. The actual classifier model used for
classification purpose.
data_schema_version: int. Schema version of the
data used by the classifier.
"""
self._id = classifier_id
self._exp_id = exp_id
self._exp_version_when_created = exp_version_when_created
self._state_name = state_name
self._algorithm_id = algorithm_id
self._cached_classifier_data = copy.deepcopy(cached_classifier_data)
self._data_schema_version = data_schema_version
@property
def id(self):
return self._id
@property
def exp_id(self):
return self._exp_id
@property
def exp_version_when_created(self):
return self._exp_version_when_created
@property
def state_name(self):
return self._state_name
@property
def algorithm_id(self):
return self._algorithm_id
@property
def cached_classifier_data(self):
return self._cached_classifier_data
@property
def data_schema_version(self):
return self._data_schema_version
def update_state_name(self, state_name):
"""Updates the state_name attribute of the Classifier domain object.
Args:
state_name: str. The name of the updated state to which the
classifier belongs.
"""
self._state_name = state_name
def to_dict(self):
"""Constructs a dict representation of Classifier domain object.
Returns:
A dict representation of Classifier domain object.
"""
return {
'classifier_id': self._id,
'exp_id': self._exp_id,
'exp_version_when_created': self._exp_version_when_created,
'state_name': self._state_name,
'algorithm_id': self._algorithm_id,
'cached_classifier_data': self._cached_classifier_data,
'data_schema_version': self._data_schema_version
}
def validate(self):
"""Validates the classifier before it is saved to storage."""
if not isinstance(self.id, basestring):
raise utils.ValidationError(
'Expected id to be a string, received %s' % self.id)
if not isinstance(self.exp_id, basestring):
raise utils.ValidationError(
'Expected exp_id to be a string, received %s' % self.exp_id)
if not isinstance(self.exp_version_when_created, int):
raise utils.ValidationError(
'Expected exp_version_when_created to be a int, received %s' %
self.exp_version_when_created)
if not isinstance(self.state_name, basestring):
raise utils.ValidationError(
'Expected state_name to be a string, received %s' % self.state_name)
utils.require_valid_name(self.state_name, 'the state name')
if not isinstance(self.algorithm_id, basestring):
raise utils.ValidationError(
'Expected algorithm_id to be a string, received %s' %
self.algorithm_id)
utils.require_valid_name(
self.algorithm_id, 'the algorithm id')
if self.algorithm_id not in (
feconf.INTERACTION_CLASSIFIER_MAPPING.values()):
raise utils.ValidationError(
'Invalid algorithm id: %s' % self.algorithm_id)
if not isinstance(self.cached_classifier_data, dict):
raise utils.ValidationError(
'Expected cached_classifier_data to be a dict, received %s' %(
self.cached_classifier_data))
classifier_class = (
classifier_registry.Registry.get_classifier_by_algorithm_id(
self.algorithm_id))
classifier_class.validate(self.cached_classifier_data)
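# Illustrative sketch (not part of the original module): constructing and
# serializing a classifier; the id, state and algorithm values below are
# hypothetical placeholders, not real Oppia settings.
#
#     classifier = Classifier(
#         'classifier_1', 'exp_1', 1, 'Introduction', 'TextClassifier', {}, 1)
#     classifier.update_state_name('Home')
#     classifier_dict = classifier.to_dict()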
|
the-stack_0_6435 | from mock import Mock, call, patch
from pip._internal.commands.install import build_wheels
class TestWheelCache:
def check_build_wheels(
self,
pep517_requirements,
legacy_requirements,
):
"""
Return: (mock_calls, build_failures).
"""
def build(reqs, **kwargs):
# Fail the first requirement.
return [reqs[0]]
builder = Mock()
builder.build.side_effect = build
build_failures = build_wheels(
builder=builder,
pep517_requirements=pep517_requirements,
legacy_requirements=legacy_requirements,
)
return (builder.build.mock_calls, build_failures)
@patch('pip._internal.commands.install.is_wheel_installed')
def test_build_wheels__wheel_installed(self, is_wheel_installed):
is_wheel_installed.return_value = True
mock_calls, build_failures = self.check_build_wheels(
pep517_requirements=['a', 'b'],
legacy_requirements=['c', 'd'],
)
# Legacy requirements were built.
assert mock_calls == [
call(['a', 'b'], should_unpack=True),
call(['c', 'd'], should_unpack=True),
]
# Legacy build failures are not included in the return value.
assert build_failures == ['a']
@patch('pip._internal.commands.install.is_wheel_installed')
def test_build_wheels__wheel_not_installed(self, is_wheel_installed):
is_wheel_installed.return_value = False
mock_calls, build_failures = self.check_build_wheels(
pep517_requirements=['a', 'b'],
legacy_requirements=['c', 'd'],
)
# Legacy requirements were not built.
assert mock_calls == [
call(['a', 'b'], should_unpack=True),
]
assert build_failures == ['a']
|
the-stack_0_6438 | from services.module.moduleService import ModuleService
from repositories.demoddata.demoddataRepo import DemoddataRepo
from repositories.payload.payloadRepo import PayloadRepo
from repositories.waterfall.waterfallRepo import WaterfallRepo
from repositories.observation.observationsRepo import ObservationRepo
class ObservationsService:
def __init__(self, cmd):
self.__cmd = cmd
self.__module_service = ModuleService(self.__cmd)
repos = self.filter_repositories()
self.__observations_repo = ObservationRepo(self.__cmd, repos)
def filter_repositories(self):
downloadable_data_repos = []
select_all = not self.__cmd.payloads and not self.__cmd.waterfalls and not self.__cmd.demoddata
if select_all or self.__cmd.payloads:
downloadable_data_repos.append(PayloadRepo(self.__cmd.working_dir, self.__module_service.loadPayloadModules()))
if select_all or self.__cmd.waterfalls:
downloadable_data_repos.append(WaterfallRepo(self.__cmd.working_dir, self.__module_service.loadWaterfallModules()))
if select_all or self.__cmd.demoddata:
downloadable_data_repos.append(DemoddataRepo(self.__cmd.working_dir, self.__module_service.loadDemoddataModules()))
return downloadable_data_repos
def extract(self):
self.__observations_repo.extract()
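# Illustrative sketch (not part of the original module): `cmd` can be any
# object exposing the attributes read above (working_dir, payloads,
# waterfalls, demoddata); argparse.Namespace is used purely as a stand-in.
#
#     from argparse import Namespace
#     cmd = Namespace(working_dir='./observations', payloads=True,
#                     waterfalls=False, demoddata=False)
#     ObservationsService(cmd).extract()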
|
the-stack_0_6439 | """evaluate.py
This script is used to evaluate trained ImageNet models.
"""
import sys
import argparse
import tensorflow as tf
import numpy as np
import tensorflow_datasets as tfds
from config import config
from utils.utils import config_keras_backend, clear_keras_session
from utils.dataset import get_dataset
from models.adamw import AdamW
from keras.utils import to_categorical
from methods import run_attack
#from tensorflow.keras.applications import InceptionV3
#from tensorflow.keras.applications import VGG19
#from tensorflow.keras.applications import ResNet152V2
from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input as resnet_preprocess_input
#from keras.applications.resnet101 import ResNet101
from keras.applications.vgg19 import VGG19, decode_predictions
from keras.applications.vgg19 import preprocess_input as vgg_preprocess_input
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input as inception_preprocess_input
from methods import get_accuracy, run_attack
#from tf.keras.preprocessing.image import ImageDataGenerator
import cv2
import copy
DESCRIPTION = """For example:
$ python3 evaluate.py --dataset_dir ${HOME}/data/ILSVRC2012/tfrecords \
--batch_size 64 \
saves/mobilenet_v2-model-final.h5
python3 evaluate_resnet_all.py --dataset_dir /l/IMAGENET_ORIGINAL/train/imagenet_tfrecord --inv_model_file /l/keras_imagenet-master/saves/inception_v3-ckpt-030_orig.h5
"""
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('--dataset_dir', type=str,
default=config.DEFAULT_DATASET_DIR)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--inv_model_file', type=str,
help='a saved model (.h5) file')
args = parser.parse_args()
config_keras_backend()
if not args.inv_model_file.endswith('.h5'):
sys.exit('model_file is not a .h5')
inv_model = tf.keras.models.load_model(
args.inv_model_file,
compile=False,
custom_objects={'AdamW': AdamW})
inv_model.compile(
optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
ds_validation = get_dataset(
args.dataset_dir, 'validation', args.batch_size)
## VGG
vgg_model = VGG19(include_top=True, weights='imagenet', classes=1000)
vgg_model.compile(
optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# InceptionV3
inception_model = InceptionV3(include_top=True, weights='imagenet', classes=1000)
inception_model.compile(
optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
## ResNet
resnet_model = ResNet50(include_top=True, weights='imagenet', classes=1000)
resnet_model.compile(
optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Process batches
iteration = 0
sum1 = 0
sum2 = 0
for images, labels in tfds.as_numpy(ds_validation):
if iteration < 199:
print('continuing')
iteration += 1
continue
if iteration == 500:
exit()
labels = np.argmax(labels, axis=1)
#adv_imgs = run_attack(True, 'CarliniL2Method', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=0)
#adv_imgs = run_attack(False, 'DeepFool', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=0)
adv_imgs = run_attack(False, 'FastGradientMethod', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=0)
#adv_imgs = run_attack(False, 'ProjectedGradientDescent', inception_model, images, labels, batch_size=10, dataset='cifar', fgsm_epsilon=0.1, cwl2_confidence=0)
## VGG ################################################
#img *= (2.0/255) # normalize to: 0.0~2.0
#img -= 1.0 # subtract mean to make it: -1.0~1.0
#img = np.expand_dims(img, axis=0)
vgg_imgs = []
resnet_imgs = []
inc_imgs = []
flip_imgs = []
inv_imgs = []
adv_vgg_imgs = []
adv_resnet_imgs = []
adv_inc_imgs = []
adv_flip_imgs = []
adv_inv_imgs = []
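# For each image in the batch, build matching clean and adversarial variants:
# the [-1, 1] inputs are mapped back to [0, 255], then resized and preprocessed
# for VGG19/ResNet50 (224x224) and InceptionV3 (299x299); a horizontally
# flipped copy keeps the original normalization, and a color-inverted copy is
# rebuilt and preprocessed for the separately loaded inverse model (inv_model).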
for ii in range(images.shape[0]):
img = copy.deepcopy(images[ii,:,:,:])
img += 1.0
#img /= (2.0/255)
img *= (255.0/2.0)
## VGG
vgg_img = copy.deepcopy(img)
vgg_img = cv2.resize(vgg_img, (224, 224))
vgg_img = vgg_preprocess_input(vgg_img)
vgg_imgs.append(vgg_img)
## Resnet
resnet_img = copy.deepcopy(img)
resnet_img = cv2.resize(resnet_img, (224, 224))
resnet_img = resnet_preprocess_input(resnet_img)
resnet_imgs.append(resnet_img)
## InceptionV3
inc_img = copy.deepcopy(img)
inc_img = cv2.resize(inc_img, (299, 299))
inc_img = inception_preprocess_input(inc_img)
inc_imgs.append(inc_img)
## Flipped
#flip_img = copy.deepcopy(img)
#flip_img = cv2.resize(flip_img, (299, 299))
#flip_img = cv2.flip(flip_img, 1)
#flip_img = inception_preprocess_input(flip_img)
#flip_imgs.append(flip_img)
flip_img = copy.deepcopy(images[ii,:,:,:])
flip_img = cv2.flip(flip_img, 1)
flip_imgs.append(flip_img)
## Inverse
inv_img = copy.deepcopy(images[ii,:,:,:])#########
inv_img += 1.0
inv_img /= 2.0
inv_img = 1 - inv_img
inv_img *= 255.0
inv_img = cv2.resize(inv_img, (299, 299))
inv_img = inception_preprocess_input(inv_img)
inv_imgs.append(inv_img)
#==========================================
# ADVERSARIAL ---------------
adv_img = copy.deepcopy(adv_imgs[ii,:,:,:])
adv_img += 1.0
#adv_img /= (2.0/255)
adv_img *= (255.0/2.0)
# VGG
adv_vgg_img = copy.deepcopy(adv_img)
adv_vgg_img = cv2.resize(adv_vgg_img, (224, 224))
adv_vgg_img = vgg_preprocess_input(adv_vgg_img)
adv_vgg_imgs.append(adv_vgg_img)
# Resnet
adv_resnet_img = copy.deepcopy(adv_img)
adv_resnet_img = cv2.resize(adv_resnet_img, (224, 224))
adv_resnet_img = resnet_preprocess_input(adv_resnet_img)
adv_resnet_imgs.append(adv_resnet_img)
# InceptionV3
adv_inc_img = copy.deepcopy(adv_img)
adv_inc_img = cv2.resize(adv_inc_img, (299, 299))
adv_inc_img = inception_preprocess_input(adv_inc_img)
adv_inc_imgs.append(adv_inc_img)
## Flipped
#adv_flip_img = copy.deepcopy(img)
#adv_flip_img = cv2.resize(adv_flip_img, (299, 299))
#adv_flip_img = cv2.flip(adv_flip_img, 1)
#adv_flip_img = inception_preprocess_input(adv_flip_img)
#adv_flip_imgs.append(adv_flip_img)
adv_flip_img = copy.deepcopy(adv_imgs[ii,:,:,:])
adv_flip_img = cv2.flip(adv_flip_img, 1)
adv_flip_imgs.append(adv_flip_img)
## Inverse
##test on inverse Inceptionv3
adv_inv_img = copy.deepcopy(adv_imgs[ii,:,:,:])#########
adv_inv_img += 1.0
adv_inv_img /= 2.0
adv_inv_img = 1 - adv_inv_img
adv_inv_img *= 255.0
adv_inv_img = cv2.resize(adv_inv_img, (299, 299))
adv_inv_img = inception_preprocess_input(adv_inv_img)
adv_inv_imgs.append(adv_inv_img)
# Horizontal Flipping
# test on Resnet
vgg_imgs = np.asarray(vgg_imgs)
resnet_imgs = np.asarray(resnet_imgs)
inc_imgs = np.asarray(inc_imgs)
flip_imgs = np.asarray(flip_imgs)
inv_imgs = np.asarray(inv_imgs)
adv_vgg_imgs = np.asarray(adv_vgg_imgs)
adv_resnet_imgs = np.asarray(adv_resnet_imgs)
adv_inc_imgs = np.asarray(adv_inc_imgs)
adv_flip_imgs = np.asarray(adv_flip_imgs)
adv_inv_imgs = np.asarray(adv_inv_imgs)
# Default ResNet accuracy
_, results1 = resnet_model.evaluate(x=resnet_imgs, y=labels, verbose=0)
_, results2 = vgg_model.evaluate(x=vgg_imgs, y=labels, verbose=0)
_, results3 = inception_model.evaluate(x=inc_imgs, y=labels, verbose=0)
_, results4 = inception_model.evaluate(x=flip_imgs, y=labels, verbose=0)
_, results5 = inv_model.evaluate(x=inv_imgs, y=labels, verbose=0)
# print('-----------------------------------------------------')
_, results6 = resnet_model.evaluate(x=adv_resnet_imgs, y=labels, verbose=0)
_, results7 = vgg_model.evaluate(x=adv_vgg_imgs, y=labels, verbose=0)
_, results8 = inception_model.evaluate(x=adv_inc_imgs, y=labels, verbose=0)
_, results9 = inception_model.evaluate(x=adv_flip_imgs, y=labels, verbose=0)
_, results10 = inv_model.evaluate(x=adv_inv_imgs, y=labels, verbose=0)
print(iteration)
print(results1, results6)
print(results2, results7)
print(results3, results8)
print(results4, results9)
print(results5, results10)
with open("kot_fgsm_untarg.txt", "a") as myfile:
myfile.write(str(results1) + ' ' + str(results2) + ' ' + str(results3) + ' ' + str(results4) + ' ' + str(results5) + ' ' + str(results6) + ' ' + str(results7) + ' ' + str(results8) + ' ' + str(results9) + ' ' + str(results10) + '\n' )
iteration += 1
#exit()
#results = resnet_model.evaluate(x=adv_imgs, y=to_categorical(labels, 1000))
#print('RESNET test loss, test acc:', results)
#results = vgg_model.evaluate(x=adv_imgs, y=to_categorical(labels, 1000))
#print('VGG test loss, test acc:', results)
# labels = np.argmax(labels, axis=1)
#
# #results = model.evaluate(
# # x=images, y=to_categorical(labels, 1000))
# #print('test loss, test acc:', results)
# total = total + images.shape[0]
# print(total)
exit()
results = resnet_model.evaluate(
x=ds_validation,
steps=50000 // args.batch_size)
print('test loss, test acc:', results)
clear_keras_session()
if __name__ == '__main__':
main()
|
the-stack_0_6440 | # def fib(n): # write Fibonacci series up to n
# a, b = 0, 1
# while a < n:
# print(a, end=' ')
# a, b = b, a+b
# print()
# def fib2(n): # return Fibonacci series up to n
# result = []
# a, b = 0, 1
# while a < n:
# result.append(a)
# a, b = b, a+b
# return result
# a = __name__
# print("The name of the module is: ", a)
import sys
old = sys.getrecursionlimit()
print("Initial recursion depth", old)
sys.setrecursionlimit(1000000)
old = sys.getrecursionlimit()
print("new recursion limit", old) |