content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
'''
Source code developed by DI2AG.
Thayer School of Engineering at Dartmouth College
Authors: Dr. Eugene Santos, Jr
Mr. Chase Yakaboski,
Mr. Gregory Hyde,
Dr. Keum Joo Kim
'''
import json
import argparse
import os
import sys
import pickle
import subprocess
from chp.query import Query
PASSED_JSON_FILE = '/home/cyakaboski/passed_message.json'
NODE = 'c-dell-m630-0-11'
SAVE_DIR = '/home/cyakaboski/temp'
BKB_PATHWAY_CORE_DIR = '/home/cyakaboski/src/python/projects/bkb-pathway-provider/core'
'''
PASSED_JSON_FILE = '/home/ncats/passed_message.json'
NODE = 'c-dell-m630-0-11'
SAVE_DIR = '/home/ncats/tmp'
BKB_PATHWAY_CORE_DIR = '/home/ncats/live/core'
'''
def processUiQuery(dict_):
query_dict = dict()
query_dict['name'] = dict_['name']
query_dict['evidence'] = dict_['genetic_evidence']
query_dict['targets'] = dict_['genetic_targets']
if dict_['demographic_evidence'] is not None:
query_dict['meta_evidence'] = [tuple(demo) for demo in dict_['demographic_evidence']]
else:
query_dict['meta_evidence'] = None
if dict_['demographic_targets'] is not None:
query_dict['meta_targets'] = [tuple(demo) for demo in dict_['demographic_targets']]
else:
query_dict['meta_targets'] = None
query = Query(**query_dict)
return query
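# Illustrative sketch only (not part of the original service): processUiQuery expects a
# dict with the keys read above; the shape below is an assumption and every value is
# hypothetical, shown purely to document the expected structure.
#
#   example_ui_dict = {
#       'name': 'example_query',
#       'genetic_evidence': [('mut_GENE1', 'True')],
#       'genetic_targets': [('mut_GENE2', 'True')],
#       'demographic_evidence': [['Age_of_Diagnosis', '>=', 50]],
#       'demographic_targets': None,
#   }
#   query = processUiQuery(example_ui_dict)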
def consumeJsonFile(file_name):
with open(file_name, 'r') as passed_file:
query_dict = json.load(passed_file)
os.system('rm {}'.format(file_name))
return query_dict
def runOnNode(query, node_name, save_dir):
pickle_file, json_file = query.save(save_dir)
command = ['ssh', node_name,
'python3', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.py'),
'--config_file', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.config'),
'--headless',
'--query_file', pickle_file,
'--save_dir', save_dir]
subprocess.run(command)
return json_file
def makeVariableJsonFile(save_dir, node_name):
vars_file = os.path.join(save_dir, 'bkb_variables.pk')
command = ['ssh', node_name,
'python3', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.py'),
'--config_file', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.config'),
'--get_variables', vars_file]
subprocess.run(command)
#--Collect vars_dict from vars_file
with open(vars_file, 'rb') as f_:
vars_dict = pickle.load(f_)
return vars_dict
def collectResults(query_file):
with open(query_file) as f_:
query_res_dict = json.load(f_)
return query_res_dict
def sendJson(results):
print('Begin-JSON------')
print(json.JSONEncoder().encode(results))
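# Assumed usage note (not in the original source): the remote caller that launched this
# script over ssh is expected to scan stdout for the 'Begin-JSON------' marker and parse
# whatever follows it. A hypothetical exchange would look like:
#   Begin-JSON------
#   {"name": "example_query", "result": {...}}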
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--f', default=None, type=str)
parser.add_argument('--get_variables', action='store_true')
args = parser.parse_args()
if args.f is not None:
#-- Consume JSON File passed by UI
query_dict = consumeJsonFile(args.f)
#-- Process the passed JSON file into recognized and runnable Query
query = processUiQuery(query_dict)
#-- Analyze the Query and run reasoning on a specified dell node.
saved_query_file = runOnNode(query, NODE, SAVE_DIR)
#-- Load JSON result file and send back over ssh
res_json = collectResults(saved_query_file)
sendJson(res_json)
elif args.get_variables:
vars_dict = makeVariableJsonFile(SAVE_DIR, NODE)
sendJson(vars_dict)
| 32.290909 | 93 | 0.670045 | ["Apache-2.0"] | NCATSTranslator/chp | chp/babel/bkb-service.py | 3,552 | Python |
# Proximal
import sys
sys.path.append('../../')
from proximal.utils.utils import *
from proximal.halide.halide import *
from proximal.lin_ops import *
import numpy as np
from scipy import signal
from scipy import ndimage
import matplotlib.pyplot as plt
############################################################
# Load image
np_img = get_test_image(2048)
print('Type ', np_img.dtype, 'Shape', np_img.shape)
imgplot = plt.imshow(np_img, interpolation='nearest', clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Numpy')
# Force recompile in local dir
tic()
Halide('A_conv', recompile=True)
Halide('At_conv', recompile=True) # Force recompile in local dir
print('Compilation took: {0:.1f}ms'.format(toc()))
# Test the runner
output = np.zeros_like(np_img)
K = get_kernel(15, len(np_img.shape))
tic()
Halide('A_conv').A_conv(np_img, K, output) # Call
print('Running took: {0:.1f}ms'.format(toc()))
plt.figure()
imgplot = plt.imshow(output, interpolation='nearest', clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Output from Halide')
tic()
output_scipy = signal.convolve2d(np_img, K, mode='same', boundary='wrap')
print('Running Scipy.convolve2d took: {0:.1f}ms'.format(toc()))
fn = conv(K, Variable(np_img.shape), implem='halide')
output_ref = np.zeros(np_img.shape, dtype=np.float32, order='F')
tic()
fn.forward([np_img], [output_ref])
print('Running conv fft convolution took: {0:.1f}ms'.format(toc()))
# Error
print('Maximum error {0}'.format(np.amax(np.abs(output_ref - output))))
plt.figure()
imgplot = plt.imshow(output_ref * 255,
interpolation='nearest',
clim=(0.0, 255.0))
imgplot.set_cmap('gray')
plt.title('Output from Scipy')
############################################################################
# Check correlation
############################################################################
output_corr = np.zeros_like(np_img)
tic()
Halide('At_conv').At_conv(np_img, K, output_corr) # Call
print('Running correlation took: {0:.1f}ms'.format(toc()))
#output_corr_ref = signal.convolve2d(np_img, np.flipud(np.fliplr(K)), mode='same', boundary='wrap')
output_corr_ref = ndimage.correlate(np_img, K, mode='wrap')
# Adjoint.
output_corr_ref = np.zeros(np_img.shape, dtype=np.float32, order='F')
tic()
fn.adjoint([np_img], [output_corr_ref])
print('Running transpose conv fft convolution took: {0:.1f}ms'.format(toc()))
# Error
print('Maximum error correlation {0}'.format(
np.amax(np.abs(output_corr_ref - output_corr))))
plt.show()
| 29.348837 | 99 | 0.647781 | ["MIT"] | antonysigma/ProxImaL | proximal/examples/test_conv.py | 2,524 | Python |
# coding: UTF-8
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import warnings
warnings.filterwarnings("ignore")
import argparse
import numpy as np
import shutil
import PIL
import time
from imageio import imread, imsave
from googletrans import Translator
import torch
import torchvision
import torch.nn.functional as F
from torchvision import transforms as T
import clip
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from clip_fft import to_valid_rgb, fft_image, resume_fft, pixel_image
from utils import slice_imgs, derivat, sim_func, slerp, basename, file_list, img_list, img_read, pad_up_to, txt_clean, latent_anima, cvshow, checkout, save_cfg, old_torch
import transforms
try: # progress bar for notebooks
get_ipython().__class__.__name__
from progress_bar import ProgressIPy as ProgressBar
except: # normal console
from progress_bar import ProgressBar
clip_models = ['ViT-B/16', 'ViT-B/32', 'RN50', 'RN50x4', 'RN50x16', 'RN101']
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--size', default='1280-720', help='Output resolution')
parser.add_argument('-t', '--in_txt', default=None, help='Text string or file to process (main topic)')
parser.add_argument('-pre', '--in_txt_pre', default=None, help='Prefix for input text')
parser.add_argument('-post', '--in_txt_post', default=None, help='Postfix for input text')
parser.add_argument('-t2', '--in_txt2', default=None, help='Text string or file to process (style)')
parser.add_argument('-t0', '--in_txt0', default=None, help='input text to subtract')
parser.add_argument('-im', '--in_img', default=None, help='input image or directory with images')
parser.add_argument('-w0', '--weight0', default=0.3, type=float, help='weight for subtraction')
parser.add_argument('-w2', '--weight2', default=0.5, type=float, help='weight for style')
parser.add_argument('-wi', '--weight_img', default=0.5, type=float, help='weight for images')
parser.add_argument('-r', '--resume', default=None, help='Resume from saved params or from an image')
parser.add_argument( '--out_dir', default='_out')
parser.add_argument('-tr', '--translate', action='store_true', help='Translate with Google Translate')
parser.add_argument( '--invert', action='store_true', help='Invert criteria')
parser.add_argument('-v', '--verbose', default=True, type=bool)
# training
parser.add_argument( '--gen', default='RGB', help='Generation (optimization) method: FFT or RGB')
parser.add_argument('-m', '--model', default='ViT-B/32', choices=clip_models, help='Select CLIP model to use')
parser.add_argument( '--steps', default=300, type=int, help='Iterations (frames) per scene (text line)')
parser.add_argument( '--samples', default=100, type=int, help='Samples to evaluate per frame')
parser.add_argument('-lr', '--lrate', default=1, type=float, help='Learning rate')
# motion
parser.add_argument('-opt', '--opt_step', default=1, type=int, help='How many optimizing steps per save/transform step')
parser.add_argument('-sm', '--smooth', action='store_true', help='Smoothen interframe jittering for FFT method')
parser.add_argument('-it', '--interpol', default=True, help='Interpolate topics? (or change by cut)')
parser.add_argument( '--fstep', default=100, type=int, help='How many frames before changing motion')
parser.add_argument( '--scale', default=0.012, type=float)
parser.add_argument( '--shift', default=10., type=float, help='in pixels')
parser.add_argument( '--angle', default=0.8, type=float, help='in degrees')
parser.add_argument( '--shear', default=0.4, type=float)
parser.add_argument( '--anima', default=True, help='Animate motion')
# tweaks
parser.add_argument('-a', '--align', default='overscan', choices=['central', 'uniform', 'overscan', 'overmax'], help='Sampling distribution')
parser.add_argument('-tf', '--transform', default='custom', choices=['none', 'custom', 'elastic'], help='use augmenting transforms?')
parser.add_argument( '--contrast', default=1.2, type=float)
parser.add_argument( '--colors', default=2, type=float)
parser.add_argument('-sh', '--sharp', default=None, type=float)
parser.add_argument('-mc', '--macro', default=0.4, type=float, help='Endorse macro forms 0..1 ')
parser.add_argument('-e', '--enforce', default=0, type=float, help='Enforce details (by boosting similarity between two parallel samples)')
parser.add_argument('-x', '--expand', default=0, type=float, help='Boosts diversity (by enforcing difference between prev/next samples)')
parser.add_argument('-n', '--noise', default=2., type=float, help='Add noise to make composition sparse (FFT only)') # 0.04
parser.add_argument( '--sim', default='mix', help='Similarity function (angular/spherical/mixed; None = cossim)')
parser.add_argument( '--rem', default=None, help='Dummy text to add to project name')
a = parser.parse_args()
if a.size is not None: a.size = [int(s) for s in a.size.split('-')][::-1]
if len(a.size)==1: a.size = a.size * 2
a.gen = a.gen.upper()
a.invert = -1. if a.invert is True else 1.
# Overriding some parameters, depending on other settings
if a.gen == 'RGB':
a.smooth = False
a.align = 'overscan'
if a.sharp is None: a.sharp = -1. if a.gen == 'RGB' else 1.
if a.model == 'ViT-B/16': a.sim = 'cossim'
return a
def frame_transform(img, size, angle, shift, scale, shear):
if old_torch(): # 1.7.1
img = T.functional.affine(img, angle, shift, scale, shear, fillcolor=0, resample=PIL.Image.BILINEAR)
img = T.functional.center_crop(img, size)
img = pad_up_to(img, size)
else: # 1.8+
img = T.functional.affine(img, angle, shift, scale, shear, fill=0, interpolation=T.InterpolationMode.BILINEAR)
img = T.functional.center_crop(img, size) # on 1.8+ also pads
return img
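# Usage sketch: a single call using the script's default motion settings (angle 0.8 deg,
# shift 10 px, scale 0.012, shear 0.4) on the default 1280x720 canvas; the variable name
# 'frame' is illustrative only.
#   frame = frame_transform(frame, [720, 1280], angle=0.8, shift=(0., 10.), scale=1.012, shear=0.4)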
def main():
a = get_args()
# Load CLIP models
model_clip, _ = clip.load(a.model, jit=old_torch())
try:
a.modsize = model_clip.visual.input_resolution
except:
a.modsize = 288 if a.model == 'RN50x4' else 384 if a.model == 'RN50x16' else 224
if a.verbose is True: print(' using model', a.model)
xmem = {'ViT-B/16':0.25, 'RN50':0.5, 'RN50x4':0.16, 'RN50x16':0.06, 'RN101':0.33}
if a.model in xmem.keys():
a.samples = int(a.samples * xmem[a.model])
if a.translate:
translator = Translator()
if a.enforce != 0:
a.samples = int(a.samples * 0.5)
if 'elastic' in a.transform:
trform_f = transforms.transforms_elastic
a.samples = int(a.samples * 0.95)
elif 'custom' in a.transform:
trform_f = transforms.transforms_custom
a.samples = int(a.samples * 0.95)
else:
trform_f = transforms.normalize()
def enc_text(txt):
if a.translate:
txt = translator.translate(txt, dest='en').text
emb = model_clip.encode_text(clip.tokenize(txt).cuda()[:77])
return emb.detach().clone()
def enc_image(img_file):
img_t = torch.from_numpy(img_read(img_file)/255.).unsqueeze(0).permute(0,3,1,2).cuda()[:,:3,:,:]
in_sliced = slice_imgs([img_t], a.samples, a.modsize, transforms.normalize(), a.align)[0]
emb = model_clip.encode_image(in_sliced)
return emb.detach().clone()
# Encode inputs
count = 0
texts = []
styles = []
images = []
if a.in_txt is not None:
if os.path.isfile(a.in_txt):
with open(a.in_txt, 'r', encoding="utf-8") as f:
texts = f.readlines()
texts = [tt.strip() for tt in texts if len(tt.strip()) > 0 and tt[0] != '#']
else:
texts = [a.in_txt]
if a.in_txt_pre is not None:
texts = [' '.join([a.in_txt_pre, tt]).strip() for tt in texts]
if a.in_txt_post is not None:
texts = [' '.join([tt, a.in_txt_post]).strip() for tt in texts]
key_txt_encs = [enc_text(txt) for txt in texts]
count = max(count, len(key_txt_encs))
if a.in_txt2 is not None:
if os.path.isfile(a.in_txt2):
with open(a.in_txt2, 'r', encoding="utf-8") as f:
styles = f.readlines()
styles = [tt.strip() for tt in styles if len(tt.strip()) > 0 and tt[0] != '#']
else:
styles = [a.in_txt2]
key_styl_encs = [enc_text(style) for style in styles]
count = max(count, len(key_styl_encs))
if a.in_img is not None and os.path.exists(a.in_img):
images = file_list(a.in_img) if os.path.isdir(a.in_img) else [a.in_img]
key_img_encs = [enc_image(image) for image in images]
count = max(count, len(key_img_encs))
assert count > 0, "No inputs found!"
if a.in_txt0 is not None:
if a.verbose is True: print(' subtract text:', a.in_txt0)
if a.translate:
a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
# if a.verbose is True: print(' translated to:', a.in_txt0)
anti_txt_encs = [enc_text(txt) for txt in a.in_txt0.split('.')]
if a.verbose is True: print(' samples:', a.samples)
global params_tmp
shape = [1, 3, *a.size]
if a.gen == 'RGB':
params_tmp, _, sz = pixel_image(shape, a.resume)
params_tmp = params_tmp[0].cuda().detach()
else:
params_tmp, sz = resume_fft(a.resume, shape, decay=1.5, sd=1)
if sz is not None: a.size = sz
# [glob]steps = for save/move, opt_steps = for optimization cycle
steps = a.steps
glob_steps = count * steps
opt_steps = steps * a.opt_step
if glob_steps == a.fstep: a.fstep = glob_steps // 2 # otherwise no motion
workname = basename(a.in_txt) if a.in_txt is not None else basename(a.in_img)
workname = txt_clean(workname)
workdir = os.path.join(a.out_dir, workname)
if a.rem is not None: workdir += '-%s' % a.rem
if 'RN' in a.model.upper(): workdir += '-%s' % a.model
if a.noise > 0: workdir += '-n%.2g' % a.noise
if a.macro > 0: workdir += '-m%.2g' % a.macro
if a.smooth is True: workdir += '-sm'
if a.transform != 'custom': workdir += '-tf%s' % a.transform
if a.gen == 'RGB': workdir += '-rgb'
tempdir = os.path.join(workdir, 'ttt')
os.makedirs(tempdir, exist_ok=True)
save_cfg(a, workdir)
if a.in_txt is not None and os.path.isfile(a.in_txt):
shutil.copy(a.in_txt, os.path.join(workdir, os.path.basename(a.in_txt)))
if a.in_txt2 is not None and os.path.isfile(a.in_txt2):
shutil.copy(a.in_txt2, os.path.join(workdir, os.path.basename(a.in_txt2)))
midp = 0.5
if a.anima:
if a.gen == 'RGB': # zoom in
m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[-0.3], verbose=False)
m_scale = 1 + (m_scale + 0.3) * a.scale
else:
m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[0.6], verbose=False)
m_scale = 1 - (m_scale-0.6) * a.scale
m_shift = latent_anima([2], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp,midp], verbose=False)
m_angle = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp], verbose=False)
m_shear = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp], verbose=False)
m_shift = (midp-m_shift) * a.shift * abs(m_scale-1) / a.scale
m_angle = (midp-m_angle) * a.angle * abs(m_scale-1) / a.scale
m_shear = (midp-m_shear) * a.shear * abs(m_scale-1) / a.scale
def get_encs(encs, num):
cnt = len(encs)
if cnt == 0: return []
enc_1 = encs[min(num, cnt-1)]
enc_2 = encs[min(num+1, cnt-1)]
return slerp(enc_1, enc_2, opt_steps)
prev_enc = 0
def process(num):
global params_tmp, opt_state, params, image_f, optimizer
if a.interpol is True: # linear topics interpolation
txt_encs = get_encs(key_txt_encs, num)
styl_encs = get_encs(key_styl_encs, num)
img_encs = get_encs(key_img_encs, num)
else: # change by cut
txt_encs = [key_txt_encs[min(num, len(key_txt_encs)-1)][0]] * opt_steps if len(key_txt_encs) > 0 else []
styl_encs = [key_styl_encs[min(num, len(key_styl_encs)-1)][0]] * opt_steps if len(key_styl_encs) > 0 else []
img_encs = [key_img_encs[min(num, len(key_img_encs)-1)][0]] * opt_steps if len(key_img_encs) > 0 else []
if a.verbose is True:
if len(texts) > 0: print(' ref text: ', texts[min(num, len(texts)-1)][:80])
if len(styles) > 0: print(' ref style: ', styles[min(num, len(styles)-1)][:80])
if len(images) > 0: print(' ref image: ', basename(images[min(num, len(images)-1)])[:80])
pbar = ProgressBar(steps)
for ii in range(opt_steps):
glob_step = num * steps + ii // a.opt_step # save/transform
loss = 0
txt_enc = txt_encs[ii % len(txt_encs)].unsqueeze(0) if len(txt_encs) > 0 else None
styl_enc = styl_encs[ii % len(styl_encs)].unsqueeze(0) if len(styl_encs) > 0 else None
img_enc = img_encs[ii % len(img_encs)].unsqueeze(0) if len(img_encs) > 0 else None
# MOTION: transform frame, reload params
if ii % a.opt_step == 0:
scale = m_scale[glob_step] if a.anima else 1 + a.scale
shift = tuple(m_shift[glob_step]) if a.anima else [0, a.shift]
angle = m_angle[glob_step][0] if a.anima else a.angle
shear = m_shear[glob_step][0] if a.anima else a.shear
if a.gen == 'RGB':
img_tmp = frame_transform(params_tmp, a.size, angle, shift, scale, shear)
params, image_f, _ = pixel_image([1, 3, *a.size], resume=img_tmp)
else: # FFT
if old_torch(): # 1.7.1
img_tmp = torch.irfft(params_tmp, 2, normalized=True, signal_sizes=a.size)
img_tmp = frame_transform(img_tmp, a.size, angle, shift, scale, shear)
params_tmp = torch.rfft(img_tmp, 2, normalized=True)
else: # 1.8+
if type(params_tmp) is not torch.complex64:
params_tmp = torch.view_as_complex(params_tmp)
img_tmp = torch.fft.irfftn(params_tmp, s=a.size, norm='ortho')
img_tmp = frame_transform(img_tmp, a.size, angle, shift, scale, shear)
params_tmp = torch.fft.rfftn(img_tmp, s=a.size, dim=[2,3], norm='ortho')
params_tmp = torch.view_as_real(params_tmp)
params, image_f, _ = fft_image([1, 3, *a.size], sd=1, resume=params_tmp)
optimizer = torch.optim.Adam(params, a.lrate)
# optimizer = torch.optim.AdamW(params, a.lrate, weight_decay=0.01, amsgrad=True)
image_f = to_valid_rgb(image_f, colors = a.colors)
del img_tmp
if a.smooth is True and num + ii > 0:
optimizer.load_state_dict(opt_state)
noise = a.noise * (torch.rand(1, 1, a.size[0], a.size[1]//2+1, 1)-0.5).cuda() if a.noise>0 else 0.
img_out = image_f(noise)
img_sliced = slice_imgs([img_out], a.samples, a.modsize, trform_f, a.align, a.macro)[0]
out_enc = model_clip.encode_image(img_sliced)
if a.gen == 'RGB': # empirical hack
loss += 1.66 * abs(img_out.mean((2,3)) - 0.45).sum() # fix brightness
loss += 1.66 * abs(img_out.std((2,3)) - 0.17).sum() # fix contrast
if txt_enc is not None:
loss -= a.invert * sim_func(txt_enc, out_enc, a.sim)
if styl_enc is not None:
loss -= a.weight2 * sim_func(styl_enc, out_enc, a.sim)
if img_enc is not None:
loss -= a.weight_img * sim_func(img_enc, out_enc, a.sim)
if a.in_txt0 is not None: # subtract text
for anti_txt_enc in anti_txt_encs:
loss += 0.3 * sim_func(anti_txt_enc, out_enc, a.sim)
if a.sharp != 0: # scharr|sobel|naive
loss -= a.sharp * derivat(img_out, mode='naive')
if a.enforce != 0:
img_sliced = slice_imgs([image_f(noise)], a.samples, a.modsize, trform_f, a.align, a.macro)[0]
out_enc2 = model_clip.encode_image(img_sliced)
loss -= a.enforce * sim_func(out_enc, out_enc2, a.sim)
del out_enc2; torch.cuda.empty_cache()
if a.expand > 0:
global prev_enc
if ii > 0:
loss += a.expand * sim_func(prev_enc, out_enc, a.sim)
prev_enc = out_enc.detach().clone()
del img_out, img_sliced, out_enc; torch.cuda.empty_cache()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if ii % a.opt_step == a.opt_step-1:
params_tmp = params[0].detach().clone()
if a.smooth is True:
opt_state = optimizer.state_dict()
if ii % a.opt_step == 0:
with torch.no_grad():
img_t = image_f(contrast=a.contrast)[0].permute(1,2,0)
img = torch.clip(img_t*255, 0, 255).cpu().numpy().astype(np.uint8)
imsave(os.path.join(tempdir, '%06d.jpg' % glob_step), img, quality=95)
if a.verbose is True: cvshow(img)
del img, img_t
pbar.upd()
params_tmp = params[0].detach().clone()
glob_start = time.time()
try:
for i in range(count):
process(i)
except KeyboardInterrupt:
pass
os.system('ffmpeg -v warning -y -i %s/\%%06d.jpg "%s.mp4"' % (tempdir, os.path.join(workdir, workname)))
if __name__ == '__main__':
main()
| 49.329759 | 170 | 0.598804 | ["MIT"] | ksburaya/aphantasia | illustrip.py | 18,400 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ApiOperation']
class ApiOperation(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
method: Optional[pulumi.Input[str]] = None,
operation_id: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[str]] = None,
request: Optional[pulumi.Input[pulumi.InputType['RequestContractArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
responses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResponseContractArgs']]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
template_parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ParameterContractArgs']]]]] = None,
url_template: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Api Operation details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param pulumi.Input[str] description: Description of the operation. May include HTML formatting tags.
:param pulumi.Input[str] display_name: Operation Name.
        :param pulumi.Input[str] method: A Valid HTTP Operation Method. Typical Http Methods like GET, PUT, POST, but not limited to them.
:param pulumi.Input[str] operation_id: Operation identifier within an API. Must be unique in the current API Management service instance.
:param pulumi.Input[str] policies: Operation Policies
:param pulumi.Input[pulumi.InputType['RequestContractArgs']] request: An entity containing request details.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResponseContractArgs']]]] responses: Array of Operation responses.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ParameterContractArgs']]]] template_parameters: Collection of URL template parameters.
:param pulumi.Input[str] url_template: Relative URL template identifying the target resource for this operation. May include parameters. Example: /customers/{cid}/orders/{oid}/?date={date}
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if api_id is None and not opts.urn:
raise TypeError("Missing required property 'api_id'")
__props__['api_id'] = api_id
__props__['description'] = description
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__['display_name'] = display_name
if method is None and not opts.urn:
raise TypeError("Missing required property 'method'")
__props__['method'] = method
__props__['operation_id'] = operation_id
__props__['policies'] = policies
__props__['request'] = request
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['responses'] = responses
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
__props__['template_parameters'] = template_parameters
if url_template is None and not opts.urn:
raise TypeError("Missing required property 'url_template'")
__props__['url_template'] = url_template
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20160707:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20161010:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:ApiOperation")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApiOperation, __self__).__init__(
'azure-nextgen:apimanagement/v20200601preview:ApiOperation',
resource_name,
__props__,
opts)
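    # Illustrative sketch (all identifiers below are hypothetical, not taken from a real
    # API Management service); it simply fills in the required properties listed above.
    #
    #   op = ApiOperation("exampleOperation",
    #                     api_id="example-api",
    #                     display_name="Get customer orders",
    #                     method="GET",
    #                     resource_group_name="example-rg",
    #                     service_name="example-apim",
    #                     url_template="/customers/{cid}/orders/{oid}")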
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApiOperation':
"""
Get an existing ApiOperation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ApiOperation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the operation. May include HTML formatting tags.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
Operation Name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def method(self) -> pulumi.Output[str]:
"""
        A Valid HTTP Operation Method. Typical Http Methods like GET, PUT, POST, but not limited to them.
"""
return pulumi.get(self, "method")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def policies(self) -> pulumi.Output[Optional[str]]:
"""
Operation Policies
"""
return pulumi.get(self, "policies")
@property
@pulumi.getter
def request(self) -> pulumi.Output[Optional['outputs.RequestContractResponse']]:
"""
An entity containing request details.
"""
return pulumi.get(self, "request")
@property
@pulumi.getter
def responses(self) -> pulumi.Output[Optional[Sequence['outputs.ResponseContractResponse']]]:
"""
Array of Operation responses.
"""
return pulumi.get(self, "responses")
@property
@pulumi.getter(name="templateParameters")
def template_parameters(self) -> pulumi.Output[Optional[Sequence['outputs.ParameterContractResponse']]]:
"""
Collection of URL template parameters.
"""
return pulumi.get(self, "template_parameters")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="urlTemplate")
def url_template(self) -> pulumi.Output[str]:
"""
Relative URL template identifying the target resource for this operation. May include parameters. Example: /customers/{cid}/orders/{oid}/?date={date}
"""
return pulumi.get(self, "url_template")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 47.985577 | 794 | 0.660655 | ["Apache-2.0"] | pulumi/pulumi-azure-nextgen | sdk/python/pulumi_azure_nextgen/apimanagement/v20200601preview/api_operation.py | 9,981 | Python |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Automatic check-in for the "Ping An Action" (daily health report)
Install the lxml and requests modules beforehand:
pip install lxml requests
Then edit lines 27-31 with your own data; variables you do not use can be left as they are.
If needed, configure the SMTP mail notification at lines 149-171 or the ServerChan WeChat notification at lines 174-177.
Created on 2020-04-13 20:20
@author: ZhangJiawei & Liu Chongpeng & Liu Lu
"""
import requests
import lxml.html
import re
import json
import random
import time
import smtplib
import traceback
myid = "STUDENTID"
mypass = "PASSWORD"
mybound = "BOUNDFIELDS"
mydata = r'FORMDATA'
# mysckey = "SCKEY"
title = ""
msg = ""
proxies = {"http": None, "https": None}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN",
"Cache-Control": "max-age=0",
"Connection": "keep-alive",
"Content-Type": "application/x-www-form-urlencoded",
"Cookie": "MESSAGE_TICKET=%7B%22times%22%3A0%7D; ",
"Host": "cas.hrbeu.edu.cn",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362"
}
def findStr(source, target):
return source.find(target) != -1
if __name__ == '__main__':
try:
        ## Log in to the campus network (CAS) authentication page
url_login = 'https://cas.hrbeu.edu.cn/cas/login?'
print("============================\n[debug] Begin to login ...")
sesh = requests.session()
req = sesh.get(url_login, proxies=proxies)
html_content = req.text
login_html = lxml.html.fromstring(html_content)
hidden_inputs = login_html.xpath( r'//div[@id="main"]//input[@type="hidden"]')
user_form = {x.attrib["name"]: x.attrib["value"] for x in hidden_inputs}
user_form["username"] = myid
user_form["password"] = mypass
user_form["captcha"] = ''
user_form["submit"] = '登 录'
headers['Cookie'] = headers['Cookie'] + req.headers['Set-cookie']
req.url = f'https://cas.hrbeu.edu.cn/cas/login'
response302 = sesh.post(req.url, data=user_form, headers=headers, proxies=proxies)
        ## Open the "Ping An Action" check-in page
jkgc_response = sesh.get( "http://jkgc.hrbeu.edu.cn/infoplus/form/JSXNYQSBtest/start", proxies=proxies)
headers['Accept'] = '*/*'
headers['Cookie'] = jkgc_response.request.headers['Cookie']
headers['Host'] = 'jkgc.hrbeu.edu.cn'
headers['Referer'] = jkgc_response.url
jkgc_html = lxml.html.fromstring(jkgc_response.text)
csrfToken = jkgc_html.xpath(r'//meta[@itemscope="csrfToken"]')
csrfToken = csrfToken.pop().attrib["content"]
jkgc_form = {
'idc': 'JSXNYQSBtest',
'release': '',
'csrfToken': csrfToken,
'formData': {
'_VAR_URL': jkgc_response.url,
'_VAR_URL_Attr': {}
}
}
jkgc_form['formData'] = json.dumps(jkgc_form['formData'])
jkgc_url = 'http://jkgc.hrbeu.edu.cn/infoplus/interface/start'
response3 = sesh.post(jkgc_url, data=jkgc_form, headers=headers, proxies=proxies)
        ## Submit the "Ping An Action" form
form_url = json.loads(response3.text)['entities'][0]
form_response = sesh.get(form_url)
headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
headers['Referer'] = form_url
headers['X-Requested-With'] = 'XMLHttpRequest'
submit_url = 'http://jkgc.hrbeu.edu.cn/infoplus/interface/doAction'
submit_html = lxml.html.fromstring(form_response.text)
csrfToken2 = submit_html.xpath(r'//meta[@itemscope="csrfToken"]')
csrfToken2 = csrfToken2.pop().attrib["content"]
submit_form = {
'actionId': '1',
            'boundFields': mybound, # boundFields value (edit at the top of the file)
'csrfToken': csrfToken2,
            'formData': mydata, # formData value (edit at the top of the file)
'lang': 'zh',
'nextUsers': '{}',
'rand': str(random.random() * 999),
'remark': '',
'stepId': re.match(r'.*form/(\d*?)/', form_response.url).group(1),
'timestamp': str(int(time.time()+0.5))
}
response_end = sesh.post(submit_url, data=submit_form, headers=headers, proxies=proxies)
resJson = json.loads(response_end.text)
        ## Form submitted; report the result
print('[debug] Form url: ', form_response.url)
print('[debug] Form Status: ', resJson['ecode'])
print('[debug] Form stJson: ', resJson)
        ## Build the title and message for the notification
if (resJson['errno'] == 0):
print('[info] Checkin succeed with jsoncode', resJson['ecode'])
title = f'打卡成功 <{submit_form["stepId"]}>'
msg = '\t表单地址: ' + form_response.url + '\n\n\t表单状态: \n\t\terrno:' + str(resJson['errno']) + '\n\t\tecode:' + str(
resJson['ecode']) + '\n\t\tentities:' + str(resJson['entities']) + '\n\n\n\t完整返回:' + response_end.text
else:
print('[error] Checkin error with jsoncode', resJson['ecode'])
title = f'打卡失败!校网出错'
msg = '\t表单地址: ' + form_response.url + '\n\n\t错误信息: \n\t\terrno:' + str(resJson['errno']) + '\n\t\tecode:' + str(
resJson['ecode']) + '\n\t\tentities:' + str(resJson['entities']) + '\n\n\n\t完整返回:' + response_end.text
except:
print('\n[error] :.:.:.:.: Except return :.:.:.:.:')
err = traceback.format_exc()
print('[error] Python Error: \n', err)
title = '打卡失败!脚本出错'
msg = '\t脚本报错: \n\n\t' + err + '============================\n'
finally:
print(':.:.:.:.: Finally :.:.:.:.:')
        ## Send an email notification
# from email.mime.text import MIMEText
# from email.header import Header
        # mail_host = "smtp.qq.com" # SMTP server address
        # mail_user = "[email protected]" # SMTP sender account (username)
        # mail_pass = "emailpassword" # SMTP sender password
        # sender = '[email protected]' # sender address, i.e. the SMTP account above
        # receivers = ['[email protected]'] # recipient addresses; use a list for several
# message = MIMEText(msg, 'plain', 'utf-8')
        # message['From'] = Header("[email protected]", 'utf-8') # sender address, display only
        # message['To'] = Header("[email protected]", 'utf-8') # recipient address, display only
# subject = title
# message['Subject'] = Header(subject, 'utf-8')
# try:
        # smtpObj = smtplib.SMTP_SSL(mail_host) # SSL-encrypted sending (Python 3.7+)
        # smtpObj.connect(mail_host, 465) # SMTP-over-SSL port 465 (Python 3.7+)
# smtpObj.login(mail_user,mail_pass)
# smtpObj.sendmail(sender, receivers, message.as_string())
        # print ("[info] Success: The email was sent successfully") # log output
        # except smtplib.SMTPException:
        # print ("[error] Error: Can not send mail") # log output
        ## Or send a WeChat notification via ServerChan instead
# wcurl = 'https://sc.ftqq.com/' + mysckey + '.send'
# wcdata = {'text': title, 'desp': msg}
# try:
# wcresult = requests.post(wcurl, wcdata)
# print('[info] Notification sended at', time.strftime("%Y-%m-%d %H:%M:%S %A", time.localtime()))
# except:
# print('[error] Failed to send notification!')
print('[info] Task Finished at', time.strftime("%Y-%m-%d %H:%M:%S %A", time.localtime()))
print('============================\n')
| 41.524862 | 149 | 0.562932 | ["MIT"] | Hydrcat/HEU-Checkin-COVID-19 | Server/checkin.py | 8,098 | Python |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class JreUsage(object):
"""
Java Runtime usage during a specified time period. A Java Runtime is identified by its vendor and version.
"""
#: A constant which can be used with the security_status property of a JreUsage.
#: This constant has a value of "UNKNOWN"
SECURITY_STATUS_UNKNOWN = "UNKNOWN"
#: A constant which can be used with the security_status property of a JreUsage.
#: This constant has a value of "UP_TO_DATE"
SECURITY_STATUS_UP_TO_DATE = "UP_TO_DATE"
#: A constant which can be used with the security_status property of a JreUsage.
#: This constant has a value of "UPDATE_REQUIRED"
SECURITY_STATUS_UPDATE_REQUIRED = "UPDATE_REQUIRED"
#: A constant which can be used with the security_status property of a JreUsage.
#: This constant has a value of "UPGRADE_REQUIRED"
SECURITY_STATUS_UPGRADE_REQUIRED = "UPGRADE_REQUIRED"
def __init__(self, **kwargs):
"""
Initializes a new JreUsage object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this JreUsage.
:type id: str
:param fleet_id:
The value to assign to the fleet_id property of this JreUsage.
:type fleet_id: str
:param managed_instance_id:
The value to assign to the managed_instance_id property of this JreUsage.
:type managed_instance_id: str
:param security_status:
The value to assign to the security_status property of this JreUsage.
Allowed values for this property are: "UNKNOWN", "UP_TO_DATE", "UPDATE_REQUIRED", "UPGRADE_REQUIRED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type security_status: str
:param release_date:
The value to assign to the release_date property of this JreUsage.
:type release_date: datetime
:param end_of_support_life_date:
The value to assign to the end_of_support_life_date property of this JreUsage.
:type end_of_support_life_date: datetime
:param vendor:
The value to assign to the vendor property of this JreUsage.
:type vendor: str
:param distribution:
The value to assign to the distribution property of this JreUsage.
:type distribution: str
:param version:
The value to assign to the version property of this JreUsage.
:type version: str
:param operating_systems:
The value to assign to the operating_systems property of this JreUsage.
:type operating_systems: list[oci.jms.models.OperatingSystem]
:param approximate_installation_count:
The value to assign to the approximate_installation_count property of this JreUsage.
:type approximate_installation_count: int
:param approximate_application_count:
The value to assign to the approximate_application_count property of this JreUsage.
:type approximate_application_count: int
:param approximate_managed_instance_count:
The value to assign to the approximate_managed_instance_count property of this JreUsage.
:type approximate_managed_instance_count: int
:param time_start:
The value to assign to the time_start property of this JreUsage.
:type time_start: datetime
:param time_end:
The value to assign to the time_end property of this JreUsage.
:type time_end: datetime
:param time_first_seen:
The value to assign to the time_first_seen property of this JreUsage.
:type time_first_seen: datetime
:param time_last_seen:
The value to assign to the time_last_seen property of this JreUsage.
:type time_last_seen: datetime
"""
self.swagger_types = {
'id': 'str',
'fleet_id': 'str',
'managed_instance_id': 'str',
'security_status': 'str',
'release_date': 'datetime',
'end_of_support_life_date': 'datetime',
'vendor': 'str',
'distribution': 'str',
'version': 'str',
'operating_systems': 'list[OperatingSystem]',
'approximate_installation_count': 'int',
'approximate_application_count': 'int',
'approximate_managed_instance_count': 'int',
'time_start': 'datetime',
'time_end': 'datetime',
'time_first_seen': 'datetime',
'time_last_seen': 'datetime'
}
self.attribute_map = {
'id': 'id',
'fleet_id': 'fleetId',
'managed_instance_id': 'managedInstanceId',
'security_status': 'securityStatus',
'release_date': 'releaseDate',
'end_of_support_life_date': 'endOfSupportLifeDate',
'vendor': 'vendor',
'distribution': 'distribution',
'version': 'version',
'operating_systems': 'operatingSystems',
'approximate_installation_count': 'approximateInstallationCount',
'approximate_application_count': 'approximateApplicationCount',
'approximate_managed_instance_count': 'approximateManagedInstanceCount',
'time_start': 'timeStart',
'time_end': 'timeEnd',
'time_first_seen': 'timeFirstSeen',
'time_last_seen': 'timeLastSeen'
}
self._id = None
self._fleet_id = None
self._managed_instance_id = None
self._security_status = None
self._release_date = None
self._end_of_support_life_date = None
self._vendor = None
self._distribution = None
self._version = None
self._operating_systems = None
self._approximate_installation_count = None
self._approximate_application_count = None
self._approximate_managed_instance_count = None
self._time_start = None
self._time_end = None
self._time_first_seen = None
self._time_last_seen = None
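    # Illustrative sketch (hypothetical values): summarising a JreUsage instance returned
    # by the service, using the getters defined below.
    #   print(f"{usage.vendor} {usage.distribution} {usage.version}: "
    #         f"{usage.approximate_managed_instance_count} instances, "
    #         f"security status {usage.security_status}")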
@property
def id(self):
"""
Gets the id of this JreUsage.
The internal identifier of the Java Runtime.
:return: The id of this JreUsage.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this JreUsage.
The internal identifier of the Java Runtime.
:param id: The id of this JreUsage.
:type: str
"""
self._id = id
@property
def fleet_id(self):
"""
Gets the fleet_id of this JreUsage.
The `OCID`__ of the related fleet. This property value is present only for /actions/listJreUsage.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The fleet_id of this JreUsage.
:rtype: str
"""
return self._fleet_id
@fleet_id.setter
def fleet_id(self, fleet_id):
"""
Sets the fleet_id of this JreUsage.
The `OCID`__ of the related fleet. This property value is present only for /actions/listJreUsage.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param fleet_id: The fleet_id of this JreUsage.
:type: str
"""
self._fleet_id = fleet_id
@property
def managed_instance_id(self):
"""
Gets the managed_instance_id of this JreUsage.
The `OCID`__ of the related managed instance. This property value is present only for /actions/listJreUsage.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The managed_instance_id of this JreUsage.
:rtype: str
"""
return self._managed_instance_id
@managed_instance_id.setter
def managed_instance_id(self, managed_instance_id):
"""
Sets the managed_instance_id of this JreUsage.
The `OCID`__ of the related managed instance. This property value is present only for /actions/listJreUsage.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param managed_instance_id: The managed_instance_id of this JreUsage.
:type: str
"""
self._managed_instance_id = managed_instance_id
@property
def security_status(self):
"""
Gets the security_status of this JreUsage.
The security status of the Java Runtime.
Allowed values for this property are: "UNKNOWN", "UP_TO_DATE", "UPDATE_REQUIRED", "UPGRADE_REQUIRED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The security_status of this JreUsage.
:rtype: str
"""
return self._security_status
@security_status.setter
def security_status(self, security_status):
"""
Sets the security_status of this JreUsage.
The security status of the Java Runtime.
:param security_status: The security_status of this JreUsage.
:type: str
"""
allowed_values = ["UNKNOWN", "UP_TO_DATE", "UPDATE_REQUIRED", "UPGRADE_REQUIRED"]
if not value_allowed_none_or_none_sentinel(security_status, allowed_values):
security_status = 'UNKNOWN_ENUM_VALUE'
self._security_status = security_status
@property
def release_date(self):
"""
Gets the release_date of this JreUsage.
The release date of the Java Runtime (formatted according to `RFC3339`__).
__ https://datatracker.ietf.org/doc/html/rfc3339
:return: The release_date of this JreUsage.
:rtype: datetime
"""
return self._release_date
@release_date.setter
def release_date(self, release_date):
"""
Sets the release_date of this JreUsage.
The release date of the Java Runtime (formatted according to `RFC3339`__).
__ https://datatracker.ietf.org/doc/html/rfc3339
:param release_date: The release_date of this JreUsage.
:type: datetime
"""
self._release_date = release_date
@property
def end_of_support_life_date(self):
"""
Gets the end_of_support_life_date of this JreUsage.
The End of Support Life (EOSL) date of the Java Runtime (formatted according to `RFC3339`__).
__ https://datatracker.ietf.org/doc/html/rfc3339
:return: The end_of_support_life_date of this JreUsage.
:rtype: datetime
"""
return self._end_of_support_life_date
@end_of_support_life_date.setter
def end_of_support_life_date(self, end_of_support_life_date):
"""
Sets the end_of_support_life_date of this JreUsage.
The End of Support Life (EOSL) date of the Java Runtime (formatted according to `RFC3339`__).
__ https://datatracker.ietf.org/doc/html/rfc3339
:param end_of_support_life_date: The end_of_support_life_date of this JreUsage.
:type: datetime
"""
self._end_of_support_life_date = end_of_support_life_date
@property
def vendor(self):
"""
**[Required]** Gets the vendor of this JreUsage.
The vendor of the Java Runtime.
:return: The vendor of this JreUsage.
:rtype: str
"""
return self._vendor
@vendor.setter
def vendor(self, vendor):
"""
Sets the vendor of this JreUsage.
The vendor of the Java Runtime.
:param vendor: The vendor of this JreUsage.
:type: str
"""
self._vendor = vendor
@property
def distribution(self):
"""
**[Required]** Gets the distribution of this JreUsage.
The distribution of a Java Runtime is the name of the lineage of product to which it belongs, for example _Java(TM) SE Runtime Environment_.
:return: The distribution of this JreUsage.
:rtype: str
"""
return self._distribution
@distribution.setter
def distribution(self, distribution):
"""
Sets the distribution of this JreUsage.
The distribution of a Java Runtime is the name of the lineage of product to which it belongs, for example _Java(TM) SE Runtime Environment_.
:param distribution: The distribution of this JreUsage.
:type: str
"""
self._distribution = distribution
@property
def version(self):
"""
**[Required]** Gets the version of this JreUsage.
The version of the Java Runtime.
:return: The version of this JreUsage.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this JreUsage.
The version of the Java Runtime.
:param version: The version of this JreUsage.
:type: str
"""
self._version = version
@property
def operating_systems(self):
"""
Gets the operating_systems of this JreUsage.
The operating systems that have this Java Runtime installed.
:return: The operating_systems of this JreUsage.
:rtype: list[oci.jms.models.OperatingSystem]
"""
return self._operating_systems
@operating_systems.setter
def operating_systems(self, operating_systems):
"""
Sets the operating_systems of this JreUsage.
The operating systems that have this Java Runtime installed.
:param operating_systems: The operating_systems of this JreUsage.
:type: list[oci.jms.models.OperatingSystem]
"""
self._operating_systems = operating_systems
@property
def approximate_installation_count(self):
"""
Gets the approximate_installation_count of this JreUsage.
The approximate count of installations that are installations of this Java Runtime.
:return: The approximate_installation_count of this JreUsage.
:rtype: int
"""
return self._approximate_installation_count
@approximate_installation_count.setter
def approximate_installation_count(self, approximate_installation_count):
"""
Sets the approximate_installation_count of this JreUsage.
The approximate count of installations that are installations of this Java Runtime.
:param approximate_installation_count: The approximate_installation_count of this JreUsage.
:type: int
"""
self._approximate_installation_count = approximate_installation_count
@property
def approximate_application_count(self):
"""
Gets the approximate_application_count of this JreUsage.
The approximate count of the applications running on this Java Runtime.
:return: The approximate_application_count of this JreUsage.
:rtype: int
"""
return self._approximate_application_count
@approximate_application_count.setter
def approximate_application_count(self, approximate_application_count):
"""
Sets the approximate_application_count of this JreUsage.
The approximate count of the applications running on this Java Runtime.
:param approximate_application_count: The approximate_application_count of this JreUsage.
:type: int
"""
self._approximate_application_count = approximate_application_count
@property
def approximate_managed_instance_count(self):
"""
Gets the approximate_managed_instance_count of this JreUsage.
The approximate count of the managed instances that report this Java Runtime.
:return: The approximate_managed_instance_count of this JreUsage.
:rtype: int
"""
return self._approximate_managed_instance_count
@approximate_managed_instance_count.setter
def approximate_managed_instance_count(self, approximate_managed_instance_count):
"""
Sets the approximate_managed_instance_count of this JreUsage.
The approximate count of the managed instances that report this Java Runtime.
:param approximate_managed_instance_count: The approximate_managed_instance_count of this JreUsage.
:type: int
"""
self._approximate_managed_instance_count = approximate_managed_instance_count
@property
def time_start(self):
"""
Gets the time_start of this JreUsage.
Lower bound of the specified time period filter. JMS provides a view of the data that is _per day_. The query uses only the date element of the parameter.
:return: The time_start of this JreUsage.
:rtype: datetime
"""
return self._time_start
@time_start.setter
def time_start(self, time_start):
"""
Sets the time_start of this JreUsage.
Lower bound of the specified time period filter. JMS provides a view of the data that is _per day_. The query uses only the date element of the parameter.
:param time_start: The time_start of this JreUsage.
:type: datetime
"""
self._time_start = time_start
@property
def time_end(self):
"""
Gets the time_end of this JreUsage.
Upper bound of the specified time period filter. JMS provides a view of the data that is _per day_. The query uses only the date element of the parameter.
:return: The time_end of this JreUsage.
:rtype: datetime
"""
return self._time_end
@time_end.setter
def time_end(self, time_end):
"""
Sets the time_end of this JreUsage.
Upper bound of the specified time period filter. JMS provides a view of the data that is _per day_. The query uses only the date element of the parameter.
:param time_end: The time_end of this JreUsage.
:type: datetime
"""
self._time_end = time_end
@property
def time_first_seen(self):
"""
Gets the time_first_seen of this JreUsage.
The date and time the resource was _first_ reported to JMS.
This is potentially _before_ the specified time period provided by the filters.
For example, a resource can be first reported to JMS before the start of a specified time period,
if it is also reported during the time period.
:return: The time_first_seen of this JreUsage.
:rtype: datetime
"""
return self._time_first_seen
@time_first_seen.setter
def time_first_seen(self, time_first_seen):
"""
Sets the time_first_seen of this JreUsage.
The date and time the resource was _first_ reported to JMS.
This is potentially _before_ the specified time period provided by the filters.
For example, a resource can be first reported to JMS before the start of a specified time period,
if it is also reported during the time period.
:param time_first_seen: The time_first_seen of this JreUsage.
:type: datetime
"""
self._time_first_seen = time_first_seen
@property
def time_last_seen(self):
"""
Gets the time_last_seen of this JreUsage.
The date and time the resource was _last_ reported to JMS.
This is potentially _after_ the specified time period provided by the filters.
For example, a resource can be last reported to JMS before the start of a specified time period,
if it is also reported during the time period.
:return: The time_last_seen of this JreUsage.
:rtype: datetime
"""
return self._time_last_seen
@time_last_seen.setter
def time_last_seen(self, time_last_seen):
"""
Sets the time_last_seen of this JreUsage.
The date and time the resource was _last_ reported to JMS.
This is potentially _after_ the specified time period provided by the filters.
For example, a resource can be last reported to JMS before the start of a specified time period,
if it is also reported during the time period.
:param time_last_seen: The time_last_seen of this JreUsage.
:type: datetime
"""
self._time_last_seen = time_last_seen
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 34.465267 | 245 | 0.662089 | ["Apache-2.0", "BSD-3-Clause"] | LaudateCorpus1/oci-python-sdk | src/oci/jms/models/jre_usage.py | 21,334 | Python |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
from functools import partial
import itertools
import operator
from typing import cast, Optional
import unittest
from unittest import SkipTest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import jax
import jax.ops
from jax import api
from jax import lax
from jax import linear_util
from jax import numpy as jnp
from jax import test_util as jtu
from jax import dtypes
from jax import tree_util
from jax.interpreters import partial_eval, xla
from jax.test_util import check_grads
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
one_dim_array_shapes = [(1,), (6,), (12,)]
empty_array_shapes = [(0,), (0, 4), (3, 0),]
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
def supported_dtypes(dtypes):
return [t for t in dtypes if t in jtu.supported_dtypes()]
float_dtypes = supported_dtypes([jnp.bfloat16, onp.float16, onp.float32,
onp.float64])
complex_dtypes = [onp.complex64, onp.complex128]
int_dtypes = [onp.int32, onp.int64]
uint_dtypes = [onp.uint32, onp.uint64]
unsigned_dtypes = [onp.uint32, onp.uint64]
bool_dtypes = [onp.bool_]
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes
python_scalar_dtypes = [jnp.bool_, jnp.int_, jnp.float_, jnp.complex_]
def _valid_dtypes_for_shape(shape, dtypes):
# Not all (shape, dtype) pairs are valid. In particular, Python scalars only
# have one type in each category (float, bool, etc.)
if shape is jtu.PYTHON_SCALAR_SHAPE:
return [t for t in dtypes if t in python_scalar_dtypes]
return dtypes
def _shape_and_dtypes(shapes, dtypes):
for shape in shapes:
for dtype in _valid_dtypes_for_shape(shape, dtypes):
yield (shape, dtype)
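# An OpRecord describes one numpy/jax.numpy function to test: its name, arity,
# candidate dtypes and shapes, the random-input factory, differentiation modes,
# and per-test options (tolerance overrides, dtype checking, inexactness).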
OpRecord = collections.namedtuple(
"OpRecord",
["name", "nargs", "dtypes", "shapes", "rng_factory", "diff_modes",
"test_name", "check_dtypes", "tolerance", "inexact"])
def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name=None, check_dtypes=True, tolerance=None, inexact=False):
test_name = test_name or name
return OpRecord(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name, check_dtypes, tolerance, inexact)
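# Ops with a direct numpy counterpart of the same name; together with the
# compound records below, these drive testOp in LaxBackedNumpyTests.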
JAX_ONE_TO_ONE_OP_RECORDS = [
op_record("abs", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("float_power", 2, inexact_dtypes, all_shapes,
partial(jtu.rand_default, scale=1), ["rev"],
tolerance={jnp.bfloat16: 1e-2, onp.float32: 1e-3,
onp.float64: 1e-12, onp.complex64: 2e-4,
onp.complex128: 1e-12}, check_dtypes=False),
op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("greater", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("greater_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("less", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("less_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("maximum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("minimum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("nextafter", 2, [f for f in float_dtypes if f != jnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0),
op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),
op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("signbit", 1, default_dtypes + bool_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"]),
op_record("trunc", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("tan", 1, number_dtypes, all_shapes,
partial(jtu.rand_uniform, -1.5, 1.5), ["rev"], inexact=True),
op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
# TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
# ~float32 precision.
# TODO(b/143135720): on GPU, tanh has only ~float32 precision.
op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={onp.float64: 1e-7, onp.complex128: 1e-7},
inexact=True),
op_record("arcsin", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arccos", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
]
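# Additional ops run through the same testOp harness, typically with custom
# tolerances, input distributions, or dtype-checking flags.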
JAX_COMPOUND_OP_RECORDS = [
# angle has inconsistent 32/64-bit return types across numpy versions.
op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default, [],
check_dtypes=False, inexact=True),
op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"],
inexact=True),
op_record("divmod", 2, int_dtypes + float_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={jnp.bfloat16: 2e-2, onp.float16: 1e-2}, inexact=True),
# TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
# precision.
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="expm1_large", tolerance={onp.float64: 1e-8}, inexact=True),
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive,
[], tolerance={onp.float64: 1e-8}, inexact=True),
op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("floor_divide", 2, number_dtypes, all_shapes,
jtu.rand_nonzero, ["rev"]),
op_record("floor_divide", 2, uint_dtypes, all_shapes,
jtu.rand_nonzero, ["rev"]),
op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="log1p_large", tolerance={onp.float64: 1e-12},
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],
tolerance={onp.float64: 1e-12}, inexact=True),
op_record("logaddexp", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={onp.float64: 1e-12}, inexact=True),
op_record("logaddexp2", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={onp.float16: 1e-2}, inexact=True),
op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes,
jtu.rand_default, [], check_dtypes=False,
tolerance={onp.float16: 1e-2, onp.float64: 1e-12}),
op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
tolerance={onp.complex128: 1e-14}),
op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-2}),
op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("sign", 1, number_dtypes + uint_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, []),
op_record('copysign', 2, default_dtypes, all_shapes, jtu.rand_some_inf_and_nan, [],
check_dtypes=False),
op_record("sinc", 1, [t for t in number_dtypes if t != jnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"],
tolerance={onp.complex64: 1e-5}, inexact=True,
check_dtypes=False),
op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"],
check_dtypes=False),
op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero,
["rev"], inexact=True),
op_record("diff", 1, number_dtypes, nonzerodim_shapes, jtu.rand_default, ["rev"]),
]
JAX_BITWISE_OP_RECORDS = [
op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
]
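# Reducers that accept an explicit output dtype; testReducer runs each record
# with dtype=None and with every dtype listed in the record.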
JAX_REDUCER_RECORDS = [
op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
op_record("nanmean", 1, inexact_dtypes, nonempty_shapes, jtu.rand_some_nan,
[], inexact=True),
op_record("nanprod", 1, inexact_dtypes, all_shapes, jtu.rand_some_nan, []),
op_record("nansum", 1, number_dtypes, all_shapes, jtu.rand_some_nan, []),
]
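# Reducers exercised without an output dtype argument (see testReducerNoDtype).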
JAX_REDUCER_NO_DTYPE_RECORDS = [
op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
]
JAX_ARGMINMAX_RECORDS = [
op_record("argmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
op_record("argmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
]
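# Dunder-method records: these are exercised through the operator module in
# testOperatorOverload and reused to build the deferral tests below.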
JAX_OPERATOR_OVERLOADS = [
op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__le__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={onp.float32: 2e-4, onp.complex64: 2e-4, onp.complex128: 1e-14}),
op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-1}),
op_record("__floordiv__", 2, default_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
# TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2
op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default, []),
# TODO(mattjj): investigate these failures
# op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
# op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
# TODO(mattjj): lshift, rshift
]
JAX_RIGHT_OPERATOR_OVERLOADS = [
op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={onp.float32: 2e-4, onp.complex64: 1e-3}),
op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-1}),
op_record("__rfloordiv__", 2, default_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
# op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
# op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
]
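# Sentinel operand types for testBinaryOperatorDefers: one claims to handle
# every binary operator (returning itself), the other returns NotImplemented.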
class _OverrideEverything(object):
pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
if rec.nargs == 2:
setattr(_OverrideEverything, rec.name, lambda self, other: self)
class _OverrideNothing(object):
pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
if rec.nargs == 2:
setattr(_OverrideNothing, rec.name, lambda self, other: NotImplemented)
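# A few extra records are only added when the installed numpy is at least 1.15
# (see the version check below).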
numpy_version = tuple(map(int, onp.version.version.split('.')))
if numpy_version >= (1, 15):
JAX_COMPOUND_OP_RECORDS += [
op_record("isclose", 2, [t for t in all_dtypes if t != jnp.bfloat16],
all_shapes, jtu.rand_small_positive, []),
op_record("gcd", 2, int_dtypes, all_shapes, jtu.rand_default, []),
op_record("lcm", 2, int_dtypes, all_shapes, jtu.rand_default, []),
]
JAX_REDUCER_NO_DTYPE_RECORDS += [
op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []),
]
CombosWithReplacement = itertools.combinations_with_replacement
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: jnp.issubdtype(dtype, onp.signedinteger)
width = lambda dtype: jnp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
  # The following condition seems a little ad hoc, but it captures what
  # numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
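# Broadcast compatibility is determined empirically: adding zeros arrays of the
# given shapes raises a ValueError exactly when the shapes cannot broadcast.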
def _shapes_are_broadcast_compatible(shapes):
accumulator = onp.zeros([])
for shape in shapes:
try:
accumulator = accumulator + onp.zeros(shape)
except ValueError:
return False
return True
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
def _promote_like_jnp(fun, inexact=False):
"""Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`.
  jnp and onp have different type promotion semantics; this decorator allows
  tests to make an onp reference implementation act more like a jnp
  implementation.
"""
def wrapper(*args, **kw):
flat_args = tree_util.tree_leaves(args)
if inexact and not any(jnp.issubdtype(jnp.result_type(x), jnp.inexact)
for x in flat_args):
dtype = jnp.result_type(jnp.float_, *flat_args)
else:
dtype = jnp.result_type(*flat_args)
args = tree_util.tree_map(lambda a: onp.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper
class LaxBackedNumpyTests(jtu.JaxTestCase):
"""Tests for LAX-backed Numpy implementation."""
def _GetArgsMaker(self, rng, shapes, dtypes, onp_arrays=True):
def f():
out = [rng(shape, dtype or jnp.float_)
for shape, dtype in zip(shapes, dtypes)]
if onp_arrays:
return out
return [jnp.asarray(a) if isinstance(a, (onp.ndarray, onp.generic)) else a
for a in out]
return f
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"check_dtypes": rec.check_dtypes, "tolerance": rec.tolerance,
"inexact": rec.inexact}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
JAX_COMPOUND_OP_RECORDS)))
def testOp(self, onp_op, jnp_op, rng_factory, shapes, dtypes, check_dtypes,
tolerance, inexact):
if onp_op is onp.float_power:
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="invalid value.*")(onp_op)
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
tol = functools.reduce(jtu.join_tolerance,
[tolerance, tol, jtu.default_tolerance()])
self._CheckAgainstNumpy(_promote_like_jnp(onp_op, inexact), jnp_op,
args_maker, check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"tol": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_OPERATOR_OVERLOADS))
def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
rng = rng_factory()
# onp and jnp arrays have different type promotion rules; force the use of
# jnp arrays.
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
scalar_arg = (jtu.PYTHON_SCALAR_SHAPE in shapes or
jtu.NUMPY_SCALAR_SHAPE in shapes or
() in shapes)
empty_shape = any(isinstance(s, tuple) and 0 in s for s in shapes)
self._CompileAndCheck(
fun, args_maker, check_dtypes=True, #not scalar_arg and not empty_shape,
atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"op_tolerance": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
op_tolerance):
if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
raise SkipTest("scalars not implemented") # TODO(mattjj): clean up
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
fun = lambda fst, snd: getattr(snd, name)(fst)
tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
scalar_arg = (jtu.PYTHON_SCALAR_SHAPE in shapes or
jtu.NUMPY_SCALAR_SHAPE in shapes or
() in shapes)
empty_shape = any(isinstance(s, tuple) and 0 in s for s in shapes)
self._CompileAndCheck(
fun, args_maker, check_dtypes=True, # not scalar_arg and not empty_shape,
atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": rec.test_name + "_{}".format(dtype),
"rng_factory": rec.rng_factory,
"op_name": rec.name, "dtype": dtype}
for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2
for dtype in rec.dtypes))
def testBinaryOperatorDefers(self, op_name, rng_factory, dtype):
rng = rng_factory()
arg = jax.device_put(rng((), dtype))
op = getattr(operator, op_name)
other = _OverrideEverything()
assert op(other, arg) is other
assert op(arg, other) is other
other = _OverrideNothing()
if op_name == "__eq__":
assert op(other, arg) is False
assert op(arg, other) is False
elif op_name == "__ne__":
assert op(other, arg) is True
assert op(arg, other) is True
else:
with self.assertRaises(TypeError):
op(other, arg)
with self.assertRaises(TypeError):
op(arg, other)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.test_name, shapes, dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name)}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in filter(
_dtypes_are_compatible_for_bitwise_ops,
CombosWithReplacement(rec.dtypes, rec.nargs)))
for rec in JAX_BITWISE_OP_RECORDS))
def testBitwiseOp(self, onp_op, jnp_op, rng_factory, shapes, dtypes):
rng = rng_factory()
if not FLAGS.jax_enable_x64 and any(
jnp.iinfo(dtype).bits == 64 for dtype in dtypes):
self.skipTest("x64 types are disabled by jax_enable_x64")
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis,
"None" if out_dtype is None else onp.dtype(out_dtype).name, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for shape in rec.shapes for dtype in rec.dtypes
for out_dtype in [None] + rec.dtypes
for axis in list(range(-len(shape), len(shape))) + [None]
for keepdims in [False, True])
for rec in JAX_REDUCER_RECORDS))
def testReducer(self, onp_op, jnp_op, rng_factory, shape, dtype, out_dtype,
axis, keepdims, inexact):
rng = rng_factory()
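    # Cast bfloat16 inputs and output dtypes to float32 on the numpy side,
    # since the reference reduction is computed with plain numpy.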
@jtu.ignore_warning(category=onp.ComplexWarning)
@jtu.ignore_warning(category=RuntimeWarning,
message="mean of empty slice.*")
def onp_fun(x):
x_cast = x if dtype != jnp.bfloat16 else x.astype(onp.float32)
t = out_dtype if out_dtype != jnp.bfloat16 else onp.float32
return onp_op(x_cast, axis, dtype=t, keepdims=keepdims)
onp_fun = _promote_like_jnp(onp_fun, inexact)
jnp_fun = lambda x: jnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol_spec = {onp.float16: 1e-2, onp.float32: 1e-3, onp.complex64: 1e-3,
onp.float64: 1e-5, onp.complex128: 1e-5}
tol = jtu.tolerance(dtype, tol_spec)
tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=jnp.bfloat16 not in (dtype, out_dtype),
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for rec in JAX_REDUCER_NO_DTYPE_RECORDS
for shape in rec.shapes for dtype in rec.dtypes
for axis in list(range(-len(shape), len(shape))) + [None]
for keepdims in [False, True]))
def testReducerNoDtype(self, onp_op, jnp_op, rng_factory, shape, dtype, axis,
keepdims, inexact):
rng = rng_factory()
onp_fun = lambda x: onp_op(x, axis, keepdims=keepdims)
onp_fun = _promote_like_jnp(onp_fun, inexact)
onp_fun = jtu.ignore_warning(category=onp.ComplexWarning)(onp_fun)
jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes for dtype in all_dtypes
for axis in list(range(-len(shape), len(shape))) + [None]))
def testCountNonzero(self, shape, dtype, axis):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.count_nonzero(x, axis)
jnp_fun = lambda x: jnp.count_nonzero(x, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testNonzero(self, shape, dtype):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.nonzero(x)
onp_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(onp_fun)
jnp_fun = lambda x: jnp.nonzero(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis}
for rec in JAX_ARGMINMAX_RECORDS
for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)
for axis in range(-len(shape), len(shape))))
def testArgMinMax(self, onp_op, jnp_op, rng_factory, shape, dtype, axis):
rng = rng_factory()
if dtype == onp.complex128 and jtu.device_under_test() == "gpu":
raise unittest.SkipTest("complex128 reductions not supported on GPU")
def onp_fun(array_to_reduce):
return onp_op(array_to_reduce, axis).astype(jnp.int_)
def jnp_fun(array_to_reduce):
return jnp_op(array_to_reduce, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for lhs_shape, rhs_shape, axes in [
[(2,), (2,), (-1, -1, -1, None)], # scalar output
[(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors
[(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors
[(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting
[(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes
[(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting
[(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors
[(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting
[(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing
[(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
axisa, axisb, axisc, axis = axes
jnp_fun = lambda a, b: jnp.cross(a, b, axisa, axisb, axisc, axis)
def onp_fun(a, b):
a = a.astype(onp.float32) if lhs_dtype == jnp.bfloat16 else a
b = b.astype(onp.float32) if rhs_dtype == jnp.bfloat16 else b
out = onp.cross(a, b, axisa, axisb, axisc, axis)
return out.astype(jnp.promote_types(lhs_dtype, rhs_dtype))
tol_spec = {dtypes.bfloat16: 3e-1, onp.float16: 0.15}
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("matrix-scalar", (3, 3), ()),
("scalar-matrix", (), (3, 3)),
("matrix-vector", (4, 5), (5,)),
("vector-matrix", (6,), (6, 4)),
("matrix-matrix", (3, 4), (4, 5)),
("tensor-vector", (4, 3, 2), (2,)),
("vector-tensor", (2,), (3, 2, 4)),
("tensor-matrix", (4, 3, 2), (2, 5)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-tensor", (2, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 1e-5, onp.float64: 1e-14,
onp.complex128: 1e-14}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
def onp_dot(x, y):
x = x.astype(onp.float32) if lhs_dtype == jnp.bfloat16 else x
y = y.astype(onp.float32) if rhs_dtype == jnp.bfloat16 else y
return onp.dot(x, y).astype(jnp.promote_types(lhs_dtype, rhs_dtype))
self._CheckAgainstNumpy(onp_dot, jnp.dot, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp.dot, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("matrix-vector", (3, 3), (3,)),
("vector-matrix", (3,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("vector-tensor", (3,), (5, 3, 2)),
("tensor-vector", (5, 3, 2), (2,)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-matrix", (5, 2, 3), (3, 2)),
("tensor-tensor", (5, 3, 4), (5, 4, 1)),
("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
def onp_fun(x, y):
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return onp.matmul(x, y).astype(dtype)
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 2e-2, onp.float64: 1e-12,
onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 4e-2
self._CheckAgainstNumpy(onp_fun, jnp.matmul, args_maker,
check_dtypes=True, tol=tol)
self._CompileAndCheck(jnp.matmul, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for lhs_shape, rhs_shape, axes in [
[(3,), (), 0],
[(2, 3, 4), (5, 6, 7), 0], # from issue #740
[(2, 3, 4), (3, 4, 5, 6), 2],
[(2, 3, 4), (5, 4, 3, 6), [1, 2]],
[(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
[(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
jnp_fun = lambda a, b: jnp.tensordot(a, b, axes)
def onp_fun(a, b):
a = a if lhs_dtype != jnp.bfloat16 else a.astype(onp.float32)
b = b if rhs_dtype != jnp.bfloat16 else b.astype(onp.float32)
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return onp.tensordot(a, b, axes).astype(dtype)
tol = {onp.float16: 1e-1, onp.float32: 1e-3, onp.float64: 1e-12,
onp.complex64: 1e-3, onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testTensordotErrors(self):
a = onp.random.random((3, 2, 2))
b = onp.random.random((2,))
self.assertRaisesRegex(
TypeError, "Number of tensordot axes.*exceeds input ranks.*",
lambda: jnp.tensordot(a, b, axes=2))
self.assertRaisesRegex(
TypeError, "tensordot requires axes lists to have equal length.*",
lambda: jnp.tensordot(a, b, axes=([0], [0, 1])))
self.assertRaisesRegex(
TypeError, "tensordot requires both axes lists to be either ints, tuples or lists.*",
lambda: jnp.tensordot(a, b, axes=('bad', 'axes')))
self.assertRaisesRegex(
TypeError, "tensordot axes argument must be an int, a pair of ints, or a pair of lists.*",
lambda: jnp.tensordot(a, b, axes='badaxes'))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": jtu.rand_default}
# TODO(phawkins): support integer dtypes too.
for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
if len(jtu._dims_of_shape(lhs_shape)) == 0
or len(jtu._dims_of_shape(rhs_shape)) == 0
or lhs_shape[-1] == rhs_shape[-1]))
def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
def onp_fun(lhs, rhs):
lhs = lhs if lhs_dtype != jnp.bfloat16 else lhs.astype(onp.float32)
rhs = rhs if rhs_dtype != jnp.bfloat16 else rhs.astype(onp.float32)
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return onp.inner(lhs, rhs).astype(dtype)
jnp_fun = lambda lhs, rhs: jnp.inner(lhs, rhs)
tol_spec = {onp.float16: 1e-2, onp.float32: 1e-5, onp.float64: 1e-13}
if jtu.device_under_test() == "tpu":
tol_spec[onp.float32] = tol_spec[onp.complex64] = 2e-1
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
# TODO(phawkins): there are float32/float64 disagreements for some inputs.
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_amin={}_amax={}".format(
jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
"shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max,
"rng_factory": jtu.rand_default}
for shape in all_shapes for dtype in number_dtypes
for a_min, a_max in [(-1, None), (None, 1), (-1, 1),
(-onp.ones(1), None),
(None, onp.ones(1)),
(-onp.ones(1), onp.ones(1))]))
def testClipStaticBounds(self, shape, dtype, a_min, a_max, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)
jnp_fun = lambda x: jnp.clip(x, a_min=a_min, a_max=a_max)
args_maker = lambda: [rng(shape, dtype)]
# TODO(phawkins): the promotion behavior changed in Numpy 1.17.
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testClipError(self):
with self.assertRaisesRegex(ValueError, "At most one of a_min and a_max.*"):
jnp.clip(jnp.zeros((3,)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_decimals={}".format(
jtu.format_shape_dtype_string(shape, dtype), decimals),
"shape": shape, "dtype": dtype, "decimals": decimals,
"rng_factory": jtu.rand_default}
for shape, dtype in _shape_and_dtypes(all_shapes, number_dtypes)
for decimals in [0, 1, -2]))
def testRoundStaticDecimals(self, shape, dtype, decimals, rng_factory):
rng = rng_factory()
if jnp.issubdtype(dtype, onp.integer) and decimals < 0:
self.skipTest("Integer rounding with decimals < 0 not implemented")
onp_fun = lambda x: onp.round(x, decimals=decimals)
jnp_fun = lambda x: jnp.round(x, decimals=decimals)
args_maker = lambda: [rng(shape, dtype)]
tol = {jnp.bfloat16: 5e-2, onp.float16: 1e-2}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
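  # Python's built-in round() should agree between numpy and jax scalar/array
  # inputs, with and without an explicit number of digits.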
def testOperatorRound(self):
self.assertAllClose(round(onp.float32(7.532), 1),
round(jnp.float32(7.5), 1), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234), 2),
round(jnp.float32(1.234), 2), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234)),
round(jnp.float32(1.234)), check_dtypes=False)
self.assertAllClose(round(onp.float32(7.532), 1),
round(jnp.array(7.5, jnp.float32), 1), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234), 2),
round(jnp.array(1.234, jnp.float32), 2), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234)),
round(jnp.array(1.234, jnp.float32)),
check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_mode={}_rpadwidth={}_rconstantvalues={}".format(
jtu.format_shape_dtype_string(shape, dtype), mode, pad_width_rank,
constant_values_rank),
"shape": shape, "dtype": dtype, "mode": mode,
"pad_width_rank": pad_width_rank,
"constant_values_rank": constant_values_rank,
"rng_factory": jtu.rand_default,
"irng_factory": partial(jtu.rand_int, 3)}
for mode, constant_values_rank, shapes in [
('constant', 0, all_shapes),
('constant', 1, all_shapes),
('constant', 2, all_shapes),
('symmetric', None, nonempty_shapes),
('reflect', None, nonempty_shapes),
('wrap', None, nonempty_shapes),
('edge', None, nonempty_shapes),
]
for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)
for pad_width_rank in range(3)))
def testPad(self, shape, dtype, mode, pad_width_rank, constant_values_rank,
rng_factory, irng_factory):
rng = rng_factory()
irng = irng_factory()
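    # pad_width_rank selects scalar, per-side, or per-axis-per-side pad widths
    # (shapes [], [2], and [len(shape), 2] respectively).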
pad_width = irng([len(shape), 2][2 - pad_width_rank:], onp.int32)
def onp_fun(x, kwargs):
if pad_width.size == 0:
return x
return onp.pad(x, pad_width, mode=mode, **kwargs)
def jnp_fun(x, kwargs):
return jnp.pad(x, pad_width, mode=mode, **kwargs)
def args_maker():
kwargs = {}
if constant_values_rank:
kwargs["constant_values"] = rng(
[len(shape), 2][2 - constant_values_rank:], dtype)
return rng(shape, dtype), kwargs
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_reps={}".format(
jtu.format_shape_dtype_string(shape, dtype), reps),
"shape": shape, "dtype": dtype, "reps": reps,
"rng_factory": jtu.rand_default}
for reps in [(), (2,), (3, 4), (2, 3, 4)]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
))
def testTile(self, shape, dtype, reps, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.tile(arg, reps)
jnp_fun = lambda arg: jnp.tile(arg, reps)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes,
"rng_factory": jtu.rand_default}
for num_arrs in [3]
for arg_dtypes in CombosWithReplacement(default_dtypes, num_arrs)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testConcatenate(self, axis, base_shape, arg_dtypes, rng_factory):
rng = rng_factory()
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def onp_fun(*args):
args = [x if x.dtype != jnp.bfloat16 else x.astype(onp.float32)
for x in args]
dtype = functools.reduce(jnp.promote_types, arg_dtypes)
return onp.concatenate(args, axis=axis).astype(dtype)
jnp_fun = lambda *args: jnp.concatenate(args, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes,
"rng_factory": jtu.rand_default}
for arg_dtypes in CombosWithReplacement(default_dtypes, 2)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testAppend(self, axis, base_shape, arg_dtypes, rng_factory):
rng = rng_factory()
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def onp_fun(arr, values):
arr = arr.astype(onp.float32) if arr.dtype == jnp.bfloat16 else arr
values = (values.astype(onp.float32) if values.dtype == jnp.bfloat16
else values)
out = onp.append(arr, values, axis=axis)
return out.astype(jnp.promote_types(*arg_dtypes))
jnp_fun = lambda arr, values: jnp.append(arr, values, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_axis={}_repeats={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, repeats),
"axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
"rng_factory": jtu.rand_default}
for repeats in [0, 1, 2]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
for axis in [None] + list(range(-len(shape), len(shape)))))
def testRepeat(self, axis, shape, dtype, repeats, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.repeat(arg, repeats=repeats, axis=axis)
onp_fun = _promote_like_jnp(onp_fun)
jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testIssue1233(self):
'''
    Follows the numpy `test_repeat` tests from https://github.com/numpy/numpy/blob/master/numpy/core/tests/test_multiarray.py
'''
tol = 1e-5
def test_single(m, args_maker, repeats, axis):
lax_ans = jnp.repeat(m, repeats, axis)
numpy_ans = onp.repeat(m, repeats, axis)
self.assertAllClose(lax_ans, numpy_ans, check_dtypes=True, rtol=tol, atol=tol)
      jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
m = jnp.array([1,2,3,4,5,6])
args_maker = lambda: [m]
for repeats in [2, [1,3,2,1,1,2], [1,3,0,1,1,2], [2], jnp.array([1,3,2,1,1,2]), jnp.array([2])]:
test_single(m, args_maker, repeats, None)
m_rect = m.reshape((2,3))
args_maker = lambda: [m_rect]
for repeats in [2, [2,1], [2], jnp.array([2,1]), jnp.array([2])]:
test_single(m_rect, args_maker, repeats, axis=0)
for repeats in [2, [1,3,2], [2], jnp.array([1,3,2]), jnp.array([2])]:
test_single(m_rect, args_maker, repeats, axis=1)
def testIssue2330(self):
'''
    Make sure the return value of jnp.concatenate is a jax.ndarray and is side-effect safe.
'''
def attempt_sideeffect(x):
x = [x]
x = jnp.concatenate(x)
x -= 1.
return x
onp_input = onp.ones((1))
jnp_input = jnp.ones((1))
expected_onp_input_after_call = onp.ones((1))
expected_jnp_input_after_call = jnp.ones((1))
self.assertIs(type(jnp.concatenate([onp_input])), jnp.DeviceArray)
attempt_sideeffect(onp_input)
attempt_sideeffect(jnp_input)
self.assertAllClose(onp_input, expected_onp_input_after_call, check_dtypes=True)
self.assertAllClose(jnp_input, expected_jnp_input_after_call, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_xshape=[{}]_yshape=[{}]_mode={}".format(
op,
jtu.format_shape_dtype_string(xshape, dtype),
jtu.format_shape_dtype_string(yshape, dtype),
mode),
"xshape": xshape, "yshape": yshape, "dtype": dtype, "mode": mode,
"rng_factory": jtu.rand_default,
"jnp_op": getattr(jnp, op),
"onp_op": getattr(onp, op)}
for mode in ['full', 'same', 'valid']
for op in ['convolve', 'correlate']
for dtype in default_dtypes
for xshape in one_dim_array_shapes
for yshape in one_dim_array_shapes))
def testConvolutions(self, xshape, yshape, dtype, mode, rng_factory, jnp_op, onp_op):
rng = rng_factory()
args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype)]
onp_fun = partial(onp_op, mode=mode)
jnp_fun = partial(jnp_op, mode=mode)
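    # TPU runs these ops at reduced precision, so allow a much looser tolerance there.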
tol = 1e-2 if jtu.device_under_test() != "tpu" else 0.5
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), axis,
out_dtype.__name__),
"axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"rng_factory": jtu.rand_default, "jnp_op": getattr(jnp, op),
"onp_op": getattr(onp, op)}
for op in ["cumsum", "cumprod"]
for dtype in all_dtypes
for out_dtype in default_dtypes
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testCumSumProd(self, axis, shape, dtype, out_dtype, onp_op, jnp_op, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp_op(arg, axis=axis, dtype=out_dtype)
onp_fun = jtu.ignore_warning(category=onp.ComplexWarning)(onp_fun)
jnp_fun = lambda arg: jnp_op(arg, axis=axis, dtype=out_dtype)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol = max(jtu.tolerance(dtype), jtu.tolerance(out_dtype))
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
onp.dtype(dtype).name, m, n, k),
"m": m, "n": n, "k": k, "dtype": dtype, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for n in [0, 4]
for m in [None, 0, 1, 3, 4]
for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda: onp.tri(n, M=m, k=k, dtype=dtype)
jnp_fun = lambda: jnp.tri(n, M=m, k=k, dtype=dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_shape={}_k={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "op": op, "k": k,
"rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for op in ["tril", "triu"]
for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: getattr(onp, op)(arg, k=k)
jnp_fun = lambda arg: getattr(jnp, op)(arg, k=k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_n={}".format(ndim, n),
"ndim": ndim, "n": n}
for ndim in [0, 1, 4]
for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
onp.testing.assert_equal(onp.diag_indices(n, ndim),
jnp.diag_indices(n, ndim))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.diag(arg, k)
jnp_fun = lambda arg: jnp.diag(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
"dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
"axis2": axis2, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in [a for a in range(-len(shape), len(shape))
if a % len(shape) != axis1 % len(shape)]
for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.diagonal(arg, offset, axis1, axis2)
jnp_fun = lambda arg: jnp.diagonal(arg, offset, axis1, axis2)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(onp.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
onp_fun = lambda: onp.identity(n, dtype)
jnp_fun = lambda: jnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x1={}_x2={}_x1_rng={}".format(
jtu.format_shape_dtype_string(x1_shape, x1_dtype),
jtu.format_shape_dtype_string(x2_shape, onp.int32),
x1_rng_factory_id),
"x1_shape": x1_shape, "x1_dtype": x1_dtype,
"x2_shape": x2_shape, "x1_rng_factory": x1_rng_factory,
"x2_rng_factory": x2_rng_factory}
for x1_rng_factory_id, x1_rng_factory in
enumerate([jtu.rand_some_inf_and_nan, jtu.rand_some_zero])
for x2_rng_factory in [partial(jtu.rand_int, -1075, 1024)]
for x1_shape, x2_shape in filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(array_shapes, 2))
for x1_dtype in default_dtypes))
@jtu.skip_on_devices("tpu") # TODO(b/153053081)
def testLdexp(self, x1_shape, x1_dtype, x2_shape, x1_rng_factory, x2_rng_factory):
# integer types are converted to float64 in numpy's implementation
if (x1_dtype not in [jnp.bfloat16, onp.float16, onp.float32]
and not FLAGS.jax_enable_x64):
self.skipTest("Only run float64 testcase when float64 is enabled.")
x1_rng = x1_rng_factory()
x2_rng = x2_rng_factory()
onp_fun = lambda x1, x2: onp.ldexp(x1, x2)
onp_fun = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(onp_fun)
jnp_fun = lambda x1, x2: jnp.ldexp(x1, x2)
args_maker = lambda: [x1_rng(x1_shape, x1_dtype),
x2_rng(x2_shape, onp.int32)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_rng_factory={}".format(
jtu.format_shape_dtype_string(shape, dtype), rng_factory_id),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for rng_factory_id, rng_factory in enumerate([
jtu.rand_some_inf_and_nan,
jtu.rand_some_zero,
partial(jtu.rand_not_small, offset=1e8),
])
for shape in all_shapes
for dtype in default_dtypes))
@jtu.skip_on_devices("tpu")
def testFrexp(self, shape, dtype, rng_factory):
# integer types are converted to float64 in numpy's implementation
if (dtype not in [jnp.bfloat16, onp.float16, onp.float32]
and not FLAGS.jax_enable_x64):
self.skipTest("Only run float64 testcase when float64 is enabled.")
rng = rng_factory()
onp_fun = lambda x: onp.frexp(x)
jnp_fun = lambda x: jnp.frexp(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype),
out_dtype, offset, axis1, axis2),
"dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
"axis1": axis1, "axis2": axis2, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for out_dtype in [None] + number_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in range(-len(shape), len(shape))
if (axis1 % len(shape)) != (axis2 % len(shape))
for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng_factory):
rng = rng_factory()
def onp_fun(arg):
if out_dtype == jnp.bfloat16:
return onp.trace(arg, offset, axis1, axis2, onp.float32).astype(jnp.bfloat16)
else:
return onp.trace(arg, offset, axis1, axis2, out_dtype)
jnp_fun = lambda arg: jnp.trace(arg, offset, axis1, axis2, out_dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis),
"shape": shape, "axis": axis, "dtypes": dtypes, "rng_factory": rng_factory}
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for rng_factory in [jtu.rand_default]))
def testStack(self, shape, axis, dtypes, rng_factory):
rng = rng_factory()
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = _promote_like_jnp(partial(onp.stack, axis=axis))
jnp_fun = partial(jnp.stack, axis=axis)
self._CheckAgainstNumpy(jnp_fun, onp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_{}".format(
op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)),
"shape": shape, "op": op, "dtypes": dtypes, "rng_factory": rng_factory}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
for rng_factory in [jtu.rand_default]))
def testHVDStack(self, shape, op, dtypes, rng_factory):
rng = rng_factory()
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = _promote_like_jnp(getattr(onp, op))
jnp_fun = getattr(jnp, op)
self._CheckAgainstNumpy(jnp_fun, onp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, fill_value_dtype),
onp.dtype(out_dtype).name if out_dtype else "None"),
"shape": shape, "fill_value_dtype": fill_value_dtype,
"out_dtype": out_dtype, "rng_factory": jtu.rand_default}
for shape in array_shapes + [3, onp.array(7, dtype=onp.int32)]
for fill_value_dtype in default_dtypes
for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, out_dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda fill_value: onp.full(shape, fill_value, dtype=out_dtype)
jnp_fun = lambda fill_value: jnp.full(shape, fill_value, dtype=out_dtype)
args_maker = lambda: [rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_op={}_shape={}_dtype={}").format(op, shape, dtype),
"onp_op": getattr(onp, op), "jnp_op": getattr(jnp, op),
"shape": shape, "dtype": dtype}
for op in ["zeros", "ones"]
for shape in [2, (), (2,), (3, 0), onp.array((4, 5, 6), dtype=onp.int32),
onp.array(4, dtype=onp.int32)]
for dtype in all_dtypes))
def testZerosOnes(self, onp_op, jnp_op, shape, dtype):
rng = jtu.rand_default()
def args_maker(): return []
onp_op = partial(onp_op, shape, dtype)
jnp_op = partial(jnp_op, shape, dtype)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
def testOnesWithInvalidShape(self):
with self.assertRaises(TypeError):
jnp.ones((-1, 1))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_filldtype={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, in_dtype),
onp.dtype(fill_value_dtype).name,
onp.dtype(out_dtype).name),
"shape": shape, "in_dtype": in_dtype,
"fill_value_dtype": fill_value_dtype, "out_dtype": out_dtype,
"rng_factory": jtu.rand_default}
for shape in array_shapes
for in_dtype in default_dtypes
for fill_value_dtype in default_dtypes
for out_dtype in default_dtypes))
def testFullLike(self, shape, in_dtype, fill_value_dtype, out_dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x, fill_value: onp.full_like(x, fill_value, dtype=out_dtype)
jnp_fun = lambda x, fill_value: jnp.full_like(x, fill_value, dtype=out_dtype)
args_maker = lambda: [rng(shape, in_dtype), rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng_factory": jtu.rand_default}
for shape, axis, num_sections in [
((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
for dtype in default_dtypes))
def testSplitStaticInt(self, shape, num_sections, axis, dtype, rng_factory):
rng = rng_factory()
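    # With an integer num_sections, split requires the axis length to divide
    # evenly; e.g. splitting a length-12 axis into 3 gives pieces of length 4.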
onp_fun = lambda x: onp.split(x, num_sections, axis=axis)
jnp_fun = lambda x: jnp.split(x, num_sections, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng_factory": jtu.rand_default}
for shape, axis, num_sections in [
((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
for dtype in default_dtypes))
def testHVDSplit(self, shape, num_sections, axis, dtype, rng_factory):
rng = rng_factory()
def fn(module, axis):
if axis == 0:
return module.vsplit
elif axis == 1:
return module.hsplit
else:
assert axis == 2
return module.dsplit
onp_fun = lambda x: fn(onp, axis)(x, num_sections)
jnp_fun = lambda x: fn(jnp, axis)(x, num_sections)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_order={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype),
order),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"order": order, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for order in ["C", "F"]
for arg_shape, out_shape in [
(jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
((), (1, 1, 1)),
((7, 0), (0, 42, 101)),
((3, 4), 12),
((3, 4), (12,)),
((3, 4), -1),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshape(self, arg_shape, out_shape, dtype, order, rng_factory):
rng = rng_factory()
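    # order='C' traverses elements row-major and 'F' column-major; a -1 entry
    # in out_shape infers that dimension from the total size.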
onp_fun = lambda x: onp.reshape(x, out_shape, order=order)
jnp_fun = lambda x: jnp.reshape(x, out_shape, order=order)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"rng_factory": jtu.rand_default}
for dtype in default_dtypes
for arg_shape, out_shape in [
((7, 0), (0, 42, 101)),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshapeMethod(self, arg_shape, out_shape, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.reshape(x, out_shape)
jnp_fun = lambda x: x.reshape(*out_shape)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_expanddim={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), dim),
"arg_shape": arg_shape, "dtype": dtype, "dim": dim,
"rng_factory": jtu.rand_default}
for arg_shape in [(), (3,), (3, 4)]
for dtype in default_dtypes
for dim in range(-len(arg_shape)+1, len(arg_shape))))
def testExpandDimsStaticDim(self, arg_shape, dtype, dim, rng_factory):
rng = rng_factory()
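    # expand_dims inserts a new length-1 axis, e.g. shape (3,) -> (1, 3) for dim=0.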
onp_fun = lambda x: onp.expand_dims(x, dim)
jnp_fun = lambda x: jnp.expand_dims(x, dim)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
"arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2,
"rng_factory": jtu.rand_default}
for arg_shape, ax1, ax2 in [
((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.swapaxes(x, ax1, ax2)
jnp_fun = lambda x: jnp.swapaxes(x, ax1, ax2)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax,
"rng_factory": jtu.rand_default}
for arg_shape, ax in [
((3, 1), None),
((3, 1), 1),
((1, 3, 1), (0, 2)),
((1, 4, 1), (0,))]
for dtype in default_dtypes))
def testSqueeze(self, arg_shape, dtype, ax, rng_factory):
rng = rng_factory()
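    # squeeze drops length-1 axes; when ax is given, only those axes are removed.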
onp_fun = lambda x: onp.squeeze(x, ax)
jnp_fun = lambda x: jnp.squeeze(x, ax)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax,
"rng_factory": jtu.rand_default}
for arg_shape, ax in [
((3,), 0),
((1, 3), 1),
((1, 3, 1), (0, 1))]
for dtype in default_dtypes))
def testSqueezeFailsOnNonsingletonAxis(self, arg_shape, dtype, ax,
rng_factory):
rng = rng_factory()
x = jnp.zeros(arg_shape, dtype=dtype)
fun = lambda: jnp.squeeze(x, ax)
self.assertRaisesRegex(ValueError, "cannot select an axis to squeeze", fun)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis,
(None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),
returned),
"rng_factory": jtu.rand_default, "shape": shape, "dtype": dtype, "axis": axis,
"weights_shape": weights_shape, "returned": returned}
for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)
for axis in list(range(-len(shape), len(shape))) + [None]
# `weights_shape` is either `None`, same as the averaged axis, or same as
# that of the input
for weights_shape in ([None, shape] if axis is None or len(shape) == 1
else [None, (shape[axis],), shape])
for returned in [False, True]))
def testAverage(self, shape, dtype, axis, weights_shape, returned, rng_factory):
rng = rng_factory()
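    # With weights w, average computes sum(w * x) / sum(w) along the axis;
    # returned=True additionally yields the sum of the weights.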
if weights_shape is None:
onp_fun = lambda x: onp.average(x, axis, returned=returned)
jnp_fun = lambda x: jnp.average(x, axis, returned=returned)
args_maker = lambda: [rng(shape, dtype)]
else:
onp_fun = lambda x, weights: onp.average(x, axis, weights, returned)
jnp_fun = lambda x, weights: jnp.average(x, axis, weights, returned)
args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]
onp_fun = _promote_like_jnp(onp_fun, inexact=True)
tol = {onp.float16: 1e-2, onp.float32: 1e-6, onp.float64: 1e-12,}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
try:
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
except ZeroDivisionError:
self.skipTest("don't support checking for ZeroDivisionError")
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_arg{}_ndmin={}".format(i, ndmin),
"arg": arg, "ndmin": ndmin, "dtype": dtype}
for i, (arg, dtype) in enumerate([
([True, False, True], jnp.bool_),
(3., jnp.float_),
([1, 2, 3], jnp.int_),
([1., 2., 3.], jnp.float_),
([[1, 2], [3, 4], [5, 6]], jnp.int_),
([[1, 2.], [3, 4], [5, 6]], jnp.float_),
([[1., 2j], [3., 4.], [5., 6.]], jnp.complex_),
([[3, onp.array(2, dtype=jnp.float_), 1],
onp.arange(3., dtype=jnp.float_)], jnp.float_),
])
for ndmin in [None, onp.ndim(arg), onp.ndim(arg) + 1, onp.ndim(arg) + 2]))
def testArray(self, arg, ndmin, dtype):
args_maker = lambda: [arg]
dtype = dtypes.canonicalize_dtype(dtype)
if ndmin is not None:
onp_fun = partial(onp.array, ndmin=ndmin, dtype=dtype)
jnp_fun = partial(jnp.array, ndmin=ndmin)
else:
onp_fun = partial(onp.array, dtype=dtype)
jnp_fun = jnp.array
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testIssue121(self):
assert not onp.isscalar(jnp.array(3))
def testArrayMethod(self):
class arraylike(object):
dtype = onp.float32
def __array__(self, dtype=None):
return 3.
a = arraylike()
ans = jnp.array(a)
assert ans == 3.
@jtu.skip_on_devices("tpu") # TODO(b/32368900): TPUs don't support uint8 yet.
def testMemoryView(self):
ans = jnp.array(bytearray(b'\x2a'))
self.assertAllClose(
ans,
onp.array([0x2a], dtype=onp.uint8),
check_dtypes=True)
def testIsClose(self):
c_isclose = api.jit(jnp.isclose)
c_isclose_nan = api.jit(partial(jnp.isclose, equal_nan=True))
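    # isclose checks |a - b| <= atol + rtol * |b| elementwise; NaNs compare
    # unequal unless equal_nan=True.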
n = 2
rng = onp.random.RandomState(0)
x = rng.randn(n, 1)
y = rng.randn(n, 1)
inf = onp.asarray(n * [onp.inf]).reshape([n, 1])
nan = onp.asarray(n * [onp.nan]).reshape([n, 1])
args = [x, y, inf, -inf, nan]
for arg0 in args:
for arg1 in args:
result_np = onp.isclose(arg0, arg1)
result_jax = jnp.isclose(arg0, arg1)
result_jit = c_isclose(arg0, arg1)
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
result_np = onp.isclose(arg0, arg1, equal_nan=True)
result_jax = jnp.isclose(arg0, arg1, equal_nan=True)
result_jit = c_isclose_nan(arg0, arg1)
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
def testAllClose(self):
rng = onp.random.RandomState(0)
x = rng.randn(2, 2)
y = rng.randn(2)
def same(list1, list2):
allclose = functools.partial(jnp.allclose, atol=1e-3, rtol=1e-3)
elements_close = list(map(allclose, list1, list2))
return jnp.all(jnp.array(elements_close))
csame = api.jit(same)
a1 = same((x, y), (x, y))
a2 = csame((x, y), (x, y))
a3 = csame((x, y), (x, 2 * y))
self.assertTrue(a1)
self.assertTrue(a2)
self.assertFalse(a3)
@jtu.skip_on_devices("tpu") # TODO(mattjj): investigate this failure
def testOnesBroadcastingConstantHandler(self):
# TODO(mattjj): update this test for jax3
self.skipTest("test needs jax3 update")
def fun(x):
ones = jnp.ones((3, 4))
assert isinstance(ones, onp.ndarray) and ones.strides == (0, 0)
# To check that the constant handler generates a Broadcast for stride-zero
# arrays, we monkey-patch the client instance.
# TODO(mattjj): once we have better HLO dumping and inspecting facilities,
# we can check the HLO more directly.
c = x._node.c
Broadcast = c.Broadcast # pylint: disable=invalid-name
was_called = []
c.Broadcast = lambda *args: was_called.append(True) or Broadcast(*args)
out = x + ones # the ndarray constant handler should call Broadcast here
assert was_called, "Broadcast was not called."
return out
fun = api.jit(fun)
out_val = fun(jnp.ones(4))
self.assertAllClose(out_val, onp.full((3, 4), 2.), check_dtypes=False)
def testZeroStridesConstantHandler(self):
raw_const = onp.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
const = onp.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
def fun(x):
return x * const
fun = api.jit(fun)
out_val = fun(3.)
self.assertAllClose(out_val, 3. * const, check_dtypes=False)
def testIsInstanceNdarrayDuringTracing(self):
arr = onp.ones(3)
@api.jit
def f(x):
self.assertIsInstance(x, jnp.ndarray)
return jnp.sum(x)
f(arr)
def testNonArrayErrorMessage(self):
x = [1., 2.]
y = onp.array([3., 4.])
def g(x, y):
return jnp.add(x, y)
def f(x, y):
return jnp.dot(x, y)
self.assertRaises(TypeError, lambda: g(x, y))
self.assertRaises(TypeError, lambda: f(x, y))
self.assertRaises(TypeError, lambda: api.jit(g)(x, y))
self.assertRaises(TypeError, lambda: api.jit(f)(x, y))
def testAbstractionErrorMessage(self):
@api.jit
def f(x, n):
for _ in range(n):
x = x * x
return x
self.assertRaises(TypeError, lambda: f(3., 3))
@api.jit
def g(x):
if x > 0.:
return x * 2
else:
return x + 2
self.assertRaises(TypeError, lambda: g(3.))
def testTracingPrimitiveWithNoTranslationErrorMessage(self):
# TODO(mattjj): update this for jax3
self.skipTest("test needs jax3 update")
foo = jnp._not_implemented(lambda x: x)
# No error if there's no tracing.
foo(onp.arange(3))
cfoo = api.jit(foo)
self.assertRaises(NotImplementedError, lambda: cfoo(onp.arange(3)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis}
for shape in [(3,), (2, 3)]
for dtype in default_dtypes
for axis in list(range(-len(shape), len(shape))) + [None] # Test negative axes
for rng_factory in [jtu.rand_default]))
def testFlip(self, shape, dtype, axis, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flip(x, axis)
onp_op = lambda x: onp.flip(x, axis)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype}
for shape in [(3,), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testFlipud(self, shape, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flipud(x)
onp_op = lambda x: onp.flipud(x)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype}
for shape in [(3, 2), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testFliplr(self, shape, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.fliplr(x)
onp_op = lambda x: onp.fliplr(x)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_k={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), k, axes),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "k": k, "axes": axes}
for shape, axes in [
[(2, 3), (0, 1)],
[(2, 3), (1, 0)],
[(4, 3, 2), (0, 2)],
[(4, 3, 2), (2, 1)],
]
for k in range(-3, 4)
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testRot90(self, shape, dtype, k, axes, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.rot90(x, k, axes)
onp_op = lambda x: onp.rot90(x, k, axes)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
self._CompileAndCheck(lambda x: x.ravel(), args_maker, check_dtypes=True)
def testAstype(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
op = lambda x: x.astype(jnp.int32)
self._CheckAgainstNumpy(op, op, args_maker, check_dtypes=True)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
# TODO(mattjj): test other ndarray-like method overrides
def testOnpMean(self):
# from https://github.com/google/jax/issues/125
x = lax.add(jnp.eye(3, dtype=jnp.float_), 0.)
ans = onp.mean(x)
self.assertAllClose(ans, onp.array(1./3), check_dtypes=False)
def testArangeOnFloats(self):
# from https://github.com/google/jax/issues/145
expected = onp.arange(0.0, 1.0, 0.1, dtype=jnp.float_)
ans = jnp.arange(0.0, 1.0, 0.1)
self.assertAllClose(expected, ans, check_dtypes=True)
def testSortManually(self):
# manual tests for sort are nice because we don't have to worry about ties.
# lax.sort is tested combinatorially.
ans = jnp.sort(onp.array([16, 15, 23, 42, 8, 4]))
expected = onp.array([4, 8, 15, 16, 23, 42])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = jnp.sort(a, axis=None)
expected = onp.array([1, 1, 3, 4])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = jnp.sort(a) # last axis
expected = onp.array([[1, 4], [1, 3]])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = jnp.sort(a, axis=0)
expected = onp.array([[1, 1], [3, 4]])
self.assertAllClose(expected, ans, check_dtypes=True)
def testArgsortManually(self):
x = onp.array([16, 15, 23, 42, 8, 4])
ans = jnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x, axis=0)
expected = onp.argsort(x, axis=0)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x, axis=1)
expected = onp.argsort(x, axis=1)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x, axis=None)
expected = onp.argsort(x, axis=None)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
def testMsortManually(self):
    args_maker = lambda: [onp.random.randint(50, size=(5, 5))]
jnp_op = lambda x: jnp.msort(x)
onp_op = lambda x: onp.msort(x)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_shifts={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype),
shifts, axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "shifts": shifts,
"axis": axis}
for dtype in all_dtypes
for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
for shifts, axis in [
(3, None),
(1, 1),
((3,), (0,)),
((-2,), (-2,)),
((1, 2), (0, -1)),
((4, 2, 5, 5, 2, 4), None),
(100, None),
]
for rng_factory in [jtu.rand_default]))
def testRoll(self, shape, dtype, shifts, axis, rng_factory):
rng = rng_factory()
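    # roll shifts elements circularly; entries shifted past the end wrap
    # around to the beginning.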
args_maker = lambda: [rng(shape, dtype), onp.array(shifts)]
jnp_op = partial(jnp.roll, axis=axis)
onp_op = partial(onp.roll, axis=axis)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_start={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis, start),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis,
"start": start}
for dtype in all_dtypes
for shape in [(1, 2, 3, 4)]
for axis in [-3, 0, 2, 3]
for start in [-4, -1, 2, 4]
for rng_factory in [jtu.rand_default]))
def testRollaxis(self, shape, dtype, start, axis, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.rollaxis, axis=axis, start=start)
onp_op = partial(onp.rollaxis, axis=axis, start=start)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_bitorder={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, bitorder),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis,
"bitorder": bitorder}
for dtype in [onp.uint8, onp.bool_]
for bitorder in ['big', 'little']
for shape in [(1, 2, 3, 4)]
for axis in [None, 0, 1, -2, -1]
for rng_factory in [jtu.rand_some_zero]))
def testPackbits(self, shape, dtype, axis, bitorder, rng_factory):
if numpy_version < (1, 17, 0):
raise SkipTest("bitorder arg added in numpy 1.17.0")
rng = rng_factory()
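    # packbits packs groups of 8 bool/uint8 values into single uint8 bytes;
    # bitorder picks whether the first element maps to the most or least
    # significant bit.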
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.packbits, axis=axis, bitorder=bitorder)
onp_op = partial(onp.packbits, axis=axis, bitorder=bitorder)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_bitorder={}_count={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, bitorder, count),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis,
"bitorder": bitorder, "count": count}
for dtype in [onp.uint8]
for bitorder in ['big', 'little']
for shape in [(1, 2, 3, 4)]
for axis in [None, 0, 1, -2, -1]
for count in [None, 20]
for rng_factory in [jtu.rand_int]))
def testUnpackbits(self, shape, dtype, axis, bitorder, count, rng_factory):
if numpy_version < (1, 17, 0):
raise SkipTest("bitorder arg added in numpy 1.17.0")
rng = rng_factory(0, 256)
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.unpackbits, axis=axis, bitorder=bitorder)
onp_op = partial(onp.unpackbits, axis=axis, bitorder=bitorder)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}_mode={}".format(
jtu.format_shape_dtype_string(shape, dtype),
jtu.format_shape_dtype_string(index_shape, index_dtype),
axis, mode),
"rng_factory": rng_factory, "rng_indices_factory": rng_indices_factory,
"shape": shape, "index_shape": index_shape, "dtype": dtype,
"index_dtype": index_dtype, "axis": axis, "mode": mode}
for shape in [(3,), (3, 4), (3, 4, 5)]
for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
for axis in itertools.chain(range(-len(shape), len(shape)),
[cast(Optional[int], None)])
for dtype in all_dtypes
for index_dtype in int_dtypes
for mode in ['wrap', 'clip']
for rng_factory in [jtu.rand_default]
for rng_indices_factory in [partial(jtu.rand_int, -5, 5)]))
def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode,
rng_factory, rng_indices_factory):
def args_maker():
x = rng(shape, dtype)
i = rng_indices(index_shape, index_dtype)
return x, i
rng = rng_factory()
rng_indices = rng_indices_factory()
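    # take's mode argument handles out-of-range indices: 'wrap' reduces them
    # modulo the axis length, 'clip' clamps them into range.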
jnp_op = lambda x, i: jnp.take(x, i, axis=axis, mode=mode)
onp_op = lambda x, i: onp.take(x, i, axis=axis, mode=mode)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_ishape={}_axis={}".format(
jtu.format_shape_dtype_string(x_shape, dtype), i_shape, axis),
"rng_factory": rng_factory, "x_shape": x_shape, "i_shape": i_shape, "dtype": dtype,
"axis": axis}
for x_shape, i_shape in filter(
_shapes_are_equal_length,
filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(nonempty_nonscalar_array_shapes, 2)))
for axis in itertools.chain(range(len(x_shape)), [-1],
[cast(Optional[int], None)])
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testTakeAlongAxis(self, x_shape, i_shape, dtype, axis, rng_factory):
rng = rng_factory()
i_shape = onp.array(i_shape)
if axis is None:
i_shape = [onp.prod(i_shape, dtype=onp.int64)]
else:
# Test the case where the size of the axis doesn't necessarily broadcast.
i_shape[axis] *= 3
i_shape = list(i_shape)
def args_maker():
x = rng(x_shape, dtype)
n = onp.prod(x_shape, dtype=onp.int32) if axis is None else x_shape[axis]
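      # Draw indices in [-(n - 1), n - 1] so negative indexing is exercised.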
i = rng(i_shape, onp.int32) % (2 * n - 1) - (n - 1)
return x, i
jnp_op = lambda x, i: jnp.take_along_axis(x, i, axis=axis)
if hasattr(onp, "take_along_axis"):
onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}_increasing={}".format(
jtu.format_shape_dtype_string([shape], dtype),
n, increasing),
"dtype": dtype, "shape": shape, "n": n, "increasing": increasing,
"rng_factory": jtu.rand_default}
for dtype in inexact_dtypes
for shape in [0, 5]
for n in [2, 4]
for increasing in [False, True]))
def testVander(self, shape, dtype, n, increasing, rng_factory):
rng = rng_factory()
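    # vander builds columns x**(N-1), ..., x, 1 (reversed when increasing=True).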
def onp_fun(arg):
arg = arg.astype(onp.float32) if dtype == jnp.bfloat16 else arg
return onp.vander(arg, N=n, increasing=increasing)
jnp_fun = lambda arg: jnp.vander(arg, N=n, increasing=increasing)
args_maker = lambda: [rng([shape], dtype)]
# np.vander seems to return float64 for all floating types. We could obey
# those semantics, but they seem like a bug.
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol={onp.float32: 1e-3})
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("nan_to_num", [shape],
[dtype]),
"rng_factory": jtu.rand_some_inf_and_nan, "shape": shape,
"dtype": dtype}
for shape in all_shapes
for dtype in inexact_dtypes))
def testNanToNum(self, rng_factory, shape, dtype):
rng = rng_factory()
dtype = onp.dtype(dtypes.canonicalize_dtype(dtype)).type
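    # nan_to_num maps NaN to 0 and +/-inf to the largest/smallest finite value
    # representable in the dtype.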
def onp_fun(x):
if dtype == jnp.bfloat16:
x = onp.where(onp.isnan(x), dtype(0), x)
x = onp.where(onp.isposinf(x), jnp.finfo(dtype).max, x)
x = onp.where(onp.isneginf(x), jnp.finfo(dtype).min, x)
return x
else:
return onp.nan_to_num(x).astype(dtype)
args_maker = lambda: [rng(shape, dtype)]
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(onp_fun, jnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
self._CompileAndCheck(jnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for shapes, dtypes in (
((), ()),
(((7,),), (onp.int32,)),
(((3,), (4,)), (onp.int32, onp.int32)),
(((3,), (1,), (4,)), (onp.int32, onp.int32, onp.int32)),
)))
def testIx_(self, rng_factory, shapes, dtypes):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)
for shape, dtype in zip(shapes, dtypes)]
self._CheckAgainstNumpy(onp.ix_, jnp.ix_, args_maker,
check_dtypes=True)
self._CompileAndCheck(jnp.ix_, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}_interpolation={}".format(
op,
jtu.format_shape_dtype_string(a_shape, a_dtype),
jtu.format_shape_dtype_string(q_shape, q_dtype),
axis, keepdims, interpolation),
"a_rng": jtu.rand_default(), "q_rng": q_rng, "op": op,
"a_shape": a_shape, "a_dtype": a_dtype,
"q_shape": q_shape, "q_dtype": q_dtype, "axis": axis,
"keepdims": keepdims,
"interpolation": interpolation}
for (op, q_rng) in (
("percentile", jtu.rand_uniform(low=0., high=100.)),
("quantile", jtu.rand_uniform(low=0., high=1.)),
)
for a_dtype in float_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for q_dtype in [onp.float32]
for q_shape in scalar_shapes + [(4,)]
for keepdims in [False, True]
for interpolation in ['linear', 'lower', 'higher', 'nearest', 'midpoint']))
def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
axis, keepdims, interpolation):
if op == "quantile" and numpy_version < (1, 15):
raise SkipTest("Numpy < 1.15 does not have np.quantile")
args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]
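    # interpolation selects how quantiles falling between data points are
    # computed (linear, lower, higher, nearest, or midpoint).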
def onp_fun(*args):
args = [x if jnp.result_type(x) != jnp.bfloat16 else
onp.asarray(x, onp.float32) for x in args]
return getattr(onp, op)(*args, axis=axis, keepdims=keepdims,
interpolation=interpolation)
jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims,
interpolation=interpolation)
    # TODO(phawkins): we currently set check_dtypes=False because we aren't as
# aggressive about promoting to float64. It's not clear we want to mimic
# Numpy here.
tol_spec = {onp.float32: 2e-4, onp.float64: 5e-6}
tol = max(jtu.tolerance(a_dtype, tol_spec),
jtu.tolerance(q_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_a_shape={}_axis={}_keepdims={}".format(
jtu.format_shape_dtype_string(a_shape, a_dtype),
axis, keepdims),
"a_rng": jtu.rand_default(),
"a_shape": a_shape, "a_dtype": a_dtype,
"axis": axis,
"keepdims": keepdims}
for a_dtype in float_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for keepdims in [False, True]))
def testMedian(self, a_rng, a_shape, a_dtype, axis, keepdims):
args_maker = lambda: [a_rng(a_shape, a_dtype)]
def onp_fun(*args):
args = [x if jnp.result_type(x) != jnp.bfloat16 else
onp.asarray(x, onp.float32) for x in args]
return onp.median(*args, axis=axis, keepdims=keepdims)
jnp_fun = partial(jnp.median, axis=axis, keepdims=keepdims)
    # TODO(phawkins): we currently set check_dtypes=False because we aren't as
# aggressive about promoting to float64. It's not clear we want to mimic
# Numpy here.
tol_spec = {onp.float32: 2e-4, onp.float64: 5e-6}
tol = jtu.tolerance(a_dtype, tol_spec)
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testWhereOneArgument(self, shape, dtype):
rng = jtu.rand_some_zero()
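    # With a single argument, where(x) behaves like nonzero(x) and returns the
    # indices of the nonzero elements.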
onp_fun = lambda x: onp.where(x)
onp_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(onp_fun)
jnp_fun = lambda x: jnp.where(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format("_".join(
jtu.format_shape_dtype_string(shape, dtype)
for shape, dtype in zip(shapes, dtypes))),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for shapes in filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(all_shapes, 3))
for dtypes in CombosWithReplacement(all_dtypes, 3)))
def testWhereThreeArgument(self, rng_factory, shapes, dtypes):
rng = rng_factory()
    args_maker = self._GetArgsMaker(rng, shapes, dtypes)
def onp_fun(cond, x, y):
return _promote_like_jnp(partial(onp.where, cond))(x, y)
self._CheckAgainstNumpy(onp_fun, jnp.where, args_maker,
check_dtypes=True)
self._CompileAndCheck(jnp.where, args_maker, check_dtypes=True)
def testWhereScalarPromotion(self):
x = jnp.where(jnp.array([True, False]), 3,
jnp.ones((2,), dtype=jnp.float32))
self.assertEqual(x.dtype, onp.dtype(onp.float32))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("", shapes,
(onp.bool_,) * n + dtypes),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for n in range(0, 3)
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(all_shapes, 2 * n + 1))
for dtypes in CombosWithReplacement(all_dtypes, n + 1)))
def testSelect(self, rng_factory, shapes, dtypes):
rng = rng_factory()
n = len(dtypes) - 1
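    # select returns, elementwise, the entry of choicelist whose condlist entry
    # is the first True one, falling back to `default` when none are True.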
def args_maker():
condlist = [rng(shape, onp.bool_) for shape in shapes[:n]]
choicelist = [rng(shape, dtype)
for shape, dtype in zip(shapes[n:-1], dtypes[:n])]
default = rng(shapes[-1], dtypes[-1])
return condlist, choicelist, default
# TODO(phawkins): float32/float64 type mismatches
def onp_fun(condlist, choicelist, default):
choicelist = [x if jnp.result_type(x) != jnp.bfloat16
else x.astype(onp.float32) for x in choicelist]
dtype = jnp.result_type(default, *choicelist)
return onp.select(condlist,
[onp.asarray(x, dtype=dtype) for x in choicelist],
onp.asarray(default, dtype=dtype))
self._CheckAgainstNumpy(onp_fun, jnp.select, args_maker,
check_dtypes=False)
self._CompileAndCheck(jnp.select, args_maker, check_dtypes=True,
rtol={onp.float64: 1e-7, onp.complex128: 1e-7})
def testIssue330(self):
x = jnp.full((1, 1), jnp.array([1])[0]) # doesn't crash
self.assertEqual(x[0, 0], 1)
def testScalarDtypePromotion(self):
orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype
jax_numpy_result = (1 + jnp.eye(1, dtype=jnp.float32)).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testSymmetrizeDtypePromotion(self):
x = onp.eye(3, dtype=onp.float32)
orig_numpy_result = ((x + x.T) / 2).dtype
x = jnp.eye(3, dtype=jnp.float32)
jax_numpy_result = ((x + x.T) / 2).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because
# introducing the convention 0 * inf = 0 leads to silently wrong results in
# some cases. See this comment for details:
# https://github.com/google/jax/issues/1052#issuecomment-514083352
# def testIssue347(self):
# # https://github.com/google/jax/issues/347
# def test_fail(x):
# x = jnp.sqrt(jnp.sum(x ** 2, axis=1))
# ones = jnp.ones_like(x)
# x = jnp.where(x > 0.5, x, ones)
# return jnp.sum(x)
# x = jnp.array([[1, 2], [3, 4], [0, 0]], dtype=jnp.float64)
# result = api.grad(test_fail)(x)
# assert not onp.any(onp.isnan(result))
def testIssue453(self):
# https://github.com/google/jax/issues/453
a = onp.arange(6) + 1
ans = jnp.reshape(a, (3, 2), order='F')
expected = onp.reshape(a, (3, 2), order='F')
self.assertAllClose(ans, expected, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__),
"pytype": pytype, "dtype": dtype, "op": op}
for pytype, dtype in [(int, jnp.int_), (float, jnp.float_),
(bool, jnp.bool_), (complex, jnp.complex_)]
for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
def testAtLeastNdLiterals(self, pytype, dtype, op):
# Fixes: https://github.com/google/jax/issues/634
onp_fun = lambda arg: getattr(onp, op)(arg).astype(dtype)
jnp_fun = lambda arg: getattr(jnp, op)(arg)
args_maker = lambda: [pytype(2)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(*jtu.cases_from_list(
{"testcase_name": "_case={}".format(i),
"input": input}
for i, input in enumerate([
3,
[3],
[onp.array(3)],
[onp.array([3])],
[[onp.array(3)]],
[[onp.array([3])]],
[3, 4, 5],
[
[onp.eye(2, dtype=onp.int32) * 2, onp.zeros((2, 3), dtype=onp.int32)],
[onp.ones((3, 2), dtype=onp.int32), onp.eye(3, dtype=onp.int32) * 3],
],
[onp.array([1, 2, 3]), onp.array([2, 3, 4]), 10],
[onp.ones((2, 2), dtype=onp.int32), onp.zeros((2, 2), dtype=onp.int32)],
[[onp.array([1, 2, 3])], [onp.array([2, 3, 4])]],
])))
def testBlock(self, input):
args_maker = lambda: [input]
self._CheckAgainstNumpy(onp.block, jnp.block, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp.block, args_maker, check_dtypes=True)
def testLongLong(self):
self.assertAllClose(onp.int64(7), api.jit(lambda x: x)(onp.longlong(7)),
check_dtypes=True)
def testArange(self):
# test cases inspired by dask tests at
# https://github.com/dask/dask/blob/master/dask/array/tests/test_creation.py#L92
self.assertAllClose(jnp.arange(77),
onp.arange(77, dtype=jnp.int_), check_dtypes=True)
self.assertAllClose(jnp.arange(2, 13),
onp.arange(2, 13, dtype=jnp.int_), check_dtypes=True)
self.assertAllClose(jnp.arange(4, 21, 9),
onp.arange(4, 21, 9, dtype=jnp.int_), check_dtypes=True)
self.assertAllClose(jnp.arange(53, 5, -3),
onp.arange(53, 5, -3, dtype=jnp.int_),
check_dtypes=True)
self.assertAllClose(jnp.arange(77, dtype=float),
onp.arange(77, dtype=float), check_dtypes=True)
self.assertAllClose(jnp.arange(2, 13, dtype=int),
onp.arange(2, 13, dtype=int), check_dtypes=True)
self.assertAllClose(jnp.arange(0, 1, -0.5),
onp.arange(0, 1, -0.5, dtype=jnp.float_),
check_dtypes=True)
self.assertRaises(TypeError, lambda: jnp.arange())
# test that jnp.arange(N) doesn't instantiate an ndarray
self.assertNotEqual(type(jnp.arange(77)), type(onp.arange(77)))
self.assertEqual(type(jnp.arange(77)), type(lax.iota(onp.int32, 77)))
# test that jnp.arange(N, dtype=int32) doesn't instantiate an ndarray
self.assertNotEqual(type(jnp.arange(77, dtype=jnp.int32)),
type(onp.arange(77, dtype=onp.int32)))
self.assertEqual(type(jnp.arange(77, dtype=jnp.int32)),
type(lax.iota(onp.int32, 77)))
# test laziness for int dtypes
self.assertTrue(xla.is_device_constant(jnp.arange(77)))
self.assertTrue(xla.is_device_constant(jnp.arange(77, dtype=jnp.int32)))
def testIssue830(self):
a = jnp.arange(4, dtype=jnp.complex64)
self.assertEqual(a.dtype, jnp.complex64)
def testIssue728(self):
assert jnp.allclose(jnp.eye(5000), onp.eye(5000))
self.assertEqual(0, onp.sum(jnp.eye(1050) - onp.eye(1050)))
def testIssue746(self):
jnp.arange(12).reshape(3, 4) # doesn't crash
def testIssue764(self):
x = jnp.linspace(190, 200, 4)
f = api.grad(lambda x: jnp.sum(jnp.tanh(x)))
# Expected values computed with autograd in float64 precision.
expected = onp.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
7.66067839e-174], onp.float64)
self.assertAllClose(f(x), expected, check_dtypes=False)
def testIssue776(self):
"""Tests that the scatter-add transpose rule instantiates symbolic zeros."""
def f(u):
y = jax.ops.index_add(onp.ones(10,), [2, 4, 5], u)
# The transpose rule for lax.tie_in returns a symbolic zero for its first
# argument.
return lax.tie_in(y, 7.)
self.assertAllClose(onp.zeros(3,), api.grad(f)(onp.ones(3,)),
check_dtypes=True)
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because this
# is a numerical stability issue that should be solved with a custom jvp rule
# of the sigmoid function being differentiated here, not by safe_mul.
# def testIssue777(self):
# x = jnp.linspace(-200, 0, 4, dtype=onp.float32)
# f = api.grad(lambda x: jnp.sum(1 / (1 + jnp.exp(-x))))
# self.assertAllClose(f(x), onp.array([0., 0., 0., 0.25], dtype=onp.float32),
# check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
"dtype": dtype, "op": op}
for dtype in float_dtypes
for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
"sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
"log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
onp_op = getattr(onp, op)
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="invalid value.*")(onp_op)
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="divide by zero.*")(onp_op)
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(onp_op)
jnp_op = getattr(jnp, op)
dtype = onp.dtype(dtypes.canonicalize_dtype(dtype)).type
for x in (onp.nan, -onp.inf, -100., -2., -1., 0., 1., 2., 100., onp.inf,
jnp.finfo(dtype).max, onp.sqrt(jnp.finfo(dtype).max),
onp.sqrt(jnp.finfo(dtype).max) * 2.):
if onp.isnan(x) and op in ("sinh", "cosh", "expm1", "exp"):
# TODO(b/133842876, b/133842870): these return wrong outputs on CPU for
# NaN inputs.
continue
if (op in ("sin", "cos", "tan", "arctan") and
jtu.device_under_test() == "tpu"):
continue # TODO(b/132196789, b/134175194): fix and reenable.
x = dtype(x)
expected = onp_op(x)
actual = jnp_op(x)
tol = jtu.tolerance(dtype, {onp.float32: 1e-3, onp.float64: 1e-7})
self.assertAllClose(expected, actual, check_dtypes=True, atol=tol,
rtol=tol)
def testIssue883(self):
# from https://github.com/google/jax/issues/883
@partial(api.jit, static_argnums=(1,))
def f(x, v):
return x
x = jnp.ones((10, 10))
v = jnp.array([1, 2, 3])
first_call = f(x, v)
second_call = f(x, v) # doesn't crash
def testReductionOfOutOfBoundsAxis(self): # Issue 888
x = jnp.ones((3, 4))
self.assertRaises(ValueError, lambda: jnp.sum(x, axis=2))
def testIssue956(self):
self.assertRaises(TypeError, lambda: jnp.ndarray((1, 1)))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name":
"_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
.format(shape, dtype, out_dtype, axis, ddof, keepdims),
"shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
"ddof": ddof, "keepdims": keepdims, "rng_factory": rng_factory}
for shape in [(5,), (10, 5)]
for dtype in all_dtypes
for out_dtype in inexact_dtypes
for axis in [None, 0, -1]
for ddof in [0, 1, 2]
for keepdims in [False, True]
for rng_factory in [jtu.rand_default]))
def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims, rng_factory):
rng = rng_factory()
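    # ddof shifts the variance divisor from N to N - ddof (ddof=1 gives the
    # unbiased sample variance).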
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
def onp_fun(x):
out = onp.var(x.astype(jnp.promote_types(onp.float32, dtype)),
axis=axis, ddof=ddof, keepdims=keepdims)
return out.astype(out_dtype)
jnp_fun = partial(jnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
tol = jtu.tolerance(out_dtype, {onp.float16: 1e-1, onp.float32: 1e-3,
onp.float64: 1e-3, onp.complex128: 1e-6})
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol,
atol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}_ddof={}_bias={}".format(
shape, dtype, rowvar, ddof, bias),
"shape": shape, "dtype": dtype, "rowvar": rowvar, "ddof": ddof,
"bias": bias, "rng_factory": rng_factory}
for shape in [(5,), (10, 5), (5, 10)]
for dtype in all_dtypes
for rowvar in [True, False]
for bias in [True, False]
for ddof in [None, 2, 3]
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("gpu") # TODO(b/138003641): test fails on GPU.
def testCov(self, shape, dtype, rowvar, ddof, bias, rng_factory):
rng = rng_factory()
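    # rowvar=True treats each row as a variable and each column as an
    # observation; bias/ddof control the normalization divisor.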
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
onp_fun = partial(onp.cov, rowvar=rowvar, ddof=ddof, bias=bias)
jnp_fun = partial(jnp.cov, rowvar=rowvar, ddof=ddof, bias=bias)
tol = {onp.float32: 1e-5, onp.float64: 1e-13, onp.complex128: 1e-13}
tol = 7e-2 if jtu.device_under_test() == "tpu" else tol
tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))
self._CheckAgainstNumpy(
onp_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
def testIssue967(self):
self.assertRaises(TypeError, lambda: jnp.zeros(1.5))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}".format(
shape, dtype, rowvar),
"shape": shape, "dtype": dtype, "rowvar": rowvar,
"rng_factory": rng_factory}
for shape in [(5,), (10, 5), (3, 10)]
for dtype in number_dtypes
for rowvar in [True, False]
for rng_factory in [jtu.rand_default]))
def testCorrCoef(self, shape, dtype, rowvar, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
mat = onp.asarray([rng(shape, dtype)])
onp_fun = partial(onp.corrcoef, rowvar=rowvar)
jnp_fun = partial(jnp.corrcoef, rowvar=rowvar)
if not onp.any(onp.isclose(onp.std(mat), 0.0)):
self._CheckAgainstNumpy(
onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=1e-2 if jtu.device_under_test() == "tpu" else None)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shapes={}_dtype={}_indexing={}_sparse={}".format(
shapes, dtype, indexing, sparse),
"shapes": shapes, "dtype": dtype, "indexing": indexing,
"sparse": sparse, "rng_factory": rng_factory}
for shapes in [(), (5,), (5, 3)]
for dtype in number_dtypes
for indexing in ['xy', 'ij']
for sparse in [True, False]
for rng_factory in [jtu.rand_default]))
def testMeshGrid(self, shapes, dtype, indexing, sparse, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],
[dtype] * len(shapes))
onp_fun = partial(onp.meshgrid, indexing=indexing, sparse=sparse)
jnp_fun = partial(jnp.meshgrid, indexing=indexing, sparse=sparse)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_retstep={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, retstep, dtype),
"start_shape": start_shape, "stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "retstep": retstep,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for retstep in [True, False]
for dtype in number_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testLinspace(self, start_shape, stop_shape, num, endpoint,
retstep, dtype, rng_factory):
if num == 1 and not endpoint and numpy_version < (1, 17, 5):
raise SkipTest("Numpy < 1.17.5 has a linspace bug.")
rng = rng_factory()
# relax default tolerances slightly
tol = jtu.tolerance(dtype if dtype else onp.float32) * 10
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
jnp_op = lambda start, stop: jnp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
onp_op = lambda start, stop: onp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
      # Floating-point differences between jitted and op-by-op execution, plus
      # rounding, cause unavoidable variation in integer truncation for some inputs.
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_base={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, base,
dtype.__name__ if dtype else "None"),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "base": base,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for base in [10.0, 2, onp.e]
for dtype in inexact_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testLogspace(self, start_shape, stop_shape, num,
endpoint, base, dtype, rng_factory):
if (dtype in int_dtypes and
jtu.device_under_test() in ("gpu", "tpu") and
not FLAGS.jax_enable_x64):
raise unittest.SkipTest("GPUx32 truncated exponentiation"
" doesn't exactly match other platforms.")
rng = rng_factory()
# relax default tolerances slightly
tol = {onp.float16: 2e-2, onp.float32: 1e-2, onp.float64: 1e-6,
onp.complex64: 1e-3, onp.complex128: 1e-6}
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
jnp_op = lambda start, stop: jnp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
onp_op = lambda start, stop: onp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
# Why do compiled and op-by-op float16 np.power numbers differ
# slightly more than expected?
atol = {onp.float16: 1e-2}
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=atol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_dtype={}").format(
start_shape, stop_shape, num, endpoint, dtype),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
# NB: numpy's geomspace gives nonsense results on integer types
for dtype in inexact_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testGeomspace(self, start_shape, stop_shape, num,
endpoint, dtype, rng_factory):
rng = rng_factory()
# relax default tolerances slightly
tol = {onp.float16: 4e-3, onp.float32: 2e-3, onp.complex128: 1e-14}
def args_maker():
"""Test the set of inputs onp.geomspace is well-defined on."""
start, stop = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])()
      # onp.geomspace can't handle differently ranked tensors
      # with negative numbers!
start, stop = jnp.broadcast_arrays(start, stop)
if dtype in complex_dtypes:
return start, stop
# to avoid NaNs, non-complex start and stop cannot
# differ in sign, elementwise
start = start * jnp.sign(start) * jnp.sign(stop)
return start, stop
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
def jnp_op(start, stop):
return jnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,
axis=axis)
def onp_op(start, stop):
start = start.astype(onp.float32) if dtype == jnp.bfloat16 else start
stop = stop.astype(onp.float32) if dtype == jnp.bfloat16 else stop
return onp.geomspace(
start, stop, num, endpoint=endpoint,
dtype=dtype if dtype != jnp.bfloat16 else onp.float32,
axis=axis).astype(dtype)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
def testDisableNumpyRankPromotionBroadcasting(self):
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "allow"
jnp.ones(2) + jnp.ones((1, 2)) # works just fine
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "raise"
self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "warn"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
jnp.ones(2) + jnp.ones((1, 2))
assert len(w) > 0
msg = str(w[-1].message)
expected_msg = ("Following NumPy automatic rank promotion for add on "
"shapes (2,) (1, 2).")
self.assertEqual(msg[:len(expected_msg)], expected_msg)
prev_len = len(w)
jnp.ones(2) + 3
self.assertEqual(len(w), prev_len) # don't want to warn for scalars
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
def testStackArrayArgument(self):
# tests https://github.com/google/jax/issues/1271
@api.jit
def foo(x):
return jnp.stack(x)
foo(onp.zeros(2)) # doesn't crash
@api.jit
def foo(x):
return jnp.concatenate(x)
foo(onp.zeros((2, 2))) # doesn't crash
def testReluGradientConstants(self):
# This is a regression test that verifies that constants associated with the
# gradient of np.maximum (from lax._balanced_eq) aren't hoisted into the
# outermost jaxpr. This was producing some large materialized constants for
# every relu activation in a model.
def body(i, xy):
x, y = xy
y = y + jax.grad(lambda z: jnp.sum(jnp.maximum(z, 0.)))(x)
return x, y
f = lambda y: lax.fori_loop(0, 5, body, (y, y))
wrapped = linear_util.wrap_init(f)
pv = partial_eval.PartialVal.unknown(jax.ShapedArray((3, 4), onp.float32))
_, _, consts = partial_eval.trace_to_jaxpr(wrapped, [pv])
self.assertFalse(
any(onp.array_equal(x, onp.full((3, 4), 2., dtype=onp.float32))
for x in consts))
@parameterized.named_parameters(
{"testcase_name": "_from={}_to={}".format(from_shape, to_shape),
"rng_factory": rng_factory, "from_shape": from_shape, "to_shape": to_shape}
for from_shape, to_shape in [
[(1, 3), (4, 3)],
[(3,), (2, 1, 3)],
[(3,), (3, 3)],
[(1,), (3,)],
]
for rng_factory in [jtu.rand_default])
def testBroadcastTo(self, from_shape, to_shape, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [from_shape], [onp.float32])
onp_op = lambda x: onp.broadcast_to(x, to_shape)
jnp_op = lambda x: jnp.broadcast_to(x, to_shape)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
def testBroadcastToIssue1522(self):
self.assertRaisesRegex(
ValueError, "Incompatible shapes for broadcasting: .*",
lambda: jnp.broadcast_to(onp.ones((2, 3)), (1, 3)))
def testBroadcastToIntIssue1548(self):
self.assertAllClose(jnp.broadcast_to(1, (3, 2)), onp.ones((3, 2)),
check_dtypes=False)
def testBroadcastToOnScalar(self):
self.assertIsInstance(jnp.broadcast_to(10.0, ()), jnp.ndarray)
self.assertIsInstance(onp.broadcast_to(10.0, ()), onp.ndarray)
def testPrecision(self):
ones_1d = onp.ones((2,))
ones_2d = onp.ones((2, 2))
ones_3d = onp.ones((2, 2, 2))
HIGHEST = lax.Precision.HIGHEST
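    # Precision.HIGHEST requests the most accurate dot/matmul implementation
    # (e.g. full float32 accumulation on TPU instead of the faster default).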
jtu.assert_dot_precision(None, jnp.dot, ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.dot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.dot, precision=HIGHEST),
ones_3d, ones_3d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.matmul, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.vdot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=2, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=(0, 0), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=((0,), (0,)), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.einsum, 'i,i', precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.einsum, 'ij,ij', precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.inner, precision=HIGHEST),
ones_1d, ones_1d)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_shape={}_axis={}_dtype={}").format(shape, axis, dtype),
"shape": shape,
"axis": axis,
"dtype": dtype, "rng_factory": rng_factory}
for shape in [(10,), (10, 15), (10, 15, 20)]
for _num_axes in range(len(shape))
for axis in itertools.combinations(range(len(shape)), _num_axes)
for dtype in inexact_dtypes
for rng_factory in [jtu.rand_default]))
def testGradient(self, shape, axis, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_fun = lambda y: jnp.gradient(y, axis=axis)
onp_fun = lambda y: onp.gradient(y, axis=axis)
self._CheckAgainstNumpy(
onp_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testZerosShapeErrors(self):
# see https://github.com/google/jax/issues/1822
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*",
lambda: jnp.zeros(1.))
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*\n"
"If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.",
lambda: api.jit(jnp.zeros)(2))
def testTraceMethod(self):
x = onp.random.randn(3, 4).astype(jnp.float_)
self.assertAllClose(x.trace(), jnp.array(x).trace(), check_dtypes=True)
self.assertAllClose(x.trace(), api.jit(lambda y: y.trace())(x),
check_dtypes=True)
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.
GradTestSpec = collections.namedtuple(
"GradTestSpec",
["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
return GradTestSpec(
op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)
GRAD_TEST_RECORDS = [
grad_test_spec(jnp.arcsinh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(jnp.arccosh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(jnp.arctanh, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(jnp.logaddexp, nargs=2, order=1,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64], tol=1e-4),
grad_test_spec(jnp.logaddexp2, nargs=2, order=2,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64], tol=1e-4),
]
GradSpecialValuesTestSpec = collections.namedtuple(
"GradSpecialValuesTestSpec", ["op", "values", "order"])
GRAD_SPECIAL_VALUE_TEST_RECORDS = [
GradSpecialValuesTestSpec(jnp.arcsinh, [0., 1000.], 2),
GradSpecialValuesTestSpec(jnp.arccosh, [1000.], 2),
GradSpecialValuesTestSpec(jnp.arctanh, [0.], 2),
GradSpecialValuesTestSpec(jnp.sinc, [0.], 1),
]
def num_float_bits(dtype):
return jnp.finfo(dtypes.canonicalize_dtype(dtype)).bits
class NumpyGradTests(jtu.JaxTestCase):
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.name, shapes, itertools.repeat(dtype)),
"op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
"order": rec.order, "tol": rec.tol}
for shapes in CombosWithReplacement(nonempty_shapes, rec.nargs)
for dtype in rec.dtypes)
for rec in GRAD_TEST_RECORDS))
def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
rng = rng_factory()
tol = {onp.float32: 1e-1, onp.complex64: 1e-1}
args = tuple(rng(shape, dtype) for shape in shapes)
check_grads(op, args, order, ["fwd", "rev"], tol, tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
"op": rec.op, "special_value": special_value, "order": rec.order}
for special_value in rec.values)
for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
def testOpGradSpecialValue(self, op, special_value, order):
check_grads(op, (special_value,), order, ["fwd", "rev"],
atol={onp.float32: 3e-3})
def testTakeAlongAxisIssue1521(self):
# https://github.com/google/jax/issues/1521
idx = jnp.repeat(jnp.arange(3), 10).reshape((30, 1))
def f(x):
y = x * jnp.arange(3.).reshape((1, 3))
return jnp.take_along_axis(y, idx, -1).sum()
check_grads(f, (1.,), order=1)
if __name__ == "__main__":
absltest.main()
| 45.622476 | 131 | 0.64307 | ["ECL-2.0", "Apache-2.0"] | vballoli/jax | tests/lax_numpy_test.py | 140,061 | Python |
# -*- coding: utf-8 -*-
from .. import OratorTestCase
from lorator.support.collection import Collection
class CollectionTestCase(OratorTestCase):
def test_first_returns_first_item_in_collection(self):
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.first())
def test_last_returns_last_item_in_collection(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c.last())
def test_pop_removes_and_returns_last_item_or_specified_index(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c.pop())
self.assertEqual("foo", c.last())
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.pop(0))
self.assertEqual("bar", c.first())
def test_shift_removes_and_returns_first_item(self):
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.shift())
self.assertEqual("bar", c.first())
def test_empty_collection_is_empty(self):
c = Collection()
c2 = Collection([])
self.assertTrue(c.is_empty())
self.assertTrue(c2.is_empty())
def test_collection_is_constructed(self):
c = Collection("foo")
self.assertEqual(["foo"], c.all())
c = Collection(2)
self.assertEqual([2], c.all())
c = Collection(False)
self.assertEqual([False], c.all())
c = Collection(None)
self.assertEqual([], c.all())
c = Collection()
self.assertEqual([], c.all())
def test_offset_access(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c[1])
c[1] = "baz"
self.assertEqual("baz", c[1])
del c[0]
self.assertEqual("baz", c[0])
def test_forget(self):
c = Collection(["foo", "bar", "boom"])
c.forget(0)
self.assertEqual("bar", c[0])
c.forget(0, 1)
self.assertTrue(c.is_empty())
def test_get_avg_items_from_collection(self):
c = Collection([{"foo": 10}, {"foo": 20}])
self.assertEqual(15, c.avg("foo"))
c = Collection([1, 2, 3, 4, 5])
self.assertEqual(3, c.avg())
c = Collection()
self.assertIsNone(c.avg())
def test_collapse(self):
obj1 = object()
obj2 = object()
c = Collection([[obj1], [obj2]])
self.assertEqual([obj1, obj2], c.collapse().all())
def test_collapse_with_nested_collection(self):
c = Collection([Collection([1, 2, 3]), Collection([4, 5, 6])])
self.assertEqual([1, 2, 3, 4, 5, 6], c.collapse().all())
def test_contains(self):
c = Collection([1, 3, 5])
self.assertTrue(c.contains(1))
self.assertFalse(c.contains(2))
self.assertTrue(c.contains(lambda x: x < 5))
self.assertFalse(c.contains(lambda x: x > 5))
self.assertIn(3, c)
c = Collection([{"v": 1}, {"v": 3}, {"v": 5}])
self.assertTrue(c.contains("v", 1))
self.assertFalse(c.contains("v", 2))
obj1 = type("lamdbaobject", (object,), {})()
obj1.v = 1
obj2 = type("lamdbaobject", (object,), {})()
obj2.v = 3
obj3 = type("lamdbaobject", (object,), {})()
obj3.v = 5
c = Collection([{"v": 1}, {"v": 3}, {"v": 5}])
self.assertTrue(c.contains("v", 1))
self.assertFalse(c.contains("v", 2))
def test_countable(self):
c = Collection(["foo", "bar"])
self.assertEqual(2, c.count())
self.assertEqual(2, len(c))
def test_diff(self):
c = Collection(["foo", "bar"])
self.assertEqual(["foo"], c.diff(Collection(["bar", "baz"])).all())
def test_each(self):
original = ["foo", "bar", "baz"]
c = Collection(original)
result = []
c.each(lambda x: result.append(x))
self.assertEqual(result, original)
self.assertEqual(original, c.all())
def test_every(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 3, 5], c.every(2).all())
self.assertEqual([2, 4, 6], c.every(2, 1).all())
def test_filter(self):
c = Collection([{"id": 1, "name": "hello"}, {"id": 2, "name": "world"}])
self.assertEqual(
[{"id": 2, "name": "world"}], c.filter(lambda item: item["id"] == 2).all()
)
c = Collection(["", "hello", "", "world"])
self.assertEqual(["hello", "world"], c.filter().all())
def test_where(self):
c = Collection([{"v": 1}, {"v": 3}, {"v": 2}, {"v": 3}, {"v": 4}])
self.assertEqual([{"v": 3}, {"v": 3}], c.where("v", 3).all())
def test_implode(self):
obj1 = type("lamdbaobject", (object,), {})()
obj1.name = "john"
obj1.email = "foo"
c = Collection(
[{"name": "john", "email": "foo"}, {"name": "jane", "email": "bar"}]
)
self.assertEqual("foobar", c.implode("email"))
self.assertEqual("foo,bar", c.implode("email", ","))
c = Collection(["foo", "bar"])
self.assertEqual("foobar", c.implode(""))
self.assertEqual("foo,bar", c.implode(","))
def test_lists(self):
obj1 = type("lamdbaobject", (object,), {})()
obj1.name = "john"
obj1.email = "foo"
c = Collection([obj1, {"name": "jane", "email": "bar"}])
self.assertEqual({"john": "foo", "jane": "bar"}, c.lists("email", "name"))
self.assertEqual(["foo", "bar"], c.pluck("email").all())
def test_map(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([3, 4, 5, 6, 7], c.map(lambda x: x + 2).all())
def test_merge(self):
c = Collection([1, 2, 3])
c.merge([4, 5, 6])
self.assertEqual([1, 2, 3, 4, 5, 6], c.all())
c = Collection(Collection([1, 2, 3]))
c.merge([4, 5, 6])
self.assertEqual([1, 2, 3, 4, 5, 6], c.all())
def test_for_page(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([4, 5, 6], c.for_page(2, 3).all())
self.assertEqual([5, 6], c.for_page(2, 4).all())
def test_prepend(self):
c = Collection([4, 5, 6])
c.prepend(3)
self.assertEqual([3, 4, 5, 6], c.all())
def test_append(self):
c = Collection([3, 4, 5])
c.append(6)
self.assertEqual([3, 4, 5, 6], c.all())
def test_pull(self):
c = Collection([1, 2, 3, 4])
c.pull(2)
self.assertEqual([1, 2, 4], c.all())
def test_put(self):
c = Collection([1, 2, 4])
c.put(2, 3)
self.assertEqual([1, 2, 3], c.all())
def test_reject(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 2, 3], c.reject(lambda x: x > 3).all())
def test_reverse(self):
c = Collection([1, 2, 3, 4])
self.assertEqual([4, 3, 2, 1], c.reverse().all())
def test_sort(self):
c = Collection([5, 3, 1, 2, 4])
        sorted_c = c.sort(lambda x: x)
        self.assertEqual([1, 2, 3, 4, 5], sorted_c.all())
def test_take(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 2, 3], c.take(3).all())
self.assertEqual([4, 5, 6], c.take(-3).all())
def test_transform(self):
c = Collection([1, 2, 3, 4])
c.transform(lambda x: x + 2)
self.assertEqual([3, 4, 5, 6], c.all())
def test_zip(self):
c = Collection([1, 2, 3])
self.assertEqual([(1, 4), (2, 5), (3, 6)], c.zip([4, 5, 6]).all())
def test_only(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([2, 4], c.only(1, 3).all())
def test_without(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([1, 3, 5], c.without(1, 3).all())
self.assertEqual([1, 2, 3, 4, 5], c.all())
def test_flatten(self):
c = Collection({"foo": [5, 6], "bar": 7, "baz": {"boom": [1, 2, 3, 4]}})
self.assertEqual([1, 2, 3, 4, 5, 6, 7], c.flatten().sort().all())
c = Collection([1, [2, 3], 4])
self.assertEqual([1, 2, 3, 4], c.flatten().all())
| 31.030888 | 86 | 0.520343 | ["MIT"] | fenestron/lorator | tests/support/test_collection.py | 8,037 | Python |
#!/usr/bin/env python
#
# Copyright (c) 2018 SAP SE
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# this script checks for volume attachments of already deleted volumes in the cinder db
import argparse
import configparser
import datetime
import logging
import os
import sys
from openstack import connection, exceptions
from sqlalchemy import and_, MetaData, select, Table, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
# get all instances from nova
def get_nova_instances(conn):
nova_instances = dict()
# get all instance from nova
try:
for nova_instance in conn.compute.servers(details=False, all_projects=1):
nova_instances[nova_instance.id] = nova_instance
if not nova_instances:
raise RuntimeError('- PLEASE CHECK MANUALLY - did not get any nova instances back from the nova api - this should in theory never happen ...')
except exceptions.HttpException as e:
log.warn("- PLEASE CHECK MANUALLY - got an http exception connecting to openstack: %s", str(e))
sys.exit(1)
except exceptions.SDKException as e:
log.warn("- PLEASE CHECK MANUALLY - got an sdk exception connecting to openstack: %s", str(e))
sys.exit(1)
#for i in nova_instances:
# print nova_instances[i].id
if not nova_instances:
raise RuntimeError('Did not get any nova instances back.')
return nova_instances
# get all volume attachments for volumes
def get_orphan_volume_attachments(meta):
orphan_volume_attachments = {}
orphan_volume_attachment_t = Table('volume_attachment', meta, autoload=True)
columns = [orphan_volume_attachment_t.c.id, orphan_volume_attachment_t.c.instance_uuid]
orphan_volume_attachment_q = select(columns=columns, whereclause=and_(orphan_volume_attachment_t.c.deleted == 0))
# return a dict indexed by orphan_volume_attachment_id and with the value nova_instance_uuid for non deleted orphan_volume_attachments
for (orphan_volume_attachment_id, nova_instance_uuid) in orphan_volume_attachment_q.execute():
orphan_volume_attachments[orphan_volume_attachment_id] = nova_instance_uuid
return orphan_volume_attachments
# get all the volume attachments in the cinder db for already deleted instances in nova
def get_wrong_orphan_volume_attachments(nova_instances, orphan_volume_attachments):
wrong_orphan_volume_attachments = {}
for orphan_volume_attachment_id in orphan_volume_attachments:
if nova_instances.get(orphan_volume_attachments[orphan_volume_attachment_id]) is None:
wrong_orphan_volume_attachments[orphan_volume_attachment_id] = orphan_volume_attachments[orphan_volume_attachment_id]
return wrong_orphan_volume_attachments
# delete volume attachments in the cinder db for already deleted instances in nova
def fix_wrong_orphan_volume_attachments(meta, wrong_orphan_volume_attachments, fix_limit):
if len(wrong_orphan_volume_attachments) <= int(fix_limit):
orphan_volume_attachment_t = Table('volume_attachment', meta, autoload=True)
for orphan_volume_attachment_id in wrong_orphan_volume_attachments:
log.info ("-- action: deleting orphan volume attachment id: %s", orphan_volume_attachment_id)
now = datetime.datetime.utcnow()
delete_orphan_volume_attachment_q = orphan_volume_attachment_t.update().\
where(orphan_volume_attachment_t.c.id == orphan_volume_attachment_id).values(updated_at=now, deleted_at=now, deleted=1)
delete_orphan_volume_attachment_q.execute()
else:
log.warn("- PLEASE CHECK MANUALLY - too many (more than %s) wrong orphan volume attachments - denying to fix them automatically", str(fix_limit))
# get all the volumes in state "error_deleting"
def get_error_deleting_volumes(meta):
error_deleting_volumes = []
volumes_t = Table('volumes', meta, autoload=True)
error_deleting_volumes_q = select(columns=[volumes_t.c.id]).where(and_(volumes_t.c.status == "error_deleting",volumes_t.c.deleted == 0))
# convert the query result into a list
for i in error_deleting_volumes_q.execute():
error_deleting_volumes.append(i[0])
return error_deleting_volumes
# delete all the volumes in state "error_deleting"
def fix_error_deleting_volumes(meta, error_deleting_volumes):
volumes_t = Table('volumes', meta, autoload=True)
volume_attachment_t = Table('volume_attachment', meta, autoload=True)
volume_metadata_t = Table('volume_metadata', meta, autoload=True)
volume_admin_metadata_t = Table('volume_admin_metadata', meta, autoload=True)
for error_deleting_volumes_id in error_deleting_volumes:
now = datetime.datetime.utcnow()
log.info("-- action: deleting possible volume admin metadata for volume id: %s", error_deleting_volumes_id)
delete_volume_admin_metadata_q = volume_admin_metadata_t.update().\
where(volume_admin_metadata_t.c.volume_id == error_deleting_volumes_id).values(updated_at=now, deleted_at=now, deleted=1)
delete_volume_admin_metadata_q.execute()
log.info("-- action: deleting possible volume metadata for volume id: %s", error_deleting_volumes_id)
delete_volume_metadata_q = volume_metadata_t.update().\
where(volume_metadata_t.c.volume_id == error_deleting_volumes_id).values(updated_at=now, deleted_at=now, deleted=1)
delete_volume_metadata_q.execute()
log.info("-- action: deleting possible volume attachments for volume id: %s", error_deleting_volumes_id)
delete_volume_attachment_q = volume_attachment_t.update().\
where(volume_attachment_t.c.volume_id == error_deleting_volumes_id).values(updated_at=now, deleted_at=now, deleted=1)
delete_volume_attachment_q.execute()
log.info("-- action: deleting volume id: %s", error_deleting_volumes_id)
delete_volume_q = volumes_t.update().\
where(volumes_t.c.id == error_deleting_volumes_id).values(updated_at=now, deleted_at=now, deleted=1)
delete_volume_q.execute()
# get all the snapshots in state "error_deleting"
def get_error_deleting_snapshots(meta):
error_deleting_snapshots = []
snapshots_t = Table('snapshots', meta, autoload=True)
error_deleting_snapshots_q = select(columns=[snapshots_t.c.id]).where(and_(snapshots_t.c.status == "error_deleting",snapshots_t.c.deleted == 0))
# convert the query result into a list
for i in error_deleting_snapshots_q.execute():
error_deleting_snapshots.append(i[0])
return error_deleting_snapshots
# delete all the snapshots in state "error_deleting"
def fix_error_deleting_snapshots(meta, error_deleting_snapshots):
snapshots_t = Table('snapshots', meta, autoload=True)
for error_deleting_snapshots_id in error_deleting_snapshots:
log.info("-- action: deleting snapshot id: %s", error_deleting_snapshots_id)
now = datetime.datetime.utcnow()
delete_snapshot_q = snapshots_t.update().\
where(snapshots_t.c.id == error_deleting_snapshots_id).values(updated_at=now, deleted_at=now, deleted=1)
delete_snapshot_q.execute()
# get all the rows with a volume_admin_metadata still defined where the corresponding volume is already deleted
def get_wrong_volume_admin_metadata(meta):
wrong_admin_metadata = {}
volume_admin_metadata_t = Table('volume_admin_metadata', meta, autoload=True)
volumes_t = Table('volumes', meta, autoload=True)
admin_metadata_join = volume_admin_metadata_t.join(volumes_t,volume_admin_metadata_t.c.volume_id == volumes_t.c.id)
columns = [volumes_t.c.id, volumes_t.c.deleted, volume_admin_metadata_t.c.id, volume_admin_metadata_t.c.deleted]
wrong_volume_admin_metadata_q = select(columns=columns).select_from(admin_metadata_join).\
where(and_(volumes_t.c.deleted == 1, volume_admin_metadata_t.c.deleted == 0))
# return a dict indexed by volume_admin_metadata_id and with the value volume_id for non deleted volume_admin_metadata
for (volume_id, volume_deleted, volume_admin_metadata_id, volume_admin_metadata_deleted) in wrong_volume_admin_metadata_q.execute():
wrong_admin_metadata[volume_admin_metadata_id] = volume_id
return wrong_admin_metadata
# delete volume_admin_metadata still defined where the corresponding volume is already deleted
def fix_wrong_volume_admin_metadata(meta, wrong_admin_metadata):
volume_admin_metadata_t = Table('volume_admin_metadata', meta, autoload=True)
for volume_admin_metadata_id in wrong_admin_metadata:
log.info("-- action: deleting volume_admin_metadata id: %s", volume_admin_metadata_id)
now = datetime.datetime.utcnow()
delete_volume_admin_metadata_q = volume_admin_metadata_t.update().\
where(volume_admin_metadata_t.c.id == volume_admin_metadata_id).values(updated_at=now, deleted_at=now, deleted=1)
delete_volume_admin_metadata_q.execute()
# get all the rows with a volume_glance_metadata still defined where the corresponding volume is already deleted
def get_wrong_volume_glance_metadata(meta):
wrong_glance_metadata = {}
volume_glance_metadata_t = Table('volume_glance_metadata', meta, autoload=True)
volumes_t = Table('volumes', meta, autoload=True)
glance_metadata_join = volume_glance_metadata_t.join(volumes_t,volume_glance_metadata_t.c.volume_id == volumes_t.c.id)
columns = [volumes_t.c.id, volumes_t.c.deleted, volume_glance_metadata_t.c.id, volume_glance_metadata_t.c.deleted]
wrong_volume_glance_metadata_q = select(columns=columns).select_from(glance_metadata_join).\
where(and_(volumes_t.c.deleted == 1, volume_glance_metadata_t.c.deleted == 0))
# return a dict indexed by volume_glance_metadata_id and with the value volume_id for non deleted volume_glance_metadata
for (volume_id, volume_deleted, volume_glance_metadata_id, volume_glance_metadata_deleted) in wrong_volume_glance_metadata_q.execute():
wrong_glance_metadata[volume_glance_metadata_id] = volume_id
return wrong_glance_metadata
# delete volume_glance_metadata still defined where the corresponding volume is already deleted
def fix_wrong_volume_glance_metadata(meta, wrong_glance_metadata):
volume_glance_metadata_t = Table('volume_glance_metadata', meta, autoload=True)
for volume_glance_metadata_id in wrong_glance_metadata:
log.info("-- action: deleting volume_glance_metadata id: %s", volume_glance_metadata_id)
now = datetime.datetime.utcnow()
delete_volume_glance_metadata_q = volume_glance_metadata_t.update().\
where(volume_glance_metadata_t.c.id == volume_glance_metadata_id).values(updated_at=now, deleted_at=now, deleted=1)
delete_volume_glance_metadata_q.execute()
# get all the rows with a volume_metadata still defined where the corresponding volume is already deleted
def get_wrong_volume_metadata(meta):
wrong_metadata = {}
volume_metadata_t = Table('volume_metadata', meta, autoload=True)
volumes_t = Table('volumes', meta, autoload=True)
metadata_join = volume_metadata_t.join(volumes_t,volume_metadata_t.c.volume_id == volumes_t.c.id)
columns = [volumes_t.c.id, volumes_t.c.deleted, volume_metadata_t.c.id, volume_metadata_t.c.deleted]
wrong_volume_metadata_q = select(columns=columns).select_from(metadata_join).\
where(and_(volumes_t.c.deleted == 1, volume_metadata_t.c.deleted == 0))
# return a dict indexed by volume_metadata_id and with the value volume_id for non deleted volume_metadata
for (volume_id, volume_deleted, volume_metadata_id, volume_metadata_deleted) in wrong_volume_metadata_q.execute():
wrong_metadata[volume_metadata_id] = volume_id
return wrong_metadata
# delete volume_metadata still defined where the corresponding volume is already deleted
def fix_wrong_volume_metadata(meta, wrong_metadata):
volume_metadata_t = Table('volume_metadata', meta, autoload=True)
for volume_metadata_id in wrong_metadata:
log.info("-- action: deleting volume_metadata id: %s", volume_metadata_id)
now = datetime.datetime.utcnow()
delete_volume_metadata_q = volume_metadata_t.update().\
where(volume_metadata_t.c.id == volume_metadata_id).values(updated_at=now, deleted_at=now, deleted=1)
delete_volume_metadata_q.execute()
# get all the rows with a volume attachment still defined where the corresponding volume is already deleted
def get_wrong_volume_attachments(meta):
wrong_attachments = {}
volume_attachment_t = Table('volume_attachment', meta, autoload=True)
volumes_t = Table('volumes', meta, autoload=True)
attachment_join = volume_attachment_t.join(volumes_t,volume_attachment_t.c.volume_id == volumes_t.c.id)
columns = [volumes_t.c.id, volumes_t.c.deleted, volume_attachment_t.c.id, volume_attachment_t.c.deleted]
wrong_volume_attachment_q = select(columns=columns).select_from(attachment_join).\
where(and_(volumes_t.c.deleted == 1, volume_attachment_t.c.deleted == 0))
# return a dict indexed by volume_attachment_id and with the value volume_id for non deleted volume_attachments
for (volume_id, volume_deleted, volume_attachment_id, volume_attachment_deleted) in wrong_volume_attachment_q.execute():
wrong_attachments[volume_attachment_id] = volume_id
return wrong_attachments
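# For readability, the join built above corresponds roughly to the following SQL
# (a sketch only; it is not executed anywhere in this script):
#
#   SELECT volumes.id, volumes.deleted, volume_attachment.id, volume_attachment.deleted
#   FROM volume_attachment
#   JOIN volumes ON volume_attachment.volume_id = volumes.id
#   WHERE volumes.deleted = 1 AND volume_attachment.deleted = 0;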
# delete volume attachment still defined where the corresponding volume is already deleted
def fix_wrong_volume_attachments(meta, wrong_attachments, fix_limit):
if len(wrong_attachments) <= int(fix_limit):
volume_attachment_t = Table('volume_attachment', meta, autoload=True)
for volume_attachment_id in wrong_attachments:
log.info("-- action: deleting volume attachment id: %s", volume_attachment_id)
now = datetime.datetime.utcnow()
delete_volume_attachment_q = volume_attachment_t.update().\
where(volume_attachment_t.c.id == volume_attachment_id).values(updated_at=now, deleted_at=now, deleted=1)
delete_volume_attachment_q.execute()
else:
log.warn("- PLEASE CHECK MANUALLY - too many (more than %s) wrong volume attachments - denying to fix them automatically", str(fix_limit))
# get all the rows, which have the deleted flag set, but not the delete_at column
def get_missing_deleted_at(meta, table_names):
missing_deleted_at = {}
for t in table_names:
a_table_t = Table(t, meta, autoload=True)
a_table_select_deleted_at_q = a_table_t.select().where(
and_(a_table_t.c.deleted == 1, a_table_t.c.deleted_at == None))
for row in a_table_select_deleted_at_q.execute():
missing_deleted_at[row.id] = t
return missing_deleted_at
# set deleted_at to updated_at value if not set for marked as deleted rows
def fix_missing_deleted_at(meta, table_names):
now = datetime.datetime.utcnow()
for t in table_names:
a_table_t = Table(t, meta, autoload=True)
log.info("- action: fixing columns with missing deleted_at times in the %s table", t)
a_table_set_deleted_at_q = a_table_t.update().where(
and_(a_table_t.c.deleted == 1, a_table_t.c.deleted_at == None)).values(
deleted_at=now)
a_table_set_deleted_at_q.execute()
# get all the rows with a service still defined where the corresponding volume is already deleted
def get_deleted_services_still_used_in_volumes(meta):
deleted_services_still_used_in_volumes = {}
services_t = Table('services', meta, autoload=True)
volumes_t = Table('volumes', meta, autoload=True)
services_volumes_join = services_t.join(volumes_t,services_t.c.uuid == volumes_t.c.service_uuid)
columns = [services_t.c.uuid, services_t.c.deleted, volumes_t.c.id, volumes_t.c.deleted]
deleted_services_still_used_in_volumes_q = select(columns=columns).select_from(services_volumes_join).\
where(and_(volumes_t.c.deleted == 0, services_t.c.deleted == 1))
# return a dict indexed by service_uuid and with the value volume_id for deleted but still referenced services
for (service_uuid, service_deleted, volume_id, volume_deleted) in deleted_services_still_used_in_volumes_q.execute():
deleted_services_still_used_in_volumes[service_uuid] = volume_id
return deleted_services_still_used_in_volumes
# delete services still defined where the corresponding volume is already deleted
def fix_deleted_services_still_used_in_volumes(meta, deleted_services_still_used_in_volumes):
services_t = Table('services', meta, autoload=True)
for deleted_services_still_used_in_volumes_id in deleted_services_still_used_in_volumes:
log.info("-- action: undeleting service uuid: %s", deleted_services_still_used_in_volumes_id)
undelete_services_q = services_t.update().where(services_t.c.uuid == deleted_services_still_used_in_volumes_id).values(deleted=0,deleted_at=None)
undelete_services_q.execute()
# establish an openstack connection
def makeOsConnection():
try:
conn = connection.Connection(auth_url=os.getenv('OS_AUTH_URL'),
project_name=os.getenv('OS_PROJECT_NAME'),
project_domain_name=os.getenv('OS_PROJECT_DOMAIN_NAME'),
username=os.getenv('OS_USERNAME'),
user_domain_name=os.getenv('OS_USER_DOMAIN_NAME'),
password=os.getenv('OS_PASSWORD'),
identity_api_version="3")
except Exception as e:
log.warn("- PLEASE CHECK MANUALLY - problems connecting to openstack: %s",
str(e))
sys.exit(1)
return conn
# establish a database connection and return the handle
def makeConnection(db_url):
engine = create_engine(db_url)
engine.connect()
Session = sessionmaker(bind=engine)
thisSession = Session()
metadata = MetaData()
metadata.bind = engine
Base = declarative_base()
return thisSession, metadata, Base
# return the database connection string from the config file
def get_db_url(config_file):
    parser = configparser.ConfigParser()
    try:
        parser.read(config_file)
        db_url = parser.get('database', 'connection', raw=True)
    except Exception:
        log.error("ERROR: Check Cinder configuration file.")
sys.exit(2)
return db_url
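# For reference, get_db_url() expects the cinder.conf passed via --config to contain a
# [database] section along these lines (illustrative values only, not from any real
# deployment):
#
#   [database]
#   connection = mysql+pymysql://cinder:CINDER_DBPASS@database-host/cinder?charset=utf8
#
# The resulting string is handed straight to sqlalchemy's create_engine() in makeConnection().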
# cmdline handling
def parse_cmdline_args():
parser = argparse.ArgumentParser()
parser.add_argument("--config",
default='./cinder.conf',
help='configuration file')
parser.add_argument("--dry-run",
action="store_true",
help='print only what would be done without actually doing it')
parser.add_argument("--fix-limit",
default=25,
help='maximum number of inconsistencies to fix automatically - if there are more, automatic fixing is denied')
return parser.parse_args()
def main():
try:
args = parse_cmdline_args()
except Exception as e:
log.error("Check command line arguments (%s)", e.strerror)
# connect to openstack
conn = makeOsConnection()
# connect to the DB
db_url = get_db_url(args.config)
cinder_session, cinder_metadata, cinder_Base = makeConnection(db_url)
# fixing volume attachments at no longer existing instances
orphan_volume_attachments = get_orphan_volume_attachments(cinder_metadata)
nova_instances = get_nova_instances(conn)
wrong_orphan_volume_attachments = get_wrong_orphan_volume_attachments(nova_instances, orphan_volume_attachments)
if len(wrong_orphan_volume_attachments) != 0:
log.info("- orphan volume attachments found:")
# print out what we would delete
for orphan_volume_attachment_id in wrong_orphan_volume_attachments:
log.info("-- orphan volume attachment (id in cinder db: %s) for non existent instance in nova: %s", orphan_volume_attachment_id,
orphan_volume_attachments[orphan_volume_attachment_id])
if not args.dry_run:
log.info("- deleting orphan volume attachment inconsistencies found")
fix_wrong_orphan_volume_attachments(cinder_metadata, wrong_orphan_volume_attachments, args.fix_limit)
else:
log.info("- no orphan volume attachments found")
# fixing possible volumes in state "error-deleting"
error_deleting_volumes = get_error_deleting_volumes(cinder_metadata)
if len(error_deleting_volumes) != 0:
log.info("- volumes in state error_deleting found")
# print out what we would delete
for error_deleting_volumes_id in error_deleting_volumes:
log.info("-- volume id: %s", error_deleting_volumes_id)
if not args.dry_run:
log.info("- deleting volumes in state error_deleting")
fix_error_deleting_volumes(cinder_metadata, error_deleting_volumes)
else:
log.info("- no volumes in state error_deleting found")
# fixing possible snapshots in state "error-deleting"
error_deleting_snapshots = get_error_deleting_snapshots(cinder_metadata)
if len(error_deleting_snapshots) != 0:
log.info("- snapshots in state error_deleting found")
# print out what we would delete
for error_deleting_snapshots_id in error_deleting_snapshots:
log.info("-- snapshot id: %s", error_deleting_snapshots_id)
if not args.dry_run:
log.info("- deleting snapshots in state error_deleting")
fix_error_deleting_snapshots(cinder_metadata, error_deleting_snapshots)
else:
log.info("- no snapshots in state error_deleting found")
# fixing possible wrong admin_metadata entries
wrong_admin_metadata = get_wrong_volume_admin_metadata(cinder_metadata)
if len(wrong_admin_metadata) != 0:
log.info("- volume_admin_metadata inconsistencies found")
# print out what we would delete
for volume_admin_metadata_id in wrong_admin_metadata:
log.info("-- volume_admin_metadata id: %s - deleted volume id: %s", volume_admin_metadata_id, wrong_admin_metadata[volume_admin_metadata_id])
if not args.dry_run:
log.info("- removing volume_admin_metadata inconsistencies found")
fix_wrong_volume_admin_metadata(cinder_metadata, wrong_admin_metadata)
else:
log.info("- volume_admin_metadata entries are consistent")
# fixing possible wrong glance_metadata entries
wrong_glance_metadata = get_wrong_volume_glance_metadata(cinder_metadata)
if len(wrong_glance_metadata) != 0:
log.info("- volume_glance_metadata inconsistencies found")
# print out what we would delete
for volume_glance_metadata_id in wrong_glance_metadata:
log.info("-- volume_glance_metadata id: %s - deleted volume id: %s", volume_glance_metadata_id, wrong_glance_metadata[volume_glance_metadata_id])
if not args.dry_run:
log.info("- removing volume_glance_metadata inconsistencies found")
fix_wrong_volume_glance_metadata(cinder_metadata, wrong_glance_metadata)
else:
log.info("- volume_glance_metadata entries are consistent")
# fixing possible wrong metadata entries
wrong_metadata = get_wrong_volume_metadata(cinder_metadata)
if len(wrong_metadata) != 0:
log.info("- volume_metadata inconsistencies found")
# print out what we would delete
for volume_metadata_id in wrong_metadata:
log.info("-- volume_metadata id: %s - deleted volume id: %s", volume_metadata_id, wrong_metadata[volume_metadata_id])
if not args.dry_run:
log.info("- removing volume_metadata inconsistencies found")
fix_wrong_volume_metadata(cinder_metadata, wrong_metadata)
else:
log.info("- volume_metadata entries are consistent")
# fixing possible wrong attachment entries
wrong_attachments = get_wrong_volume_attachments(cinder_metadata)
if len(wrong_attachments) != 0:
log.info("- volume attachment inconsistencies found")
# print out what we would delete
for volume_attachment_id in wrong_attachments:
log.info("-- volume attachment id: %s - deleted volume id: %s", volume_attachment_id, wrong_attachments[volume_attachment_id])
if not args.dry_run:
log.info("- removing volume attachment inconsistencies found")
fix_wrong_volume_attachments(cinder_metadata, wrong_attachments, args.fix_limit)
else:
log.info("- volume attachments are consistent")
# fixing possible missing deleted_at timestamps in some tables
# tables which sometimes have missing deleted_at values
table_names = [ 'snapshots', 'volume_attachment' ]
missing_deleted_at = get_missing_deleted_at(cinder_metadata, table_names)
if len(missing_deleted_at) != 0:
log.info("- missing deleted_at values found:")
# print out what we would delete
for missing_deleted_at_id in missing_deleted_at:
log.info("--- id %s of the %s table is missing deleted_at time", missing_deleted_at_id, missing_deleted_at[missing_deleted_at_id])
if not args.dry_run:
log.info("- setting missing deleted_at values")
fix_missing_deleted_at(cinder_metadata, table_names)
else:
log.info("- no missing deleted_at values")
deleted_services_still_used_in_volumes = get_deleted_services_still_used_in_volumes(cinder_metadata)
if len(deleted_services_still_used_in_volumes) != 0:
log.info("- deleted services still used in volumes found:")
# print out what we would delete
for deleted_services_still_used_in_volumes_id in deleted_services_still_used_in_volumes:
log.info("--- deleted service uuid %s still used in volumes table entry %s", deleted_services_still_used_in_volumes_id, deleted_services_still_used_in_volumes[deleted_services_still_used_in_volumes_id])
if not args.dry_run:
log.info("- undeleting service uuid still used in volumes table")
fix_deleted_services_still_used_in_volumes(cinder_metadata, deleted_services_still_used_in_volumes)
else:
log.info("- deleted services still used in volumes")
if __name__ == "__main__":
main()
| 48.716846 | 214 | 0.739038 | ["Apache-2.0"] | DEiselt/openstack-nannies | scripts/cinder-consistency.py | 27,184 | Python |
"""
Evrythng Docs
https://dashboard.evrythng.com/documentation/api/actiontypes
"""
from evrythng import assertions, utils
field_specs = {
'datatypes': {
'name': 'str',
'customFields': 'dict',
'tags': 'dict_of_str',
'scopes': 'dict',
},
'required': ('name',),
'readonly': ('id', 'createdAt', 'updatedAt'),
'writable': ('customFields', 'tags', 'scopes'),
}
def create_action_type(name, customFields=None, tags=None, scopes=None,
api_key=None, request_kwargs=None):
"""Create an Action Type"""
kwargs = locals()
del kwargs['request_kwargs']
api_key = kwargs.pop('api_key', None)
assertions.validate_field_specs(kwargs, field_specs)
return utils.request('POST', '/actions', data=kwargs, api_key=api_key,
**(request_kwargs or {}))
def delete_action_type(name, api_key=None, request_kwargs=None):
"""Delete an Action Type"""
assertions.datatype_str('name', name)
url = '/actions/{}'.format(name)
return utils.request('DELETE', url, api_key=api_key, **(request_kwargs or {}))
def update_action_type(name, customFields=None, tags=None, scopes=None,
api_key=None, request_kwargs=None):
"""Update an Action Type"""
kwargs = locals()
del kwargs['request_kwargs']
api_key = kwargs.pop('api_key', None)
assertions.validate_field_specs(kwargs, field_specs)
url = '/actions/{}'.format(name)
    return utils.request('PUT', url, data=kwargs, api_key=api_key,
                         **(request_kwargs or {}))
def list_action_types(api_key=None, request_kwargs=None):
"""List Action Types"""
url = '/actions'
return utils.request('GET', url, api_key=api_key, **(request_kwargs or {}))
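# --- Usage sketch ------------------------------------------------------------
# Illustrative only, not part of the published module: shows how the helpers above
# might be called. The EVRYTHNG_API_KEY environment variable and the '_repaired'
# type name are assumptions made for this example.
if __name__ == '__main__':
    import os

    api_key = os.environ.get('EVRYTHNG_API_KEY')
    if api_key:
        # Custom action types are conventionally named with a leading underscore.
        create_action_type('_repaired', api_key=api_key)
        print(list_action_types(api_key=api_key))
        delete_action_type('_repaired', api_key=api_key)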
| 32.490909 | 82 | 0.632904 | ["MIT"] | jwpthng/evrythng-python-sdk | src/evrythng/entities/action_types.py | 1,787 | Python |
import json

import requests
# try:
# redis_connection = redis.Redis(host='dorresteinappshub.ucsd.edu', port=6378, db=0)
# except:
# redis_connection = None
redis_connection = None
def acquire_motifdb(db_list):
db_list_key = json.dumps(db_list)
if redis_connection is not None:
if redis_connection.exists(db_list_key):
cached_data = json.loads(redis_connection.get(db_list_key))
return cached_data["motifdb_spectra"], cached_data["motifdb_metadata"], set(cached_data["motifdb_features"])
client = requests.session()
token_output = client.get(server_url + 'initialise_api/').json()
token = token_output['token']
data = {'csrfmiddlewaretoken': token}
data['motifset_id_list'] = db_list
data['filter'] = 'True'
output = client.post(server_url + 'get_motifset/', data=data).json()
motifdb_spectra = output['motifs']
motifdb_metadata = output['metadata']
motifdb_features = set()
for m, spec in motifdb_spectra.items():
for f in spec:
motifdb_features.add(f)
# Trying to cache
if redis_connection is not None:
data_cache = {}
data_cache["motifdb_spectra"] = motifdb_spectra
data_cache["motifdb_metadata"] = motifdb_metadata
data_cache["motifdb_features"] = list(motifdb_features)
redis_connection.set(db_list_key, json.dumps(data_cache))
return motifdb_spectra, motifdb_metadata, motifdb_features
"""Grabbing the latest Motifs from MS2LDA"""
import requests
server_url = 'http://ms2lda.org/motifdb/'
server_url = 'http://localhost:8000/motifdb/'
motifset_dict = requests.get(server_url + 'list_motifsets/').json()
# db_list = ['gnps_binned_005'] # Can update this later with multiple motif sets
db_list = []
# db_list.append(2)
# db_list.append(4)
# db_list.append(1)
# db_list.append(3)
# db_list.append(5)
# db_list.append(6)
# db_list.append(16)
db_list = list(set(db_list))
# Acquire motifset from MS2LDA.org
motifdb_spectra, motifdb_metadata, motifdb_features = acquire_motifdb(db_list)
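# Quick sanity check (an illustrative addition, not part of the original script):
# report what was fetched from the MotifDB server for the selected motif sets.
print('Fetched %d motifs covering %d distinct features from motif sets %s' %
      (len(motifdb_spectra), len(motifdb_features), db_list))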
| 32 | 120 | 0.708984 | ["MIT"] | glasgowcompbio/ms2ldaviz | lda/offline_analysis/ms2lda_runfull_test.py | 2,048 | Python |
"""
Mapping from iana timezones to windows timezones and vice versa
"""
from datetime import tzinfo
import pytz
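# A small usage sketch (not part of the original mapping module): the table below is a
# plain dict, so translating an IANA zone name into its Windows counterpart is a simple
# lookup. The helper name and the "UTC" fallback are choices made for this example only.
def _iana_to_windows_example(iana_name: str) -> str:
    """Return the Windows zone name for an IANA name, e.g. 'Europe/Berlin' ->
    'W. Europe Standard Time'; unknown names fall back to 'UTC'."""
    return IANA_TO_WIN.get(iana_name, "UTC")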
# noinspection SpellCheckingInspection
IANA_TO_WIN = {
"Africa/Abidjan": "Greenwich Standard Time",
"Africa/Accra": "Greenwich Standard Time",
"Africa/Addis_Ababa": "E. Africa Standard Time",
"Africa/Algiers": "W. Central Africa Standard Time",
"Africa/Asmara": "E. Africa Standard Time",
"Africa/Asmera": "E. Africa Standard Time",
"Africa/Bamako": "Greenwich Standard Time",
"Africa/Bangui": "W. Central Africa Standard Time",
"Africa/Banjul": "Greenwich Standard Time",
"Africa/Bissau": "Greenwich Standard Time",
"Africa/Blantyre": "South Africa Standard Time",
"Africa/Brazzaville": "W. Central Africa Standard Time",
"Africa/Bujumbura": "South Africa Standard Time",
"Africa/Cairo": "Egypt Standard Time",
"Africa/Casablanca": "Morocco Standard Time",
"Africa/Ceuta": "Romance Standard Time",
"Africa/Conakry": "Greenwich Standard Time",
"Africa/Dakar": "Greenwich Standard Time",
"Africa/Dar_es_Salaam": "E. Africa Standard Time",
"Africa/Djibouti": "E. Africa Standard Time",
"Africa/Douala": "W. Central Africa Standard Time",
"Africa/El_Aaiun": "Morocco Standard Time",
"Africa/Freetown": "Greenwich Standard Time",
"Africa/Gaborone": "South Africa Standard Time",
"Africa/Harare": "South Africa Standard Time",
"Africa/Johannesburg": "South Africa Standard Time",
"Africa/Juba": "E. Africa Standard Time",
"Africa/Kampala": "E. Africa Standard Time",
"Africa/Khartoum": "Sudan Standard Time",
"Africa/Kigali": "South Africa Standard Time",
"Africa/Kinshasa": "W. Central Africa Standard Time",
"Africa/Lagos": "W. Central Africa Standard Time",
"Africa/Libreville": "W. Central Africa Standard Time",
"Africa/Lome": "Greenwich Standard Time",
"Africa/Luanda": "W. Central Africa Standard Time",
"Africa/Lubumbashi": "South Africa Standard Time",
"Africa/Lusaka": "South Africa Standard Time",
"Africa/Malabo": "W. Central Africa Standard Time",
"Africa/Maputo": "South Africa Standard Time",
"Africa/Maseru": "South Africa Standard Time",
"Africa/Mbabane": "South Africa Standard Time",
"Africa/Mogadishu": "E. Africa Standard Time",
"Africa/Monrovia": "Greenwich Standard Time",
"Africa/Nairobi": "E. Africa Standard Time",
"Africa/Ndjamena": "W. Central Africa Standard Time",
"Africa/Niamey": "W. Central Africa Standard Time",
"Africa/Nouakchott": "Greenwich Standard Time",
"Africa/Ouagadougou": "Greenwich Standard Time",
"Africa/Porto-Novo": "W. Central Africa Standard Time",
"Africa/Sao_Tome": "Sao Tome Standard Time",
"Africa/Timbuktu": "Greenwich Standard Time",
"Africa/Tripoli": "Libya Standard Time",
"Africa/Tunis": "W. Central Africa Standard Time",
"Africa/Windhoek": "Namibia Standard Time",
"America/Adak": "Aleutian Standard Time",
"America/Anchorage": "Alaskan Standard Time",
"America/Anguilla": "SA Western Standard Time",
"America/Antigua": "SA Western Standard Time",
"America/Araguaina": "Tocantins Standard Time",
"America/Argentina/Buenos_Aires": "Argentina Standard Time",
"America/Argentina/Catamarca": "Argentina Standard Time",
"America/Argentina/ComodRivadavia": "Argentina Standard Time",
"America/Argentina/Cordoba": "Argentina Standard Time",
"America/Argentina/Jujuy": "Argentina Standard Time",
"America/Argentina/La_Rioja": "Argentina Standard Time",
"America/Argentina/Mendoza": "Argentina Standard Time",
"America/Argentina/Rio_Gallegos": "Argentina Standard Time",
"America/Argentina/Salta": "Argentina Standard Time",
"America/Argentina/San_Juan": "Argentina Standard Time",
"America/Argentina/San_Luis": "Argentina Standard Time",
"America/Argentina/Tucuman": "Argentina Standard Time",
"America/Argentina/Ushuaia": "Argentina Standard Time",
"America/Aruba": "SA Western Standard Time",
"America/Asuncion": "Paraguay Standard Time",
"America/Atikokan": "SA Pacific Standard Time",
"America/Atka": "Aleutian Standard Time",
"America/Bahia": "Bahia Standard Time",
"America/Bahia_Banderas": "Central Standard Time (Mexico)",
"America/Barbados": "SA Western Standard Time",
"America/Belem": "SA Eastern Standard Time",
"America/Belize": "Central America Standard Time",
"America/Blanc-Sablon": "SA Western Standard Time",
"America/Boa_Vista": "SA Western Standard Time",
"America/Bogota": "SA Pacific Standard Time",
"America/Boise": "Mountain Standard Time",
"America/Buenos_Aires": "Argentina Standard Time",
"America/Cambridge_Bay": "Mountain Standard Time",
"America/Campo_Grande": "Central Brazilian Standard Time",
"America/Cancun": "Eastern Standard Time (Mexico)",
"America/Caracas": "Venezuela Standard Time",
"America/Catamarca": "Argentina Standard Time",
"America/Cayenne": "SA Eastern Standard Time",
"America/Cayman": "SA Pacific Standard Time",
"America/Chicago": "Central Standard Time",
"America/Chihuahua": "Mountain Standard Time (Mexico)",
"America/Coral_Harbour": "SA Pacific Standard Time",
"America/Cordoba": "Argentina Standard Time",
"America/Costa_Rica": "Central America Standard Time",
"America/Creston": "US Mountain Standard Time",
"America/Cuiaba": "Central Brazilian Standard Time",
"America/Curacao": "SA Western Standard Time",
"America/Danmarkshavn": "UTC",
"America/Dawson": "Pacific Standard Time",
"America/Dawson_Creek": "US Mountain Standard Time",
"America/Denver": "Mountain Standard Time",
"America/Detroit": "Eastern Standard Time",
"America/Dominica": "SA Western Standard Time",
"America/Edmonton": "Mountain Standard Time",
"America/Eirunepe": "SA Pacific Standard Time",
"America/El_Salvador": "Central America Standard Time",
"America/Ensenada": "Pacific Standard Time (Mexico)",
"America/Fort_Nelson": "US Mountain Standard Time",
"America/Fort_Wayne": "US Eastern Standard Time",
"America/Fortaleza": "SA Eastern Standard Time",
"America/Glace_Bay": "Atlantic Standard Time",
"America/Godthab": "Greenland Standard Time",
"America/Goose_Bay": "Atlantic Standard Time",
"America/Grand_Turk": "Turks And Caicos Standard Time",
"America/Grenada": "SA Western Standard Time",
"America/Guadeloupe": "SA Western Standard Time",
"America/Guatemala": "Central America Standard Time",
"America/Guayaquil": "SA Pacific Standard Time",
"America/Guyana": "SA Western Standard Time",
"America/Halifax": "Atlantic Standard Time",
"America/Havana": "Cuba Standard Time",
"America/Hermosillo": "US Mountain Standard Time",
"America/Indiana/Indianapolis": "US Eastern Standard Time",
"America/Indiana/Knox": "Central Standard Time",
"America/Indiana/Marengo": "US Eastern Standard Time",
"America/Indiana/Petersburg": "Eastern Standard Time",
"America/Indiana/Tell_City": "Central Standard Time",
"America/Indiana/Vevay": "US Eastern Standard Time",
"America/Indiana/Vincennes": "Eastern Standard Time",
"America/Indiana/Winamac": "Eastern Standard Time",
"America/Indianapolis": "US Eastern Standard Time",
"America/Inuvik": "Mountain Standard Time",
"America/Iqaluit": "Eastern Standard Time",
"America/Jamaica": "SA Pacific Standard Time",
"America/Jujuy": "Argentina Standard Time",
"America/Juneau": "Alaskan Standard Time",
"America/Kentucky/Louisville": "Eastern Standard Time",
"America/Kentucky/Monticello": "Eastern Standard Time",
"America/Knox_IN": "Central Standard Time",
"America/Kralendijk": "SA Western Standard Time",
"America/La_Paz": "SA Western Standard Time",
"America/Lima": "SA Pacific Standard Time",
"America/Los_Angeles": "Pacific Standard Time",
"America/Louisville": "Eastern Standard Time",
"America/Lower_Princes": "SA Western Standard Time",
"America/Maceio": "SA Eastern Standard Time",
"America/Managua": "Central America Standard Time",
"America/Manaus": "SA Western Standard Time",
"America/Marigot": "SA Western Standard Time",
"America/Martinique": "SA Western Standard Time",
"America/Matamoros": "Central Standard Time",
"America/Mazatlan": "Mountain Standard Time (Mexico)",
"America/Mendoza": "Argentina Standard Time",
"America/Menominee": "Central Standard Time",
"America/Merida": "Central Standard Time (Mexico)",
"America/Metlakatla": "Alaskan Standard Time",
"America/Mexico_City": "Central Standard Time (Mexico)",
"America/Miquelon": "Saint Pierre Standard Time",
"America/Moncton": "Atlantic Standard Time",
"America/Monterrey": "Central Standard Time (Mexico)",
"America/Montevideo": "Montevideo Standard Time",
"America/Montreal": "Eastern Standard Time",
"America/Montserrat": "SA Western Standard Time",
"America/Nassau": "Eastern Standard Time",
"America/New_York": "Eastern Standard Time",
"America/Nipigon": "Eastern Standard Time",
"America/Nome": "Alaskan Standard Time",
"America/Noronha": "UTC-02",
"America/North_Dakota/Beulah": "Central Standard Time",
"America/North_Dakota/Center": "Central Standard Time",
"America/North_Dakota/New_Salem": "Central Standard Time",
"America/Ojinaga": "Mountain Standard Time",
"America/Panama": "SA Pacific Standard Time",
"America/Pangnirtung": "Eastern Standard Time",
"America/Paramaribo": "SA Eastern Standard Time",
"America/Phoenix": "US Mountain Standard Time",
"America/Port-au-Prince": "Haiti Standard Time",
"America/Port_of_Spain": "SA Western Standard Time",
"America/Porto_Acre": "SA Pacific Standard Time",
"America/Porto_Velho": "SA Western Standard Time",
"America/Puerto_Rico": "SA Western Standard Time",
"America/Punta_Arenas": "Magallanes Standard Time",
"America/Rainy_River": "Central Standard Time",
"America/Rankin_Inlet": "Central Standard Time",
"America/Recife": "SA Eastern Standard Time",
"America/Regina": "Canada Central Standard Time",
"America/Resolute": "Central Standard Time",
"America/Rio_Branco": "SA Pacific Standard Time",
"America/Rosario": "Argentina Standard Time",
"America/Santa_Isabel": "Pacific Standard Time (Mexico)",
"America/Santarem": "SA Eastern Standard Time",
"America/Santiago": "Pacific SA Standard Time",
"America/Santo_Domingo": "SA Western Standard Time",
"America/Sao_Paulo": "E. South America Standard Time",
"America/Scoresbysund": "Azores Standard Time",
"America/Shiprock": "Mountain Standard Time",
"America/Sitka": "Alaskan Standard Time",
"America/St_Barthelemy": "SA Western Standard Time",
"America/St_Johns": "Newfoundland Standard Time",
"America/St_Kitts": "SA Western Standard Time",
"America/St_Lucia": "SA Western Standard Time",
"America/St_Thomas": "SA Western Standard Time",
"America/St_Vincent": "SA Western Standard Time",
"America/Swift_Current": "Canada Central Standard Time",
"America/Tegucigalpa": "Central America Standard Time",
"America/Thule": "Atlantic Standard Time",
"America/Thunder_Bay": "Eastern Standard Time",
"America/Tijuana": "Pacific Standard Time (Mexico)",
"America/Toronto": "Eastern Standard Time",
"America/Tortola": "SA Western Standard Time",
"America/Vancouver": "Pacific Standard Time",
"America/Virgin": "SA Western Standard Time",
"America/Whitehorse": "Pacific Standard Time",
"America/Winnipeg": "Central Standard Time",
"America/Yakutat": "Alaskan Standard Time",
"America/Yellowknife": "Mountain Standard Time",
"Antarctica/Casey": "W. Australia Standard Time",
"Antarctica/Davis": "SE Asia Standard Time",
"Antarctica/DumontDUrville": "West Pacific Standard Time",
"Antarctica/Macquarie": "Central Pacific Standard Time",
"Antarctica/Mawson": "West Asia Standard Time",
"Antarctica/McMurdo": "New Zealand Standard Time",
"Antarctica/Palmer": "Magallanes Standard Time",
"Antarctica/Rothera": "SA Eastern Standard Time",
"Antarctica/South_Pole": "New Zealand Standard Time",
"Antarctica/Syowa": "E. Africa Standard Time",
"Antarctica/Vostok": "Central Asia Standard Time",
"Arctic/Longyearbyen": "W. Europe Standard Time",
"Asia/Aden": "Arab Standard Time",
"Asia/Almaty": "Central Asia Standard Time",
"Asia/Amman": "Jordan Standard Time",
"Asia/Anadyr": "Russia Time Zone 11",
"Asia/Aqtau": "West Asia Standard Time",
"Asia/Aqtobe": "West Asia Standard Time",
"Asia/Ashgabat": "West Asia Standard Time",
"Asia/Ashkhabad": "West Asia Standard Time",
"Asia/Atyrau": "West Asia Standard Time",
"Asia/Baghdad": "Arabic Standard Time",
"Asia/Bahrain": "Arab Standard Time",
"Asia/Baku": "Azerbaijan Standard Time",
"Asia/Bangkok": "SE Asia Standard Time",
"Asia/Barnaul": "Altai Standard Time",
"Asia/Beirut": "Middle East Standard Time",
"Asia/Bishkek": "Central Asia Standard Time",
"Asia/Brunei": "Singapore Standard Time",
"Asia/Calcutta": "India Standard Time",
"Asia/Chita": "Transbaikal Standard Time",
"Asia/Choibalsan": "Ulaanbaatar Standard Time",
"Asia/Chongqing": "China Standard Time",
"Asia/Chungking": "China Standard Time",
"Asia/Colombo": "Sri Lanka Standard Time",
"Asia/Dacca": "Bangladesh Standard Time",
"Asia/Damascus": "Syria Standard Time",
"Asia/Dhaka": "Bangladesh Standard Time",
"Asia/Dili": "Tokyo Standard Time",
"Asia/Dubai": "Arabian Standard Time",
"Asia/Dushanbe": "West Asia Standard Time",
"Asia/Famagusta": "GTB Standard Time",
"Asia/Gaza": "West Bank Standard Time",
"Asia/Harbin": "China Standard Time",
"Asia/Hebron": "West Bank Standard Time",
"Asia/Ho_Chi_Minh": "SE Asia Standard Time",
"Asia/Hong_Kong": "China Standard Time",
"Asia/Hovd": "W. Mongolia Standard Time",
"Asia/Irkutsk": "North Asia East Standard Time",
"Asia/Istanbul": "Turkey Standard Time",
"Asia/Jakarta": "SE Asia Standard Time",
"Asia/Jayapura": "Tokyo Standard Time",
"Asia/Jerusalem": "Israel Standard Time",
"Asia/Kabul": "Afghanistan Standard Time",
"Asia/Kamchatka": "Kamchatka Standard Time",
"Asia/Karachi": "Pakistan Standard Time",
"Asia/Kashgar": "Central Asia Standard Time",
"Asia/Kathmandu": "Nepal Standard Time",
"Asia/Katmandu": "Nepal Standard Time",
"Asia/Khandyga": "Yakutsk Standard Time",
"Asia/Kolkata": "India Standard Time",
"Asia/Krasnoyarsk": "North Asia Standard Time",
"Asia/Kuala_Lumpur": "Singapore Standard Time",
"Asia/Kuching": "Singapore Standard Time",
"Asia/Kuwait": "Arab Standard Time",
"Asia/Macao": "China Standard Time",
"Asia/Macau": "China Standard Time",
"Asia/Magadan": "Magadan Standard Time",
"Asia/Makassar": "Singapore Standard Time",
"Asia/Manila": "Singapore Standard Time",
"Asia/Muscat": "Arabian Standard Time",
"Asia/Nicosia": "GTB Standard Time",
"Asia/Novokuznetsk": "North Asia Standard Time",
"Asia/Novosibirsk": "N. Central Asia Standard Time",
"Asia/Omsk": "Omsk Standard Time",
"Asia/Oral": "West Asia Standard Time",
"Asia/Phnom_Penh": "SE Asia Standard Time",
"Asia/Pontianak": "SE Asia Standard Time",
"Asia/Pyongyang": "North Korea Standard Time",
"Asia/Qatar": "Arab Standard Time",
"Asia/Qostanay": "Central Asia Standard Time",
"Asia/Qyzylorda": "Qyzylorda Standard Time",
"Asia/Rangoon": "Myanmar Standard Time",
"Asia/Riyadh": "Arab Standard Time",
"Asia/Saigon": "SE Asia Standard Time",
"Asia/Sakhalin": "Sakhalin Standard Time",
"Asia/Samarkand": "West Asia Standard Time",
"Asia/Seoul": "Korea Standard Time",
"Asia/Shanghai": "China Standard Time",
"Asia/Singapore": "Singapore Standard Time",
"Asia/Srednekolymsk": "Russia Time Zone 10",
"Asia/Taipei": "Taipei Standard Time",
"Asia/Tashkent": "West Asia Standard Time",
"Asia/Tbilisi": "Georgian Standard Time",
"Asia/Tehran": "Iran Standard Time",
"Asia/Tel_Aviv": "Israel Standard Time",
"Asia/Thimbu": "Bangladesh Standard Time",
"Asia/Thimphu": "Bangladesh Standard Time",
"Asia/Tokyo": "Tokyo Standard Time",
"Asia/Tomsk": "Tomsk Standard Time",
"Asia/Ujung_Pandang": "Singapore Standard Time",
"Asia/Ulaanbaatar": "Ulaanbaatar Standard Time",
"Asia/Ulan_Bator": "Ulaanbaatar Standard Time",
"Asia/Urumqi": "Central Asia Standard Time",
"Asia/Ust-Nera": "Vladivostok Standard Time",
"Asia/Vientiane": "SE Asia Standard Time",
"Asia/Vladivostok": "Vladivostok Standard Time",
"Asia/Yakutsk": "Yakutsk Standard Time",
"Asia/Yangon": "Myanmar Standard Time",
"Asia/Yekaterinburg": "Ekaterinburg Standard Time",
"Asia/Yerevan": "Caucasus Standard Time",
"Atlantic/Azores": "Azores Standard Time",
"Atlantic/Bermuda": "Atlantic Standard Time",
"Atlantic/Canary": "GMT Standard Time",
"Atlantic/Cape_Verde": "Cape Verde Standard Time",
"Atlantic/Faeroe": "GMT Standard Time",
"Atlantic/Faroe": "GMT Standard Time",
"Atlantic/Jan_Mayen": "W. Europe Standard Time",
"Atlantic/Madeira": "GMT Standard Time",
"Atlantic/Reykjavik": "Greenwich Standard Time",
"Atlantic/South_Georgia": "UTC-02",
"Atlantic/St_Helena": "Greenwich Standard Time",
"Atlantic/Stanley": "SA Eastern Standard Time",
"Australia/ACT": "AUS Eastern Standard Time",
"Australia/Adelaide": "Cen. Australia Standard Time",
"Australia/Brisbane": "E. Australia Standard Time",
"Australia/Broken_Hill": "Cen. Australia Standard Time",
"Australia/Canberra": "AUS Eastern Standard Time",
"Australia/Currie": "Tasmania Standard Time",
"Australia/Darwin": "AUS Central Standard Time",
"Australia/Eucla": "Aus Central W. Standard Time",
"Australia/Hobart": "Tasmania Standard Time",
"Australia/LHI": "Lord Howe Standard Time",
"Australia/Lindeman": "E. Australia Standard Time",
"Australia/Lord_Howe": "Lord Howe Standard Time",
"Australia/Melbourne": "AUS Eastern Standard Time",
"Australia/NSW": "AUS Eastern Standard Time",
"Australia/North": "AUS Central Standard Time",
"Australia/Perth": "W. Australia Standard Time",
"Australia/Queensland": "E. Australia Standard Time",
"Australia/South": "Cen. Australia Standard Time",
"Australia/Sydney": "AUS Eastern Standard Time",
"Australia/Tasmania": "Tasmania Standard Time",
"Australia/Victoria": "AUS Eastern Standard Time",
"Australia/West": "W. Australia Standard Time",
"Australia/Yancowinna": "Cen. Australia Standard Time",
"Brazil/Acre": "SA Pacific Standard Time",
"Brazil/DeNoronha": "UTC-02",
"Brazil/East": "E. South America Standard Time",
"Brazil/West": "SA Western Standard Time",
"CET": "Romance Standard Time",
"CST6CDT": "Central Standard Time",
"Canada/Atlantic": "Atlantic Standard Time",
"Canada/Central": "Central Standard Time",
"Canada/East-Saskatchewan": "Canada Central Standard Time",
"Canada/Eastern": "Eastern Standard Time",
"Canada/Mountain": "Mountain Standard Time",
"Canada/Newfoundland": "Newfoundland Standard Time",
"Canada/Pacific": "Pacific Standard Time",
"Canada/Saskatchewan": "Canada Central Standard Time",
"Canada/Yukon": "Pacific Standard Time",
"Chile/Continental": "Pacific SA Standard Time",
"Chile/EasterIsland": "Easter Island Standard Time",
"Cuba": "Cuba Standard Time",
"EET": "GTB Standard Time",
"EST": "SA Pacific Standard Time",
"EST5EDT": "Eastern Standard Time",
"Egypt": "Egypt Standard Time",
"Eire": "GMT Standard Time",
"Etc/GMT": "UTC",
"Etc/GMT+0": "UTC",
"Etc/GMT+1": "Cape Verde Standard Time",
"Etc/GMT+10": "Hawaiian Standard Time",
"Etc/GMT+11": "UTC-11",
"Etc/GMT+12": "Dateline Standard Time",
"Etc/GMT+2": "Mid-Atlantic Standard Time",
"Etc/GMT+3": "SA Eastern Standard Time",
"Etc/GMT+4": "SA Western Standard Time",
"Etc/GMT+5": "SA Pacific Standard Time",
"Etc/GMT+6": "Central America Standard Time",
"Etc/GMT+7": "US Mountain Standard Time",
"Etc/GMT+8": "UTC-08",
"Etc/GMT+9": "UTC-09",
"Etc/GMT-0": "UTC",
"Etc/GMT-1": "W. Central Africa Standard Time",
"Etc/GMT-10": "West Pacific Standard Time",
"Etc/GMT-11": "Central Pacific Standard Time",
"Etc/GMT-12": "UTC+12",
"Etc/GMT-13": "UTC+13",
"Etc/GMT-14": "Line Islands Standard Time",
"Etc/GMT-2": "South Africa Standard Time",
"Etc/GMT-3": "E. Africa Standard Time",
"Etc/GMT-4": "Arabian Standard Time",
"Etc/GMT-5": "West Asia Standard Time",
"Etc/GMT-6": "Central Asia Standard Time",
"Etc/GMT-7": "SE Asia Standard Time",
"Etc/GMT-8": "Singapore Standard Time",
"Etc/GMT-9": "Tokyo Standard Time",
"Etc/GMT0": "UTC",
"Etc/Greenwich": "UTC",
"Etc/UCT": "UTC",
"Etc/UTC": "UTC",
"Etc/Universal": "UTC",
"Etc/Zulu": "UTC",
"Europe/Amsterdam": "W. Europe Standard Time",
"Europe/Andorra": "W. Europe Standard Time",
"Europe/Astrakhan": "Astrakhan Standard Time",
"Europe/Athens": "GTB Standard Time",
"Europe/Belfast": "GMT Standard Time",
"Europe/Belgrade": "Central European Standard Time",
"Europe/Berlin": "W. Europe Standard Time",
"Europe/Bratislava": "Central Europe Standard Time",
"Europe/Brussels": "Romance Standard Time",
"Europe/Bucharest": "GTB Standard Time",
"Europe/Budapest": "Central Europe Standard Time",
"Europe/Busingen": "W. Europe Standard Time",
"Europe/Chisinau": "E. Europe Standard Time",
"Europe/Copenhagen": "Romance Standard Time",
"Europe/Dublin": "GMT Standard Time",
"Europe/Gibraltar": "W. Europe Standard Time",
"Europe/Guernsey": "GMT Standard Time",
"Europe/Helsinki": "FLE Standard Time",
"Europe/Isle_of_Man": "GMT Standard Time",
"Europe/Istanbul": "Turkey Standard Time",
"Europe/Jersey": "GMT Standard Time",
"Europe/Kaliningrad": "Kaliningrad Standard Time",
"Europe/Kiev": "FLE Standard Time",
"Europe/Kirov": "Russian Standard Time",
"Europe/Lisbon": "GMT Standard Time",
"Europe/Ljubljana": "Central European Standard Time",
"Europe/London": "GMT Standard Time",
"Europe/Luxembourg": "W. Europe Standard Time",
"Europe/Madrid": "Romance Standard Time",
"Europe/Malta": "W. Europe Standard Time",
"Europe/Mariehamn": "FLE Standard Time",
"Europe/Minsk": "Belarus Standard Time",
"Europe/Monaco": "W. Europe Standard Time",
"Europe/Moscow": "Russian Standard Time",
"Europe/Nicosia": "GTB Standard Time",
"Europe/Oslo": "W. Europe Standard Time",
"Europe/Paris": "Romance Standard Time",
"Europe/Podgorica": "Central European Standard Time",
"Europe/Prague": "Central Europe Standard Time",
"Europe/Riga": "FLE Standard Time",
"Europe/Rome": "W. Europe Standard Time",
"Europe/Samara": "Russia Time Zone 3",
"Europe/San_Marino": "W. Europe Standard Time",
"Europe/Sarajevo": "Central European Standard Time",
"Europe/Saratov": "Saratov Standard Time",
"Europe/Simferopol": "Russian Standard Time",
"Europe/Skopje": "Central European Standard Time",
"Europe/Sofia": "FLE Standard Time",
"Europe/Stockholm": "W. Europe Standard Time",
"Europe/Tallinn": "FLE Standard Time",
"Europe/Tirane": "Central Europe Standard Time",
"Europe/Tiraspol": "E. Europe Standard Time",
"Europe/Ulyanovsk": "Astrakhan Standard Time",
"Europe/Uzhgorod": "FLE Standard Time",
"Europe/Vaduz": "W. Europe Standard Time",
"Europe/Vatican": "W. Europe Standard Time",
"Europe/Vienna": "W. Europe Standard Time",
"Europe/Vilnius": "FLE Standard Time",
"Europe/Volgograd": "Volgograd Standard Time",
"Europe/Warsaw": "Central European Standard Time",
"Europe/Zagreb": "Central European Standard Time",
"Europe/Zaporozhye": "FLE Standard Time",
"Europe/Zurich": "W. Europe Standard Time",
"GB": "GMT Standard Time",
"GB-Eire": "GMT Standard Time",
"GMT": "UTC",
"GMT+0": "UTC",
"GMT-0": "UTC",
"GMT0": "UTC",
"Greenwich": "UTC",
"HST": "Hawaiian Standard Time",
"Hongkong": "China Standard Time",
"Iceland": "Greenwich Standard Time",
"Indian/Antananarivo": "E. Africa Standard Time",
"Indian/Chagos": "Central Asia Standard Time",
"Indian/Christmas": "SE Asia Standard Time",
"Indian/Cocos": "Myanmar Standard Time",
"Indian/Comoro": "E. Africa Standard Time",
"Indian/Kerguelen": "West Asia Standard Time",
"Indian/Mahe": "Mauritius Standard Time",
"Indian/Maldives": "West Asia Standard Time",
"Indian/Mauritius": "Mauritius Standard Time",
"Indian/Mayotte": "E. Africa Standard Time",
"Indian/Reunion": "Mauritius Standard Time",
"Iran": "Iran Standard Time",
"Israel": "Israel Standard Time",
"Jamaica": "SA Pacific Standard Time",
"Japan": "Tokyo Standard Time",
"Kwajalein": "UTC+12",
"Libya": "Libya Standard Time",
"MET": "W. Europe Standard Time",
"MST": "US Mountain Standard Time",
"MST7MDT": "Mountain Standard Time",
"Mexico/BajaNorte": "Pacific Standard Time (Mexico)",
"Mexico/BajaSur": "Mountain Standard Time (Mexico)",
"Mexico/General": "Central Standard Time (Mexico)",
"NZ": "New Zealand Standard Time",
"NZ-CHAT": "Chatham Islands Standard Time",
"Navajo": "Mountain Standard Time",
"PRC": "China Standard Time",
"PST8PDT": "Pacific Standard Time",
"Pacific/Apia": "Samoa Standard Time",
"Pacific/Auckland": "New Zealand Standard Time",
"Pacific/Bougainville": "Bougainville Standard Time",
"Pacific/Chatham": "Chatham Islands Standard Time",
"Pacific/Chuuk": "West Pacific Standard Time",
"Pacific/Easter": "Easter Island Standard Time",
"Pacific/Efate": "Central Pacific Standard Time",
"Pacific/Enderbury": "UTC+13",
"Pacific/Fakaofo": "UTC+13",
"Pacific/Fiji": "Fiji Standard Time",
"Pacific/Funafuti": "UTC+12",
"Pacific/Galapagos": "Central America Standard Time",
"Pacific/Gambier": "UTC-09",
"Pacific/Guadalcanal": "Central Pacific Standard Time",
"Pacific/Guam": "West Pacific Standard Time",
"Pacific/Honolulu": "Hawaiian Standard Time",
"Pacific/Johnston": "Hawaiian Standard Time",
"Pacific/Kiritimati": "Line Islands Standard Time",
"Pacific/Kosrae": "Central Pacific Standard Time",
"Pacific/Kwajalein": "UTC+12",
"Pacific/Majuro": "UTC+12",
"Pacific/Marquesas": "Marquesas Standard Time",
"Pacific/Midway": "UTC-11",
"Pacific/Nauru": "UTC+12",
"Pacific/Niue": "UTC-11",
"Pacific/Norfolk": "Norfolk Standard Time",
"Pacific/Noumea": "Central Pacific Standard Time",
"Pacific/Pago_Pago": "UTC-11",
"Pacific/Palau": "Tokyo Standard Time",
"Pacific/Pitcairn": "UTC-08",
"Pacific/Pohnpei": "Central Pacific Standard Time",
"Pacific/Ponape": "Central Pacific Standard Time",
"Pacific/Port_Moresby": "West Pacific Standard Time",
"Pacific/Rarotonga": "Hawaiian Standard Time",
"Pacific/Saipan": "West Pacific Standard Time",
"Pacific/Samoa": "UTC-11",
"Pacific/Tahiti": "Hawaiian Standard Time",
"Pacific/Tarawa": "UTC+12",
"Pacific/Tongatapu": "Tonga Standard Time",
"Pacific/Truk": "West Pacific Standard Time",
"Pacific/Wake": "UTC+12",
"Pacific/Wallis": "UTC+12",
"Pacific/Yap": "West Pacific Standard Time",
"Poland": "Central European Standard Time",
"Portugal": "GMT Standard Time",
"ROC": "Taipei Standard Time",
"ROK": "Korea Standard Time",
"Singapore": "Singapore Standard Time",
"Turkey": "Turkey Standard Time",
"UCT": "UTC",
"US/Alaska": "Alaskan Standard Time",
"US/Aleutian": "Aleutian Standard Time",
"US/Arizona": "US Mountain Standard Time",
"US/Central": "Central Standard Time",
"US/East-Indiana": "US Eastern Standard Time",
"US/Eastern": "Eastern Standard Time",
"US/Hawaii": "Hawaiian Standard Time",
"US/Indiana-Starke": "Central Standard Time",
"US/Michigan": "Eastern Standard Time",
"US/Mountain": "Mountain Standard Time",
"US/Pacific": "Pacific Standard Time",
"US/Pacific-New": "Pacific Standard Time",
"US/Samoa": "UTC-11",
"UTC": "UTC",
"Universal": "UTC",
"W-SU": "Russian Standard Time",
"WET": "GMT Standard Time",
"Zulu": "UTC"
}
# when converting to Iana, only consider for win UTC the value Iana UTC
WIN_TO_IANA = {v: k for k, v in IANA_TO_WIN.items() if v != 'UTC' or (v == 'UTC' and k == 'UTC')}
def get_iana_tz(windows_tz):
""" Returns a valid pytz TimeZone (Iana/Olson Timezones) from a given
windows TimeZone
:param windows_tz: windows format timezone usually returned by
microsoft api response
:return:
:rtype:
"""
timezone = WIN_TO_IANA.get(windows_tz)
if timezone is None:
# Nope, that didn't work. Try adding "Standard Time",
# it seems to work a lot of times:
timezone = WIN_TO_IANA.get(windows_tz + ' Standard Time')
# Return what we have.
if timezone is None:
raise pytz.UnknownTimeZoneError(
"Can't find Windows TimeZone " + windows_tz)
return timezone
def get_windows_tz(iana_tz):
""" Returns a valid windows TimeZone from a given pytz TimeZone
(Iana/Olson Timezones)
    Note: the Windows timezone list is much coarser than the IANA database,
    so many IANA zones map onto the same Windows name.
    :param iana_tz: pytz TimeZone instance (or IANA zone name string)
    :return: the corresponding Windows timezone name
    :rtype: str
"""
timezone = IANA_TO_WIN.get(
iana_tz.zone if isinstance(iana_tz, tzinfo) else iana_tz)
if timezone is None:
raise pytz.UnknownTimeZoneError(
"Can't find Iana TimeZone " + iana_tz.zone)
return timezone
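# --- Example usage (editor's addition; a minimal sketch, not part of the
# original module). It round-trips a zone through the two helpers above.
# Because WIN_TO_IANA keeps a single IANA name per Windows zone, the IANA
# name that comes back is representative, not necessarily the one you
# started from.
if __name__ == '__main__':
    win_name = get_windows_tz('Europe/Paris')  # 'Romance Standard Time'
    iana_name = get_iana_tz(win_name)          # an IANA zone mapped to it
    print(win_name, '<->', iana_name)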
| 46.488372 | 97 | 0.675971 | [
"Apache-2.0"
] | Feelixe-tin/python-o365 | O365/utils/windows_tz.py | 29,985 | Python |
# pip install freegames
# Click on screen to control ball
# import modules
from random import *
import turtle as t
from freegames import vector
# Set window title, color and icon
t.title("Flappy Ball")
root = t.Screen()._root
root.iconbitmap("logo-ico.ico")
t.bgcolor('#80ffd4')
bird = vector(0, 0)
balls = []
# Functions
# Move bird up in response to screen tap
def tap(x, y):
up = vector(0, 30)
bird.move(up)
# Return True if point on screen
def inside(point):
return -200 < point.x < 200 and -200 < point.y < 200
# Draw screen objects
def draw(alive):
t.clear()
t.goto(bird.x, bird.y)
if alive:
t.dot(13, 'green')
else:
t.dot(13, 'red')
for ball in balls:
t.goto(ball.x, ball.y)
t.dot(20, '#862d2d')
t.update()
def move():
# Update object positions
bird.y -= 5
for ball in balls:
ball.x -= 3
if randrange(10) == 0:
y = randrange(-199, 199)
ball = vector(199, y)
balls.append(ball)
while len(balls) > 0 and not inside(balls[0]):
balls.pop(0)
if not inside(bird):
draw(False)
return
for ball in balls:
if abs(ball - bird) < 15:
draw(False)
return
draw(True)
t.ontimer(move, 50)
t.setup(420, 420, 370, 0)
t.hideturtle()
t.up()
t.tracer(False)
t.onscreenclick(tap)
move()
t.done()
| 16.630952 | 56 | 0.583393 | [
"MIT"
] | jayamithun/py-box | games/Flappy.py | 1,397 | Python |
"""Base test cases for RBTools unit tests."""
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
"""The base class for RBTools test cases.
This provides helpful utility functions, environment management, and
better docstrings to help craft unit tests for RBTools functionality.
All RBTools unit tests should use this this class or a subclass of it
as the base class.
"""
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
#: Whether individual unit tests need a new temporary HOME directory.
#:
#: If set, a directory will be created at test startup, and will be
#: set as the home directory.
#:
#: Version Added:
#: 3.0
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_user_home(self):
"""Return the user's current home directory.
Version Added:
3.0
Returns:
unicode:
The current home directory.
"""
return os.environ['HOME']
def set_user_home(self, path):
"""Set the user's current home directory.
This will be unset when the unit test has finished.
Version Added:
3.0
Args:
path (unicode):
The new home directory.
"""
os.environ['HOME'] = path
def chdir_tmp(self):
"""Create a temporary directory and set it as the working directory.
The directory will be deleted after the test has finished.
Version Added:
3.0
Returns:
unicode:
The path to the temp directory.
"""
dirname = make_tempdir()
os.chdir(dirname)
return dirname
def precreate_tempfiles(self, count):
"""Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
"""
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
def assertDiffEqual(self, diff, expected_diff):
"""Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
"""
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
def assertRaisesMessage(self, expected_exception, expected_message):
"""Assert that a call raises an exception with the given message.
Args:
expected_exception (type):
The type of exception that's expected to be raised.
expected_message (unicode):
The expected exception message.
Raises:
AssertionError:
The assertion failure, if the exception and message isn't
raised.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
"""Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
"""
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
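# --- Example usage (editor's addition; an illustrative sketch, not part of
# RBTools itself). It shows how the helpers above compose in a test: a fresh
# HOME, a throwaway working directory, and a scripted .reviewboardrc. The
# class name, config key, and assertion are assumptions made for the example.
class _ExampleTestCaseUsage(TestCase):
    needs_temp_home = True
    def test_reviewboardrc_is_written(self):
        self.chdir_tmp()
        with self.reviewboardrc({'REVIEWBOARD_URL': 'https://rb.example.com'}):
            with open('.reviewboardrc') as fp:
                self.assertIn('REVIEWBOARD_URL', fp.read())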
| 28.655303 | 79 | 0.59458 | [
"MIT"
] | jmcrawford45/rbtools | rbtools/testing/testcase.py | 7,565 | Python |
#!/usr/bin/env python
'''
Pull random words from http://world.std.com/~reinhold/diceware.wordlist.asc
Written 2013 Hal Canary.
Dedicated to the public domain.
'''
import random,math,sys,os
useDevRandom = True
dicewareWordlist = '~/Downloads/diceware.wordlist.asc'
with open(os.path.expanduser(dicewareWordlist)) as f:
WordList = [line.split()[1]
for nu,line in enumerate(f) if 2 <= nu < 7778]
def GetRandom():
if useDevRandom:
with open('/dev/random', 'rb') as f:
random.seed(f.read(16))
return random
else:
return random.SystemRandom()
required_entropy = 128
numwords = int(math.ceil(required_entropy / math.log(len(WordList),2)))
s = ' '.join(GetRandom().choice(WordList) for i in range(numwords))
sys.stdout.write(s)
sys.stdout.flush()
sys.stderr.write('\n')
| 28.888889 | 75 | 0.723077 | [
"Apache-2.0"
] | Makemeproud/BitcoinGenerator | RandomWords.py | 780 | Python |
from django.conf.urls import url
from . import views
urlpatterns = [
    # Product list page
url(r'^list/(?P<category_id>\d+)/(?P<page_num>\d+)/$', views.ListView.as_view(), name='list'),
    # Hot-selling ranking data
url(r'^hot/(?P<category_id>\d+)/$', views.HotGoodsView.as_view()),
    # Product detail page
url(r'^detail/(?P<sku_id>\d+)/$', views.DetailView.as_view(), name='detail'),
    # Track visit counts for goods in a category
url(r'^detail/visit/(?P<category_id>\d+)/$', views.DetailVisitView.as_view()),
    # Browsing history
url(r'^browse_histories/$', views.UserBrowseHistory.as_view()),
]
| 31.764706 | 98 | 0.627778 | [
"MIT"
] | ambushonallsides1/E_business_project | E_business_project/apps/goods/urls.py | 598 | Python |
import asyncio
import discord
from discord.ext import commands
from discord.commands import slash_command, Option
import wavelink
import json
from dotenv import load_dotenv
import os
load_dotenv()
# Initiate json
with open("config.json") as file:
    data = json.load(file)
# Public variables
guildID = data["guildID"][0]
class musicPlay(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_wavelink_node_ready(self, node: wavelink.Node):
wavelink.NodePool.get_node(identifier=node.identifier)
@commands.Cog.listener()
async def on_wavelink_track_end(
self, player: wavelink.Player, track: wavelink.Track, reason
):
"""When a track ends, check if there is another one in the queue."""
await asyncio.sleep(5)
if not player.queue.is_empty:
next_track = player.queue.get()
await player.play(next_track)
@slash_command(guild_ids=[guildID], description="Play a song!")
async def play(
self, ctx, value: Option(str, required=True, description="Search for the song!")
):
track = await wavelink.YouTubeTrack.search(query=value, return_first=True)
if not ctx.user.voice:
await ctx.respond("You must be in a voice channel to use music commands!")
else:
if not ctx.voice_client:
vc: wavelink.Player = await ctx.author.voice.channel.connect(
cls=wavelink.Player
)
else:
vc: wavelink.Player = ctx.voice_client
if vc.is_playing():
await vc.queue.put_wait(track)
await ctx.respond(
f"{track.title} has been added to queue! Check the queue status using /queue!"
)
else:
await vc.play(track)
await ctx.respond(f"Now playing: {track.title}")
@play.error
async def play_error(self, ctx, error):
await ctx.respond(f"`{error}`")
def setup(bot):
bot.add_cog(musicPlay(bot))
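# --- Example usage (editor's addition; a hedged sketch, not part of this cog).
# A typical py-cord entry point would load this file as an extension so that
# setup(bot) above registers the cog. The module path "Music.play" and the
# DISCORD_TOKEN environment variable are assumptions about the project layout.
#
# import os, discord
# from discord.ext import commands
# bot = commands.Bot(intents=discord.Intents.default())
# bot.load_extension("Music.play")
# bot.run(os.getenv("DISCORD_TOKEN"))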
| 30.15942 | 98 | 0.623258 | [
"MIT"
] | Aggis15/T4NK0R | Music/play.py | 2,081 | Python |
import timeit
mapx = 512
mapy = 512
# Good seeds:
# 772855 Spaced out continents
# 15213 Tight continents
# 1238 What I've been working with, for the most part
# 374539 Sparse continents
# 99999
seed = 773202
sea_level = 0.6
DEBUG = 0
GFXDEBUG = 0
setup_time = timeit.default_timer()
tiles = [[None] * mapx for _ in range(mapy)]
lands = []
towns = []
countries = []
have_savefile = False
class Clock():
def __init__(self,t):
self.time_minutes = t
def inc(self,t):
self.time_minutes += t
self.time_minutes = self.time_minutes % (60*24)
def fmt_time(self):
m = self.time_minutes % 60
h = self.time_minutes // 60
return ("%02d%02dZ" % (h, m))
clock = Clock(9*60) # 9 AM | 18.184211 | 53 | 0.677279 | [
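# Example (editor's addition): a small illustration of the Clock helper above.
# Starting from the 0900Z default, advancing 90 minutes gives "1030Z":
#   clock.inc(90)
#   clock.fmt_time()  # -> "1030Z"
# Kept as comments so importing this module does not move the shared clock.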
"MIT"
] | ghlmtz/airline-sim | ginit.py | 691 | Python |
import sympy
import antlr4
from antlr4.error.ErrorListener import ErrorListener
from sympy.core.operations import AssocOp
try:
from gen.PSParser import PSParser
from gen.PSLexer import PSLexer
from gen.PSListener import PSListener
except Exception:
from .gen.PSParser import PSParser
from .gen.PSLexer import PSLexer
from .gen.PSListener import PSListener
from sympy.printing.str import StrPrinter
from sympy.parsing.sympy_parser import parse_expr
import hashlib
VARIABLE_VALUES = {}
def process_sympy(sympy, variable_values={}):
# variable values
global VARIABLE_VALUES
if len(variable_values) > 0:
VARIABLE_VALUES = variable_values
else:
VARIABLE_VALUES = {}
# setup listener
matherror = MathErrorListener(sympy)
# stream input
stream = antlr4.InputStream(sympy)
lex = PSLexer(stream)
lex.removeErrorListeners()
lex.addErrorListener(matherror)
tokens = antlr4.CommonTokenStream(lex)
parser = PSParser(tokens)
# remove default console error listener
parser.removeErrorListeners()
parser.addErrorListener(matherror)
# process the input
return_data = None
math = parser.math()
# if a list
if math.relation_list():
return_data = []
# go over list items
relation_list = math.relation_list().relation_list_content()
for list_item in relation_list.relation():
expr = convert_relation(list_item)
return_data.append(expr)
# if not, do default
else:
relation = math.relation()
return_data = convert_relation(relation)
return return_data
class MathErrorListener(ErrorListener):
def __init__(self, src):
super(ErrorListener, self).__init__()
self.src = src
def syntaxError(self, recog, symbol, line, col, msg, e):
fmt = "%s\n%s\n%s"
marker = "~" * col + "^"
if msg.startswith("missing"):
err = fmt % (msg, self.src, marker)
elif msg.startswith("no viable"):
err = fmt % ("I expected something else here", self.src, marker)
elif msg.startswith("mismatched"):
names = PSParser.literalNames
expected = [names[i] for i in e.getExpectedTokens() if i < len(names)]
if len(expected) < 10:
expected = " ".join(expected)
err = (fmt % ("I expected one of these: " + expected,
self.src, marker))
else:
err = (fmt % ("I expected something else here", self.src, marker))
else:
err = fmt % ("I don't understand this", self.src, marker)
raise Exception(err)
def convert_relation(rel):
if rel.expr():
return convert_expr(rel.expr())
lh = convert_relation(rel.relation(0))
rh = convert_relation(rel.relation(1))
if rel.LT():
return sympy.StrictLessThan(lh, rh, evaluate=False)
elif rel.LTE():
return sympy.LessThan(lh, rh, evaluate=False)
elif rel.GT():
return sympy.StrictGreaterThan(lh, rh, evaluate=False)
elif rel.GTE():
return sympy.GreaterThan(lh, rh, evaluate=False)
elif rel.EQUAL():
return sympy.Eq(lh, rh, evaluate=False)
elif rel.UNEQUAL():
return sympy.Ne(lh, rh, evaluate=False)
def convert_expr(expr):
if expr.additive():
return convert_add(expr.additive())
def convert_matrix(matrix):
# build matrix
row = matrix.matrix_row()
tmp = []
rows = 0
for r in row:
tmp.append([])
for expr in r.expr():
tmp[rows].append(convert_expr(expr))
rows = rows + 1
# return the matrix
return sympy.Matrix(tmp)
def add_flat(lh, rh):
if hasattr(lh, 'is_Add') and lh.is_Add or hasattr(rh, 'is_Add') and rh.is_Add:
args = []
if hasattr(lh, 'is_Add') and lh.is_Add:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_Add') and rh.is_Add:
args = args + list(rh.args)
else:
args += [rh]
return sympy.Add(*args, evaluate=False)
else:
return sympy.Add(lh, rh, evaluate=False)
def mat_add_flat(lh, rh):
if hasattr(lh, 'is_MatAdd') and lh.is_MatAdd or hasattr(rh, 'is_MatAdd') and rh.is_MatAdd:
args = []
if hasattr(lh, 'is_MatAdd') and lh.is_MatAdd:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_MatAdd') and rh.is_MatAdd:
args = args + list(rh.args)
else:
args += [rh]
return sympy.MatAdd(*args, evaluate=False)
else:
return sympy.MatAdd(lh, rh, evaluate=False)
def mul_flat(lh, rh):
if hasattr(lh, 'is_Mul') and lh.is_Mul or hasattr(rh, 'is_Mul') and rh.is_Mul:
args = []
if hasattr(lh, 'is_Mul') and lh.is_Mul:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_Mul') and rh.is_Mul:
args = args + list(rh.args)
else:
args += [rh]
return sympy.Mul(*args, evaluate=False)
else:
return sympy.Mul(lh, rh, evaluate=False)
def mat_mul_flat(lh, rh):
if hasattr(lh, 'is_MatMul') and lh.is_MatMul or hasattr(rh, 'is_MatMul') and rh.is_MatMul:
args = []
if hasattr(lh, 'is_MatMul') and lh.is_MatMul:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_MatMul') and rh.is_MatMul:
args = args + list(rh.args)
else:
args += [rh]
return sympy.MatMul(*args, evaluate=False)
else:
return sympy.MatMul(lh, rh, evaluate=False)
def convert_add(add):
if add.ADD():
lh = convert_add(add.additive(0))
rh = convert_add(add.additive(1))
if lh.is_Matrix or rh.is_Matrix:
return mat_add_flat(lh, rh)
else:
return add_flat(lh, rh)
elif add.SUB():
lh = convert_add(add.additive(0))
rh = convert_add(add.additive(1))
if lh.is_Matrix or rh.is_Matrix:
return mat_add_flat(lh, mat_mul_flat(-1, rh))
else:
# If we want to force ordering for variables this should be:
# return Sub(lh, rh, evaluate=False)
if not rh.is_Matrix and rh.func.is_Number:
rh = -rh
else:
rh = mul_flat(-1, rh)
return add_flat(lh, rh)
else:
return convert_mp(add.mp())
def convert_mp(mp):
if hasattr(mp, 'mp'):
mp_left = mp.mp(0)
mp_right = mp.mp(1)
else:
mp_left = mp.mp_nofunc(0)
mp_right = mp.mp_nofunc(1)
if mp.MUL() or mp.CMD_TIMES() or mp.CMD_CDOT():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
if lh.is_Matrix or rh.is_Matrix:
return mat_mul_flat(lh, rh)
else:
return mul_flat(lh, rh)
elif mp.DIV() or mp.CMD_DIV() or mp.COLON():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
if lh.is_Matrix or rh.is_Matrix:
return sympy.MatMul(lh, sympy.Pow(rh, -1, evaluate=False), evaluate=False)
else:
return sympy.Mul(lh, sympy.Pow(rh, -1, evaluate=False), evaluate=False)
elif mp.CMD_MOD():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
if rh.is_Matrix:
raise Exception("Cannot perform modulo operation with a matrix as an operand")
else:
return sympy.Mod(lh, rh, evaluate=False)
else:
if hasattr(mp, 'unary'):
return convert_unary(mp.unary())
else:
return convert_unary(mp.unary_nofunc())
def convert_unary(unary):
if hasattr(unary, 'unary'):
nested_unary = unary.unary()
else:
nested_unary = unary.unary_nofunc()
if hasattr(unary, 'postfix_nofunc'):
first = unary.postfix()
tail = unary.postfix_nofunc()
postfix = [first] + tail
else:
postfix = unary.postfix()
if unary.ADD():
return convert_unary(nested_unary)
elif unary.SUB():
tmp_convert_nested_unary = convert_unary(nested_unary)
if tmp_convert_nested_unary.is_Matrix:
            return mat_mul_flat(-1, tmp_convert_nested_unary)
else:
if tmp_convert_nested_unary.func.is_Number:
return -tmp_convert_nested_unary
else:
return mul_flat(-1, tmp_convert_nested_unary)
elif postfix:
return convert_postfix_list(postfix)
def convert_postfix_list(arr, i=0):
if i >= len(arr):
raise Exception("Index out of bounds")
res = convert_postfix(arr[i])
if isinstance(res, sympy.Expr) or isinstance(res, sympy.Matrix) or res is sympy.S.EmptySet:
if i == len(arr) - 1:
return res # nothing to multiply by
else:
# multiply by next
rh = convert_postfix_list(arr, i + 1)
if res.is_Matrix or rh.is_Matrix:
return mat_mul_flat(res, rh)
else:
return mul_flat(res, rh)
else: # must be derivative
wrt = res[0]
if i == len(arr) - 1:
raise Exception("Expected expression for derivative")
else:
expr = convert_postfix_list(arr, i + 1)
return sympy.Derivative(expr, wrt)
def do_subs(expr, at):
if at.expr():
at_expr = convert_expr(at.expr())
syms = at_expr.atoms(sympy.Symbol)
if len(syms) == 0:
return expr
elif len(syms) > 0:
sym = next(iter(syms))
return expr.subs(sym, at_expr)
elif at.equality():
lh = convert_expr(at.equality().expr(0))
rh = convert_expr(at.equality().expr(1))
return expr.subs(lh, rh)
def convert_postfix(postfix):
if hasattr(postfix, 'exp'):
exp_nested = postfix.exp()
else:
exp_nested = postfix.exp_nofunc()
exp = convert_exp(exp_nested)
for op in postfix.postfix_op():
if op.BANG():
if isinstance(exp, list):
raise Exception("Cannot apply postfix to derivative")
exp = sympy.factorial(exp, evaluate=False)
elif op.eval_at():
ev = op.eval_at()
at_b = None
at_a = None
if ev.eval_at_sup():
at_b = do_subs(exp, ev.eval_at_sup())
if ev.eval_at_sub():
at_a = do_subs(exp, ev.eval_at_sub())
if at_b is not None and at_a is not None:
exp = add_flat(at_b, mul_flat(at_a, -1))
elif at_b is not None:
exp = at_b
elif at_a is not None:
exp = at_a
return exp
def convert_exp(exp):
if hasattr(exp, 'exp'):
exp_nested = exp.exp()
else:
exp_nested = exp.exp_nofunc()
if exp_nested:
base = convert_exp(exp_nested)
if isinstance(base, list):
raise Exception("Cannot raise derivative to power")
if exp.atom():
exponent = convert_atom(exp.atom())
elif exp.expr():
exponent = convert_expr(exp.expr())
return sympy.Pow(base, exponent, evaluate=False)
else:
if hasattr(exp, 'comp'):
return convert_comp(exp.comp())
else:
return convert_comp(exp.comp_nofunc())
def convert_comp(comp):
if comp.group():
return convert_expr(comp.group().expr())
elif comp.abs_group():
return sympy.Abs(convert_expr(comp.abs_group().expr()), evaluate=False)
elif comp.floor_group():
return handle_floor(convert_expr(comp.floor_group().expr()))
elif comp.ceil_group():
return handle_ceil(convert_expr(comp.ceil_group().expr()))
elif comp.atom():
return convert_atom(comp.atom())
elif comp.frac():
return convert_frac(comp.frac())
elif comp.binom():
return convert_binom(comp.binom())
elif comp.matrix():
return convert_matrix(comp.matrix())
elif comp.func():
return convert_func(comp.func())
def convert_atom(atom):
if atom.LETTER_NO_E():
subscriptName = ''
s = atom.LETTER_NO_E().getText()
if s == "I":
return sympy.I
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = '_{' + StrPrinter().doprint(subscript) + '}'
return sympy.Symbol(atom.LETTER_NO_E().getText() + subscriptName, real=True)
elif atom.GREEK_LETTER():
s = atom.GREEK_LETTER().getText()[1:]
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = StrPrinter().doprint(subscript)
s += '_{' + subscriptName + '}'
return sympy.Symbol(s, real=True)
elif atom.accent():
# get name for accent
name = atom.accent().start.text[1:]
# exception: check if bar or overline which are treated both as bar
if name in ["bar", "overline"]:
name = "bar"
# get the base (variable)
base = atom.accent().base.getText()
# set string to base+name
s = base + name
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = StrPrinter().doprint(subscript)
s += '_{' + subscriptName + '}'
return sympy.Symbol(s, real=True)
elif atom.SYMBOL():
s = atom.SYMBOL().getText().replace("\\$", "").replace("\\%", "")
if s == "\\infty":
return sympy.oo
elif s == '\\pi':
return sympy.pi
elif s == '\\emptyset':
return sympy.S.EmptySet
else:
raise Exception("Unrecognized symbol")
elif atom.NUMBER():
s = atom.NUMBER().getText().replace(",", "")
try:
sr = sympy.Rational(s)
return sr
except (TypeError, ValueError):
return sympy.Number(s)
elif atom.E_NOTATION():
s = atom.E_NOTATION().getText().replace(",", "")
try:
sr = sympy.Rational(s)
return sr
except (TypeError, ValueError):
return sympy.Number(s)
elif atom.DIFFERENTIAL():
var = get_differential_var(atom.DIFFERENTIAL())
return sympy.Symbol('d' + var.name, real=True)
elif atom.mathit():
text = rule2text(atom.mathit().mathit_text())
return sympy.Symbol(text, real=True)
elif atom.VARIABLE():
text = atom.VARIABLE().getText()
is_percent = text.endswith("\\%")
trim_amount = 3 if is_percent else 1
name = text[10:]
name = name[0:len(name) - trim_amount]
# add hash to distinguish from regular symbols
# hash = hashlib.md5(name.encode()).hexdigest()
# symbol_name = name + hash
symbol_name = name
# replace the variable for already known variable values
if name in VARIABLE_VALUES:
# if a sympy class
if isinstance(VARIABLE_VALUES[name], tuple(sympy.core.all_classes)):
symbol = VARIABLE_VALUES[name]
# if NOT a sympy class
else:
symbol = parse_expr(str(VARIABLE_VALUES[name]))
else:
symbol = sympy.Symbol(symbol_name, real=True)
if is_percent:
return sympy.Mul(symbol, sympy.Pow(100, -1, evaluate=False), evaluate=False)
# return the symbol
return symbol
elif atom.PERCENT_NUMBER():
text = atom.PERCENT_NUMBER().getText().replace("\\%", "").replace(",", "")
try:
number = sympy.Rational(text)
except (TypeError, ValueError):
number = sympy.Number(text)
percent = sympy.Rational(number, 100)
return percent
def rule2text(ctx):
stream = ctx.start.getInputStream()
# starting index of starting token
startIdx = ctx.start.start
# stopping index of stopping token
stopIdx = ctx.stop.stop
return stream.getText(startIdx, stopIdx)
def convert_frac(frac):
diff_op = False
partial_op = False
lower_itv = frac.lower.getSourceInterval()
lower_itv_len = lower_itv[1] - lower_itv[0] + 1
if (frac.lower.start == frac.lower.stop and
frac.lower.start.type == PSLexer.DIFFERENTIAL):
wrt = get_differential_var_str(frac.lower.start.text)
diff_op = True
elif (lower_itv_len == 2 and
frac.lower.start.type == PSLexer.SYMBOL and
frac.lower.start.text == '\\partial' and
(frac.lower.stop.type == PSLexer.LETTER_NO_E or frac.lower.stop.type == PSLexer.SYMBOL)):
partial_op = True
wrt = frac.lower.stop.text
if frac.lower.stop.type == PSLexer.SYMBOL:
wrt = wrt[1:]
if diff_op or partial_op:
wrt = sympy.Symbol(wrt, real=True)
if (diff_op and frac.upper.start == frac.upper.stop and
frac.upper.start.type == PSLexer.LETTER_NO_E and
frac.upper.start.text == 'd'):
return [wrt]
elif (partial_op and frac.upper.start == frac.upper.stop and
frac.upper.start.type == PSLexer.SYMBOL and
frac.upper.start.text == '\\partial'):
return [wrt]
upper_text = rule2text(frac.upper)
expr_top = None
if diff_op and upper_text.startswith('d'):
expr_top = process_sympy(upper_text[1:])
elif partial_op and frac.upper.start.text == '\\partial':
expr_top = process_sympy(upper_text[len('\\partial'):])
if expr_top:
return sympy.Derivative(expr_top, wrt)
expr_top = convert_expr(frac.upper)
expr_bot = convert_expr(frac.lower)
if expr_top.is_Matrix or expr_bot.is_Matrix:
return sympy.MatMul(expr_top, sympy.Pow(expr_bot, -1, evaluate=False), evaluate=False)
else:
return sympy.Mul(expr_top, sympy.Pow(expr_bot, -1, evaluate=False), evaluate=False)
def convert_binom(binom):
expr_top = convert_expr(binom.upper)
expr_bot = convert_expr(binom.lower)
return sympy.binomial(expr_top, expr_bot)
def convert_func(func):
if func.func_normal_single_arg():
if func.L_PAREN(): # function called with parenthesis
arg = convert_func_arg(func.func_single_arg())
else:
arg = convert_func_arg(func.func_single_arg_noparens())
name = func.func_normal_single_arg().start.text[1:]
# change arc<trig> -> a<trig>
if name in ["arcsin", "arccos", "arctan", "arccsc", "arcsec",
"arccot"]:
name = "a" + name[3:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
elif name in ["arsinh", "arcosh", "artanh"]:
name = "a" + name[2:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
elif name in ["arcsinh", "arccosh", "arctanh"]:
name = "a" + name[3:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
elif name == "operatorname":
operatorname = func.func_normal_single_arg().func_operator_name.getText()
if operatorname in ["arsinh", "arcosh", "artanh"]:
operatorname = "a" + operatorname[2:]
expr = getattr(sympy.functions, operatorname)(arg, evaluate=False)
elif operatorname in ["arcsinh", "arccosh", "arctanh"]:
operatorname = "a" + operatorname[3:]
expr = getattr(sympy.functions, operatorname)(arg, evaluate=False)
elif operatorname == "floor":
expr = handle_floor(arg)
elif operatorname == "ceil":
expr = handle_ceil(arg)
elif name in ["log", "ln"]:
if func.subexpr():
if func.subexpr().atom():
base = convert_atom(func.subexpr().atom())
else:
base = convert_expr(func.subexpr().expr())
elif name == "log":
base = 10
elif name == "ln":
base = sympy.E
expr = sympy.log(arg, base, evaluate=False)
elif name in ["exp", "exponentialE"]:
expr = sympy.exp(arg)
elif name == "floor":
expr = handle_floor(arg)
elif name == "ceil":
expr = handle_ceil(arg)
func_pow = None
should_pow = True
if func.supexpr():
if func.supexpr().expr():
func_pow = convert_expr(func.supexpr().expr())
else:
func_pow = convert_atom(func.supexpr().atom())
if name in ["sin", "cos", "tan", "csc", "sec", "cot", "sinh", "cosh", "tanh"]:
if func_pow == -1:
name = "a" + name
should_pow = False
expr = getattr(sympy.functions, name)(arg, evaluate=False)
if func_pow and should_pow:
expr = sympy.Pow(expr, func_pow, evaluate=False)
return expr
elif func.func_normal_multi_arg():
if func.L_PAREN(): # function called with parenthesis
args = func.func_multi_arg().getText().split(",")
else:
            args = func.func_multi_arg_noparens().getText().split(",")
args = list(map(lambda arg: process_sympy(arg, VARIABLE_VALUES), args))
name = func.func_normal_multi_arg().start.text[1:]
if name == "operatorname":
operatorname = func.func_normal_multi_arg().func_operator_name.getText()
if operatorname in ["gcd", "lcm"]:
expr = handle_gcd_lcm(operatorname, args)
elif name in ["gcd", "lcm"]:
expr = handle_gcd_lcm(name, args)
elif name in ["max", "min"]:
name = name[0].upper() + name[1:]
expr = getattr(sympy.functions, name)(*args, evaluate=False)
func_pow = None
should_pow = True
if func.supexpr():
if func.supexpr().expr():
func_pow = convert_expr(func.supexpr().expr())
else:
func_pow = convert_atom(func.supexpr().atom())
if func_pow and should_pow:
expr = sympy.Pow(expr, func_pow, evaluate=False)
return expr
# elif func.LETTER_NO_E() or func.SYMBOL():
# print('LETTER_NO_E or symbol')
# if func.LETTER_NO_E():
# fname = func.LETTER_NO_E().getText()
# elif func.SYMBOL():
# fname = func.SYMBOL().getText()[1:]
# fname = str(fname) # can't be unicode
# if func.subexpr():
# subscript = None
# if func.subexpr().expr(): # subscript is expr
# subscript = convert_expr(func.subexpr().expr())
# else: # subscript is atom
# subscript = convert_atom(func.subexpr().atom())
# subscriptName = StrPrinter().doprint(subscript)
# fname += '_{' + subscriptName + '}'
# input_args = func.args()
# output_args = []
# while input_args.args(): # handle multiple arguments to function
# output_args.append(convert_expr(input_args.expr()))
# input_args = input_args.args()
# output_args.append(convert_expr(input_args.expr()))
# return sympy.Function(fname)(*output_args)
elif func.FUNC_INT():
return handle_integral(func)
elif func.FUNC_SQRT():
expr = convert_expr(func.base)
if func.root:
r = convert_expr(func.root)
return sympy.Pow(expr, 1 / r, evaluate=False)
else:
return sympy.Pow(expr, sympy.S.Half, evaluate=False)
elif func.FUNC_SUM():
return handle_sum_or_prod(func, "summation")
elif func.FUNC_PROD():
return handle_sum_or_prod(func, "product")
elif func.FUNC_LIM():
return handle_limit(func)
elif func.EXP_E():
return handle_exp(func)
def convert_func_arg(arg):
if hasattr(arg, 'expr'):
return convert_expr(arg.expr())
else:
return convert_mp(arg.mp_nofunc())
def handle_integral(func):
if func.additive():
integrand = convert_add(func.additive())
elif func.frac():
integrand = convert_frac(func.frac())
else:
integrand = 1
int_var = None
if func.DIFFERENTIAL():
int_var = get_differential_var(func.DIFFERENTIAL())
else:
for sym in integrand.atoms(sympy.Symbol):
s = str(sym)
if len(s) > 1 and s[0] == 'd':
if s[1] == '\\':
int_var = sympy.Symbol(s[2:], real=True)
else:
int_var = sympy.Symbol(s[1:], real=True)
int_sym = sym
if int_var:
integrand = integrand.subs(int_sym, 1)
else:
# Assume dx by default
int_var = sympy.Symbol('x', real=True)
if func.subexpr():
if func.subexpr().atom():
lower = convert_atom(func.subexpr().atom())
else:
lower = convert_expr(func.subexpr().expr())
if func.supexpr().atom():
upper = convert_atom(func.supexpr().atom())
else:
upper = convert_expr(func.supexpr().expr())
return sympy.Integral(integrand, (int_var, lower, upper))
else:
return sympy.Integral(integrand, int_var)
def handle_sum_or_prod(func, name):
val = convert_mp(func.mp())
iter_var = convert_expr(func.subeq().equality().expr(0))
start = convert_expr(func.subeq().equality().expr(1))
if func.supexpr().expr(): # ^{expr}
end = convert_expr(func.supexpr().expr())
else: # ^atom
end = convert_atom(func.supexpr().atom())
if name == "summation":
return sympy.Sum(val, (iter_var, start, end))
elif name == "product":
return sympy.Product(val, (iter_var, start, end))
def handle_limit(func):
sub = func.limit_sub()
if sub.LETTER_NO_E():
var = sympy.Symbol(sub.LETTER_NO_E().getText(), real=True)
elif sub.GREEK_LETTER():
var = sympy.Symbol(sub.GREEK_LETTER().getText()[1:], real=True)
else:
var = sympy.Symbol('x', real=True)
if sub.SUB():
direction = "-"
else:
direction = "+"
approaching = convert_expr(sub.expr())
content = convert_mp(func.mp())
return sympy.Limit(content, var, approaching, direction)
def handle_exp(func):
if func.supexpr():
if func.supexpr().expr(): # ^{expr}
exp_arg = convert_expr(func.supexpr().expr())
else: # ^atom
exp_arg = convert_atom(func.supexpr().atom())
else:
exp_arg = 1
return sympy.exp(exp_arg)
def handle_gcd_lcm(f, args):
"""
Return the result of gcd() or lcm(), as UnevaluatedExpr
f: str - name of function ("gcd" or "lcm")
args: List[Expr] - list of function arguments
"""
args = tuple(map(sympy.nsimplify, args))
# gcd() and lcm() don't support evaluate=False
return sympy.UnevaluatedExpr(getattr(sympy, f)(args))
def handle_floor(expr):
"""
Apply floor() then return the floored expression.
expr: Expr - sympy expression as an argument to floor()
"""
return sympy.functions.floor(expr, evaluate=False)
def handle_ceil(expr):
"""
Apply ceil() then return the ceil-ed expression.
expr: Expr - sympy expression as an argument to ceil()
"""
return sympy.functions.ceiling(expr, evaluate=False)
def get_differential_var(d):
text = get_differential_var_str(d.getText())
return sympy.Symbol(text, real=True)
def get_differential_var_str(text):
for i in range(1, len(text)):
c = text[i]
if not (c == " " or c == "\r" or c == "\n" or c == "\t"):
idx = i
break
text = text[idx:]
if text[0] == "\\":
text = text[1:]
return text
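# --- Example usage (editor's addition; a minimal sketch, not part of the
# original module). The second call assumes the grammar's \variable{...}
# placeholder syntax handled in convert_atom(); printed forms are indicative
# only, since most expressions are built with evaluate=False.
if __name__ == '__main__':
    print(process_sympy(r"x^2 + \frac{1}{2}"))            # e.g. x**2 + 1/2
    print(process_sympy(r"\variable{a} + 1", {"a": 5}))   # e.g. 5 + 1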
| 33.15127 | 99 | 0.571633 | [
"MIT"
] | ConsoleBit/latex2sympy | latex2sympy.py | 28,709 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ResourceSkusOperations(object):
"""ResourceSkusOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
filter=None, # type: Optional[str]
include_extended_locations=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ResourceSkusResult"]
"""Gets the list of Microsoft.Compute SKUs available for your Subscription.
:param filter: The filter to apply on the operation. Only **location** filter is supported
currently.
:type filter: str
:param include_extended_locations: To Include Extended Locations information or not in the
response.
:type include_extended_locations: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceSkusResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_07_01.models.ResourceSkusResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceSkusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if include_extended_locations is not None:
query_parameters['includeExtendedLocations'] = self._serialize.query("include_extended_locations", include_extended_locations, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceSkusResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus'} # type: ignore
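# --- Example usage (editor's addition; a hedged sketch, not generated code).
# ResourceSkusOperations is normally reached through ComputeManagementClient
# rather than constructed directly; the credential and subscription id below
# are placeholders.
#
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.compute import ComputeManagementClient
# client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
# for sku in client.resource_skus.list(filter="location eq 'westus2'"):
#     print(sku.name, sku.resource_type)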
| 46.309524 | 153 | 0.65587 | [
"MIT"
] | AlexGhiondea/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/operations/_resource_skus_operations.py | 5,835 | Python |
import os
import signal
import sys
from builtins import id as identifier
from toga.command import CommandSet
from toga.handlers import wrapped_handler
from toga.icons import Icon
from toga.platform import get_platform_factory
from toga.window import Window
class MainWindow(Window):
_WINDOW_CLASS = 'MainWindow'
def __init__(self, id=None, title=None, position=(100, 100), size=(640, 480), factory=None):
super().__init__(id=id, title=title, position=position, size=size, factory=factory)
class App:
""" The App is the top level of any GUI program. It is the manager of all
    the other bits of the GUI app: the main window and the events that window
    generates, such as user input.
When you create an App you need to provide it a name, an id for uniqueness
    (by convention, the identifier is a "reversed domain name") and an
optional startup function which should run once the App has initialised.
The startup function typically constructs some initial user interface.
Once the app is created you should invoke the main_loop() method, which
will hand over execution of your program to Toga to make the App interface
do its thing.
Args:
name (str): Is the name of the application.
app_id (str): The unique application identifier, the reversed domain name, e.g. 'org.beeware.me'
icon (str): Path to the icon for the application.
id (str): The DOM identifier for the app (optional)
startup(``callable``): The callback method before starting the app, typically to add the components.
Must be a ``callable`` that expects a single argument of :class:`toga.App`.
factory (:obj:`module`): A python module that is capable to return a
implementation of this class with the same name. (optional & normally not needed)
Examples:
>>> # Here is the absolute minimum App::
>>> app = toga.App('Empty App', 'org.beeware.empty')
>>> app.main_loop()
"""
app = None
def __init__(self, name, app_id,
id=None, icon=None, startup=None, on_exit=None, factory=None):
self.factory = get_platform_factory(factory)
# Keep an accessible copy of the app instance
App.app = self
App.app_module = self.__module__.split('.')[0]
App.app_dir = os.path.dirname(sys.modules[App.app_module].__file__)
self.name = name
self._app_id = app_id
self._id = id if id else identifier(self)
self.commands = CommandSet(factory=self.factory)
self._startup_method = startup
self.default_icon = Icon('tiberius', system=True)
self.icon = icon
self._main_window = None
self._on_exit = None
self._full_screen_windows = None
self._impl = self._create_impl()
self.on_exit = on_exit
def _create_impl(self):
return self.factory.App(interface=self)
@property
def app_id(self):
""" The identifier for the app.
        This is the reversed domain name, often used for targeting resources, etc.
Returns:
The identifier as a ``str``.
"""
return self._app_id
@property
def id(self):
""" The DOM identifier for the app. This id can be used to target CSS directives.
Returns:
The identifier for the app as a ``str``.
"""
return self._id
@property
def icon(self):
""" The Icon for the app. On setting, the icon is loaded automatically.
Returns:
The icon of the app ``toga.Icon``.
"""
return self._icon
@icon.setter
def icon(self, name):
self._icon = Icon.load(name, default=self.default_icon)
@property
def main_window(self):
"""The main Windows for the app.
Returns:
The main Window of the app.
"""
return self._main_window
@main_window.setter
def main_window(self, window):
self._main_window = window
window.app = self
@property
def current_window(self):
"""Return the currently active content window"""
return self._impl.current_window().interface
@property
def is_full_screen(self):
"""Is the app currently in full screen mode?"""
return self._full_screen_windows is not None
def set_full_screen(self, *windows):
"""Make one or more windows full screen.
Full screen is not the same as "maximized"; full screen mode
is when all window borders and other chrome is no longer
visible.
Args:
windows: The list of windows to go full screen,
in order of allocation to screens. If the number of
windows exceeds the number of available displays,
those windows will not be visible. If no windows
are specified, the app will exit full screen mode.
"""
if not windows:
self.exit_full_screen()
else:
self._impl.enter_full_screen(windows)
self._full_screen_windows = windows
def exit_full_screen(self):
"""Exit full screen mode."""
if self.is_full_screen:
self._impl.exit_full_screen(self._full_screen_windows)
self._full_screen_windows = None
def show_cursor(self):
"""Show cursor."""
self._impl.show_cursor()
def hide_cursor(self):
"""Hide cursor from view."""
self._impl.hide_cursor()
def startup(self):
""" Create and show the main window for the application
"""
self.main_window = MainWindow(title=self.name, factory=self.factory)
if self._startup_method:
self.main_window.content = self._startup_method(self)
self.main_window.show()
def main_loop(self):
""" Invoke the application to handle user input.
This method typically only returns once the application is exiting.
"""
# Modify signal handlers to make sure Ctrl-C is caught and handled.
signal.signal(signal.SIGINT, signal.SIG_DFL)
self._impl.main_loop()
def exit(self):
""" Quit the application gracefully.
"""
self._impl.exit()
@property
def on_exit(self):
"""The handler to invoke before the application exits.
Returns:
The function ``callable`` that is called on application exit.
"""
return self._on_exit
@on_exit.setter
def on_exit(self, handler):
"""Set the handler to invoke before the app exits.
Args:
handler (:obj:`callable`): The handler to invoke before the app exits.
"""
self._on_exit = wrapped_handler(self, handler)
self._impl.set_on_exit(self._on_exit)
class DocumentApp(App):
"""
A document-based application.
Definition and arguments are the same as a base App, plus the following:
Args:
document_types (:obj:`list` of :obj:`str`): Document types.
"""
def __init__(self, name, app_id,
id=None, icon=None, startup=None, document_types=None, on_exit=None, factory=None):
self.document_types = document_types
self._documents = []
super().__init__(name, app_id,
id=id, icon=icon, startup=startup, on_exit=on_exit, factory=factory)
def _create_impl(self):
return self.factory.DocumentApp(interface=self)
@property
def documents(self):
""" Return the list of documents associated with this app.
Returns:
A ``list`` of ``str``.
"""
return self._documents
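# --- Example usage (editor's addition; an illustrative sketch, not part of
# toga). It shows the startup callable described in the App docstring: the
# callable receives the App instance and returns the main window's content.
# toga.Box is assumed here purely as a minimal placeholder widget.
#
# import toga
# def build(app):
#     return toga.Box()
# app = toga.App('First App', 'org.example.first', startup=build)
# app.main_loop()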
| 31.457143 | 108 | 0.631374 | [
"BSD-3-Clause"
] | UncleGoogle/toga | src/core/toga/app.py | 7,707 | Python |
# this file is deprecated and will soon be folded into all.py
from collections import namedtuple
from pycoin.serialize import h2b
NetworkValues = namedtuple('NetworkValues',
('network_name', 'subnet_name', 'code', 'wif', 'address',
'pay_to_script', 'prv32', 'pub32'))
NETWORKS = (
# VIA viacoin mainnet : xprv/xpub
NetworkValues("Viacoin", "mainnet", "VIA", b'\xc7', b'\x47', b'\x21', h2b('0488ADE4'), h2b('0488B21E')),
# VIA viacoin testnet : tprv/tpub
NetworkValues("Viacoin", "testnet", "TVI", b'\xff', b'\x7f', b'\xc4', h2b('04358394'), h2b('043587CF')),
# FTC feathercoin mainnet : xprv/xpub
NetworkValues(
"Feathercoin", "mainnet", "FTC", b'\x8e', b'\x0e', b'\x60', h2b('0488ADE4'), h2b('0488B21E')),
# FTC feathercoin testnet : tprv/tpub
NetworkValues(
"Feathercoin", "testnet", "FTX", b'\xC1', b'\x41', b'\xc4', h2b('04358394'), h2b('043587CF')),
# DOGE Dogecoin mainnet : dogv/dogp
NetworkValues(
"Dogecoin", "mainnet", "DOGE", b'\x9e', b'\x1e', b'\x16', h2b("02FD3955"), h2b("02FD3929")),
# DOGE Dogecoin testnet : tgpv/tgub
NetworkValues(
"Dogecoin", "testnet", "XDT", b'\xf1', b'\x71', b'\xc4', h2b("0432a9a8"), h2b("0432a243")),
# BC BlackCoin mainnet : bcpv/bcpb
NetworkValues("Blackcoin", "mainnet", "BC", b'\x99', b'\x19', None, h2b("02cfbf60"), h2b("02cfbede")),
# DRK Dash mainnet : drkv/drkp
NetworkValues(
"Dash", "mainnet", "DASH", b'\xcc', b'\x4c', b'\x10', h2b("02fe52f8"), h2b("02fe52cc")),
# DRK Dash testnet : DRKV/DRKP
NetworkValues(
"Dash", "testnet", "tDASH", b'\xef', b'\x8c', b'\x13', h2b("3a8061a0"), h2b("3a805837")),
# MEC Megacoin mainnet : mecv/mecp
NetworkValues("Megacoin", "mainnet", "MEC", b'\xb2', b'\x32', None, h2b("03a04db7"), h2b("03a04d8b")),
NetworkValues(
"Myriadcoin", "mainnet", "MYR", b'\xb2', b'\x32', b'\x09', h2b('0488ADE4'), h2b('0488B21E')),
NetworkValues(
"Unobtanium", "mainnet", "UNO", b'\xe0', b'\x82', b'\x1e', h2b('0488ADE4'), h2b('0488B21E')),
# JBS Jumbucks mainnet : jprv/jpub
NetworkValues("Jumbucks", "mainnet", "JBS", b'\xab', b'\x2b', None, h2b('037a6460'), h2b('037a689a')),
# MZC Mazacoin mainnet: xprv/xpub
NetworkValues("Mazacoin", "mainnet", "MZC", b'\xe0', b'\x32', b'\9', h2b("0488ADE4"), h2b("0488B21E")),
NetworkValues(
"Riecoin", "mainnet", "RIC", b'\x80', b'\x3c', b'\x05', h2b('0488ADE4'), h2b('0488B21E')),
# DFC Defcoin mainnet: dfcv/dfcp
NetworkValues("DEFCOIN", "mainnet", "DFC", b'\x9e', b'\x1e', b'\5', h2b("02FA54D7"), h2b("02FA54AD")),
# FAI faircoin mainnet : xprv/xpub
NetworkValues(
"Faircoin", "mainnet", "FAI", b'\xdf', b'\x5f', b'\x24', h2b("0488ADE4"), h2b("0488B21E")),
# ARG argentum mainnet : xprv/xpub
NetworkValues("Argentum", "mainnet", "ARG", b'\x97', b'\x17', b'\5', h2b("0488ADE4"), h2b("0488B21E")),
# ZEC Zcash mainnet : xprv/xpub
NetworkValues("Zcash", "mainnet", "ZEC", b'\x80', b'\x1C\xB8',
b'\x1C\xBD', h2b("0488ADE4"), h2b("0488B21E")),
# BTCD BitcoinDark mainnet : xprv/xpub
NetworkValues("BitcoinDark", "mainnet", "BTCD", b'\x44', b'\x3C', b'\55', h2b('0488ADE4'), h2b('0488B21E')),
# DCR Decred mainnet : dprv/dpub
NetworkValues("Decred", "mainnet", "DCR", b'\x22\xDE', b'\x07\x3F', b'\x07\x1A', h2b('02FDA4E8'), h2b('02FDA926')),
# DCR Decred testnet : tprv/tpub
NetworkValues("Decred", "testnet", "DCRT", b'\x23\x0E', b'\x0F\x21', b'\x0E\x6C', h2b('04358397'), h2b('043587D1')),
)
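# Minimal lookup sketch (not part of pycoin's API; shown only to illustrate how
# the tuple above is typically consumed): find an entry by its currency code and
# read its single-byte prefixes.
def _find_network(code, networks=NETWORKS):
    for network in networks:
        if network.code == code:
            return network
    return None
# Example: _find_network("VIA") returns the Viacoin mainnet entry, whose
# wif/address/pay_to_script prefixes are 0xc7, 0x47 and 0x21 respectively.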
| 42.325581 | 120 | 0.58956 | [
"MIT"
] | Kexkey/pycoin | pycoin/networks/legacy_networks.py | 3,640 | Python |
# -*- coding: utf-8 -*-
__title__ = "Universal Notifications"
__version__ = "1.5.0"
__author__ = "Pawel Krzyzaniak"
__license__ = "MIT"
__copyright__ = "Copyright 2017-2018 Arabella; 2018+ Ro"
# Version synonym
VERSION = __version__
| 23.4 | 56 | 0.726496 | [
"MIT"
] | ArabellaTech/universal_notifications | universal_notifications/__init__.py | 234 | Python |
#
# The OpenDiamond Platform for Interactive Search
#
# Copyright (c) 2009-2019 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
from django.conf.urls import url
from . import views
app_name = 'mirage'
urlpatterns = [
url('^$', views.index, name='index'),
]
| 26.3 | 69 | 0.741445 | [
"EPL-1.0"
] | cmusatyalab/opendiamond | opendiamond/scopeserver/mirage/urls.py | 526 | Python |
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
import random
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_no_witness_block, msg_no_witness_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_block, msg_blocktxn, MSG_WITNESS_FLAG, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript, OP_TRUE, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, wait_until, softfork_active
# TestP2PConn: A peer we use to send messages to bitcoind, and store responses.
class TestP2PConn(P2PInterface):
def __init__(self, cmpct_version):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
self.cmpct_version = cmpct_version
def on_sendcmpct(self, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock)
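# Illustrative helper (a sketch, not used by the tests below): BIP 152 short IDs
# are SipHash-2-4 digests keyed from the block header and nonce, truncated to
# 6 bytes by calculate_shortid(). This mirrors the verification loop in
# check_compactblock_construction_from_block() further down.
def example_shortids_for_block(block, use_witness=False):
    header_and_ids = HeaderAndShortIDs()
    header_and_ids.initialize_from_block(block, use_witness=use_witness)
    k0, k1 = header_and_ids.get_siphash_keys()
    shortids = []
    for tx in block.vtx:
        tx.calc_sha256()
        tx_hash = tx.calc_sha256(True) if use_witness else tx.sha256
        shortids.append(calculate_shortid(k0, k1, tx_hash))
    return shortids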
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-acceptnonstdtxn=1",
]]
self.utxos = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
    # Create 10 more anyone-can-spend utxos for testing.
def make_utxos(self):
block = self.build_block_on_tip(self.nodes[0])
self.segwit_node.send_and_ping(msg_no_witness_block(block))
assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256
self.nodes[0].generatetoaddress(100, self.nodes[0].getnewaddress(address_type="bech32"))
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.segwit_node.send_and_ping(msg_no_witness_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, test_node, old_node=None):
preferred_version = test_node.cmpct_version
node = self.nodes[0]
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert peer.block_announced
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version + 1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version - 1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version - 1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.segwit_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, test_node, use_witness_address=True):
version = test_node.cmpct_version
node = self.nodes[0]
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert segwit_tx_generated # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert len(header_and_shortids.prefilled_txn) >= 1
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the non-witness parts of the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# And this checks the witness
wtxid = entry.tx.calc_sha256(True)
if version == 2:
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
else:
# Shouldn't have received a witness
assert entry.tx.wit.is_null()
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
if version == 2:
tx_hash = block.vtx[index].calc_sha256(True)
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, test_node, segwit=True):
version = test_node.cmpct_version
node = self.nodes[0]
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node, segwit=segwit)
with mininode_lock:
test_node.last_message.pop("getdata", None)
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
if version == 2:
coinbase_hash = block.vtx[0].calc_sha256(True)
comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
if version == 2:
msg = msg_blocktxn()
else:
msg = msg_no_witness_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
with_witness = (version == 2)
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_no_witness_blocktxn()
if with_witness:
msg_bt = msg_blocktxn() # serialize with witnesses
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert block.vtx[1].hash in node.getrawmempool()
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
# Clear out last request.
with mininode_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
# Shouldn't have gotten a request for any transaction
assert "getblocktxn" not in test_node.last_message
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert tx.hash in mempool
# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
# Now give an incorrect response.
# Note that it's possible for bitcoind to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change was made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_no_witness_blocktxn()
if version == 2:
msg = msg_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2 | MSG_WITNESS_FLAG
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Deliver the block
if version == 2:
test_node.send_and_ping(msg_block(block))
else:
test_node.send_and_ping(msg_no_witness_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
# bitcoind will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
if version == 1:
# Witnesses should have been stripped
assert tx.wit.is_null()
else:
# Check that the witness matches
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
test_node.last_message.pop("blocktxn", None)
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, test_node):
node = self.nodes[0]
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert found
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
assert "blocktxn" not in test_node.last_message
def test_end_to_end_block_relay(self, listeners):
node = self.nodes[0]
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
# ToHex() won't serialize with witness, but this block has no witnesses
# anyway. TODO: repeat this test with witness tx's to a segwit node.
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, test_node, use_segwit=True):
node = self.nodes[0]
assert len(self.utxos)
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
# If we're testing with segwit, also drop the coinbase witness,
# but include the witness commitment.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
        assert int(node.getbestblockhash(), 16) != block.sha256
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer):
node = self.nodes[0]
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
msg = msg_sendcmpct()
msg.version = peer.cmpct_version
msg.announce = True
peer.send_and_ping(msg)
def test_compactblock_reconstruction_multiple_peers(self, stalling_peer, delivery_peer):
node = self.nodes[0]
assert len(self.utxos)
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now test that delivering an invalid compact block won't break relay
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()]
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
cmpct_block.use_witness = True
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert int(node.getbestblockhash(), 16) != block.sha256
msg = msg_no_witness_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections
self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=1), services=NODE_NETWORK)
self.additional_segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
assert softfork_active(self.nodes[0], "segwit")
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.segwit_node, old_node=self.old_node)
self.test_sendcmpct(self.additional_segwit_node)
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.old_node)
self.test_compactblock_construction(self.segwit_node)
self.log.info("Testing compactblock requests (segwit node)... ")
self.test_compactblock_requests(self.segwit_node)
self.log.info("Testing getblocktxn requests (segwit node)...")
self.test_getblocktxn_requests(self.segwit_node)
self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
self.test_getblocktxn_handler(self.segwit_node)
self.test_getblocktxn_handler(self.old_node)
self.log.info("Testing compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.segwit_node)
self.test_compactblocks_not_at_tip(self.old_node)
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.segwit_node)
self.log.info("Testing reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(self.segwit_node, self.additional_segwit_node)
# Test that if we submitblock to node1, we'll get a compact block
# announcement to all peers.
# (Post-segwit activation, blocks won't propagate from node0 to node1
# automatically, so don't bother testing a block announced to node0.)
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.old_node)
self.request_cb_announcements(self.segwit_node)
self.test_end_to_end_block_relay([self.segwit_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.segwit_node)
self.test_invalid_tx_in_compactblock(self.old_node)
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
| 46.568421 | 500 | 0.672092 | [
"MIT"
] | 0mfq/0mfq | test/functional/p2p_compactblocks.py | 39,816 | Python |
#!/usr/bin/python
import requests, time
from flask import Flask, Response, stream_with_context
app = Flask(__name__)
START = time.time()
def elapsed():
running = time.time() - START
minutes, seconds = divmod(running, 60)
hours, minutes = divmod(minutes, 60)
return "%d:%02d:%02d" % (hours, minutes, seconds)
@app.route('/<path:url>')
def root(url):
req = requests.get("http://%s" % url, stream=True)
return Response(stream_with_context(req.iter_content()), content_type = req.headers['content-type'])
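# Usage sketch (assumes the proxy is running locally on port 8080; the upstream
# host is only an illustration): requesting
#     http://localhost:8080/example.com/
# makes this process fetch http://example.com/ over plain HTTP and stream the
# body back chunk by chunk, so large upstream responses are never fully buffered
# in memory.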
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
| 27.090909 | 104 | 0.674497 | [
"Apache-2.0"
] | datawire/hello-forge-network | edge/app.py | 596 | Python |
"""Training a face recognizer with TensorFlow using softmax cross entropy loss
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import sys
import random
import tensorflow as tf
import numpy as np
import importlib
import argparse
import facenet
import lfw
import h5py
import math
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
def main(args):
network = importlib.import_module(args.model_def)
image_size = (args.image_size, args.image_size)
subdir = datetime.strftime(datetime.now(), '%Y-%m-%d-%H-softmax-'+args.model_def.split(".")[-1]+"-"+args.data_dir.split("/")[-1])
log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist
os.makedirs(log_dir)
model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist
os.makedirs(model_dir)
stat_file_name = os.path.join(log_dir, 'stat.h5')
# Write arguments to a text file
facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))
np.random.seed(seed=args.seed)
random.seed(args.seed)
dataset = facenet.get_dataset(args.data_dir)
if args.filter_filename:
dataset = filter_dataset(dataset, os.path.expanduser(args.filter_filename),
args.filter_percentile, args.filter_min_nrof_images_per_class)
if args.validation_set_split_ratio>0.0:
train_set, val_set = facenet.split_dataset(dataset, args.validation_set_split_ratio, args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
else:
train_set, val_set = dataset, []
nrof_classes = len(train_set)
print('Model directory: %s' % model_dir)
print('Log directory: %s' % log_dir)
pretrained_model = None
if args.pretrained_model:
pretrained_model = os.path.expanduser(args.pretrained_model)
print('Pre-trained model: %s' % pretrained_model)
if args.lfw_dir:
print('LFW directory: %s' % args.lfw_dir)
# Read the file containing the pairs used for testing
pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
# Get the paths for the corresponding images
lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
global_step = tf.Variable(0, trainable=False)
# Get a list of image paths and their labels
image_list, label_list = facenet.get_image_paths_and_labels(train_set)
assert len(image_list)>0, 'The training set should not be empty'
val_image_list, val_label_list = facenet.get_image_paths_and_labels(val_set)
# Create a queue that produces indices into the image_list and label_list
labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
range_size = array_ops.shape(labels)[0]
index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
shuffle=True, seed=None, capacity=32)
index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
nrof_preprocess_threads = 4
input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
dtypes=[tf.string, tf.int32, tf.int32],
shapes=[(1,), (1,), (1,)],
shared_name=None, name=None)
enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='enqueue_op')
image_batch, label_batch = facenet.create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
image_batch = tf.identity(image_batch, 'image_batch')
image_batch = tf.identity(image_batch, 'input')
label_batch = tf.identity(label_batch, 'label_batch')
print('Number of classes in training set: %d' % nrof_classes)
print('Number of examples in training set: %d' % len(image_list))
print('Number of classes in validation set: %d' % len(val_set))
print('Number of examples in validation set: %d' % len(val_image_list))
print('Building training graph')
# Build the inference graph
prelogits, _ = network.inference(image_batch, args.keep_probability,
phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,
weight_decay=args.weight_decay)
logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
weights_initializer=slim.initializers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(args.weight_decay),
scope='Logits', reuse=False)
embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
# Norm for the prelogits
eps = 1e-4
prelogits_norm = tf.reduce_mean(tf.norm(tf.abs(prelogits)+eps, ord=args.prelogits_norm_p, axis=1))
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_norm * args.prelogits_norm_loss_factor)
# Add center loss
prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)
learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
# Calculate the average cross entropy loss across the batch
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_batch, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)), tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
# Calculate the total losses
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
# Build a Graph that trains the model with one batch of examples and updates the model parameters
train_op = facenet.train(total_loss, global_step, args.optimizer,
learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
# Create a saver
saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Start running operations on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
with sess.as_default():
if pretrained_model:
print('Restoring pretrained model: %s' % pretrained_model)
ckpt = tf.train.get_checkpoint_state(pretrained_model)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
# Training and validation loop
print('Running training')
nrof_steps = args.max_nrof_epochs*args.epoch_size
nrof_val_samples = int(math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs)) # Validate every validate_every_n_epochs as well as in the last epoch
stat = {
'loss': np.zeros((nrof_steps,), np.float32),
'center_loss': np.zeros((nrof_steps,), np.float32),
'reg_loss': np.zeros((nrof_steps,), np.float32),
'xent_loss': np.zeros((nrof_steps,), np.float32),
'prelogits_norm': np.zeros((nrof_steps,), np.float32),
'accuracy': np.zeros((nrof_steps,), np.float32),
'val_loss': np.zeros((nrof_val_samples,), np.float32),
'val_xent_loss': np.zeros((nrof_val_samples,), np.float32),
'val_accuracy': np.zeros((nrof_val_samples,), np.float32),
'lfw_accuracy': np.zeros((args.max_nrof_epochs,), np.float32),
'lfw_valrate2': np.zeros((args.max_nrof_epochs,), np.float32),
'lfw_valrate3': np.zeros((args.max_nrof_epochs,), np.float32),
'learning_rate': np.zeros((args.max_nrof_epochs,), np.float32),
'time_train': np.zeros((args.max_nrof_epochs,), np.float32),
'time_validate': np.zeros((args.max_nrof_epochs,), np.float32),
'time_evaluate': np.zeros((args.max_nrof_epochs,), np.float32),
'prelogits_hist': np.zeros((args.max_nrof_epochs, 1000), np.float32),
}
for epoch in range(1,args.max_nrof_epochs+1):
step = sess.run(global_step, feed_dict=None)
# Train for one epoch
t = time.time()
cont = train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, global_step,
total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
stat, cross_entropy_mean, accuracy, learning_rate,
prelogits, prelogits_center_loss, args.random_rotate, args.random_crop, args.random_flip, prelogits_norm, args.prelogits_hist_max, args.use_fixed_image_standardization)
stat['time_train'][epoch-1] = time.time() - t
if not cont:
break
t = time.time()
if len(val_image_list)>0 and ((epoch-1) % args.validate_every_n_epochs == args.validate_every_n_epochs-1 or epoch==args.max_nrof_epochs):
validate(args, sess, epoch, val_image_list, val_label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
phase_train_placeholder, batch_size_placeholder,
stat, total_loss, regularization_losses, cross_entropy_mean, accuracy, args.validate_every_n_epochs, args.use_fixed_image_standardization)
stat['time_validate'][epoch-1] = time.time() - t
# Save variables and the metagraph if it doesn't exist already
save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, epoch)
# Evaluate on LFW
t = time.time()
if args.lfw_dir:
evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer, stat, epoch,
args.lfw_distance_metric, args.lfw_subtract_mean, args.lfw_use_flipped_images, args.use_fixed_image_standardization)
stat['time_evaluate'][epoch-1] = time.time() - t
print('Saving statistics')
with h5py.File(stat_file_name, 'w') as f:
for key, value in stat.items():
f.create_dataset(key, data=value)
return model_dir
def find_threshold(var, percentile):
hist, bin_edges = np.histogram(var, 100)
cdf = np.float32(np.cumsum(hist)) / np.sum(hist)
bin_centers = (bin_edges[:-1]+bin_edges[1:])/2
#plt.plot(bin_centers, cdf)
threshold = np.interp(percentile*0.01, cdf, bin_centers)
return threshold
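# Sanity-check sketch (illustration only, not called during training):
# find_threshold() builds an empirical CDF from a 100-bin histogram and
# interpolates it, i.e. it approximates the requested percentile of `var`.
# For a large uniform sample on [0, 1] the 90th percentile lands near 0.9:
#
#     rnd = np.random.RandomState(0)
#     t = find_threshold(rnd.uniform(0.0, 1.0, 100000), 90)
#     assert 0.88 < t < 0.92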
def filter_dataset(dataset, data_filename, percentile, min_nrof_images_per_class):
with h5py.File(data_filename,'r') as f:
distance_to_center = np.array(f.get('distance_to_center'))
label_list = np.array(f.get('label_list'))
image_list = np.array(f.get('image_list'))
distance_to_center_threshold = find_threshold(distance_to_center, percentile)
indices = np.where(distance_to_center>=distance_to_center_threshold)[0]
filtered_dataset = dataset
removelist = []
for i in indices:
label = label_list[i]
image = image_list[i]
if image in filtered_dataset[label].image_paths:
filtered_dataset[label].image_paths.remove(image)
if len(filtered_dataset[label].image_paths)<min_nrof_images_per_class:
removelist.append(label)
ix = sorted(list(set(removelist)), reverse=True)
for i in ix:
del(filtered_dataset[i])
return filtered_dataset
def train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, step,
loss, train_op, summary_op, summary_writer, reg_losses, learning_rate_schedule_file,
stat, cross_entropy_mean, accuracy,
learning_rate, prelogits, prelogits_center_loss, random_rotate, random_crop, random_flip, prelogits_norm, prelogits_hist_max, use_fixed_image_standardization):
batch_number = 0
if args.learning_rate>0.0:
lr = args.learning_rate
else:
lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
if lr<=0:
return False
index_epoch = sess.run(index_dequeue_op)
label_epoch = np.array(label_list)[index_epoch]
image_epoch = np.array(image_list)[index_epoch]
# Enqueue one epoch of image paths and labels
labels_array = np.expand_dims(np.array(label_epoch),1)
image_paths_array = np.expand_dims(np.array(image_epoch),1)
control_value = facenet.RANDOM_ROTATE * random_rotate + facenet.RANDOM_CROP * random_crop + facenet.RANDOM_FLIP * random_flip + facenet.FIXED_STANDARDIZATION * use_fixed_image_standardization
control_array = np.ones_like(labels_array) * control_value
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
# Training loop
train_time = 0
while batch_number < args.epoch_size:
start_time = time.time()
feed_dict = {learning_rate_placeholder: lr, phase_train_placeholder:True, batch_size_placeholder:args.batch_size}
tensor_list = [loss, train_op, step, reg_losses, prelogits, cross_entropy_mean, learning_rate, prelogits_norm, accuracy, prelogits_center_loss]
if batch_number % 100 == 0:
loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_, summary_str = sess.run(tensor_list + [summary_op], feed_dict=feed_dict)
summary_writer.add_summary(summary_str, global_step=step_)
else:
loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_ = sess.run(tensor_list, feed_dict=feed_dict)
duration = time.time() - start_time
stat['loss'][step_-1] = loss_
stat['center_loss'][step_-1] = center_loss_
stat['reg_loss'][step_-1] = np.sum(reg_losses_)
stat['xent_loss'][step_-1] = cross_entropy_mean_
stat['prelogits_norm'][step_-1] = prelogits_norm_
stat['learning_rate'][epoch-1] = lr_
stat['accuracy'][step_-1] = accuracy_
stat['prelogits_hist'][epoch-1,:] += np.histogram(np.minimum(np.abs(prelogits_), prelogits_hist_max), bins=1000, range=(0.0, prelogits_hist_max))[0]
duration = time.time() - start_time
print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tXent %2.3f\tRegLoss %2.3f\tAccuracy %2.3f\tLr %2.5f\tCl %2.3f' %
(epoch, batch_number+1, args.epoch_size, duration, loss_, cross_entropy_mean_, np.sum(reg_losses_), accuracy_, lr_, center_loss_))
batch_number += 1
train_time += duration
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/total', simple_value=train_time)
summary_writer.add_summary(summary, global_step=step_)
return True
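# Note on the per-image `control` values enqueued in train() (a sketch of the
# convention; the flag constants live in facenet.py): RANDOM_ROTATE, RANDOM_CROP,
# RANDOM_FLIP and FIXED_STANDARDIZATION are distinct bit flags, so the sum built
# above is equivalent to OR-ing the enabled options, e.g.
#
#     control_value = facenet.RANDOM_FLIP | facenet.FIXED_STANDARDIZATION
#
# facenet.create_input_pipeline() then tests those bits per image to decide which
# preprocessing steps to apply.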
def validate(args, sess, epoch, image_list, label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
phase_train_placeholder, batch_size_placeholder,
stat, loss, regularization_losses, cross_entropy_mean, accuracy, validate_every_n_epochs, use_fixed_image_standardization):
print('Running forward pass on validation set')
nrof_batches = len(label_list) // args.lfw_batch_size
nrof_images = nrof_batches * args.lfw_batch_size
# Enqueue one epoch of image paths and labels
labels_array = np.expand_dims(np.array(label_list[:nrof_images]),1)
image_paths_array = np.expand_dims(np.array(image_list[:nrof_images]),1)
control_array = np.ones_like(labels_array, np.int32)*facenet.FIXED_STANDARDIZATION * use_fixed_image_standardization
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
loss_array = np.zeros((nrof_batches,), np.float32)
xent_array = np.zeros((nrof_batches,), np.float32)
accuracy_array = np.zeros((nrof_batches,), np.float32)
# Training loop
start_time = time.time()
for i in range(nrof_batches):
feed_dict = {phase_train_placeholder:False, batch_size_placeholder:args.lfw_batch_size}
loss_, cross_entropy_mean_, accuracy_ = sess.run([loss, cross_entropy_mean, accuracy], feed_dict=feed_dict)
loss_array[i], xent_array[i], accuracy_array[i] = (loss_, cross_entropy_mean_, accuracy_)
if i % 10 == 9:
print('.', end='')
sys.stdout.flush()
print('')
duration = time.time() - start_time
val_index = (epoch-1)//validate_every_n_epochs
stat['val_loss'][val_index] = np.mean(loss_array)
stat['val_xent_loss'][val_index] = np.mean(xent_array)
stat['val_accuracy'][val_index] = np.mean(accuracy_array)
print('Validation Epoch: %d\tTime %.3f\tLoss %2.3f\tXent %2.3f\tAccuracy %2.3f' %
(epoch, duration, np.mean(loss_array), np.mean(xent_array), np.mean(accuracy_array)))
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds, log_dir, step, summary_writer, stat, epoch, distance_metric, subtract_mean, use_flipped_images, use_fixed_image_standardization):
start_time = time.time()
# Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')
# Enqueue one epoch of image paths and labels
nrof_embeddings = len(actual_issame)*2 # nrof_pairs * nrof_images_per_pair
nrof_flips = 2 if use_flipped_images else 1
nrof_images = nrof_embeddings * nrof_flips
labels_array = np.expand_dims(np.arange(0,nrof_images),1)
image_paths_array = np.expand_dims(np.repeat(np.array(image_paths),nrof_flips),1)
control_array = np.zeros_like(labels_array, np.int32)
if use_fixed_image_standardization:
control_array += np.ones_like(labels_array)*facenet.FIXED_STANDARDIZATION
if use_flipped_images:
# Flip every second image
control_array += (labels_array % 2)*facenet.FLIP
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
embedding_size = int(embeddings.get_shape()[1])
assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
nrof_batches = nrof_images // batch_size
emb_array = np.zeros((nrof_images, embedding_size))
lab_array = np.zeros((nrof_images,))
for i in range(nrof_batches):
feed_dict = {phase_train_placeholder:False, batch_size_placeholder:batch_size}
emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
lab_array[lab] = lab
emb_array[lab, :] = emb
if i % 10 == 9:
print('.', end='')
sys.stdout.flush()
print('')
embeddings = np.zeros((nrof_embeddings, embedding_size*nrof_flips))
if use_flipped_images:
        # Concatenate embeddings for the flipped and non-flipped versions of the images
        # (even rows of emb_array hold the originals, odd rows the flipped copies)
embeddings[:,:embedding_size] = emb_array[0::2,:]
embeddings[:,embedding_size:] = emb_array[1::2,:]
else:
embeddings = emb_array
assert np.array_equal(lab_array, np.arange(nrof_images))==True, 'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
_, _, accuracy, val2, val_std2, far2, val3, val_std3, far3 = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds, distance_metric=distance_metric, subtract_mean=subtract_mean)
print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val2, val_std2, far2))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val3, val_std3, far3))
lfw_time = time.time() - start_time
    # Add LFW accuracy, validation rates and timing to the summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
summary.value.add(tag='lfw/val_rate2', simple_value=val2)
summary.value.add(tag='lfw/val_rate3', simple_value=val3)
summary.value.add(tag='time/lfw', simple_value=lfw_time)
summary_writer.add_summary(summary, step)
with open(os.path.join(log_dir,'lfw_result.txt'),'at') as f:
f.write('%d\t%.5f\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val2, val3))
stat['lfw_accuracy'][epoch-1] = np.mean(accuracy)
stat['lfw_valrate2'][epoch-1] = val2
stat['lfw_valrate3'][epoch-1] = val3
def save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):
# Save the model checkpoint
print('Saving variables')
start_time = time.time()
checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)
saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
save_time_variables = time.time() - start_time
print('Variables saved in %.2f seconds' % save_time_variables)
metagraph_filename = os.path.join(model_dir, 'model-%s.meta' % model_name)
save_time_metagraph = 0
if not os.path.exists(metagraph_filename):
print('Saving metagraph')
start_time = time.time()
saver.export_meta_graph(metagraph_filename)
save_time_metagraph = time.time() - start_time
print('Metagraph saved in %.2f seconds' % save_time_metagraph)
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/save_variables', simple_value=save_time_variables)
summary.value.add(tag='time/save_metagraph', simple_value=save_time_metagraph)
summary_writer.add_summary(summary, step)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logs_base_dir', type=str,
help='Directory where to write event logs.', default='~/logs/facenet')
parser.add_argument('--models_base_dir', type=str,
help='Directory where to write trained models and checkpoints.', default='~/models/facenet')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--pretrained_model', type=str,
help='Load a pretrained model before training starts.')
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches.',
default='~/datasets/casia/casia_maxpy_mtcnnalign_182_160')
parser.add_argument('--model_def', type=str,
help='Model definition. Points to a module containing the definition of the inference graph.', default='models.inception_resnet_v1')
parser.add_argument('--max_nrof_epochs', type=int,
help='Number of epochs to run.', default=500)
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=90)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--epoch_size', type=int,
help='Number of batches per epoch.', default=3860)
parser.add_argument('--embedding_size', type=int,
help='Dimensionality of the embedding.', default=128)
parser.add_argument('--random_crop',
help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')
parser.add_argument('--random_flip',
help='Performs random horizontal flipping of training images.', action='store_true')
parser.add_argument('--random_rotate',
help='Performs random rotations of training images.', action='store_true')
parser.add_argument('--use_fixed_image_standardization',
help='Performs fixed standardization of images.', action='store_true')
parser.add_argument('--keep_probability', type=float,
help='Keep probability of dropout for the fully connected layer(s).', default=1.0)
parser.add_argument('--weight_decay', type=float,
help='L2 weight regularization.', default=0.0)
parser.add_argument('--center_loss_factor', type=float,
help='Center loss factor.', default=0.0)
parser.add_argument('--center_loss_alfa', type=float,
help='Center update rate for center loss.', default=0.95)
parser.add_argument('--prelogits_norm_loss_factor', type=float,
help='Loss based on the norm of the activations in the prelogits layer.', default=0.0)
parser.add_argument('--prelogits_norm_p', type=float,
help='Norm to use for prelogits norm loss.', default=1.0)
parser.add_argument('--prelogits_hist_max', type=float,
help='The max value for the prelogits histogram.', default=10.0)
parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],
help='The optimization algorithm to use', default='ADAGRAD')
parser.add_argument('--learning_rate', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.1)
parser.add_argument('--learning_rate_decay_epochs', type=int,
help='Number of epochs between learning rate decay.', default=100)
parser.add_argument('--learning_rate_decay_factor', type=float,
help='Learning rate decay factor.', default=1.0)
parser.add_argument('--moving_average_decay', type=float,
help='Exponential decay for tracking of training parameters.', default=0.9999)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
parser.add_argument('--nrof_preprocess_threads', type=int,
help='Number of preprocessing (data loading and augmentation) threads.', default=4)
parser.add_argument('--log_histograms',
help='Enables logging of weight/bias histograms in tensorboard.', action='store_true')
parser.add_argument('--learning_rate_schedule_file', type=str,
        help='File containing the learning rate schedule that is used when learning_rate is set to -1.', default='data/learning_rate_schedule.txt')
parser.add_argument('--filter_filename', type=str,
help='File containing image data used for dataset filtering', default='')
parser.add_argument('--filter_percentile', type=float,
        help='Keep only the percentile of images closest to their class center', default=100.0)
parser.add_argument('--filter_min_nrof_images_per_class', type=int,
help='Keep only the classes with this number of examples or more', default=0)
parser.add_argument('--validate_every_n_epochs', type=int,
        help='Number of epochs between validation runs', default=5)
parser.add_argument('--validation_set_split_ratio', type=float,
help='The ratio of the total dataset to use for validation', default=0.0)
parser.add_argument('--min_nrof_val_images_per_class', type=float,
help='Classes with fewer images will be removed from the validation set', default=0)
# Parameters for validation on LFW
parser.add_argument('--lfw_pairs', type=str,
help='The file containing the pairs to use for validation.', default='data/pairs.txt')
parser.add_argument('--lfw_dir', type=str,
help='Path to the data directory containing aligned face patches.', default='')
parser.add_argument('--lfw_batch_size', type=int,
help='Number of images to process in a batch in the LFW test set.', default=100)
parser.add_argument('--lfw_nrof_folds', type=int,
help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
parser.add_argument('--lfw_distance_metric', type=int,
        help='Type of distance metric to use. 0: Euclidean, 1: Cosine similarity distance.', default=0)
parser.add_argument('--lfw_use_flipped_images',
help='Concatenates embeddings for the image and its horizontally flipped counterpart.', action='store_true')
parser.add_argument('--lfw_subtract_mean',
help='Subtract feature mean before calculating distance.', action='store_true')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
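# Example invocation (illustrative only; the dataset and LFW paths are placeholders,
# but every flag below is defined in parse_arguments above):
#
#   python train_softmax.py \
#       --data_dir ~/datasets/casia/casia_maxpy_mtcnnalign_182_160 \
#       --lfw_dir ~/datasets/lfw/lfw_mtcnnalign_160 \
#       --model_def models.inception_resnet_v1 \
#       --optimizer ADAM \
#       --learning_rate -1 \
#       --learning_rate_schedule_file data/learning_rate_schedule.txt \
#       --use_fixed_image_standardization \
#       --random_flip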
| 55.692699 | 209 | 0.698137 | ["MIT"] | govindjeevan/facenet | src/train_softmax.py | 32,803 | Python |
from django.utils.translation import gettext_lazy as _
from generic_chooser.widgets import AdminChooser, LinkedFieldMixin
from areas.models import CongressionalDistrict
class CongressionalDistrictChooser(LinkedFieldMixin, AdminChooser):
#icon = 'user'
model = CongressionalDistrict
page_title = _("Choose a district")
    choose_modal_url_name = 'congressional_district_chooser:choose'
| 36.363636 | 67 | 0.8175 | ["MIT"] | polrev-github/polrev-django | polrev/areas/widgets/congressional_district_widgets.py | 400 | Python |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Constant values used by this library.
"""
from enum import Enum
class DataCategory(Enum):
"""
Enumeration of data categories in compliant machine learning.
Values:
- PRIVATE: data which is private. Researchers may not view this.
- PUBLIC: data which may safely be viewed by researchers.
"""
PRIVATE = 1
PUBLIC = 2
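# Illustrative usage sketch (added for exposition, not part of shrike itself):
# gate researcher-visible output on the category of the data being handled.
if __name__ == "__main__":
    sample = ("model accuracy: 0.93", DataCategory.PUBLIC)
    text, category = sample
    if category is DataCategory.PUBLIC:
        print(text)                        # safe for researchers to see
    else:
        print("<redacted: private data>")  # keep inside the compliant boundary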
| 19.5 | 68 | 0.687646 | ["MIT"] | Anbang-Hu/shrike | shrike/compliant_logging/constants.py | 429 | Python |
#VERSION: 2.3
#AUTHORS: Vikas Yadav (https://github.com/v1k45 | http://v1k45.com)
#CONTRIBUTORS: Diego de las Heras ([email protected])
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from html.parser import HTMLParser
from helpers import retrieve_url
from novaprinter import prettyPrinter
class leetx(object):
url = "https://1337x.to"
name = "1337x"
supported_categories = {
'all': 'All',
'movies': 'Movies',
'tv': 'TV',
'music': 'Music',
'games': 'Games',
'anime': 'Anime',
'software': 'Apps'
}
class MyHtmlParser(HTMLParser):
        """ Sub-class for parsing results """
        A, TABLE, TR, TD, SPAN = ('a', 'table', 'tr', 'td', 'span')
def __init__(self, results, url):
HTMLParser.__init__(self)
self.results = results
self.url = url
self.current_result = {}
self.current_item = None
self.inside_table = False
self.inside_row = False
def handle_starttag(self, tag, attrs):
# are we inside the results table body or not
# if we are not inside the table, no need to process any further
self.inside_table = self.inside_table or tag == self.TABLE
if not self.inside_table:
return
# convert attrs tuple to dictionary
attrs = dict(attrs)
# for torrent name and link
link = attrs.get('href', '')
if tag == self.A and link.startswith('/torrent'):
self.current_result['link'] = self.url + link
self.current_result['desc_link'] = self.url + link
self.current_result['engine_url'] = self.url
self.current_item = 'name'
# to ignore uploader name attached to the torrent size in span tag
if tag == self.SPAN:
self.current_item = None
# if this is a <td> there can be seeds, leeches or size inside it.
if tag == self.TD:
self.inside_row = True
                # find the appropriate data key using the class name of the td
for item in ['seeds', 'leech', 'size']:
if item in attrs.get('class', ''):
self.current_item = item
break
def handle_data(self, data):
# if we are not inside the table, no need to process any further
if not self.inside_table:
return
# do not process data if we are not inside the table body
if self.current_item:
prev_value = self.current_result.get(self.current_item, '')
self.current_result[self.current_item] = prev_value + data
def handle_endtag(self, tag):
# are we inside the results table body or not
# if we are not inside the table, no need to process any further
if tag == self.TABLE:
self.inside_table = False
if not self.inside_table:
return
# exiting the table data and maybe moving td or tr element
if self.inside_row and tag == self.TD:
self.inside_row = False
self.current_item = None
            # exiting the tr element, which means all necessary data for a torrent has been
            # extracted, so we save it and reset the parser state.
if self.current_result and tag == self.TR:
if 'size' in self.current_result:
self.current_result['size'] = self.current_result['size'].replace(',', '')
# skip malformed names (eg. with @)
if 'name' in self.current_result:
prettyPrinter(self.current_result)
self.results.append('a')
self.current_result = {}
self.current_item = None
def download_torrent(self, download_url):
# since 1337x does not provide torrent links in the search results,
# we will have to fetch the page and extract the magnet link
torrent_page = retrieve_url(download_url)
magnet_match = re.search(r"href\s*\=\s*\"(magnet[^\"]+)\"", torrent_page)
if magnet_match and magnet_match.groups():
print(magnet_match.groups()[0] + " " + download_url)
else:
raise Exception('Error, please fill a bug report!')
def search(self, what, cat='all'):
cat = cat.lower()
# decide which type of search to perform based on category
search_page = "search" if cat == 'all' else 'category-search'
search_url = "{url}/{search_page}/{search_query}/".format(
url=self.url, search_page=search_page, search_query=what)
# apply search category to url, if any.
if cat != 'all':
search_url += self.supported_categories[cat] + "/"
        # try to get 15 pages (20 * 15 = 300 results) and stop when we don't find results
results_list = []
parser = self.MyHtmlParser(results_list, self.url)
page = 1
while page < 16:
# download the page
html = retrieve_url(search_url + str(page) + '/')
parser.feed(html)
if len(results_list) < 1:
break
del results_list[:]
page += 1
parser.close()
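# Illustrative use outside of qBittorrent (sketch only; normally nova2.py drives
# this class, and retrieve_url/prettyPrinter come from the surrounding nova3
# framework). Each hit is printed by prettyPrinter as it is parsed:
#
#     engine = leetx()
#     engine.search('ubuntu', cat='software')          # scans up to 15 result pages
#     engine.download_torrent('https://1337x.to/torrent/123/example/')  # prints "magnet... url"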
| 41.654545 | 94 | 0.60614 | ["MIT"] | Kira9204/Wireguard-qBittorrent | container_data/.config/qBittorrent/plugins/nova3/engines/leetx.py | 6,873 | Python |
import random
import string
from django.conf import settings
from nacl.signing import SigningKey
from nacl.encoding import Base64Encoder
import segno
import io
import cairosvg
from django.template.loader import render_to_string
import base64
import PyPDF2
import os
# Will generate a random alphanumeric string with 62^length possible combinations
def generate_random_key(length=8):
return "".join(random.choices(string.ascii_letters + string.digits, k=length))
def generate_signature_key():
"""
Generate a new random signing key and return the hex-encoded bytestring
"""
signing_key = SigningKey.generate()
return signing_key.encode(encoder=Base64Encoder).decode("utf-8")
def load_signature_key():
"""
Load the signature key from the environment
"""
try:
key = settings.QRCODE_SIGNATURE_PRIVATE_KEY
key_bytes = key.encode("utf-8")
except AttributeError:
print("Missing QRCode signing key")
raise
try:
signing_key = SigningKey(key_bytes, encoder=Base64Encoder)
except TypeError:
print("Faulty QRCode signing key")
raise
return signing_key
def generate_payload(location):
payload = "{short_code}\n{name}\n{address}, {city}".format(
short_code=location.short_code,
name=location.name,
address=location.address,
city=location.city,
)
return payload
def sign_payload(payload):
payload_bytes = payload.encode()
signing_key = load_signature_key()
signed_b64 = signing_key.sign(payload_bytes, encoder=Base64Encoder)
return signed_b64.decode()
def generate_qrcode(url):
qrcode = segno.make_qr(url)
buffer = io.BytesIO()
qrcode.save(buffer, kind="svg", xmldecl=False, scale=5, omitsize=True)
return buffer.getvalue().decode()
def get_signed_qrcode(location):
# Create payload
payload = generate_payload(location)
# Sign payload
signed = sign_payload(payload)
# Build URL
url_prefix = "https://alpha.canada.ca/covid-alert.html#"
url = url_prefix + str(signed)
qrcode = generate_qrcode(url)
return qrcode
def get_pdf_poster(location, lang="en"):
# Generate the qr code
qr_code = get_signed_qrcode(location)
poster_template = "register/posters/{lang}.svg".format(lang=lang)
address_details = "{city}, {province} {postal_code}".format(
city=location.city,
province=location.province,
postal_code=location.postal_code,
)
# Render the qr code and address details into the svg template
rendered = render_to_string(
poster_template,
{
"qr_code": qr_code,
"name": location.name,
"address": location.address,
"address_details": address_details,
},
)
buffer = io.BytesIO()
# Convert the rendered SVG to PDF
cairosvg.svg2pdf(
bytestring=rendered.encode("UTF-8"),
write_to=buffer,
output_width=815,
)
# Get instructions PDF
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
instructions = os.path.join(
BASE_DIR,
"register/templates/register/posters/instructions-{lang}.pdf".format(lang=lang),
)
pdf_instructions = PyPDF2.PdfFileReader(instructions)
# Merge the pdfs
mergeFile = PyPDF2.PdfFileMerger()
mergeFile.append(pdf_instructions)
mergeFile.append(buffer)
    # Write the merged PDF back to the buffer
mergeFile.write(buffer)
buffer.seek(0)
return buffer
def get_encoded_poster(location, lang="en"):
poster = get_pdf_poster(location, lang)
poster_str = poster.read()
# Base64-encode the poster for attaching
poster_encoded = base64.b64encode(poster_str).decode()
return poster_encoded
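# Round-trip sketch (added for exposition): check that a payload signed the way
# sign_payload() does can be verified with the matching verify key. A throwaway
# key is generated here so the demo does not depend on Django settings.
if __name__ == "__main__":
    demo_key = SigningKey.generate()
    demo_payload = b"ABCD1234\nExample Venue\n123 Main St, Ottawa"
    demo_signed = demo_key.sign(demo_payload, encoder=Base64Encoder)
    # verify() raises nacl.exceptions.BadSignatureError if the data was tampered with.
    recovered = demo_key.verify_key.verify(demo_signed, encoder=Base64Encoder)
    assert recovered == demo_payload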
| 25.821918 | 88 | 0.687268 | ["MIT"] | cds-snc/covid-alert-portal | register/utils.py | 3,770 | Python |
from pfrl.wrappers.cast_observation import CastObservation # NOQA
from pfrl.wrappers.cast_observation import CastObservationToFloat32 # NOQA
from pfrl.wrappers.continuing_time_limit import ContinuingTimeLimit # NOQA
from pfrl.wrappers.monitor import Monitor # NOQA
from pfrl.wrappers.normalize_action_space import NormalizeActionSpace # NOQA
from pfrl.wrappers.randomize_action import RandomizeAction # NOQA
from pfrl.wrappers.render import Render # NOQA
from pfrl.wrappers.scale_reward import ScaleReward # NOQA
from pfrl.wrappers.vector_frame_stack import VectorFrameStack # NOQA
| 35.176471 | 77 | 0.837793 | ["MIT"] | TMats/pfrl | pfrl/wrappers/__init__.py | 598 | Python |
"""
Test functions for models.formula
"""
import string
import numpy as np
import numpy.random as R
import numpy.linalg as L
from numpy.testing import assert_almost_equal, assert_equal, assert_, \
assert_raises
from statsmodels.sandbox import formula #, contrast #, utils
from statsmodels.sandbox import contrast_old as contrast
class TestTerm(object):
def test_init(self):
t1 = formula.Term("trivial")
sqr = lambda x: x*x
t2 = formula.Term("not_so_trivial", sqr, "sqr")
assert_raises(ValueError, formula.Term, "name", termname=0)
def test_str(self):
t = formula.Term("name")
s = str(t)
def test_add(self):
t1 = formula.Term("t1")
t2 = formula.Term("t2")
f = t1 + t2
assert_(isinstance(f, formula.Formula))
assert_(f.hasterm(t1))
assert_(f.hasterm(t2))
def test_mul(self):
t1 = formula.Term("t1")
t2 = formula.Term("t2")
f = t1 * t2
assert_(isinstance(f, formula.Formula))
intercept = formula.Term("intercept")
f = t1 * intercept
assert_equal(str(f), str(formula.Formula(t1)))
f = intercept * t1
assert_equal(str(f), str(formula.Formula(t1)))
class TestFormula(object):
def setup(self):
self.X = R.standard_normal((40,10))
self.namespace = {}
self.terms = []
for i in range(10):
name = '%s' % string.ascii_uppercase[i]
self.namespace[name] = self.X[:,i]
self.terms.append(formula.Term(name))
self.formula = self.terms[0]
for i in range(1, 10):
self.formula += self.terms[i]
self.formula.namespace = self.namespace
def test_namespace(self):
space1 = {'X':np.arange(50), 'Y':np.arange(50)*2}
space2 = {'X':np.arange(20), 'Y':np.arange(20)*2}
space3 = {'X':np.arange(30), 'Y':np.arange(30)*2}
X = formula.Term('X')
Y = formula.Term('Y')
X.namespace = space1
assert_almost_equal(X(), np.arange(50))
Y.namespace = space2
assert_almost_equal(Y(), np.arange(20)*2)
f = X + Y
f.namespace = space1
assert_equal(f().shape, (2,50))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
f.namespace = space2
assert_equal(f().shape, (2,20))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
f.namespace = space3
assert_equal(f().shape, (2,30))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
xx = X**2
assert_equal(xx().shape, (50,))
xx.namespace = space3
assert_equal(xx().shape, (30,))
xx = X * formula.I
assert_equal(xx().shape, (50,))
xx.namespace = space3
assert_equal(xx().shape, (30,))
xx = X * X
assert_equal(xx.namespace, X.namespace)
xx = X + Y
assert_equal(xx.namespace, {})
Y.namespace = {'X':np.arange(50), 'Y':np.arange(50)*2}
xx = X + Y
assert_equal(xx.namespace, {})
Y.namespace = X.namespace
xx = X+Y
assert_equal(xx.namespace, Y.namespace)
def test_termcolumns(self):
t1 = formula.Term("A")
t2 = formula.Term("B")
f = t1 + t2 + t1 * t2
def other(val):
return np.array([3.2*val,4.342*val**2, 5.234*val**3])
q = formula.Quantitative(['other%d' % i for i in range(1,4)], termname='other', func=t1, transform=other)
f += q
q.namespace = f.namespace = self.formula.namespace
a = q()
b = f()
c = f.termcolumns(q)
b = b[c]
assert_almost_equal(a,b)
def test_str(self):
s = str(self.formula)
def test_call(self):
x = self.formula()
assert_equal(np.array(x).shape, (10, 40))
def test_design(self):
x = self.formula.design()
assert_equal(x.shape, (40, 10))
def test_product(self):
prod = self.formula['A'] * self.formula['C']
f = self.formula + prod
f.namespace = self.namespace
x = f.design()
p = f['A*C']
p.namespace = self.namespace
col = f.termcolumns(prod, dict=False)
assert_almost_equal(np.squeeze(x[:,col]), self.X[:,0] * self.X[:,2])
assert_almost_equal(np.squeeze(p()), self.X[:,0] * self.X[:,2])
def test_intercept1(self):
prod = self.terms[0] * self.terms[2]
f = self.formula + formula.I
icol = f.names().index('intercept')
f.namespace = self.namespace
assert_almost_equal(f()[icol], np.ones((40,)))
def test_intercept3(self):
t = self.formula['A']
t.namespace = self.namespace
prod = t * formula.I
prod.namespace = self.formula.namespace
assert_almost_equal(np.squeeze(prod()), t())
def test_contrast1(self):
term = self.terms[0] + self.terms[2]
c = contrast.Contrast(term, self.formula)
col1 = self.formula.termcolumns(self.terms[0], dict=False)
col2 = self.formula.termcolumns(self.terms[1], dict=False)
test = [[1] + [0]*9, [0]*2 + [1] + [0]*7]
assert_almost_equal(c.matrix, test)
def test_contrast2(self):
dummy = formula.Term('zero')
self.namespace['zero'] = np.zeros((40,), np.float64)
term = dummy + self.terms[2]
c = contrast.Contrast(term, self.formula)
test = [0]*2 + [1] + [0]*7
assert_almost_equal(c.matrix, test)
def test_contrast3(self):
X = self.formula.design()
P = np.dot(X, L.pinv(X))
dummy = formula.Term('noise')
resid = np.identity(40) - P
self.namespace['noise'] = np.transpose(np.dot(resid, R.standard_normal((40,5))))
terms = dummy + self.terms[2]
terms.namespace = self.formula.namespace
c = contrast.Contrast(terms, self.formula)
assert_equal(c.matrix.shape, (10,))
def test_power(self):
t = self.terms[2]
t2 = t**2
t.namespace = t2.namespace = self.formula.namespace
assert_almost_equal(t()**2, t2())
def test_quantitative(self):
t = self.terms[2]
sint = formula.Quantitative('t', func=t, transform=np.sin)
t.namespace = sint.namespace = self.formula.namespace
assert_almost_equal(np.sin(t()), sint())
def test_factor1(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
assert_equal(list(fac.values()), f)
def test_factor2(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
assert_equal(fac().shape, (3,30))
def test_factor3(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
m = fac.main_effect(reference=1)
m.namespace = fac.namespace
assert_equal(m().shape, (2,30))
def test_factor4(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
m = fac.main_effect(reference=2)
m.namespace = fac.namespace
r = np.array([np.identity(3)]*10)
r.shape = (30,3)
r = r.T
_m = np.array([r[0]-r[2],r[1]-r[2]])
assert_almost_equal(_m, m())
def test_factor5(self):
f = ['a','b','c']*3
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
assert_equal(fac(), [[1,0,0]*3,
[0,1,0]*3,
[0,0,1]*3])
assert_equal(fac['a'], [1,0,0]*3)
assert_equal(fac['b'], [0,1,0]*3)
assert_equal(fac['c'], [0,0,1]*3)
def test_ordinal_factor(self):
f = ['a','b','c']*3
fac = formula.Factor('ff', ['a','b','c'], ordinal=True)
fac.namespace = {'ff':f}
assert_equal(fac(), [0,1,2]*3)
assert_equal(fac['a'], [1,0,0]*3)
assert_equal(fac['b'], [0,1,0]*3)
assert_equal(fac['c'], [0,0,1]*3)
def test_ordinal_factor2(self):
f = ['b','c', 'a']*3
fac = formula.Factor('ff', ['a','b','c'], ordinal=True)
fac.namespace = {'ff':f}
assert_equal(fac(), [1,2,0]*3)
assert_equal(fac['a'], [0,0,1]*3)
assert_equal(fac['b'], [1,0,0]*3)
assert_equal(fac['c'], [0,1,0]*3)
def test_contrast4(self):
f = self.formula + self.terms[5] + self.terms[5]
f.namespace = self.namespace
estimable = False
c = contrast.Contrast(self.terms[5], f)
assert_equal(estimable, False)
def test_interactions(self):
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'a*b', 'a*c', 'b*c']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=3)
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[1,2,3])
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[3])
assert_equal(set(f.termnames()), set(['a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
def test_subtract(self):
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
ff = f - f['a*b']
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'a*c', 'b*c']))
ff = f - f['a*b'] - f['a*c']
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
ff = f - (f['a*b'] + f['a*c'])
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
| 31.639241 | 145 | 0.534407 | ["BSD-3-Clause"] | jarvmiller/statsmodels | statsmodels/sandbox/tests/test_formula.py | 9,998 | Python |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from local_file_system import LocalFileSystem
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Request, Response
from test_util import ReadFile
class _RenderServletDelegate(RenderServlet.Delegate):
def CreateServerInstance(self):
return ServerInstance.ForTest(LocalFileSystem.Create())
class RenderServletTest(unittest.TestCase):
def _Render(self, path):
return RenderServlet(Request.ForTest(path),
_RenderServletDelegate()).Get()
def testExtensionAppRedirect(self):
self.assertEqual(
Response.Redirect('/extensions/storage.html', permanent=False),
self._Render('storage.html'))
def testChannelRedirect(self):
self.assertEqual(
Response.Redirect('/extensions/storage.html', permanent=True),
self._Render('stable/extensions/storage.html'))
def testNotFound(self):
def create_404_response(real_path):
real_404 = self._Render(real_path)
self.assertEqual(200, real_404.status)
real_404.status = 404
return real_404
root_404 = create_404_response('404.html')
extensions_404 = create_404_response('extensions/404.html')
apps_404 = create_404_response('apps/404.html')
# Note: would test that root_404 != extensions and apps but it's not
# necessarily true.
self.assertNotEqual(extensions_404, apps_404)
self.assertEqual(root_404, self._Render('not_found.html'))
self.assertEqual(root_404, self._Render('not_found/not_found.html'))
self.assertEqual(extensions_404, self._Render('extensions/not_found.html'))
self.assertEqual(
extensions_404, self._Render('extensions/manifest/not_found.html'))
self.assertEqual(
extensions_404,
self._Render('extensions/manifest/not_found/not_found.html'))
self.assertEqual(apps_404, self._Render('apps/not_found.html'))
self.assertEqual(apps_404, self._Render('apps/manifest/not_found.html'))
self.assertEqual(
apps_404, self._Render('apps/manifest/not_found/not_found.html'))
def testSampleFile(self):
sample_file = 'extensions/talking_alarm_clock/background.js'
response = self._Render('extensions/examples/%s' % sample_file)
self.assertEqual(200, response.status)
self.assertTrue(response.headers['Content-Type'] in (
'application/javascript; charset=utf-8',
'application/x-javascript; charset=utf-8'))
self.assertEqual(ReadFile('%s/%s' % (EXAMPLES, sample_file)),
response.content.ToString())
def testSampleZip(self):
sample_dir = 'extensions/talking_alarm_clock'
response = self._Render('extensions/examples/%s.zip' % sample_dir)
self.assertEqual(200, response.status)
self.assertEqual('application/zip', response.headers['Content-Type'])
def testStaticFile(self):
static_file = 'css/site.css'
response = self._Render('static/%s' % static_file)
self.assertEqual(200, response.status)
self.assertEqual('text/css; charset=utf-8',
response.headers['Content-Type'])
self.assertEqual(ReadFile('%s/%s' % (STATIC_DOCS, static_file)),
response.content.ToString())
def testHtmlTemplate(self):
html_file = 'extensions/storage.html'
response = self._Render(html_file)
self.assertEqual(200, response.status)
self.assertEqual('text/html; charset=utf-8',
response.headers.get('Content-Type'))
# Can't really test rendering all that well.
self.assertTrue(len(response.content) >
len(ReadFile('%s/%s' % (PUBLIC_TEMPLATES, html_file))))
def testDevelopersGoogleComRedirect(self):
def assert_redirect(request_path):
response = self._Render(request_path)
self.assertEqual(('//developers.google.com/chrome', False),
response.GetRedirect())
assert_redirect('')
assert_redirect('index.html')
def testIndexRedirect(self):
response = self._Render('extensions')
self.assertEqual(('/extensions/index.html', False),
response.GetRedirect())
def testOtherRedirectsJsonRedirect(self):
response = self._Render('apps/webview_tag.html')
self.assertEqual(('/apps/tags/webview.html', False),
response.GetRedirect())
if __name__ == '__main__':
unittest.main()
| 38.198347 | 79 | 0.706621 | ["BSD-3-Clause"] | AOSPA/android_external_chromium_org | chrome/common/extensions/docs/server2/render_servlet_test.py | 4,622 | Python |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def LicenseServerSource(vim, *args, **kwargs):
'''Specify a license server reachable via IPv4 network.'''
obj = vim.client.factory.create('{urn:vim25}LicenseServerSource')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument, got: %d' % len(args))
required = [ 'licenseServer' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
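# Illustrative call (sketch only; `vim` stands for an already-connected pyvisdk
# session object, which is not constructed here):
#
#     source = LicenseServerSource(vim, '27000@license-server.example.com')
#     print(source.licenseServer)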
| 30.515152 | 124 | 0.60576 | ["MIT"] | Infinidat/pyvisdk | pyvisdk/do/license_server_source.py | 1,007 | Python |
from neuwon.database import Database
import numpy as np
import numba
class GameOfLife:
class _CellBaseClass:
__slots__ = ()
@classmethod
def _add_to_database(cls, database):
cell_data = database.add_class("Cell", cls)
cell_data.add_attribute("coordinates", shape=(2,), dtype=np.int32)
cell_data.add_attribute("alive", False, dtype=np.bool)
cell_data.add_connectivity_matrix("neighbors", "Cell")
return cell_data.get_instance_type()
def __init__(self, shape):
self.db = Database()
self.Cell = self._CellBaseClass._add_to_database(self.db)
self.shape = shape
self.grid = np.empty(self.shape, dtype=object)
for x in range(self.shape[0]):
for y in range(self.shape[1]):
self.grid[x,y] = self.Cell(coordinates=(x,y))
for x in range(self.shape[0]):
for y in range(self.shape[1]):
cell = self.grid[x,y]
neighbors = []
for x_offset in [-1, 0, 1]:
for y_offset in [-1, 0, 1]:
nx = x - x_offset
ny = y - y_offset
if nx < 0: nx = 0
if ny < 0: ny = 0
if nx >= self.shape[0]: nx = self.shape[0] - 1
if ny >= self.shape[1]: ny = self.shape[1] - 1
neighbor = self.grid[nx, ny]
if cell != neighbor:
neighbors.append(neighbor)
cell.neighbors = neighbors
self.db.get("Cell.neighbors").to_csr()
def randomize(self, alive_fraction):
a = self.db.get_data("Cell.alive")
a.fill(False)
a[np.random.uniform(size=a.shape) < alive_fraction] = True
def get_num_alive(self):
return sum(self.db.get_data("Cell.alive"))
def advance(self):
a = self.db.get_data("Cell.alive")
n = self.db.get_data("Cell.neighbors")
# C is the number of living neighbors for each cell.
c = n * np.array(a, dtype=np.int32)
_advance(a, c)
@numba.njit(parallel=True)
def _advance(a, c):
for idx in numba.prange(len(a)):
ci = c[idx]
if a[idx]:
if ci not in range(2, 4):
a[idx] = False
else:
if ci == 3:
a[idx] = True
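# Minimal driving loop (added for exposition; the grid size, fill fraction, and
# number of generations are arbitrary choices, not part of the original example):
if __name__ == "__main__":
    game = GameOfLife((32, 32))
    game.randomize(alive_fraction=0.3)
    for generation in range(1, 11):
        game.advance()
        print("generation", generation, "alive cells:", game.get_num_alive())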
| 35.720588 | 78 | 0.516674 | ["MIT"] | ctrl-z-9000-times/NEUWON | neuwon/database/examples/life/model.py | 2,429 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# ------------------------------------------------------------
# Purpose: Test the parametric functions.
# ------------------------------------------------------------
class TestParametricFunctions(vtk.test.Testing.vtkTest):
def testParametricFunctions(self):
# ------------------------------------------------------------
# Get a texture
# ------------------------------------------------------------
textureReader = vtk.vtkJPEGReader()
textureReader.SetFileName(VTK_DATA_ROOT + "/Data/beach.jpg")
texture = vtk.vtkTexture()
texture.SetInputConnection(textureReader.GetOutputPort())
# ------------------------------------------------------------
# For each parametric surface:
# 1) Create it
# 2) Assign mappers and actors
# 3) Position this object
        # 4) Add a label
# ------------------------------------------------------------
# ------------------------------------------------------------
# Create a torus
# ------------------------------------------------------------
torus = vtk.vtkParametricTorus()
torusSource = vtk.vtkParametricFunctionSource()
torusSource.SetParametricFunction(torus)
torusSource.SetScalarModeToPhase()
torusMapper = vtk.vtkPolyDataMapper()
torusMapper.SetInputConnection(torusSource.GetOutputPort())
torusMapper.SetScalarRange(0, 360)
torusActor = vtk.vtkActor()
torusActor.SetMapper(torusMapper)
torusActor.SetPosition(0, 12, 0)
torusTextMapper = vtk.vtkTextMapper()
torusTextMapper.SetInput("Torus")
torusTextMapper.GetTextProperty().SetJustificationToCentered()
torusTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
torusTextMapper.GetTextProperty().SetColor(1, 0, 0)
torusTextMapper.GetTextProperty().SetFontSize(14)
torusTextActor = vtk.vtkActor2D()
torusTextActor.SetMapper(torusTextMapper)
torusTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
torusTextActor.GetPositionCoordinate().SetValue(0, 9.5, 0)
# ------------------------------------------------------------
# Create a klein bottle
# ------------------------------------------------------------
klein = vtk.vtkParametricKlein()
kleinSource = vtk.vtkParametricFunctionSource()
kleinSource.SetParametricFunction(klein)
kleinSource.SetScalarModeToU0V0()
kleinMapper = vtk.vtkPolyDataMapper()
kleinMapper.SetInputConnection(kleinSource.GetOutputPort())
kleinMapper.SetScalarRange(0, 3)
kleinActor = vtk.vtkActor()
kleinActor.SetMapper(kleinMapper)
kleinActor.SetPosition(8, 10.5, 0)
kleinTextMapper = vtk.vtkTextMapper()
kleinTextMapper.SetInput("Klein")
kleinTextMapper.GetTextProperty().SetJustificationToCentered()
kleinTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
kleinTextMapper.GetTextProperty().SetColor(1, 0, 0)
kleinTextMapper.GetTextProperty().SetFontSize(14)
kleinTextActor = vtk.vtkActor2D()
kleinTextActor.SetMapper(kleinTextMapper)
kleinTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
kleinTextActor.GetPositionCoordinate().SetValue(8, 9.5, 0)
# ------------------------------------------------------------
# Create a Figure-8 Klein
# ------------------------------------------------------------
klein2 = vtk.vtkParametricFigure8Klein()
klein2Source = vtk.vtkParametricFunctionSource()
klein2Source.SetParametricFunction(klein2)
klein2Source.GenerateTextureCoordinatesOn()
klein2Mapper = vtk.vtkPolyDataMapper()
klein2Mapper.SetInputConnection(klein2Source.GetOutputPort())
klein2Mapper.SetScalarRange(0, 3)
klein2Actor = vtk.vtkActor()
klein2Actor.SetMapper(klein2Mapper)
klein2Actor.SetPosition(16, 12, 0)
klein2Actor.SetTexture(texture)
fig8KleinTextMapper = vtk.vtkTextMapper()
fig8KleinTextMapper.SetInput("Fig-8.Klein")
fig8KleinTextMapper.GetTextProperty().SetJustificationToCentered()
fig8KleinTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
fig8KleinTextMapper.GetTextProperty().SetColor(1, 0, 0)
fig8KleinTextMapper.GetTextProperty().SetFontSize(14)
fig8KleinTextActor = vtk.vtkActor2D()
fig8KleinTextActor.SetMapper(fig8KleinTextMapper)
fig8KleinTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
fig8KleinTextActor.GetPositionCoordinate().SetValue(16, 9.5, 0)
# ------------------------------------------------------------
# Create a mobius strip
# ------------------------------------------------------------
mobius = vtk.vtkParametricMobius()
mobiusSource = vtk.vtkParametricFunctionSource()
mobiusSource.SetParametricFunction(mobius)
mobiusSource.GenerateTextureCoordinatesOn()
mobiusMapper = vtk.vtkPolyDataMapper()
mobiusMapper.SetInputConnection(mobiusSource.GetOutputPort())
mobiusActor = vtk.vtkActor()
mobiusActor.SetMapper(mobiusMapper)
mobiusActor.RotateX(45)
mobiusActor.SetPosition(24, 12, 0)
mobiusActor.SetTexture(texture)
mobiusTextMapper = vtk.vtkTextMapper()
mobiusTextMapper.SetInput("Mobius")
mobiusTextMapper.GetTextProperty().SetJustificationToCentered()
mobiusTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
mobiusTextMapper.GetTextProperty().SetColor(1, 0, 0)
mobiusTextMapper.GetTextProperty().SetFontSize(14)
mobiusTextActor = vtk.vtkActor2D()
mobiusTextActor.SetMapper(mobiusTextMapper)
mobiusTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
mobiusTextActor.GetPositionCoordinate().SetValue(24, 9.5, 0)
# ------------------------------------------------------------
# Create a super toroid
# ------------------------------------------------------------
toroid = vtk.vtkParametricSuperToroid()
toroid.SetN1(2)
toroid.SetN2(3)
toroidSource = vtk.vtkParametricFunctionSource()
toroidSource.SetParametricFunction(toroid)
toroidSource.SetScalarModeToU()
toroidMapper = vtk.vtkPolyDataMapper()
toroidMapper.SetInputConnection(toroidSource.GetOutputPort())
toroidMapper.SetScalarRange(0, 6.28)
toroidActor = vtk.vtkActor()
toroidActor.SetMapper(toroidMapper)
toroidActor.SetPosition(0, 4, 0)
superToroidTextMapper = vtk.vtkTextMapper()
superToroidTextMapper.SetInput("Super.Toroid")
superToroidTextMapper.GetTextProperty().SetJustificationToCentered()
superToroidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
superToroidTextMapper.GetTextProperty().SetColor(1, 0, 0)
superToroidTextMapper.GetTextProperty().SetFontSize(14)
superToroidTextActor = vtk.vtkActor2D()
superToroidTextActor.SetMapper(superToroidTextMapper)
superToroidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
superToroidTextActor.GetPositionCoordinate().SetValue(0, 1.5, 0)
# ------------------------------------------------------------
# Create a super ellipsoid
# ------------------------------------------------------------
superEllipsoid = vtk.vtkParametricSuperEllipsoid()
superEllipsoid.SetXRadius(1.25)
superEllipsoid.SetYRadius(1.5)
superEllipsoid.SetZRadius(1.0)
superEllipsoid.SetN1(1.1)
superEllipsoid.SetN2(1.75)
superEllipsoidSource = vtk.vtkParametricFunctionSource()
superEllipsoidSource.SetParametricFunction(superEllipsoid)
superEllipsoidSource.SetScalarModeToV()
superEllipsoidMapper = vtk.vtkPolyDataMapper()
superEllipsoidMapper.SetInputConnection(superEllipsoidSource.GetOutputPort())
superEllipsoidMapper.SetScalarRange(0, 3.14)
superEllipsoidActor = vtk.vtkActor()
superEllipsoidActor.SetMapper(superEllipsoidMapper)
superEllipsoidActor.SetPosition(8, 4, 0)
superEllipsoidTextMapper = vtk.vtkTextMapper()
superEllipsoidTextMapper.SetInput("Super.Ellipsoid")
superEllipsoidTextMapper.GetTextProperty().SetJustificationToCentered()
superEllipsoidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
superEllipsoidTextMapper.GetTextProperty().SetColor(1, 0, 0)
superEllipsoidTextMapper.GetTextProperty().SetFontSize(14)
superEllipsoidTextActor = vtk.vtkActor2D()
superEllipsoidTextActor.SetMapper(superEllipsoidTextMapper)
superEllipsoidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
superEllipsoidTextActor.GetPositionCoordinate().SetValue(8, 1.5, 0)
# ------------------------------------------------------------
# Create an open 1D spline
# ------------------------------------------------------------
splinePoints = [
[0.50380158308139134, -0.60679315105396936, -0.37248976406291578],
[-0.4354646054261665, -0.85362339758017258, -0.84844312996065385],
[0.2163147512899315, -0.39797507012168643, -0.76700353518454523],
[0.97158415334838644, -0.58513467367046257, -0.35846037946569753],
[-0.64359767997804918, -0.94620739107309249, -0.90762176546623086],
[-0.39901219094126117, -0.1978931497772658, 0.0098316934936828471],
[-0.75872745167404765, 0.067719714281950116, 0.165237936733867],
[-0.84599731389712418, -0.67685466896596114, 0.10357868909071133],
[0.84702754758625654, -0.0080077177882230677, -0.58571286666473044],
[-0.076150034124101484, 0.14637647622561856, 0.1494359239700418] ]
inputPoints = vtk.vtkPoints()
for i in range(0, 10):
inputPoints.InsertPoint(i, splinePoints[i])
spline = vtk.vtkParametricSpline()
spline.SetPoints(inputPoints)
spline.ClosedOff()
splineSource = vtk.vtkParametricFunctionSource()
splineSource.SetParametricFunction(spline)
splineMapper = vtk.vtkPolyDataMapper()
splineMapper.SetInputConnection(splineSource.GetOutputPort())
splineActor = vtk.vtkActor()
splineActor.SetMapper(splineMapper)
splineActor.SetPosition(16, 4, 0)
splineActor.GetProperty().SetColor(0, 0, 0)
splineTextMapper = vtk.vtkTextMapper()
splineTextMapper.SetInput("Open.Spline")
splineTextMapper.GetTextProperty().SetJustificationToCentered()
splineTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
splineTextMapper.GetTextProperty().SetColor(1, 0, 0)
splineTextMapper.GetTextProperty().SetFontSize(14)
splineTextActor = vtk.vtkActor2D()
splineTextActor.SetMapper(splineTextMapper)
splineTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
splineTextActor.GetPositionCoordinate().SetValue(16, 1.5, 0)
# ------------------------------------------------------------
# Create a closed 1D spline
# ------------------------------------------------------------
spline2 = vtk.vtkParametricSpline()
spline2.SetPoints(inputPoints)
spline2.ClosedOn()
spline2Source = vtk.vtkParametricFunctionSource()
spline2Source.SetParametricFunction(spline2)
spline2Mapper = vtk.vtkPolyDataMapper()
spline2Mapper.SetInputConnection(spline2Source.GetOutputPort())
spline2Actor = vtk.vtkActor()
spline2Actor.SetMapper(spline2Mapper)
spline2Actor.SetPosition(24, 4, 0)
spline2Actor.GetProperty().SetColor(0, 0, 0)
spline2TextMapper = vtk.vtkTextMapper()
spline2TextMapper.SetInput("Closed.Spline")
spline2TextMapper.GetTextProperty().SetJustificationToCentered()
spline2TextMapper.GetTextProperty().SetVerticalJustificationToCentered()
spline2TextMapper.GetTextProperty().SetColor(1, 0, 0)
spline2TextMapper.GetTextProperty().SetFontSize(14)
spline2TextActor = vtk.vtkActor2D()
spline2TextActor.SetMapper(spline2TextMapper)
spline2TextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
spline2TextActor.GetPositionCoordinate().SetValue(24, 1.5, 0)
# ------------------------------------------------------------
# Create a spiral conic
# ------------------------------------------------------------
sconic = vtk.vtkParametricConicSpiral()
sconic.SetA(0.8)
sconic.SetB(2.5)
sconic.SetC(0.4)
sconicSource = vtk.vtkParametricFunctionSource()
sconicSource.SetParametricFunction(sconic)
sconicSource.SetScalarModeToDistance()
sconicMapper = vtk.vtkPolyDataMapper()
sconicMapper.SetInputConnection(sconicSource.GetOutputPort())
sconicActor = vtk.vtkActor()
sconicActor.SetMapper(sconicMapper)
sconicMapper.SetScalarRange(0, 9)
sconicActor.SetPosition(0, -4, 0)
sconicActor.SetScale(1.2, 1.2, 1.2)
sconicTextMapper = vtk.vtkTextMapper()
sconicTextMapper.SetInput("Spiral.Conic")
sconicTextMapper.GetTextProperty().SetJustificationToCentered()
sconicTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
sconicTextMapper.GetTextProperty().SetColor(1, 0, 0)
sconicTextMapper.GetTextProperty().SetFontSize(14)
sconicTextActor = vtk.vtkActor2D()
sconicTextActor.SetMapper(sconicTextMapper)
sconicTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
sconicTextActor.GetPositionCoordinate().SetValue(0, -6.5, 0)
# ------------------------------------------------------------
# Create Boy's surface
# ------------------------------------------------------------
boy = vtk.vtkParametricBoy()
boySource = vtk.vtkParametricFunctionSource()
boySource.SetParametricFunction(boy)
boySource.SetScalarModeToModulus()
boyMapper = vtk.vtkPolyDataMapper()
boyMapper.SetInputConnection(boySource.GetOutputPort())
boyMapper.SetScalarRange(0, 2)
boyActor = vtk.vtkActor()
boyActor.SetMapper(boyMapper)
boyActor.SetPosition(8, -4, 0)
boyActor.SetScale(1.5, 1.5, 1.5)
boyTextMapper = vtk.vtkTextMapper()
boyTextMapper.SetInput("Boy")
boyTextMapper.GetTextProperty().SetJustificationToCentered()
boyTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
boyTextMapper.GetTextProperty().SetColor(1, 0, 0)
boyTextMapper.GetTextProperty().SetFontSize(14)
boyTextActor = vtk.vtkActor2D()
boyTextActor.SetMapper(boyTextMapper)
boyTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
boyTextActor.GetPositionCoordinate().SetValue(8, -6.5, 0)
# ------------------------------------------------------------
# Create a cross cap
# ------------------------------------------------------------
crossCap = vtk.vtkParametricCrossCap()
crossCapSource = vtk.vtkParametricFunctionSource()
crossCapSource.SetParametricFunction(crossCap)
crossCapSource.SetScalarModeToY()
crossCapMapper = vtk.vtkPolyDataMapper()
crossCapMapper.SetInputConnection(crossCapSource.GetOutputPort())
crossCapActor = vtk.vtkActor()
crossCapActor.SetMapper(crossCapMapper)
crossCapActor.RotateX(65)
crossCapActor.SetPosition(16, -4, 0)
crossCapActor.SetScale(1.5, 1.5, 1.5)
crossCapTextMapper = vtk.vtkTextMapper()
crossCapTextMapper.SetInput("Cross.Cap")
crossCapTextMapper.GetTextProperty().SetJustificationToCentered()
crossCapTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
crossCapTextMapper.GetTextProperty().SetColor(1, 0, 0)
crossCapTextMapper.GetTextProperty().SetFontSize(14)
crossCapTextActor = vtk.vtkActor2D()
crossCapTextActor.SetMapper(crossCapTextMapper)
crossCapTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
crossCapTextActor.GetPositionCoordinate().SetValue(16, -6.5, 0)
# ------------------------------------------------------------
# Create Dini's surface
# ------------------------------------------------------------
dini = vtk.vtkParametricDini()
diniSource = vtk.vtkParametricFunctionSource()
diniSource.SetScalarModeToDistance()
diniSource.SetParametricFunction(dini)
diniMapper = vtk.vtkPolyDataMapper()
diniMapper.SetInputConnection(diniSource.GetOutputPort())
diniActor = vtk.vtkActor()
diniActor.SetMapper(diniMapper)
diniActor.RotateX(-90)
diniActor.SetPosition(24, -3, 0)
diniActor.SetScale(1.5, 1.5, 0.5)
diniTextMapper = vtk.vtkTextMapper()
diniTextMapper.SetInput("Dini")
diniTextMapper.GetTextProperty().SetJustificationToCentered()
diniTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
diniTextMapper.GetTextProperty().SetColor(1, 0, 0)
diniTextMapper.GetTextProperty().SetFontSize(14)
diniTextActor = vtk.vtkActor2D()
diniTextActor.SetMapper(diniTextMapper)
diniTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
diniTextActor.GetPositionCoordinate().SetValue(24, -6.5, 0)
# ------------------------------------------------------------
# Create Enneper's surface
# ------------------------------------------------------------
enneper = vtk.vtkParametricEnneper()
enneperSource = vtk.vtkParametricFunctionSource()
enneperSource.SetParametricFunction(enneper)
enneperSource.SetScalarModeToQuadrant()
enneperMapper = vtk.vtkPolyDataMapper()
enneperMapper.SetInputConnection(enneperSource.GetOutputPort())
enneperMapper.SetScalarRange(1, 4)
enneperActor = vtk.vtkActor()
enneperActor.SetMapper(enneperMapper)
enneperActor.SetPosition(0, -12, 0)
enneperActor.SetScale(0.25, 0.25, 0.25)
enneperTextMapper = vtk.vtkTextMapper()
enneperTextMapper.SetInput("Enneper")
enneperTextMapper.GetTextProperty().SetJustificationToCentered()
enneperTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
enneperTextMapper.GetTextProperty().SetColor(1, 0, 0)
enneperTextMapper.GetTextProperty().SetFontSize(14)
enneperTextActor = vtk.vtkActor2D()
enneperTextActor.SetMapper(enneperTextMapper)
enneperTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
enneperTextActor.GetPositionCoordinate().SetValue(0, -14.5, 0)
# ------------------------------------------------------------
# Create an ellipsoidal surface
# ------------------------------------------------------------
ellipsoid = vtk.vtkParametricEllipsoid()
ellipsoid.SetXRadius(1)
ellipsoid.SetYRadius(0.75)
ellipsoid.SetZRadius(0.5)
ellipsoidSource = vtk.vtkParametricFunctionSource()
ellipsoidSource.SetParametricFunction(ellipsoid)
ellipsoidSource.SetScalarModeToZ()
ellipsoidMapper = vtk.vtkPolyDataMapper()
ellipsoidMapper.SetInputConnection(ellipsoidSource.GetOutputPort())
ellipsoidMapper.SetScalarRange(-0.5, 0.5)
ellipsoidActor = vtk.vtkActor()
ellipsoidActor.SetMapper(ellipsoidMapper)
ellipsoidActor.SetPosition(8, -12, 0)
ellipsoidActor.SetScale(1.5, 1.5, 1.5)
ellipsoidTextMapper = vtk.vtkTextMapper()
ellipsoidTextMapper.SetInput("Ellipsoid")
ellipsoidTextMapper.GetTextProperty().SetJustificationToCentered()
ellipsoidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
ellipsoidTextMapper.GetTextProperty().SetColor(1, 0, 0)
ellipsoidTextMapper.GetTextProperty().SetFontSize(14)
ellipsoidTextActor = vtk.vtkActor2D()
ellipsoidTextActor.SetMapper(ellipsoidTextMapper)
ellipsoidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
ellipsoidTextActor.GetPositionCoordinate().SetValue(8, -14.5, 0)
# ------------------------------------------------------------
        # Create a surface with random hills on it.
# Note that for testing, we will disable the
# random generation of the surfaces. This is
# because random number generators do not
# return the same result on different operating
# systems.
# ------------------------------------------------------------
randomHills = vtk.vtkParametricRandomHills()
randomHills.AllowRandomGenerationOff()
randomHills.GenerateTheHills()
randomHillsSource = vtk.vtkParametricFunctionSource()
randomHillsSource.SetParametricFunction(randomHills)
randomHillsSource.GenerateTextureCoordinatesOn()
randomHillsMapper = vtk.vtkPolyDataMapper()
randomHillsMapper.SetInputConnection(randomHillsSource.GetOutputPort())
randomHillsActor = vtk.vtkActor()
randomHillsActor.SetMapper(randomHillsMapper)
randomHillsActor.SetPosition(16, -14, 0)
randomHillsActor.SetScale(0.2, 0.2, 0.2)
randomHillsActor.SetTexture(texture)
randomHillsTextMapper = vtk.vtkTextMapper()
randomHillsTextMapper.SetInput("Random.Hills")
randomHillsTextMapper.GetTextProperty().SetJustificationToCentered()
randomHillsTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
randomHillsTextMapper.GetTextProperty().SetColor(1, 0, 0)
randomHillsTextMapper.GetTextProperty().SetFontSize(14)
randomHillsTextActor = vtk.vtkActor2D()
randomHillsTextActor.SetMapper(randomHillsTextMapper)
randomHillsTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
randomHillsTextActor.GetPositionCoordinate().SetValue(16, -14.5, 0)
# ------------------------------------------------------------
        # Create Steiner's Roman Surface.
# ------------------------------------------------------------
roman = vtk.vtkParametricRoman()
roman.SetRadius(1.5)
romanSource = vtk.vtkParametricFunctionSource()
romanSource.SetParametricFunction(roman)
romanSource.SetScalarModeToX()
romanMapper = vtk.vtkPolyDataMapper()
romanMapper.SetInputConnection(romanSource.GetOutputPort())
romanActor = vtk.vtkActor()
romanActor.SetMapper(romanMapper)
romanActor.SetPosition(24, -12, 0)
romanTextMapper = vtk.vtkTextMapper()
romanTextMapper.SetInput("Roman")
romanTextMapper.GetTextProperty().SetJustificationToCentered()
romanTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
romanTextMapper.GetTextProperty().SetColor(1, 0, 0)
romanTextMapper.GetTextProperty().SetFontSize(14)
romanTextActor = vtk.vtkActor2D()
romanTextActor.SetMapper(romanTextMapper)
romanTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
romanTextActor.GetPositionCoordinate().SetValue(24, -14.5, 0)
# ------------------------------------------------------------
# Create the RenderWindow, Renderer and both Actors
# ------------------------------------------------------------
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# add actors
ren.AddViewProp(torusActor)
ren.AddViewProp(kleinActor)
ren.AddViewProp(klein2Actor)
ren.AddViewProp(toroidActor)
ren.AddViewProp(superEllipsoidActor)
ren.AddViewProp(mobiusActor)
ren.AddViewProp(splineActor)
ren.AddViewProp(spline2Actor)
ren.AddViewProp(sconicActor)
ren.AddViewProp(boyActor)
ren.AddViewProp(crossCapActor)
ren.AddViewProp(diniActor)
ren.AddViewProp(enneperActor)
ren.AddViewProp(ellipsoidActor)
ren.AddViewProp(randomHillsActor)
ren.AddViewProp(romanActor)
#add text actors
ren.AddViewProp(torusTextActor)
ren.AddViewProp(kleinTextActor)
ren.AddViewProp(fig8KleinTextActor)
ren.AddViewProp(mobiusTextActor)
ren.AddViewProp(superToroidTextActor)
ren.AddViewProp(superEllipsoidTextActor)
ren.AddViewProp(splineTextActor)
ren.AddViewProp(spline2TextActor)
ren.AddViewProp(sconicTextActor)
ren.AddViewProp(boyTextActor)
ren.AddViewProp(crossCapTextActor)
ren.AddViewProp(diniTextActor)
ren.AddViewProp(enneperTextActor)
ren.AddViewProp(ellipsoidTextActor)
ren.AddViewProp(randomHillsTextActor)
ren.AddViewProp(romanTextActor)
ren.SetBackground(0.7, 0.8, 1)
renWin.SetSize(500, 500)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.3)
iren.Initialize()
renWin.Render()
img_file = "TestParametricFunctions.png"
# NOTE: this test has a companion .tcl test. The threshold set
# here should be the same as the threshold in the .tcl
# test. Both tests should produce exactly the same results.
vtk.test.Testing.compareImage(iren.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=10)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestParametricFunctions, 'test')])
| 46.221831 | 119 | 0.634418 | ["BSD-3-Clause"] | Starlink/vtk | Common/ComputationalGeometry/Testing/Python/TestParametricFunctions.py | 26,254 | Python
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
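    # Illustrative usage sketch (not part of the original module). A client is
    # typically opened, used, and closed explicitly, or driven through the
    # context manager protocol implemented by __enter__/__exit__ at the end of
    # this class. The host, port, and certificate paths are placeholders.
    #
    #   client = ProxyKmipClient(hostname='127.0.0.1', port=5696,
    #                            cert='/path/client.crt', key='/path/client.key',
    #                            ca='/path/ca.crt')
    #   client.open()
    #   try:
    #       pass  # perform KMIP operations here
    #   finally:
    #       client.close()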
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): list of enumerations of crypto
usage mask passing to the symmetric key. Optional, defaults to
None
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
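    # Illustrative sketch (not part of the original module): creating a 256-bit
    # AES key on an already-open client. The policy name and key name below are
    # placeholders.
    #
    #   uid = client.create(
    #       enums.CryptographicAlgorithm.AES,
    #       256,
    #       operation_policy_name='default',
    #       name='Symmetric Key 1',
    #   )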
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the public key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
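    # Illustrative sketch (not part of the original module): creating a
    # 2048-bit RSA key pair with named public and private halves.
    #
    #   public_uid, private_uid = client.create_key_pair(
    #       enums.CryptographicAlgorithm.RSA,
    #       2048,
    #       public_name='Public Key 1',
    #       private_name='Private Key 1',
    #   )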
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
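    # Illustrative sketch (not part of the original module): registering a Pie
    # SymmetricKey object. The SymmetricKey constructor arguments shown here
    # (algorithm, length, key bytes) are an assumption about the Pie API, not
    # taken from this file.
    #
    #   key = pobjects.SymmetricKey(
    #       enums.CryptographicAlgorithm.AES,
    #       128,
    #       b'\x00' * 16,
    #   )
    #   uid = client.register(key)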
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
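    # Illustrative sketch (not part of the original module): deriving a 128-bit
    # AES key from an existing key via PBKDF2, using the derivation_parameters
    # keys documented above. The base key ID, salt, and iteration count are
    # placeholders.
    #
    #   uid = client.derive_key(
    #       enums.ObjectType.SYMMETRIC_KEY,
    #       ['<base-key-uid>'],
    #       enums.DerivationMethod.PBKDF2,
    #       {
    #           'cryptographic_parameters': {
    #               'hashing_algorithm': enums.HashingAlgorithm.SHA_256,
    #           },
    #           'salt': b'salt',
    #           'iteration_count': 4096,
    #       },
    #       cryptographic_length=128,
    #       cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
    #   )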
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes the are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
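    # Illustrative sketch (not part of the original module): retrieving an
    # object wrapped by another key, using the key_wrapping_specification keys
    # documented above. The object and wrapping-key IDs are placeholders.
    #
    #   obj = client.get(
    #       '<object-uid>',
    #       key_wrapping_specification={
    #           'wrapping_method': enums.WrappingMethod.ENCRYPT,
    #           'encryption_key_information': {
    #               'unique_identifier': '<wrapping-key-uid>',
    #               'cryptographic_parameters': {
    #                   'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP,
    #               },
    #           },
    #           'encoding_option': enums.EncodingOption.NO_ENCODING,
    #       },
    #   )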
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
            'counter_length'              | An integer representing the length
                                          | of the counter portion of the IV
                                          | in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
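    # Illustrative sketch (not part of the original module): AES-CBC encryption
    # with a server-generated IV, using the cryptographic_parameters keys
    # documented above. The key ID is a placeholder.
    #
    #   ciphertext, iv = client.encrypt(
    #       b'plaintext bytes',
    #       uid='<aes-key-uid>',
    #       cryptographic_parameters={
    #           'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    #           'block_cipher_mode': enums.BlockCipherMode.CBC,
    #           'padding_method': enums.PaddingMethod.PKCS5,
    #           'random_iv': True,
    #       },
    #   )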
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
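    # Illustrative sketch (not part of the original module): signing a message
    # with an RSA private key and verifying it with the matching public key.
    # The key IDs are placeholders and the parameter choices are assumptions.
    #
    #   params = {
    #       'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
    #       'hashing_algorithm': enums.HashingAlgorithm.SHA_256,
    #   }
    #   signature = client.sign(b'message', uid='<private-key-uid>',
    #                           cryptographic_parameters=params)
    #   validity = client.signature_verify(b'message', signature,
    #                                      uid='<public-key-uid>',
    #                                      cryptographic_parameters=params)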
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The bytes to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MAC computed from the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
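    # Illustrative sketch (not part of the original module): computing an
    # HMAC-SHA256 over a byte string with a registered MAC key (placeholder ID).
    #
    #   key_uid, mac_data = client.mac(
    #       b'data to authenticate',
    #       uid='<hmac-key-uid>',
    #       algorithm=enums.CryptographicAlgorithm.HMAC_SHA256,
    #   )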
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
| 42.36669 | 79 | 0.585278 | ["Apache-2.0"] | eniltonj/PyKMIP | kmip/pie/client.py | 59,271 | Python
FRANCHISES = """
select t1.aliases, overall, firsts, seconds, third, y1,y2, unique_a, unique_1, unique_12
from
(select Count(A."PlayerID") as overall,T."Aliases" as aliases, MAX(A."year") as y1, MIN(A."year") as y2, Count (distinct A."PlayerID") as unique_a
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases")
group by T."Aliases"
order by T."Aliases"
) as t1
join
(
select Count(A."PlayerID") as firsts,T."Aliases" as aliases, Count (distinct A."PlayerID") as unique_1
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"=1
group by T."Aliases"
order by T."Aliases"
) as t2 on t1.aliases=t2.aliases
join
(
select Count(A."PlayerID") as seconds,T."Aliases" as aliases
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"=2
group by T."Aliases"
order by T."Aliases"
) as t3 on t1.aliases=t3.aliases
join
(
select Count(A."PlayerID") as third,T."Aliases" as aliases
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"=3
group by T."Aliases"
order by T."Aliases"
) as t4 on t1.aliases=t4.aliases
join
(
select Count (distinct A."PlayerID") as unique_12, T."Aliases" as aliases
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"in(1,2)
group by T."Aliases"
order by T."Aliases"
) as t5 on t1.aliases=t5.aliases
""" | 34.904762 | 147 | 0.697817 | [
"MIT"
] | Voldy87/all-nba-team | all_nba_team/api/hardcoded_queries.py | 1,466 | Python |
# terrascript/kind/r.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class kind_cluster(terrascript.Resource):
pass
| 18.642857 | 79 | 0.770115 | ["BSD-2-Clause"] | mjuenema/python-terrascript | terrascript/kind/r.py | 261 | Python
from __future__ import absolute_import
import json
import datetime
import os
import os.path
import sys
import traceback
from distutils import log
from .base import BaseBuildCommand
class BuildAssetsCommand(BaseBuildCommand):
user_options = BaseBuildCommand.user_options + [
(
"asset-json-path=",
None,
"Relative path for JSON manifest. Defaults to {dist_name}/assets.json",
),
(
"inplace",
"i",
"ignore build-lib and put compiled javascript files into the source "
+ "directory alongside your pure Python modules",
),
(
"force",
"f",
"Force rebuilding of static content. Defaults to rebuilding on version "
"change detection.",
),
]
description = "build static media assets"
def initialize_options(self):
self.asset_json_path = u"{}/assets.json".format(self.distribution.get_name())
BaseBuildCommand.initialize_options(self)
def get_dist_paths(self):
return ["src/sentry/static/sentry/dist"]
def get_manifest_additions(self):
return ("src/" + self.asset_json_path,)
def _get_package_version(self):
"""
Attempt to get the most correct current version of Sentry.
"""
pkg_path = os.path.join(self.work_path, "src")
sys.path.insert(0, pkg_path)
try:
import sentry
except Exception:
version = None
build = None
else:
log.info(u"pulled version information from 'sentry' module".format(sentry.__file__))
version = self.distribution.get_version()
build = sentry.__build__
finally:
sys.path.pop(0)
if not (version and build):
json_path = self.get_asset_json_path()
try:
with open(json_path) as fp:
data = json.loads(fp.read())
except Exception:
pass
else:
log.info(u"pulled version information from '{}'".format(json_path))
version, build = data["version"], data["build"]
return {"version": version, "build": build}
def _needs_static(self, version_info):
json_path = self.get_asset_json_path()
if not os.path.exists(json_path):
return True
with open(json_path) as fp:
data = json.load(fp)
if data.get("version") != version_info.get("version"):
return True
if data.get("build") != version_info.get("build"):
return True
return False
def _needs_built(self):
if BaseBuildCommand._needs_built(self):
return True
version_info = self._get_package_version()
return self._needs_static(version_info)
def _build(self):
version_info = self._get_package_version()
log.info(
u"building assets for {} v{} (build {})".format(
self.distribution.get_name(),
version_info["version"] or "UNKNOWN",
version_info["build"] or "UNKNOWN",
)
)
if not version_info["version"] or not version_info["build"]:
log.fatal("Could not determine sentry version or build")
sys.exit(1)
try:
self._build_static()
except Exception:
traceback.print_exc()
log.fatal("unable to build Sentry's static assets!")
sys.exit(1)
log.info("writing version manifest")
manifest = self._write_version_file(version_info)
log.info(u"recorded manifest\n{}".format(json.dumps(manifest, indent=2)))
def _build_static(self):
# By setting NODE_ENV=production, a few things happen
# * React optimizes out certain code paths
# * Webpack will add version strings to built/referenced assets
env = dict(os.environ)
env["SENTRY_STATIC_DIST_PATH"] = self.sentry_static_dist_path
env["NODE_ENV"] = "production"
self._run_yarn_command(["webpack", "--bail"], env=env)
def _write_version_file(self, version_info):
manifest = {
"createdAt": datetime.datetime.utcnow().isoformat() + "Z",
"version": version_info["version"],
"build": version_info["build"],
}
with open(self.get_asset_json_path(), "w") as fp:
json.dump(manifest, fp)
return manifest
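    # A hypothetical example of the manifest written above (values are
    # illustrative only, not taken from a real build):
    #
    #   {
    #       "createdAt": "2019-01-01T00:00:00Z",
    #       "version": "10.0.0.dev0",
    #       "build": "2e48ab4"
    #   }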
@property
def sentry_static_dist_path(self):
return os.path.abspath(os.path.join(self.build_lib, "sentry/static/sentry/dist"))
def get_asset_json_path(self):
return os.path.abspath(os.path.join(self.build_lib, self.asset_json_path))
| 32.445946 | 96 | 0.591628 | ["BSD-3-Clause"] | CSP197/sentry | src/sentry/utils/distutils/commands/build_assets.py | 4,802 | Python
# Generated by Django 3.1.3 on 2021-02-16 11:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("imagedeck", "0009_auto_20201122_2300"),
("dcodex", "0034_auto_20201215_0315"),
]
operations = [
migrations.AlterField(
model_name="manuscript",
name="imagedeck",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.SET_DEFAULT,
to="imagedeck.deckbase",
),
),
]
| 24.703704 | 64 | 0.562219 | ["Apache-2.0"] | rbturnbull/dcodex | dcodex/migrations/0035_auto_20210216_0331.py | 667 | Python
#!/usr/bin/env python3 -u
import sys
for value in sys.stdin:
sys.stderr.write(f"consumed {value}\n")
| 13.5 | 43 | 0.675926 | ["BSD-3-Clause"] | Betta-Lyon-Delsordo/missing-course | topics/05-data-wrangling/consume.py | 108 | Python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.errors import AnsibleFilterError
from ansible.module_utils.six import iteritems, string_types
from numbers import Number
def config(parameters, exclude=None):
exclude = exclude or []
if not isinstance(parameters, dict):
raise AnsibleFilterError('php_config expects a dict but was given a %s' % type(parameters))
[parameters.pop(key, None) for key in exclude]
result = ''
for key in sorted(parameters):
parameter = config_parameter(parameters, key)
if parameter:
result += '\n%s' % parameter
return result.lstrip()
def config_parameter(parameters, key, required=False, comment=False, **kwargs):
if not isinstance(parameters, dict):
raise AnsibleFilterError('php_config_parameter parameters expects a dict but was given a %s' % type(parameters))
if not isinstance(key, string_types):
raise AnsibleFilterError('php_config_parameter key expects a string but was given a %s' % type(key))
if key in parameters:
value = parameters.get(key)
else:
if required:
raise AnsibleFilterError('php_config_parameter requires a value for key %s' % key)
if isinstance(comment, string_types):
return comment
if 'default' not in kwargs:
raise AnsibleFilterError('php_config_parameter missing a default value for key %s' % key)
value = kwargs.get('default')
if value is True:
result = '%s = On' % key
elif value is False:
result = '%s = Off' % key
elif isinstance(value, (string_types, Number)):
result = '%s = %s' % (key, value)
else:
raise AnsibleFilterError('php_config_parameter value of an unknown type %s' % type(value))
if key not in parameters and comment:
result = ';' + result.replace('\n', '\n;')
return result
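# Illustrative example (not part of the plugin): rendering a small parameter
# dictionary with the config filter defined above. Keys are emitted in sorted
# order and booleans map to On/Off, so this call would return
# "display_errors = Off\nmemory_limit = 256M".
#
#   config({'memory_limit': '256M', 'display_errors': False})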
class FilterModule(object):
''' Manala php config jinja2 filters '''
def filters(self):
filters = {
'php_config': config,
'php_config_parameter': config_parameter,
}
return filters
| 33.569231 | 120 | 0.660862 | ["MIT"] | manala/ansible-roles | plugins/filter/php_config.py | 2,182 | Python
import pytest
from contoso import get_company_name, get_company_address
def test_get_company_name():
assert get_company_name() == "Contoso"
def test_get_company_address():
    assert get_company_address() == "Contosostrasse 1, Zurich, Switzerland"
| 28.222222 | 75 | 0.791339 | ["MIT"] | fbeltrao/az-func-gh-deployment | src/packages/tests/test_company_details.py | 254 | Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green ([email protected]), #
# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module contains unit test for the converter module.
"""
import unittest
from rmgpy.molecule.converter import debug_rdkit_mol, to_rdkit_mol, from_rdkit_mol, to_ob_mol, from_ob_mol
from rmgpy.molecule.molecule import Molecule
class RDKitTest(unittest.TestCase):
def test_debugger(self):
"""Test the debug_rdkit_mol(rdmol) function doesn't crash
We can't really test it in the unit testing framework, because
that already captures and redirects standard output, and that
conflicts with the function, but this checks it doesn't crash.
"""
import rdkit.Chem
import logging
rdmol = rdkit.Chem.MolFromSmiles('CCC')
message = debug_rdkit_mol(rdmol, level=logging.INFO)
self.assertIsNotNone(message)
def test_lone_pair_retention(self):
"""Test that we don't lose any lone pairs on round trip RDKit conversion."""
mol = Molecule().from_adjacency_list("""
1 C u0 p0 c0 {2,D} {3,S} {4,S}
2 O u0 p2 c0 {1,D}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
""")
rdmol = to_rdkit_mol(mol)
mol2 = from_rdkit_mol(Molecule(), rdmol)
self.assertTrue(mol.is_isomorphic(mol2))
def test_atom_mapping_1(self):
"""Test that to_rdkit_mol returns correct indices and atom mappings."""
bond_order_dict = {'SINGLE': 1, 'DOUBLE': 2, 'TRIPLE': 3, 'AROMATIC': 1.5}
mol = Molecule().from_smiles('C1CCC=C1C=O')
rdkitmol, rd_atom_indices = to_rdkit_mol(mol, remove_h=False, return_mapping=True)
for atom in mol.atoms:
# Check that all atoms are found in mapping
self.assertTrue(atom in rd_atom_indices)
# Check that all bonds are in rdkitmol with correct mapping and order
for connected_atom, bond in atom.bonds.items():
bond_type = str(rdkitmol.GetBondBetweenAtoms(rd_atom_indices[atom],
rd_atom_indices[connected_atom]).GetBondType())
rdkit_bond_order = bond_order_dict[bond_type]
self.assertEqual(bond.order, rdkit_bond_order)
# Test for remove_h = True
rdkitmol2, rd_atom_indices2 = to_rdkit_mol(mol, remove_h=True, return_mapping=True)
for atom in mol.atoms:
# Check that all non-hydrogen atoms are found in mapping
if atom.symbol != 'H':
self.assertTrue(atom in rd_atom_indices2)
# Check that all bonds connected to non-hydrogen have the correct mapping and order
for connected_atom, bond in atom.bonds.items():
if connected_atom.symbol != 'H':
bond_type = str(rdkitmol2.GetBondBetweenAtoms(rd_atom_indices2[atom],
rd_atom_indices2[connected_atom]).GetBondType())
rdkit_bond_order = bond_order_dict[bond_type]
self.assertEqual(bond.order, rdkit_bond_order)
def test_atom_mapping_2(self):
"""Test that to_rdkit_mol returns correct indices and atom mappings when hydrogens are removed."""
adjlist = """
1 H u0 p0 c0 {2,S}
2 C u0 p0 c0 {1,S} {3,S} {4,S} {5,S}
3 H u0 p0 c0 {2,S}
4 H u0 p0 c0 {2,S}
5 O u0 p2 c0 {2,S} {6,S}
6 H u0 p0 c0 {5,S}
"""
mol = Molecule().from_adjacency_list(adjlist)
rdkitmol, rd_atom_indices = to_rdkit_mol(mol, remove_h=True, return_mapping=True)
heavy_atoms = [at for at in mol.atoms if at.number != 1]
for at1 in heavy_atoms:
for at2 in heavy_atoms:
if mol.has_bond(at1, at2):
try:
rdkitmol.GetBondBetweenAtoms(rd_atom_indices[at1], rd_atom_indices[at2])
except RuntimeError:
self.fail("RDKit failed in finding the bond in the original atom!")
class ConverterTest(unittest.TestCase):
def setUp(self):
"""Function run before each test in this class."""
self.test_mols = [
Molecule().from_smiles('C'),
Molecule().from_smiles('O'),
Molecule().from_smiles('N'),
Molecule().from_smiles('S'),
Molecule().from_smiles('[CH2]C'),
Molecule().from_smiles('[CH]C'),
Molecule().from_smiles('C=CC=C'),
Molecule().from_smiles('C#C[CH2]'),
Molecule().from_smiles('c1ccccc1'),
Molecule().from_smiles('[13CH3]C'),
Molecule().from_smiles('O=CCO').generate_h_bonded_structures()[0],
]
self.test_Hbond_free_mol = Molecule().from_smiles('O=CCO')
def test_rdkit_round_trip(self):
"""Test conversion to and from RDKitMol"""
for mol in self.test_mols:
rdkit_mol = to_rdkit_mol(mol)
new_mol = from_rdkit_mol(Molecule(), rdkit_mol)
self.assertTrue(mol.is_isomorphic(new_mol) or self.test_Hbond_free_mol.is_isomorphic(new_mol))
self.assertEqual(mol.get_element_count(), new_mol.get_element_count())
def test_ob_round_trip(self):
"""Test conversion to and from OBMol"""
for mol in self.test_mols:
ob_mol = to_ob_mol(mol)
new_mol = from_ob_mol(Molecule(), ob_mol)
self.assertTrue(mol.is_isomorphic(new_mol) or self.test_Hbond_free_mol.is_isomorphic(new_mol))
self.assertEqual(mol.get_element_count(), new_mol.get_element_count())
| 48.782051 | 118 | 0.574376 | [
"MIT"
] | MingyuWAN/RMG-Py | rmgpy/molecule/converterTest.py | 7,610 | Python |
"""
Lowest level connection
"""
from __future__ import division
import logging
import math
import random
import time
import uuid
import warnings
from base64 import b64decode
from threading import local
import six
from botocore.client import ClientError
from botocore.exceptions import BotoCoreError
from botocore.session import get_session
from botocore.vendored import requests
from botocore.vendored.requests import Request
from six.moves import range
from pynamodb.compat import NullHandler
from pynamodb.connection.util import pythonic
from pynamodb.constants import (
RETURN_CONSUMED_CAPACITY_VALUES, RETURN_ITEM_COLL_METRICS_VALUES, COMPARISON_OPERATOR_VALUES,
RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY, RETURN_VALUES_VALUES, ATTR_UPDATE_ACTIONS,
COMPARISON_OPERATOR, EXCLUSIVE_START_KEY, SCAN_INDEX_FORWARD, SCAN_FILTER_VALUES, ATTR_DEFINITIONS,
BATCH_WRITE_ITEM, CONSISTENT_READ, ATTR_VALUE_LIST, DESCRIBE_TABLE, KEY_CONDITION_EXPRESSION,
BATCH_GET_ITEM, DELETE_REQUEST, SELECT_VALUES, RETURN_VALUES, REQUEST_ITEMS, ATTR_UPDATES,
PROJECTION_EXPRESSION, SERVICE_NAME, DELETE_ITEM, PUT_REQUEST, UPDATE_ITEM, SCAN_FILTER, TABLE_NAME,
INDEX_NAME, KEY_SCHEMA, ATTR_NAME, ATTR_TYPE, TABLE_KEY, EXPECTED, KEY_TYPE, GET_ITEM, UPDATE,
PUT_ITEM, SELECT, ACTION, EXISTS, VALUE, LIMIT, QUERY, SCAN, ITEM, LOCAL_SECONDARY_INDEXES,
KEYS, KEY, EQ, SEGMENT, TOTAL_SEGMENTS, CREATE_TABLE, PROVISIONED_THROUGHPUT, READ_CAPACITY_UNITS,
WRITE_CAPACITY_UNITS, GLOBAL_SECONDARY_INDEXES, PROJECTION, EXCLUSIVE_START_TABLE_NAME, TOTAL,
DELETE_TABLE, UPDATE_TABLE, LIST_TABLES, GLOBAL_SECONDARY_INDEX_UPDATES, ATTRIBUTES,
CONSUMED_CAPACITY, CAPACITY_UNITS, QUERY_FILTER, QUERY_FILTER_VALUES, CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS, NULL, NOT_NULL, SHORT_ATTR_TYPES, DELETE, PUT,
ITEMS, DEFAULT_ENCODING, BINARY_SHORT, BINARY_SET_SHORT, LAST_EVALUATED_KEY, RESPONSES, UNPROCESSED_KEYS,
UNPROCESSED_ITEMS, STREAM_SPECIFICATION, STREAM_VIEW_TYPE, STREAM_ENABLED, UPDATE_EXPRESSION,
EXPRESSION_ATTRIBUTE_NAMES, EXPRESSION_ATTRIBUTE_VALUES, KEY_CONDITION_OPERATOR_MAP,
CONDITION_EXPRESSION, FILTER_EXPRESSION, FILTER_EXPRESSION_OPERATOR_MAP, NOT_CONTAINS, AND)
from pynamodb.exceptions import (
TableError, QueryError, PutError, DeleteError, UpdateError, GetError, ScanError, TableDoesNotExist,
VerboseClientError
)
from pynamodb.expressions.condition import Condition
from pynamodb.expressions.operand import Path
from pynamodb.expressions.projection import create_projection_expression
from pynamodb.expressions.update import Update
from pynamodb.settings import get_settings_value
from pynamodb.signals import pre_dynamodb_send, post_dynamodb_send
from pynamodb.types import HASH, RANGE
BOTOCORE_EXCEPTIONS = (BotoCoreError, ClientError)
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
class MetaTable(object):
"""
A pythonic wrapper around table metadata
"""
def __init__(self, data):
self.data = data or {}
self._range_keyname = None
self._hash_keyname = None
def __repr__(self):
if self.data:
return six.u("MetaTable<{0}>".format(self.data.get(TABLE_NAME)))
@property
def range_keyname(self):
"""
Returns the name of this table's range key
"""
if self._range_keyname is None:
for attr in self.data.get(KEY_SCHEMA):
if attr.get(KEY_TYPE) == RANGE:
self._range_keyname = attr.get(ATTR_NAME)
return self._range_keyname
@property
def hash_keyname(self):
"""
Returns the name of this table's hash key
"""
if self._hash_keyname is None:
for attr in self.data.get(KEY_SCHEMA):
if attr.get(KEY_TYPE) == HASH:
self._hash_keyname = attr.get(ATTR_NAME)
break
return self._hash_keyname
def get_key_names(self, index_name=None):
"""
Returns the names of the primary key attributes and index key attributes (if index_name is specified)
"""
key_names = [self.hash_keyname]
if self.range_keyname:
key_names.append(self.range_keyname)
if index_name is not None:
index_hash_keyname = self.get_index_hash_keyname(index_name)
if index_hash_keyname not in key_names:
key_names.append(index_hash_keyname)
index_range_keyname = self.get_index_range_keyname(index_name)
if index_range_keyname is not None and index_range_keyname not in key_names:
key_names.append(index_range_keyname)
return key_names
def get_index_hash_keyname(self, index_name):
"""
Returns the name of the hash key for a given index
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == HASH:
return schema_key.get(ATTR_NAME)
def get_index_range_keyname(self, index_name):
"""
        Returns the name of the range key for a given index
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == RANGE:
return schema_key.get(ATTR_NAME)
return None
def get_item_attribute_map(self, attributes, item_key=ITEM, pythonic_key=True):
"""
Builds up a dynamodb compatible AttributeValue map
"""
if pythonic_key:
item_key = item_key
attr_map = {
item_key: {}
}
for key, value in attributes.items():
# In this case, the user provided a mapping
# {'key': {'S': 'value'}}
if isinstance(value, dict):
attr_map[item_key][key] = value
else:
attr_map[item_key][key] = {
self.get_attribute_type(key): value
}
return attr_map
def get_attribute_type(self, attribute_name, value=None):
"""
Returns the proper attribute type for a given attribute name
"""
for attr in self.data.get(ATTR_DEFINITIONS):
if attr.get(ATTR_NAME) == attribute_name:
return attr.get(ATTR_TYPE)
if value is not None and isinstance(value, dict):
for key in SHORT_ATTR_TYPES:
if key in value:
return key
attr_names = [attr.get(ATTR_NAME) for attr in self.data.get(ATTR_DEFINITIONS)]
raise ValueError("No attribute {0} in {1}".format(attribute_name, attr_names))
def get_identifier_map(self, hash_key, range_key=None, key=KEY):
"""
Builds the identifier map that is common to several operations
"""
kwargs = {
key: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): hash_key
}
}
}
if range_key is not None:
kwargs[key][self.range_keyname] = {
self.get_attribute_type(self.range_keyname): range_key
}
return kwargs
def get_exclusive_start_key_map(self, exclusive_start_key):
"""
Builds the exclusive start key attribute map
"""
if isinstance(exclusive_start_key, dict) and self.hash_keyname in exclusive_start_key:
# This is useful when paginating results, as the LastEvaluatedKey returned is already
# structured properly
return {
EXCLUSIVE_START_KEY: exclusive_start_key
}
else:
return {
EXCLUSIVE_START_KEY: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): exclusive_start_key
}
}
}
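# --- Illustrative usage sketch (added for documentation; not part of the upstream
# PynamoDB source). The table, attribute, and key values below are hypothetical and
# only show how MetaTable interprets a DescribeTable-style payload.
def _example_meta_table_usage():
    meta = MetaTable({
        TABLE_NAME: 'Thread',
        KEY_SCHEMA: [
            {ATTR_NAME: 'ForumName', KEY_TYPE: HASH},
            {ATTR_NAME: 'Subject', KEY_TYPE: RANGE},
        ],
        ATTR_DEFINITIONS: [
            {ATTR_NAME: 'ForumName', ATTR_TYPE: 'S'},
            {ATTR_NAME: 'Subject', ATTR_TYPE: 'S'},
        ],
    })
    # Key names are resolved from the key schema above.
    assert meta.hash_keyname == 'ForumName'
    assert meta.range_keyname == 'Subject'
    # Builds a DynamoDB-ready key map such as
    # {'Key': {'ForumName': {'S': 'tools'}, 'Subject': {'S': 'hammers'}}}
    return meta.get_identifier_map('tools', range_key='hammers')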
class Connection(object):
"""
A higher level abstraction over botocore
"""
def __init__(self, region=None, host=None, session_cls=None,
request_timeout_seconds=None, max_retry_attempts=None, base_backoff_ms=None):
self._tables = {}
self.host = host
self._local = local()
self._requests_session = None
self._client = None
if region:
self.region = region
else:
self.region = get_settings_value('region')
if session_cls:
self.session_cls = session_cls
else:
self.session_cls = get_settings_value('session_cls')
if request_timeout_seconds is not None:
self._request_timeout_seconds = request_timeout_seconds
else:
self._request_timeout_seconds = get_settings_value('request_timeout_seconds')
if max_retry_attempts is not None:
self._max_retry_attempts_exception = max_retry_attempts
else:
self._max_retry_attempts_exception = get_settings_value('max_retry_attempts')
if base_backoff_ms is not None:
self._base_backoff_ms = base_backoff_ms
else:
self._base_backoff_ms = get_settings_value('base_backoff_ms')
def __repr__(self):
return six.u("Connection<{0}>".format(self.client.meta.endpoint_url))
def _log_debug(self, operation, kwargs):
"""
Sends a debug message to the logger
"""
log.debug("Calling %s with arguments %s", operation, kwargs)
def _log_debug_response(self, operation, response):
"""
Sends a debug message to the logger about a response
"""
log.debug("%s response: %s", operation, response)
def _log_error(self, operation, response):
"""
Sends an error message to the logger
"""
log.error("%s failed with status: %s, message: %s",
                  operation, response.status_code, response.content)
def _create_prepared_request(self, request_dict, operation_model):
"""
Create a prepared request object from request_dict, and operation_model
"""
boto_prepared_request = self.client._endpoint.create_request(request_dict, operation_model)
# The call requests_session.send(final_prepared_request) ignores the headers which are
# part of the request session. In order to include the requests session headers inside
# the request, we create a new request object, and call prepare_request with the newly
# created request object
raw_request_with_params = Request(
boto_prepared_request.method,
boto_prepared_request.url,
data=boto_prepared_request.body,
headers=boto_prepared_request.headers
)
return self.requests_session.prepare_request(raw_request_with_params)
def dispatch(self, operation_name, operation_kwargs):
"""
Dispatches `operation_name` with arguments `operation_kwargs`
Raises TableDoesNotExist if the specified table does not exist
"""
if operation_name not in [DESCRIBE_TABLE, LIST_TABLES, UPDATE_TABLE, DELETE_TABLE, CREATE_TABLE]:
if RETURN_CONSUMED_CAPACITY not in operation_kwargs:
operation_kwargs.update(self.get_consumed_capacity_map(TOTAL))
self._log_debug(operation_name, operation_kwargs)
table_name = operation_kwargs.get(TABLE_NAME)
req_uuid = uuid.uuid4()
self.send_pre_boto_callback(operation_name, req_uuid, table_name)
data = self._make_api_call(operation_name, operation_kwargs)
self.send_post_boto_callback(operation_name, req_uuid, table_name)
if data and CONSUMED_CAPACITY in data:
capacity = data.get(CONSUMED_CAPACITY)
if isinstance(capacity, dict) and CAPACITY_UNITS in capacity:
capacity = capacity.get(CAPACITY_UNITS)
log.debug("%s %s consumed %s units", data.get(TABLE_NAME, ''), operation_name, capacity)
return data
def send_post_boto_callback(self, operation_name, req_uuid, table_name):
try:
post_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("post_boto callback threw an exception.")
def send_pre_boto_callback(self, operation_name, req_uuid, table_name):
try:
pre_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("pre_boto callback threw an exception.")
def _make_api_call(self, operation_name, operation_kwargs):
"""
This private method is here for two reasons:
1. It's faster to avoid using botocore's response parsing
2. It provides a place to monkey patch requests for unit testing
"""
operation_model = self.client._service_model.operation_model(operation_name)
request_dict = self.client._convert_to_request_dict(
operation_kwargs,
operation_model
)
prepared_request = self._create_prepared_request(request_dict, operation_model)
for i in range(0, self._max_retry_attempts_exception + 1):
attempt_number = i + 1
is_last_attempt_for_exceptions = i == self._max_retry_attempts_exception
try:
response = self.requests_session.send(
prepared_request,
timeout=self._request_timeout_seconds,
proxies=self.client._endpoint.proxies,
)
data = response.json()
except (requests.RequestException, ValueError) as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
else:
# No backoff for fast-fail exceptions that likely failed at the frontend
log.debug(
'Retry needed for (%s) after attempt %s, retryable %s caught: %s',
operation_name,
attempt_number,
e.__class__.__name__,
e
)
continue
if response.status_code >= 300:
# Extract error code from __type
code = data.get('__type', '')
if '#' in code:
code = code.rsplit('#', 1)[1]
botocore_expected_format = {'Error': {'Message': data.get('message', ''), 'Code': code}}
verbose_properties = {
'request_id': response.headers.get('x-amzn-RequestId')
}
if 'RequestItems' in operation_kwargs:
# Batch operations can hit multiple tables, report them comma separated
verbose_properties['table_name'] = ','.join(operation_kwargs['RequestItems'])
else:
verbose_properties['table_name'] = operation_kwargs.get('TableName')
try:
raise VerboseClientError(botocore_expected_format, operation_name, verbose_properties)
except VerboseClientError as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
elif response.status_code < 500 and code != 'ProvisionedThroughputExceededException':
# We don't retry on a ConditionalCheckFailedException or other 4xx (except for
# throughput related errors) because we assume they will fail in perpetuity.
# Retrying when there is already contention could cause other problems
# in part due to unnecessary consumption of throughput.
raise
else:
# We use fully-jittered exponentially-backed-off retries:
# https://www.awsarchitectureblog.com/2015/03/backoff.html
sleep_time_ms = random.randint(0, self._base_backoff_ms * (2 ** i))
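                        # For example (hypothetical numbers): with base_backoff_ms=25 and i=2,
                        # sleep_time_ms is drawn uniformly from [0, 25 * 2**2] = [0, 100] milliseconds.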
log.debug(
                            'Retry with backoff needed for (%s) after attempt %s, '
                            'sleeping for %s milliseconds, retryable %s caught: %s',
operation_name,
attempt_number,
sleep_time_ms,
e.__class__.__name__,
e
)
time.sleep(sleep_time_ms / 1000.0)
continue
return self._handle_binary_attributes(data)
@staticmethod
def _handle_binary_attributes(data):
""" Simulate botocore's binary attribute handling """
if ITEM in data:
for attr in six.itervalues(data[ITEM]):
_convert_binary(attr)
if ITEMS in data:
for item in data[ITEMS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if RESPONSES in data:
for item_list in six.itervalues(data[RESPONSES]):
for item in item_list:
for attr in six.itervalues(item):
_convert_binary(attr)
if LAST_EVALUATED_KEY in data:
for attr in six.itervalues(data[LAST_EVALUATED_KEY]):
_convert_binary(attr)
if UNPROCESSED_KEYS in data:
for table_data in six.itervalues(data[UNPROCESSED_KEYS]):
for item in table_data[KEYS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if UNPROCESSED_ITEMS in data:
for table_unprocessed_requests in six.itervalues(data[UNPROCESSED_ITEMS]):
for request in table_unprocessed_requests:
for item_mapping in six.itervalues(request):
for item in six.itervalues(item_mapping):
for attr in six.itervalues(item):
_convert_binary(attr)
if ATTRIBUTES in data:
for attr in six.itervalues(data[ATTRIBUTES]):
_convert_binary(attr)
return data
@property
def session(self):
"""
Returns a valid botocore session
"""
# botocore client creation is not thread safe as of v1.2.5+ (see issue #153)
if getattr(self._local, 'session', None) is None:
self._local.session = get_session()
return self._local.session
@property
def requests_session(self):
"""
Return a requests session to execute prepared requests using the same pool
"""
if self._requests_session is None:
self._requests_session = self.session_cls()
return self._requests_session
@property
def client(self):
"""
Returns a botocore dynamodb client
"""
# botocore has a known issue where it will cache empty credentials
# https://github.com/boto/botocore/blob/4d55c9b4142/botocore/credentials.py#L1016-L1021
# if the client does not have credentials, we create a new client
# otherwise the client is permanently poisoned in the case of metadata service flakiness when using IAM roles
if not self._client or (self._client._request_signer and not self._client._request_signer._credentials):
self._client = self.session.create_client(SERVICE_NAME, self.region, endpoint_url=self.host)
return self._client
def get_meta_table(self, table_name, refresh=False):
"""
Returns a MetaTable
"""
if table_name not in self._tables or refresh:
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DESCRIBE_TABLE, operation_kwargs)
self._tables[table_name] = MetaTable(data.get(TABLE_KEY))
except BotoCoreError as e:
raise TableError("Unable to describe table: {0}".format(e), e)
except ClientError as e:
if 'ResourceNotFound' in e.response['Error']['Code']:
raise TableDoesNotExist(e.response['Error']['Message'])
else:
raise
return self._tables[table_name]
def create_table(self,
table_name,
attribute_definitions=None,
key_schema=None,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_indexes=None,
local_secondary_indexes=None,
stream_specification=None):
"""
Performs the CreateTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name,
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
}
attrs_list = []
if attribute_definitions is None:
raise ValueError("attribute_definitions argument is required")
for attr in attribute_definitions:
attrs_list.append({
ATTR_NAME: attr.get(pythonic(ATTR_NAME)),
ATTR_TYPE: attr.get(pythonic(ATTR_TYPE))
})
operation_kwargs[ATTR_DEFINITIONS] = attrs_list
if global_secondary_indexes:
global_secondary_indexes_list = []
for index in global_secondary_indexes:
global_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
PROVISIONED_THROUGHPUT: index.get(pythonic(PROVISIONED_THROUGHPUT))
})
operation_kwargs[GLOBAL_SECONDARY_INDEXES] = global_secondary_indexes_list
if key_schema is None:
raise ValueError("key_schema is required")
key_schema_list = []
for item in key_schema:
key_schema_list.append({
ATTR_NAME: item.get(pythonic(ATTR_NAME)),
KEY_TYPE: str(item.get(pythonic(KEY_TYPE))).upper()
})
operation_kwargs[KEY_SCHEMA] = sorted(key_schema_list, key=lambda x: x.get(KEY_TYPE))
local_secondary_indexes_list = []
if local_secondary_indexes:
for index in local_secondary_indexes:
local_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
})
operation_kwargs[LOCAL_SECONDARY_INDEXES] = local_secondary_indexes_list
if stream_specification:
operation_kwargs[STREAM_SPECIFICATION] = {
STREAM_ENABLED: stream_specification[pythonic(STREAM_ENABLED)],
STREAM_VIEW_TYPE: stream_specification[pythonic(STREAM_VIEW_TYPE)]
}
try:
data = self.dispatch(CREATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to create table: {0}".format(e), e)
return data
def delete_table(self, table_name):
"""
Performs the DeleteTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DELETE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to delete table: {0}".format(e), e)
return data
def update_table(self,
table_name,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_index_updates=None):
"""
Performs the UpdateTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
        if (read_capacity_units and not write_capacity_units) or (write_capacity_units and not read_capacity_units):
raise ValueError("read_capacity_units and write_capacity_units are required together")
if read_capacity_units and write_capacity_units:
operation_kwargs[PROVISIONED_THROUGHPUT] = {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
if global_secondary_index_updates:
global_secondary_indexes_list = []
for index in global_secondary_index_updates:
global_secondary_indexes_list.append({
UPDATE: {
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: index.get(pythonic(READ_CAPACITY_UNITS)),
WRITE_CAPACITY_UNITS: index.get(pythonic(WRITE_CAPACITY_UNITS))
}
}
})
operation_kwargs[GLOBAL_SECONDARY_INDEX_UPDATES] = global_secondary_indexes_list
try:
return self.dispatch(UPDATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to update table: {0}".format(e), e)
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Performs the ListTables operation
"""
operation_kwargs = {}
if exclusive_start_table_name:
operation_kwargs.update({
EXCLUSIVE_START_TABLE_NAME: exclusive_start_table_name
})
if limit is not None:
operation_kwargs.update({
LIMIT: limit
})
try:
return self.dispatch(LIST_TABLES, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Unable to list tables: {0}".format(e), e)
def describe_table(self, table_name):
"""
Performs the DescribeTable operation
"""
try:
tbl = self.get_meta_table(table_name, refresh=True)
if tbl:
return tbl.data
except ValueError:
pass
raise TableDoesNotExist(table_name)
def get_conditional_operator(self, operator):
"""
Returns a dictionary containing the correct conditional operator,
validating it first.
"""
operator = operator.upper()
if operator not in CONDITIONAL_OPERATORS:
raise ValueError(
"The {0} must be one of {1}".format(
CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS
)
)
return {
CONDITIONAL_OPERATOR: operator
}
def get_item_attribute_map(self, table_name, attributes, item_key=ITEM, pythonic_key=True):
"""
Builds up a dynamodb compatible AttributeValue map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_item_attribute_map(
attributes,
item_key=item_key,
pythonic_key=pythonic_key)
def get_expected_map(self, table_name, expected):
"""
Builds the expected map that is common to several operations
"""
kwargs = {EXPECTED: {}}
for key, condition in expected.items():
if EXISTS in condition:
kwargs[EXPECTED][key] = {
EXISTS: condition.get(EXISTS)
}
elif VALUE in condition:
kwargs[EXPECTED][key] = {
VALUE: {
self.get_attribute_type(table_name, key): condition.get(VALUE)
}
}
elif COMPARISON_OPERATOR in condition:
kwargs[EXPECTED][key] = {
COMPARISON_OPERATOR: condition.get(COMPARISON_OPERATOR),
}
values = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_type = self.get_attribute_type(table_name, key, value)
values.append({attr_type: self.parse_attribute(value)})
if condition.get(COMPARISON_OPERATOR) not in [NULL, NOT_NULL]:
kwargs[EXPECTED][key][ATTR_VALUE_LIST] = values
return kwargs
def parse_attribute(self, attribute, return_type=False):
"""
Returns the attribute value, where the attribute can be
a raw attribute value, or a dictionary containing the type:
{'S': 'String value'}
"""
if isinstance(attribute, dict):
for key in SHORT_ATTR_TYPES:
if key in attribute:
if return_type:
return key, attribute.get(key)
return attribute.get(key)
raise ValueError("Invalid attribute supplied: {0}".format(attribute))
else:
if return_type:
return None, attribute
return attribute
def get_attribute_type(self, table_name, attribute_name, value=None):
"""
Returns the proper attribute type for a given attribute name
        :param value: The attribute value can be supplied just in case the type is already included
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_attribute_type(attribute_name, value=value)
def get_identifier_map(self, table_name, hash_key, range_key=None, key=KEY):
"""
Builds the identifier map that is common to several operations
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_identifier_map(hash_key, range_key=range_key, key=key)
def get_query_filter_map(self, table_name, query_filters):
"""
Builds the QueryFilter object needed for the Query operation
"""
kwargs = {
QUERY_FILTER: {}
}
for key, condition in query_filters.items():
operator = condition.get(COMPARISON_OPERATOR)
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
attr_value_list = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_value_list.append({
self.get_attribute_type(table_name, key, value): self.parse_attribute(value)
})
kwargs[QUERY_FILTER][key] = {
COMPARISON_OPERATOR: operator
}
if len(attr_value_list):
kwargs[QUERY_FILTER][key][ATTR_VALUE_LIST] = attr_value_list
return kwargs
def get_consumed_capacity_map(self, return_consumed_capacity):
"""
Builds the consumed capacity map that is common to several operations
"""
if return_consumed_capacity.upper() not in RETURN_CONSUMED_CAPACITY_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY_VALUES))
return {
RETURN_CONSUMED_CAPACITY: str(return_consumed_capacity).upper()
}
def get_return_values_map(self, return_values):
"""
Builds the return values map that is common to several operations
"""
if return_values.upper() not in RETURN_VALUES_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_VALUES, RETURN_VALUES_VALUES))
return {
RETURN_VALUES: str(return_values).upper()
}
def get_item_collection_map(self, return_item_collection_metrics):
"""
Builds the item collection map
"""
if return_item_collection_metrics.upper() not in RETURN_ITEM_COLL_METRICS_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_ITEM_COLL_METRICS_VALUES))
return {
RETURN_ITEM_COLL_METRICS: str(return_item_collection_metrics).upper()
}
def get_exclusive_start_key_map(self, table_name, exclusive_start_key):
"""
Builds the exclusive start key attribute map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_exclusive_start_key_map(exclusive_start_key)
def delete_item(self,
table_name,
hash_key,
range_key=None,
condition=None,
expected=None,
conditional_operator=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the DeleteItem operation and returns the result
"""
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
name_placeholders = {}
expression_attribute_values = {}
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(DELETE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise DeleteError("Failed to delete item: {0}".format(e), e)
def update_item(self,
table_name,
hash_key,
range_key=None,
actions=None,
attribute_updates=None,
condition=None,
expected=None,
return_consumed_capacity=None,
conditional_operator=None,
return_item_collection_metrics=None,
return_values=None):
"""
Performs the UpdateItem operation
"""
self._check_actions(actions, attribute_updates)
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
name_placeholders = {}
expression_attribute_values = {}
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
if not actions and not attribute_updates:
raise ValueError("{0} cannot be empty".format(ATTR_UPDATES))
actions = actions or []
attribute_updates = attribute_updates or {}
update_expression = Update(*actions)
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(attribute_updates.keys()):
path = Path([key])
update = attribute_updates[key]
action = update.get(ACTION)
if action not in ATTR_UPDATE_ACTIONS:
raise ValueError("{0} must be one of {1}".format(ACTION, ATTR_UPDATE_ACTIONS))
value = update.get(VALUE)
attr_type, value = self.parse_attribute(value, return_type=True)
if attr_type is None and action != DELETE:
attr_type = self.get_attribute_type(table_name, key, value)
value = {attr_type: value}
if action == DELETE:
action = path.remove() if attr_type is None else path.delete(value)
elif action == PUT:
action = path.set(value)
else:
action = path.add(value)
update_expression.add_action(action)
operation_kwargs[UPDATE_EXPRESSION] = update_expression.serialize(name_placeholders, expression_attribute_values)
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(UPDATE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise UpdateError("Failed to update item: {0}".format(e), e)
def put_item(self,
table_name,
hash_key,
range_key=None,
attributes=None,
condition=None,
expected=None,
conditional_operator=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the PutItem operation and returns the result
"""
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key, key=ITEM))
name_placeholders = {}
expression_attribute_values = {}
if attributes:
attrs = self.get_item_attribute_map(table_name, attributes)
operation_kwargs[ITEM].update(attrs[ITEM])
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(PUT_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to put item: {0}".format(e), e)
def batch_write_item(self,
table_name,
put_items=None,
delete_items=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the batch_write_item operation
"""
if put_items is None and delete_items is None:
raise ValueError("Either put_items or delete_items must be specified")
operation_kwargs = {
REQUEST_ITEMS: {
table_name: []
}
}
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
put_items_list = []
if put_items:
for item in put_items:
put_items_list.append({
PUT_REQUEST: self.get_item_attribute_map(table_name, item, pythonic_key=False)
})
delete_items_list = []
if delete_items:
for item in delete_items:
delete_items_list.append({
DELETE_REQUEST: self.get_item_attribute_map(table_name, item, item_key=KEY, pythonic_key=False)
})
operation_kwargs[REQUEST_ITEMS][table_name] = delete_items_list + put_items_list
try:
return self.dispatch(BATCH_WRITE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to batch write items: {0}".format(e), e)
def batch_get_item(self,
table_name,
keys,
consistent_read=None,
return_consumed_capacity=None,
attributes_to_get=None):
"""
Performs the batch get item operation
"""
operation_kwargs = {
REQUEST_ITEMS: {
table_name: {}
}
}
args_map = {}
name_placeholders = {}
if consistent_read:
args_map[CONSISTENT_READ] = consistent_read
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
args_map[PROJECTION_EXPRESSION] = projection_expression
if name_placeholders:
args_map[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
operation_kwargs[REQUEST_ITEMS][table_name].update(args_map)
keys_map = {KEYS: []}
for key in keys:
keys_map[KEYS].append(
self.get_item_attribute_map(table_name, key)[ITEM]
)
operation_kwargs[REQUEST_ITEMS][table_name].update(keys_map)
try:
return self.dispatch(BATCH_GET_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise GetError("Failed to batch get items: {0}".format(e), e)
def get_item(self,
table_name,
hash_key,
range_key=None,
consistent_read=False,
attributes_to_get=None):
"""
Performs the GetItem operation and returns the result
"""
operation_kwargs = {}
name_placeholders = {}
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
operation_kwargs[CONSISTENT_READ] = consistent_read
operation_kwargs[TABLE_NAME] = table_name
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
try:
return self.dispatch(GET_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise GetError("Failed to get item: {0}".format(e), e)
def rate_limited_scan(self,
table_name,
filter_condition=None,
attributes_to_get=None,
page_size=None,
limit=None,
conditional_operator=None,
scan_filter=None,
exclusive_start_key=None,
segment=None,
total_segments=None,
timeout_seconds=None,
read_capacity_to_consume_per_second=10,
allow_rate_limited_scan_without_consumed_capacity=None,
max_sleep_between_retry=10,
max_consecutive_exceptions=10,
consistent_read=None,
index_name=None):
"""
        Performs a rate limited scan on the table, using the Scan API to fetch items from
        DynamoDB. The 'ConsumedCapacity' value returned by DynamoDB is used to limit the rate
        of the scan. 'ProvisionedThroughputExceededException' is also handled and retried.
:param table_name: Name of the table to perform scan on.
:param filter_condition: Condition used to restrict the scan results
:param attributes_to_get: A list of attributes to return.
:param page_size: Page size of the scan to DynamoDB
:param limit: Used to limit the number of results returned
:param conditional_operator:
:param scan_filter: A map indicating the condition that evaluates the scan results
:param exclusive_start_key: If set, provides the starting point for scan.
:param segment: If set, then scans the segment
:param total_segments: If set, then specifies total segments
:param timeout_seconds: Timeout value for the rate_limited_scan method, to prevent it from running
            indefinitely
:param read_capacity_to_consume_per_second: Amount of read capacity to consume
every second
:param allow_rate_limited_scan_without_consumed_capacity: If set, proceeds without rate limiting if
the server does not support returning consumed capacity in responses.
:param max_sleep_between_retry: Max value for sleep in seconds in between scans during
throttling/rate limit scenarios
:param max_consecutive_exceptions: Max number of consecutive ProvisionedThroughputExceededException
            exceptions allowed before the scan exits
:param consistent_read: enable consistent read
:param index_name: an index to perform the scan on
"""
read_capacity_to_consume_per_ms = float(read_capacity_to_consume_per_second) / 1000
if allow_rate_limited_scan_without_consumed_capacity is None:
allow_rate_limited_scan_without_consumed_capacity = get_settings_value(
'allow_rate_limited_scan_without_consumed_capacity'
)
total_consumed_read_capacity = 0.0
last_evaluated_key = exclusive_start_key
rate_available = True
latest_scan_consumed_capacity = 0
consecutive_provision_throughput_exceeded_ex = 0
start_time = time.time()
if page_size is None:
if limit and read_capacity_to_consume_per_second > limit:
page_size = limit
else:
page_size = read_capacity_to_consume_per_second
while True:
if rate_available:
try:
data = self.scan(
table_name,
filter_condition=filter_condition,
attributes_to_get=attributes_to_get,
exclusive_start_key=last_evaluated_key,
limit=page_size,
conditional_operator=conditional_operator,
return_consumed_capacity=TOTAL,
scan_filter=scan_filter,
segment=segment,
total_segments=total_segments,
consistent_read=consistent_read,
index_name=index_name
)
for item in data.get(ITEMS):
yield item
if limit is not None:
limit -= 1
if not limit:
return
if CONSUMED_CAPACITY in data:
latest_scan_consumed_capacity = data.get(CONSUMED_CAPACITY).get(CAPACITY_UNITS)
else:
if allow_rate_limited_scan_without_consumed_capacity:
latest_scan_consumed_capacity = 0
else:
                            raise ScanError('Rate limited scan not possible because the server did not send back '
                                            'consumed capacity information. If you wish scans to complete anyway '
                                            'without functioning rate limiting, set '
                                            'allow_rate_limited_scan_without_consumed_capacity to True in settings.')
last_evaluated_key = data.get(LAST_EVALUATED_KEY, None)
consecutive_provision_throughput_exceeded_ex = 0
except ScanError as e:
# Only retry if provision throughput is exceeded.
if isinstance(e.cause, ClientError):
code = e.cause.response['Error'].get('Code')
if code == "ProvisionedThroughputExceededException":
consecutive_provision_throughput_exceeded_ex += 1
if consecutive_provision_throughput_exceeded_ex > max_consecutive_exceptions:
# Max threshold reached
raise
else:
# Different exception, other than ProvisionedThroughputExceededException
raise
else:
# Not a Client error
raise
# No throttling, and no more scans needed. Just return
if not last_evaluated_key and consecutive_provision_throughput_exceeded_ex == 0:
return
current_time = time.time()
# elapsed_time_ms indicates the time taken in ms from the start of the
            # rate_limited_scan call.
elapsed_time_ms = max(1, round((current_time - start_time) * 1000))
if consecutive_provision_throughput_exceeded_ex == 0:
total_consumed_read_capacity += latest_scan_consumed_capacity
consumed_rate = total_consumed_read_capacity / elapsed_time_ms
rate_available = (read_capacity_to_consume_per_ms - consumed_rate) >= 0
# consecutive_provision_throughput_exceeded_ex > 0 indicates ProvisionedThroughputExceededException occurred.
# ProvisionedThroughputExceededException can occur if:
# - The rate to consume is passed incorrectly.
# - External factors, even if the current scan is within limits.
if not rate_available or (consecutive_provision_throughput_exceeded_ex > 0):
# Minimum value is 1 second.
elapsed_time_s = math.ceil(elapsed_time_ms / 1000)
# Sleep proportional to the ratio of --consumed capacity-- to --capacity to consume--
                time_to_sleep = max(1, round((total_consumed_read_capacity / elapsed_time_s)
                                             / read_capacity_to_consume_per_second))
# At any moment if the timeout_seconds hits, then return
if timeout_seconds and (elapsed_time_s + time_to_sleep) > timeout_seconds:
raise ScanError("Input timeout value {0} has expired".format(timeout_seconds))
time.sleep(min(math.ceil(time_to_sleep), max_sleep_between_retry))
# Reset the latest_scan_consumed_capacity, as no scan operation was performed.
latest_scan_consumed_capacity = 0
def scan(self,
table_name,
filter_condition=None,
attributes_to_get=None,
limit=None,
conditional_operator=None,
scan_filter=None,
return_consumed_capacity=None,
exclusive_start_key=None,
segment=None,
total_segments=None,
consistent_read=None,
index_name=None):
"""
Performs the scan operation
"""
self._check_condition('filter_condition', filter_condition, scan_filter, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
name_placeholders = {}
expression_attribute_values = {}
if filter_condition is not None:
filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if index_name:
operation_kwargs[INDEX_NAME] = index_name
if limit is not None:
operation_kwargs[LIMIT] = limit
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if exclusive_start_key:
operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
if segment is not None:
operation_kwargs[SEGMENT] = segment
if total_segments:
operation_kwargs[TOTAL_SEGMENTS] = total_segments
if scan_filter:
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
filter_expression = self._get_filter_expression(
table_name, scan_filter, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if consistent_read:
operation_kwargs[CONSISTENT_READ] = consistent_read
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(SCAN, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise ScanError("Failed to scan table: {0}".format(e), e)
def query(self,
table_name,
hash_key,
range_key_condition=None,
filter_condition=None,
attributes_to_get=None,
consistent_read=False,
exclusive_start_key=None,
index_name=None,
key_conditions=None,
query_filters=None,
conditional_operator=None,
limit=None,
return_consumed_capacity=None,
scan_index_forward=None,
select=None):
"""
Performs the Query operation and returns the result
"""
self._check_condition('range_key_condition', range_key_condition, key_conditions, conditional_operator)
self._check_condition('filter_condition', filter_condition, query_filters, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
name_placeholders = {}
expression_attribute_values = {}
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table: {0}".format(table_name))
if index_name:
hash_keyname = tbl.get_index_hash_keyname(index_name)
if not hash_keyname:
raise ValueError("No hash key attribute for index: {0}".format(index_name))
range_keyname = tbl.get_index_range_keyname(index_name)
else:
hash_keyname = tbl.hash_keyname
range_keyname = tbl.range_keyname
key_condition = self._get_condition(table_name, hash_keyname, '__eq__', hash_key)
if range_key_condition is not None:
if range_key_condition.is_valid_range_key_condition(range_keyname):
key_condition = key_condition & range_key_condition
elif filter_condition is None:
# Try to gracefully handle the case where a user passed in a filter as a range key condition
(filter_condition, range_key_condition) = (range_key_condition, None)
else:
raise ValueError("{0} is not a valid range key condition".format(range_key_condition))
if key_conditions is None or len(key_conditions) == 0:
pass # No comparisons on sort key
elif len(key_conditions) > 1:
raise ValueError("Multiple attributes are not supported in key_conditions: {0}".format(key_conditions))
else:
(key, condition), = key_conditions.items()
operator = condition.get(COMPARISON_OPERATOR)
if operator not in COMPARISON_OPERATOR_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, COMPARISON_OPERATOR_VALUES))
operator = KEY_CONDITION_OPERATOR_MAP[operator]
values = condition.get(ATTR_VALUE_LIST)
sort_key_expression = self._get_condition(table_name, key, operator, *values)
key_condition = key_condition & sort_key_expression
operation_kwargs[KEY_CONDITION_EXPRESSION] = key_condition.serialize(
name_placeholders, expression_attribute_values)
if filter_condition is not None:
filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
# FilterExpression does not allow key attributes. Check for hash and range key name placeholders
hash_key_placeholder = name_placeholders.get(hash_keyname)
range_key_placeholder = range_keyname and name_placeholders.get(range_keyname)
if (
hash_key_placeholder in filter_expression or
(range_key_placeholder and range_key_placeholder in filter_expression)
):
raise ValueError("'filter_condition' cannot contain key attributes")
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if attributes_to_get:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if consistent_read:
operation_kwargs[CONSISTENT_READ] = True
if exclusive_start_key:
operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
if index_name:
operation_kwargs[INDEX_NAME] = index_name
if limit is not None:
operation_kwargs[LIMIT] = limit
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
# We read the conditional operator even without a query filter passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if query_filters:
filter_expression = self._get_filter_expression(
table_name, query_filters, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if select:
if select.upper() not in SELECT_VALUES:
raise ValueError("{0} must be one of {1}".format(SELECT, SELECT_VALUES))
operation_kwargs[SELECT] = str(select).upper()
if scan_index_forward is not None:
operation_kwargs[SCAN_INDEX_FORWARD] = scan_index_forward
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(QUERY, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise QueryError("Failed to query items: {0}".format(e), e)
def _get_condition_expression(self, table_name, expected, conditional_operator,
name_placeholders, expression_attribute_values):
"""
Builds the ConditionExpression needed for DeleteItem, PutItem, and UpdateItem operations
"""
condition_expression = None
conditional_operator = conditional_operator[CONDITIONAL_OPERATOR]
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(expected.keys()):
condition = expected[key]
if EXISTS in condition:
operator = NOT_NULL if condition.get(EXISTS, True) else NULL
values = []
elif VALUE in condition:
operator = EQ
values = [condition.get(VALUE)]
else:
operator = condition.get(COMPARISON_OPERATOR)
values = condition.get(ATTR_VALUE_LIST, [])
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
not_contains = operator == NOT_CONTAINS
operator = FILTER_EXPRESSION_OPERATOR_MAP[operator]
condition = self._get_condition(table_name, key, operator, *values)
if not_contains:
condition = ~condition
if condition_expression is None:
condition_expression = condition
elif conditional_operator == AND:
condition_expression = condition_expression & condition
else:
condition_expression = condition_expression | condition
return condition_expression.serialize(name_placeholders, expression_attribute_values)
def _get_filter_expression(self, table_name, filters, conditional_operator,
name_placeholders, expression_attribute_values):
"""
Builds the FilterExpression needed for Query and Scan operations
"""
condition_expression = None
conditional_operator = conditional_operator[CONDITIONAL_OPERATOR]
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(filters.keys()):
condition = filters[key]
operator = condition.get(COMPARISON_OPERATOR)
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
not_contains = operator == NOT_CONTAINS
operator = FILTER_EXPRESSION_OPERATOR_MAP[operator]
values = condition.get(ATTR_VALUE_LIST, [])
condition = self._get_condition(table_name, key, operator, *values)
if not_contains:
condition = ~condition
if condition_expression is None:
condition_expression = condition
elif conditional_operator == AND:
condition_expression = condition_expression & condition
else:
condition_expression = condition_expression | condition
return condition_expression.serialize(name_placeholders, expression_attribute_values)
def _get_condition(self, table_name, attribute_name, operator, *values):
values = [
{self.get_attribute_type(table_name, attribute_name, value): self.parse_attribute(value)}
for value in values
]
return getattr(Path([attribute_name]), operator)(*values)
def _check_actions(self, actions, attribute_updates):
if actions is not None:
if attribute_updates is not None:
raise ValueError("Legacy attribute updates cannot be used with update actions")
else:
if attribute_updates is not None:
warnings.warn("Legacy attribute updates are deprecated in favor of update actions")
def _check_condition(self, name, condition, expected_or_filter, conditional_operator):
if condition is not None:
if not isinstance(condition, Condition):
raise ValueError("'{0}' must be an instance of Condition".format(name))
if expected_or_filter or conditional_operator is not None:
raise ValueError("Legacy conditional parameters cannot be used with condition expressions")
else:
if expected_or_filter or conditional_operator is not None:
warnings.warn("Legacy conditional parameters are deprecated in favor of condition expressions")
@staticmethod
def _reverse_dict(d):
return dict((v, k) for k, v in six.iteritems(d))
def _convert_binary(attr):
if BINARY_SHORT in attr:
attr[BINARY_SHORT] = b64decode(attr[BINARY_SHORT].encode(DEFAULT_ENCODING))
elif BINARY_SET_SHORT in attr:
value = attr[BINARY_SET_SHORT]
if value and len(value):
attr[BINARY_SET_SHORT] = set(b64decode(v.encode(DEFAULT_ENCODING)) for v in value)
| 45.698201 | 121 | 0.627469 | [
"MIT"
] | dwelch91/PynamoDB | pynamodb/connection/base.py | 68,593 | Python |
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_waas_certificate_facts
short_description: Fetches details about one or multiple WaasCertificate resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple WaasCertificate resources in Oracle Cloud Infrastructure
- Gets a list of SSL certificates that can be used in a WAAS policy.
- If I(certificate_id) is specified, the details of a single WaasCertificate will be returned.
version_added: "2.9"
author: Oracle (@oracle)
options:
certificate_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the SSL certificate used in the WAAS policy. This number is
generated when the certificate is added to the policy.
- Required to get a specific waas_certificate.
type: str
aliases: ["id"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment. This number is generated when the
compartment is created.
- Required to list multiple waas_certificates.
type: str
sort_by:
description:
- The value by which certificate summaries are sorted in a paginated 'List' call. If unspecified, defaults to `timeCreated`.
type: str
choices:
- "id"
- "compartmentId"
- "displayName"
- "notValidAfter"
- "timeCreated"
sort_order:
description:
- The value of the sorting direction of resources in a paginated 'List' call. If unspecified, defaults to `DESC`.
type: str
choices:
- "ASC"
- "DESC"
display_name:
description:
- Filter certificates using a list of display names.
type: list
aliases: ["name"]
lifecycle_state:
description:
- Filter certificates using a list of lifecycle states.
type: list
choices:
- "CREATING"
- "ACTIVE"
- "FAILED"
- "UPDATING"
- "DELETING"
- "DELETED"
time_created_greater_than_or_equal_to:
description:
- A filter that matches certificates created on or after the specified date-time.
type: str
time_created_less_than:
description:
- A filter that matches certificates created before the specified date-time.
type: str
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List waas_certificates
oci_waas_certificate_facts:
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
- name: Get a specific waas_certificate
oci_waas_certificate_facts:
certificate_id: ocid1.certificate.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
waas_certificates:
description:
- List of WaasCertificate resources
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the certificate.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the certificate's compartment.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
display_name:
description:
- The user-friendly name of the certificate.
returned: on success
type: string
sample: display_name_example
issued_by:
description:
- ""
returned: on success
type: string
sample: issued_by_example
subject_name:
description:
- ""
returned: on success
type: complex
contains:
country:
description:
- ISO 3166-1 alpha-2 code of the country where the organization is located. For a list of codes, see L(ISO's
website,https://www.iso.org/obp/ui/#search/code/).
returned: on success
type: string
sample: country_example
state_province:
description:
- The province where the organization is located.
returned: on success
type: string
sample: state_province_example
locality:
description:
- The city in which the organization is located.
returned: on success
type: string
sample: locality_example
organization:
description:
- The organization name.
returned: on success
type: string
sample: organization_example
organizational_unit:
description:
- The field to differentiate between divisions within an organization.
returned: on success
type: string
sample: organizational_unit_example
common_name:
description:
- The fully qualified domain name used for DNS lookups of the server.
returned: on success
type: string
sample: common_name_example
email_address:
description:
- The email address of the server's administrator.
returned: on success
type: string
sample: email_address_example
issuer_name:
description:
- ""
returned: on success
type: complex
contains:
country:
description:
- ISO 3166-1 alpha-2 code of the country where the organization is located. For a list of codes, see L(ISO's
website,https://www.iso.org/obp/ui/#search/code/).
returned: on success
type: string
sample: country_example
state_province:
description:
- The province where the organization is located.
returned: on success
type: string
sample: state_province_example
locality:
description:
- The city in which the organization is located.
returned: on success
type: string
sample: locality_example
organization:
description:
- The organization name.
returned: on success
type: string
sample: organization_example
organizational_unit:
description:
- The field to differentiate between divisions within an organization.
returned: on success
type: string
sample: organizational_unit_example
common_name:
description:
- The Certificate Authority (CA) name.
returned: on success
type: string
sample: common_name_example
email_address:
description:
- The email address of the server's administrator.
returned: on success
type: string
sample: email_address_example
serial_number:
description:
- A unique, positive integer assigned by the Certificate Authority (CA). The issuer name and serial number identify a unique certificate.
returned: on success
type: string
sample: serial_number_example
version:
description:
- The version of the encoded certificate.
returned: on success
type: int
sample: 56
signature_algorithm:
description:
- The identifier for the cryptographic algorithm used by the Certificate Authority (CA) to sign this certificate.
returned: on success
type: string
sample: signature_algorithm_example
time_not_valid_before:
description:
- The date and time the certificate will become valid, expressed in RFC 3339 timestamp format.
returned: on success
type: string
sample: 2018-11-16T21:10:29Z
time_not_valid_after:
description:
- The date and time the certificate will expire, expressed in RFC 3339 timestamp format.
returned: on success
type: string
sample: 2018-11-16T21:10:29Z
public_key_info:
description:
- ""
returned: on success
type: complex
contains:
algorithm:
description:
- The algorithm identifier and parameters for the public key.
returned: on success
type: string
sample: algorithm_example
exponent:
description:
- The private key exponent.
returned: on success
type: int
sample: 56
key_size:
description:
- The number of bits in a key used by a cryptographic algorithm.
returned: on success
type: int
sample: 56
extensions:
description:
- Additional attributes associated with users or public keys for managing relationships between Certificate Authorities.
returned: on success
type: complex
contains:
name:
description:
- The certificate extension name.
returned: on success
type: string
sample: name_example
is_critical:
description:
- The critical flag of the extension. Critical extensions must be processed, non-critical extensions can be ignored.
returned: on success
type: bool
sample: true
value:
description:
- The certificate extension value.
returned: on success
type: string
sample: value_example
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
lifecycle_state:
description:
- The current lifecycle state of the SSL certificate.
returned: on success
type: string
sample: CREATING
time_created:
description:
- The date and time the certificate was created, expressed in RFC 3339 timestamp format.
returned: on success
type: string
sample: 2018-11-16T21:10:29Z
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"issued_by": "issued_by_example",
"subject_name": {
"country": "country_example",
"state_province": "state_province_example",
"locality": "locality_example",
"organization": "organization_example",
"organizational_unit": "organizational_unit_example",
"common_name": "common_name_example",
"email_address": "email_address_example"
},
"issuer_name": {
"country": "country_example",
"state_province": "state_province_example",
"locality": "locality_example",
"organization": "organization_example",
"organizational_unit": "organizational_unit_example",
"common_name": "common_name_example",
"email_address": "email_address_example"
},
"serial_number": "serial_number_example",
"version": 56,
"signature_algorithm": "signature_algorithm_example",
"time_not_valid_before": "2018-11-16T21:10:29Z",
"time_not_valid_after": "2018-11-16T21:10:29Z",
"public_key_info": {
"algorithm": "algorithm_example",
"exponent": 56,
"key_size": 56
},
"extensions": [{
"name": "name_example",
"is_critical": true,
"value": "value_example"
}],
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"lifecycle_state": "CREATING",
"time_created": "2018-11-16T21:10:29Z"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.waas import WaasClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class WaasCertificateFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"certificate_id",
]
def get_required_params_for_list(self):
return [
"compartment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_certificate,
certificate_id=self.module.params.get("certificate_id"),
)
def list_resources(self):
optional_list_method_params = [
"sort_by",
"sort_order",
"display_name",
"lifecycle_state",
"time_created_greater_than_or_equal_to",
"time_created_less_than",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_certificates,
compartment_id=self.module.params.get("compartment_id"),
**optional_kwargs
)
WaasCertificateFactsHelperCustom = get_custom_class("WaasCertificateFactsHelperCustom")
class ResourceFactsHelper(
WaasCertificateFactsHelperCustom, WaasCertificateFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
certificate_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
sort_by=dict(
type="str",
choices=[
"id",
"compartmentId",
"displayName",
"notValidAfter",
"timeCreated",
],
),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
display_name=dict(aliases=["name"], type="list"),
lifecycle_state=dict(
type="list",
choices=[
"CREATING",
"ACTIVE",
"FAILED",
"UPDATING",
"DELETING",
"DELETED",
],
),
time_created_greater_than_or_equal_to=dict(type="str"),
time_created_less_than=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="waas_certificate",
service_client_class=WaasClient,
namespace="waas",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(waas_certificates=result)
if __name__ == "__main__":
main()
| 37.224652 | 159 | 0.553354 | ["Apache-2.0"] | hanielburton/oci-ansible-collection | plugins/modules/oci_waas_certificate_facts.py | 18,724 | Python
import os
from dotenv import load_dotenv
# The prefix the bot responds to for commands
PREFIX = '!'
# Emojis the bot should use for certain events
EMOJIS = {
'DISCORD': '🗨️', # When a message is sent from Discord
'HYPIXEL': '🎮', # When a message is sent from Hypixel
'JOIN': '📥', # When a member joins Hypixel
'LEAVE': '📤' # When a member leaves Hypixel
}
# List of Owner IDs (to use commands like sumo aaaaaaaaaaaaa)
OWNER_IDS = [635097068741853204]
# Don't touch this unless you know what you're doing
load_dotenv()
TOKEN = os.getenv("TOKEN")
GUILD_CHAT_CHANNEL = int(os.getenv("GUILD_CHAT_CHANNEL"))
MINECRAFT_EMAIL = os.getenv("MINECRAFT_EMAIL")
MINECRAFT_PASSWORD = os.getenv("MINECRAFT_PASSWORD")
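# Example .env layout these lookups assume (illustrative values only, not real credentials):
#   TOKEN=your-discord-bot-token
#   GUILD_CHAT_CHANNEL=123456789012345678
#   MINECRAFT_EMAIL=bot@example.com
#   MINECRAFT_PASSWORD=correct-horse-battery-staple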
| 31.565217 | 61 | 0.717631 | ["MIT"] | hqsss/hypixel-guild-chat-python | constants.py | 740 | Python
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import unittest
from oslo_config import types
class TypeTestHelper(object):
def setUp(self):
super(TypeTestHelper, self).setUp()
self.type_instance = self.type
def assertConvertedValue(self, s, expected):
self.assertEqual(expected, self.type_instance(s))
def assertInvalid(self, value):
self.assertRaises(ValueError, self.type_instance, value)
class StringTypeTests(TypeTestHelper, unittest.TestCase):
type = types.String()
def test_empty_string_passes(self):
self.assertConvertedValue('', '')
def test_should_return_same_string_if_valid(self):
self.assertConvertedValue('foo bar', 'foo bar')
def test_listed_value(self):
self.type_instance = types.String(choices=['foo', 'bar'])
self.assertConvertedValue('foo', 'foo')
def test_unlisted_value(self):
self.type_instance = types.String(choices=['foo', 'bar'])
self.assertInvalid('baz')
def test_with_no_values_returns_error(self):
self.type_instance = types.String(choices=[])
self.assertInvalid('foo')
def test_string_with_non_closed_quote_is_invalid(self):
self.type_instance = types.String(quotes=True)
self.assertInvalid('"foo bar')
self.assertInvalid("'bar baz")
def test_quotes_are_stripped(self):
self.type_instance = types.String(quotes=True)
self.assertConvertedValue('"foo bar"', 'foo bar')
def test_trailing_quote_is_ok(self):
self.type_instance = types.String(quotes=True)
self.assertConvertedValue('foo bar"', 'foo bar"')
def test_repr(self):
t = types.String()
self.assertEqual('String', repr(t))
def test_repr_with_choices(self):
t = types.String(choices=['foo', 'bar'])
self.assertEqual('String(choices=[\'foo\', \'bar\'])', repr(t))
def test_equal(self):
self.assertTrue(types.String() == types.String())
def test_equal_with_same_choices(self):
t1 = types.String(choices=['foo', 'bar'])
t2 = types.String(choices=['foo', 'bar'])
t3 = types.String(choices=('foo', 'bar'))
t4 = types.String(choices=['bar', 'foo'])
self.assertTrue(t1 == t2)
self.assertTrue(t1 == t3)
self.assertTrue(t1 == t4)
def test_not_equal_with_different_choices(self):
t1 = types.String(choices=['foo', 'bar'])
t2 = types.String(choices=['foo', 'baz'])
self.assertFalse(t1 == t2)
    def test_equal_with_equal_quote_flags(self):
t1 = types.String(quotes=True)
t2 = types.String(quotes=True)
self.assertTrue(t1 == t2)
    def test_not_equal_with_different_quote_flags(self):
t1 = types.String(quotes=False)
t2 = types.String(quotes=True)
self.assertFalse(t1 == t2)
def test_not_equal_to_other_class(self):
self.assertFalse(types.String() == types.Integer())
def test_regex_matches(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]"))
self.assertConvertedValue("Foo", "Foo")
def test_regex_matches_uncompiled(self):
self.type_instance = types.String(regex="^[A-Z]")
self.assertConvertedValue("Foo", "Foo")
def test_regex_fails(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]"))
self.assertInvalid("foo")
def test_regex_and_choices_raises(self):
self.assertRaises(ValueError,
types.String,
regex=re.compile("^[A-Z]"),
choices=["Foo", "Bar", "baz"])
def test_equal_with_same_regex(self):
t1 = types.String(regex=re.compile("^[A-Z]"))
t2 = types.String(regex=re.compile("^[A-Z]"))
self.assertTrue(t1 == t2)
def test_not_equal_with_different_regex(self):
t1 = types.String(regex=re.compile("^[A-Z]"))
t2 = types.String(regex=re.compile("^[a-z]"))
self.assertFalse(t1 == t2)
def test_ignore_case(self):
self.type_instance = types.String(choices=['foo', 'bar'],
ignore_case=True)
self.assertConvertedValue('Foo', 'Foo')
self.assertConvertedValue('bAr', 'bAr')
def test_ignore_case_raises(self):
self.type_instance = types.String(choices=['foo', 'bar'],
ignore_case=False)
self.assertRaises(ValueError, self.assertConvertedValue, 'Foo', 'Foo')
def test_regex_and_ignore_case(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]"),
ignore_case=True)
self.assertConvertedValue("foo", "foo")
def test_regex_and_ignore_case_str(self):
self.type_instance = types.String(regex="^[A-Z]", ignore_case=True)
self.assertConvertedValue("foo", "foo")
def test_regex_preserve_flags(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]", re.I),
ignore_case=False)
self.assertConvertedValue("foo", "foo")
def test_max_length(self):
self.type_instance = types.String(max_length=5)
self.assertInvalid('123456')
self.assertConvertedValue('12345', '12345')
class BooleanTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Boolean()
def test_True(self):
self.assertConvertedValue('True', True)
def test_yes(self):
self.assertConvertedValue('yes', True)
def test_on(self):
self.assertConvertedValue('on', True)
def test_1(self):
self.assertConvertedValue('1', True)
def test_False(self):
self.assertConvertedValue('False', False)
def test_no(self):
self.assertConvertedValue('no', False)
def test_off(self):
self.assertConvertedValue('off', False)
def test_0(self):
self.assertConvertedValue('0', False)
def test_other_values_produce_error(self):
self.assertInvalid('foo')
def test_repr(self):
self.assertEqual('Boolean', repr(types.Boolean()))
def test_equal(self):
self.assertEqual(types.Boolean(), types.Boolean())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Boolean() == types.String())
class IntegerTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Integer()
def test_empty_string(self):
self.assertConvertedValue('', None)
def test_whitespace_string(self):
self.assertConvertedValue(" \t\t\t\t", None)
def test_positive_values_are_valid(self):
self.assertConvertedValue('123', 123)
def test_zero_is_valid(self):
self.assertConvertedValue('0', 0)
def test_negative_values_are_valid(self):
self.assertConvertedValue('-123', -123)
def test_leading_whitespace_is_ignored(self):
self.assertConvertedValue(' 5', 5)
def test_trailing_whitespace_is_ignored(self):
self.assertConvertedValue('7 ', 7)
def test_non_digits_are_invalid(self):
self.assertInvalid('12a45')
def test_repr(self):
t = types.Integer()
self.assertEqual('Integer', repr(t))
def test_repr_with_min(self):
t = types.Integer(min=123)
self.assertEqual('Integer(min=123)', repr(t))
def test_repr_with_max(self):
t = types.Integer(max=456)
self.assertEqual('Integer(max=456)', repr(t))
def test_repr_with_min_and_max(self):
t = types.Integer(min=123, max=456)
self.assertEqual('Integer(min=123, max=456)', repr(t))
t = types.Integer(min=0, max=0)
self.assertEqual('Integer(min=0, max=0)', repr(t))
def test_repr_with_choices(self):
t = types.Integer(choices=[80, 457])
self.assertEqual('Integer(choices=[80, 457])', repr(t))
def test_equal(self):
self.assertTrue(types.Integer() == types.Integer())
def test_equal_with_same_min_and_no_max(self):
self.assertTrue(types.Integer(min=123) == types.Integer(min=123))
def test_equal_with_same_max_and_no_min(self):
self.assertTrue(types.Integer(max=123) == types.Integer(max=123))
def test_equal_with_same_min_and_max(self):
t1 = types.Integer(min=1, max=123)
t2 = types.Integer(min=1, max=123)
self.assertTrue(t1 == t2)
def test_equal_with_same_choices(self):
t1 = types.Integer(choices=[80, 457])
t2 = types.Integer(choices=[457, 80])
self.assertTrue(t1 == t2)
def test_not_equal(self):
self.assertFalse(types.Integer(min=123) == types.Integer(min=456))
self.assertFalse(types.Integer(choices=[80, 457]) ==
types.Integer(choices=[80, 40]))
self.assertFalse(types.Integer(choices=[80, 457]) ==
types.Integer())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Integer() == types.String())
def test_choices_with_min_max(self):
self.assertRaises(ValueError,
types.Integer,
min=10,
choices=[50, 60])
self.assertRaises(ValueError,
types.Integer,
max=100,
choices=[50, 60])
self.assertRaises(ValueError,
types.Integer,
min=10, max=100,
choices=[50, 60])
def test_min_greater_max(self):
self.assertRaises(ValueError,
types.Integer,
min=100, max=50)
self.assertRaises(ValueError,
types.Integer,
min=-50, max=-100)
self.assertRaises(ValueError,
types.Integer,
min=0, max=-50)
self.assertRaises(ValueError,
types.Integer,
min=50, max=0)
def test_with_max_and_min(self):
t = types.Integer(min=123, max=456)
self.assertRaises(ValueError, t, 122)
t(123)
t(300)
t(456)
self.assertRaises(ValueError, t, 0)
self.assertRaises(ValueError, t, 457)
def test_with_min_zero(self):
t = types.Integer(min=0, max=456)
self.assertRaises(ValueError, t, -1)
t(0)
t(123)
t(300)
t(456)
self.assertRaises(ValueError, t, -201)
self.assertRaises(ValueError, t, 457)
def test_with_max_zero(self):
t = types.Integer(min=-456, max=0)
self.assertRaises(ValueError, t, 1)
t(0)
t(-123)
t(-300)
t(-456)
self.assertRaises(ValueError, t, 201)
self.assertRaises(ValueError, t, -457)
def test_with_choices_list(self):
t = types.Integer(choices=[80, 457])
self.assertRaises(ValueError, t, 1)
self.assertRaises(ValueError, t, 200)
self.assertRaises(ValueError, t, -457)
t(80)
t(457)
def test_with_choices_tuple(self):
t = types.Integer(choices=(80, 457))
self.assertRaises(ValueError, t, 1)
self.assertRaises(ValueError, t, 200)
self.assertRaises(ValueError, t, -457)
t(80)
t(457)
class FloatTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Float()
def test_decimal_format(self):
v = self.type_instance('123.456')
self.assertAlmostEqual(v, 123.456)
def test_decimal_format_negative_float(self):
v = self.type_instance('-123.456')
self.assertAlmostEqual(v, -123.456)
def test_exponential_format(self):
v = self.type_instance('123e-2')
self.assertAlmostEqual(v, 1.23)
def test_non_float_is_invalid(self):
self.assertInvalid('123,345')
self.assertInvalid('foo')
def test_repr(self):
self.assertEqual('Float', repr(types.Float()))
def test_equal(self):
self.assertTrue(types.Float() == types.Float())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Float() == types.Integer())
class ListTypeTests(TypeTestHelper, unittest.TestCase):
type = types.List()
def test_empty_value(self):
self.assertConvertedValue('', [])
def test_single_value(self):
self.assertConvertedValue(' foo bar ',
['foo bar'])
def test_list_of_values(self):
self.assertConvertedValue(' foo bar, baz ',
['foo bar',
'baz'])
def test_list_of_values_containing_commas(self):
self.type_instance = types.List(types.String(quotes=True))
self.assertConvertedValue('foo,"bar, baz",bam',
['foo',
'bar, baz',
'bam'])
def test_list_of_lists(self):
self.type_instance = types.List(
types.List(types.String(), bounds=True)
)
self.assertConvertedValue('[foo],[bar, baz],[bam]',
[['foo'], ['bar', 'baz'], ['bam']])
def test_list_of_custom_type(self):
self.type_instance = types.List(types.Integer())
self.assertConvertedValue('1,2,3,5',
[1, 2, 3, 5])
def test_bounds_parsing(self):
self.type_instance = types.List(types.Integer(), bounds=True)
self.assertConvertedValue('[1,2,3]', [1, 2, 3])
def test_bounds_required(self):
self.type_instance = types.List(types.Integer(), bounds=True)
self.assertInvalid('1,2,3')
self.assertInvalid('[1,2,3')
self.assertInvalid('1,2,3]')
def test_repr(self):
t = types.List(types.Integer())
self.assertEqual('List of Integer', repr(t))
def test_equal(self):
self.assertTrue(types.List() == types.List())
def test_equal_with_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.Integer()
self.assertTrue(types.List(it1) == types.List(it2))
def test_not_equal_with_non_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.String()
self.assertFalse(it1 == it2)
self.assertFalse(types.List(it1) == types.List(it2))
def test_not_equal_to_other_class(self):
self.assertFalse(types.List() == types.Integer())
class DictTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Dict()
def test_empty_value(self):
self.assertConvertedValue('', {})
def test_single_value(self):
self.assertConvertedValue(' foo: bar ',
{'foo': 'bar'})
def test_dict_of_values(self):
self.assertConvertedValue(' foo: bar, baz: 123 ',
{'foo': 'bar',
'baz': '123'})
def test_custom_value_type(self):
self.type_instance = types.Dict(types.Integer())
self.assertConvertedValue('foo:123, bar: 456',
{'foo': 123,
'bar': 456})
def test_dict_of_values_containing_commas(self):
self.type_instance = types.Dict(types.String(quotes=True))
self.assertConvertedValue('foo:"bar, baz",bam:quux',
{'foo': 'bar, baz',
'bam': 'quux'})
def test_dict_of_dicts(self):
self.type_instance = types.Dict(
types.Dict(types.String(), bounds=True)
)
self.assertConvertedValue('k1:{k1:v1,k2:v2},k2:{k3:v3}',
{'k1': {'k1': 'v1', 'k2': 'v2'},
'k2': {'k3': 'v3'}})
def test_bounds_parsing(self):
self.type_instance = types.Dict(types.String(), bounds=True)
self.assertConvertedValue('{foo:bar,baz:123}',
{'foo': 'bar',
'baz': '123'})
def test_bounds_required(self):
self.type_instance = types.Dict(types.String(), bounds=True)
self.assertInvalid('foo:bar,baz:123')
self.assertInvalid('{foo:bar,baz:123')
self.assertInvalid('foo:bar,baz:123}')
def test_no_mapping_produces_error(self):
self.assertInvalid('foo,bar')
def test_repr(self):
t = types.Dict(types.Integer())
self.assertEqual('Dict of Integer', repr(t))
def test_equal(self):
self.assertTrue(types.Dict() == types.Dict())
def test_equal_with_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.Integer()
self.assertTrue(types.Dict(it1) == types.Dict(it2))
def test_not_equal_with_non_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.String()
self.assertFalse(it1 == it2)
self.assertFalse(types.Dict(it1) == types.Dict(it2))
def test_not_equal_to_other_class(self):
self.assertFalse(types.Dict() == types.Integer())
class IPAddressTypeTests(TypeTestHelper, unittest.TestCase):
type = types.IPAddress()
def test_ipv4_address(self):
self.assertConvertedValue('192.168.0.1', '192.168.0.1')
def test_ipv6_address(self):
self.assertConvertedValue('abcd:ef::1', 'abcd:ef::1')
def test_strings(self):
self.assertInvalid('')
self.assertInvalid('foo')
def test_numbers(self):
self.assertInvalid(1)
self.assertInvalid(-1)
self.assertInvalid(3.14)
class IPv4AddressTypeTests(IPAddressTypeTests):
type = types.IPAddress(4)
def test_ipv6_address(self):
self.assertInvalid('abcd:ef::1')
class IPv6AddressTypeTests(IPAddressTypeTests):
type = types.IPAddress(6)
def test_ipv4_address(self):
self.assertInvalid('192.168.0.1')
| 33.442238 | 78 | 0.601231 | ["Apache-2.0"] | bdrich/neutron-lbaas | .tox/scenario/lib/python2.7/site-packages/oslo_config/tests/test_types.py | 18,527 | Python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""A service for querying data for charts.
Functions for querying the IssueSnapshot table and associated join tables.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import settings
import time
from framework import framework_helpers
from framework import sql
from search import search_helpers
from tracker import tracker_bizobj
from tracker import tracker_helpers
from search import query2ast
from search import ast2select
from search import ast2ast
ISSUESNAPSHOT_TABLE_NAME = 'IssueSnapshot'
ISSUESNAPSHOT2CC_TABLE_NAME = 'IssueSnapshot2Cc'
ISSUESNAPSHOT2COMPONENT_TABLE_NAME = 'IssueSnapshot2Component'
ISSUESNAPSHOT2LABEL_TABLE_NAME = 'IssueSnapshot2Label'
ISSUESNAPSHOT_COLS = ['id', 'issue_id', 'shard', 'project_id', 'local_id',
'reporter_id', 'owner_id', 'status_id', 'period_start', 'period_end',
'is_open']
ISSUESNAPSHOT2CC_COLS = ['issuesnapshot_id', 'cc_id']
ISSUESNAPSHOT2COMPONENT_COLS = ['issuesnapshot_id', 'component_id']
ISSUESNAPSHOT2LABEL_COLS = ['issuesnapshot_id', 'label_id']
class ChartService(object):
"""Class for querying chart data."""
def __init__(self, config_service):
"""Constructor for ChartService.
Args:
config_service (ConfigService): An instance of ConfigService.
"""
self.config_service = config_service
# Set up SQL table objects.
self.issuesnapshot_tbl = sql.SQLTableManager(ISSUESNAPSHOT_TABLE_NAME)
self.issuesnapshot2cc_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2CC_TABLE_NAME)
self.issuesnapshot2component_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2COMPONENT_TABLE_NAME)
self.issuesnapshot2label_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2LABEL_TABLE_NAME)
def QueryIssueSnapshots(self, cnxn, services, unixtime, effective_ids,
project, perms, group_by=None, label_prefix=None,
query=None, canned_query=None):
"""Queries historical issue counts grouped by label or component.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
unixtime: An integer representing the Unix time in seconds.
effective_ids: The effective User IDs associated with the current user.
project: A project object representing the current project.
perms: A permissions object associated with the current user.
group_by (str, optional): Which dimension to group by. Values can
be 'label', 'component', or None, in which case no grouping will
be applied.
label_prefix: Required when group_by is 'label.' Will limit the query to
only labels with the specified prefix (for example 'Pri').
query (str, optional): A query string from the request to apply to
the snapshot query.
canned_query (str, optional): Parsed canned query applied to the query
scope.
Returns:
      1. A dict of {'2nd dimension or "total"': number of occurrences}.
2. A list of any unsupported query conditions in query.
3. A boolean that is true if any results were capped.
"""
project_config = services.config.GetProjectConfig(cnxn,
project.project_id)
try:
query_left_joins, query_where, unsupported_conds = self._QueryToWhere(
cnxn, services, project_config, query, canned_query, project)
except ast2select.NoPossibleResults:
return {}, ['Invalid query.'], False
restricted_label_ids = search_helpers.GetPersonalAtRiskLabelIDs(
cnxn, None, self.config_service, effective_ids, project, perms)
left_joins = [
('Issue ON IssueSnapshot.issue_id = Issue.id', []),
]
if restricted_label_ids:
left_joins.append(
(('Issue2Label AS Forbidden_label'
' ON Issue.id = Forbidden_label.issue_id'
' AND Forbidden_label.label_id IN (%s)' % (
sql.PlaceHolders(restricted_label_ids)
)), restricted_label_ids))
if effective_ids:
left_joins.append(
('Issue2Cc AS I2cc'
' ON Issue.id = I2cc.issue_id'
' AND I2cc.cc_id IN (%s)' % sql.PlaceHolders(effective_ids),
effective_ids))
# TODO(jeffcarp): Handle case where there are issues with no labels.
where = [
('IssueSnapshot.period_start <= %s', [unixtime]),
('IssueSnapshot.period_end > %s', [unixtime]),
('IssueSnapshot.project_id = %s', [project.project_id]),
('Issue.is_spam = %s', [False]),
('Issue.deleted = %s', [False]),
]
forbidden_label_clause = 'Forbidden_label.label_id IS NULL'
if effective_ids:
if restricted_label_ids:
forbidden_label_clause = ' OR %s' % forbidden_label_clause
else:
forbidden_label_clause = ''
where.append(
((
'(Issue.reporter_id IN (%s)'
' OR Issue.owner_id IN (%s)'
' OR I2cc.cc_id IS NOT NULL'
'%s)'
) % (
sql.PlaceHolders(effective_ids), sql.PlaceHolders(effective_ids),
forbidden_label_clause
),
list(effective_ids) + list(effective_ids)
))
else:
where.append((forbidden_label_clause, []))
if group_by == 'component':
cols = ['Comp.path', 'COUNT(IssueSnapshot.issue_id)']
left_joins.extend([
(('IssueSnapshot2Component AS Is2c ON'
' Is2c.issuesnapshot_id = IssueSnapshot.id'), []),
('ComponentDef AS Comp ON Comp.id = Is2c.component_id', []),
])
group_by = ['Comp.path']
elif group_by == 'label':
cols = ['Lab.label', 'COUNT(IssueSnapshot.issue_id)']
left_joins.extend([
(('IssueSnapshot2Label AS Is2l'
' ON Is2l.issuesnapshot_id = IssueSnapshot.id'), []),
('LabelDef AS Lab ON Lab.id = Is2l.label_id', []),
])
if not label_prefix:
raise ValueError('`label_prefix` required when grouping by label.')
# TODO(jeffcarp): If LookupIDsOfLabelsMatching() is called on output,
# ensure regex is case-insensitive.
where.append(('LOWER(Lab.label) LIKE %s', [label_prefix.lower() + '-%']))
group_by = ['Lab.label']
elif group_by == 'open':
cols = ['IssueSnapshot.is_open',
'COUNT(IssueSnapshot.issue_id) AS issue_count']
group_by = ['IssueSnapshot.is_open']
elif group_by == 'status':
left_joins.append(('StatusDef AS Stats ON ' \
'Stats.id = IssueSnapshot.status_id', []))
cols = ['Stats.status', 'COUNT(IssueSnapshot.issue_id)']
group_by = ['Stats.status']
elif group_by == 'owner':
cols = ['IssueSnapshot.owner_id', 'COUNT(IssueSnapshot.issue_id)']
group_by = ['IssueSnapshot.owner_id']
elif not group_by:
cols = ['IssueSnapshot.issue_id']
else:
raise ValueError('`group_by` must be label, component, ' \
'open, status, owner or None.')
if query_left_joins:
left_joins.extend(query_left_joins)
if query_where:
where.extend(query_where)
promises = []
for shard_id in range(settings.num_logical_shards):
count_stmt, stmt_args = self._BuildSnapshotQuery(cols=cols,
where=where, joins=left_joins, group_by=group_by,
shard_id=shard_id)
promises.append(framework_helpers.Promise(cnxn.Execute,
count_stmt, stmt_args, shard_id=shard_id))
shard_values_dict = {}
search_limit_reached = False
for promise in promises:
# Wait for each query to complete and add it to the dict.
shard_values = list(promise.WaitAndGetValue())
if not shard_values:
continue
if group_by:
for name, count in shard_values:
if count >= settings.chart_query_max_rows:
search_limit_reached = True
shard_values_dict.setdefault(name, 0)
shard_values_dict[name] += count
else:
if shard_values[0][0] >= settings.chart_query_max_rows:
search_limit_reached = True
shard_values_dict.setdefault('total', 0)
shard_values_dict['total'] += shard_values[0][0]
unsupported_field_names = list(set([
field.field_name
for cond in unsupported_conds
for field in cond.field_defs
]))
return shard_values_dict, unsupported_field_names, search_limit_reached
def StoreIssueSnapshots(self, cnxn, issues, commit=True):
"""Adds an IssueSnapshot and updates the previous one for each issue."""
for issue in issues:
right_now = self._currentTime()
# Update previous snapshot of current issue's end time to right now.
self.issuesnapshot_tbl.Update(cnxn,
delta={'period_end': right_now},
where=[('IssueSnapshot.issue_id = %s', [issue.issue_id]),
('IssueSnapshot.period_end = %s',
[settings.maximum_snapshot_period_end])],
commit=commit)
config = self.config_service.GetProjectConfig(cnxn, issue.project_id)
period_end = settings.maximum_snapshot_period_end
is_open = tracker_helpers.MeansOpenInProject(
tracker_bizobj.GetStatus(issue), config)
shard = issue.issue_id % settings.num_logical_shards
status = tracker_bizobj.GetStatus(issue)
status_id = self.config_service.LookupStatusID(
cnxn, issue.project_id, status) or None
owner_id = tracker_bizobj.GetOwnerId(issue) or None
issuesnapshot_rows = [(issue.issue_id, shard, issue.project_id,
issue.local_id, issue.reporter_id, owner_id, status_id, right_now,
period_end, is_open)]
ids = self.issuesnapshot_tbl.InsertRows(
cnxn, ISSUESNAPSHOT_COLS[1:],
issuesnapshot_rows,
replace=True, commit=commit,
return_generated_ids=True)
issuesnapshot_id = ids[0]
# Add all labels to IssueSnapshot2Label.
label_rows = [
(issuesnapshot_id,
self.config_service.LookupLabelID(cnxn, issue.project_id, label))
for label in tracker_bizobj.GetLabels(issue)
]
self.issuesnapshot2label_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2LABEL_COLS,
label_rows, replace=True, commit=commit)
# Add all CCs to IssueSnapshot2Cc.
cc_rows = [
(issuesnapshot_id, cc_id)
for cc_id in tracker_bizobj.GetCcIds(issue)
]
self.issuesnapshot2cc_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2CC_COLS,
cc_rows,
replace=True, commit=commit)
# Add all components to IssueSnapshot2Component.
component_rows = [
(issuesnapshot_id, component_id)
for component_id in issue.component_ids
]
self.issuesnapshot2component_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2COMPONENT_COLS,
component_rows,
replace=True, commit=commit)
# Add all components to IssueSnapshot2Hotlist.
# This is raw SQL to obviate passing FeaturesService down through
# the call stack wherever this function is called.
# TODO(jrobbins): sort out dependencies between service classes.
cnxn.Execute('''
INSERT INTO IssueSnapshot2Hotlist (issuesnapshot_id, hotlist_id)
SELECT %s, hotlist_id FROM Hotlist2Issue WHERE issue_id = %s
''', [issuesnapshot_id, issue.issue_id])
def ExpungeHotlistsFromIssueSnapshots(self, cnxn, hotlist_ids):
"""Expunge the existence of hotlists from issue snapshots.
This method will not commit the operation. This method will not make
changes to in-memory data.
Args:
cnxn: connection to SQL database.
hotlist_ids: list of hotlist_ids for hotlists we want to delete.
"""
vals_ph = sql.PlaceHolders(hotlist_ids)
cnxn.Execute(
'DELETE FROM IssueSnapshot2Hotlist '
'WHERE hotlist_id IN ({vals_ph})'.format(vals_ph=vals_ph),
hotlist_ids,
commit=False)
def _currentTime(self):
"""This is a separate method so it can be mocked by tests."""
return time.time()
def _QueryToWhere(self, cnxn, services, project_config, query, canned_query,
project):
"""Parses a query string into LEFT JOIN and WHERE conditions.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
project_config: The configuration for the given project.
query (string): The query to parse.
canned_query (string): The supplied canned query.
project: The current project.
Returns:
1. A list of LEFT JOIN clauses for the SQL query.
2. A list of WHERE clases for the SQL query.
3. A list of query conditions that are unsupported with snapshots.
"""
if not (query or canned_query):
return [], [], []
query = query or ''
scope = canned_query or ''
query_ast = query2ast.ParseUserQuery(query, scope,
query2ast.BUILTIN_ISSUE_FIELDS, project_config)
query_ast = ast2ast.PreprocessAST(cnxn, query_ast, [project.project_id],
services, project_config)
left_joins, where, unsupported = ast2select.BuildSQLQuery(query_ast,
snapshot_mode=True)
return left_joins, where, unsupported
def _BuildSnapshotQuery(self, cols, where, joins, group_by, shard_id):
"""Given SQL arguments, executes a snapshot COUNT query."""
stmt = sql.Statement.MakeSelect('IssueSnapshot', cols, distinct=True)
stmt.AddJoinClauses(joins, left=True)
stmt.AddWhereTerms(where + [('IssueSnapshot.shard = %s', [shard_id])])
if group_by:
stmt.AddGroupByTerms(group_by)
stmt.SetLimitAndOffset(limit=settings.chart_query_max_rows, offset=0)
stmt_str, stmt_args = stmt.Generate()
if group_by:
if group_by[0] == 'IssueSnapshot.is_open':
count_stmt = ('SELECT IF(results.is_open = 1, "Opened", "Closed") ' \
'AS bool_open, results.issue_count ' \
'FROM (%s) AS results' % stmt_str)
else:
count_stmt = stmt_str
else:
count_stmt = 'SELECT COUNT(results.issue_id) FROM (%s) AS results' % (
stmt_str)
return count_stmt, stmt_args
| 37.10705 | 79 | 0.675837 | ["BSD-3-Clause"] | xinghun61/infra | appengine/monorail/services/chart_svc.py | 14,212 | Python
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'rend'
copyright = '2020, Thomas S Hatch'
author = 'Thomas S Hatch'
# The full version, including alpha/beta/rc tags
release = '4.1'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
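# For example, API documentation could later be generated by adding
# 'sphinx.ext.autodoc' to this list (not enabled for this project).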
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 33.551724 | 79 | 0.661871 | ["Apache-2.0"] | saltstack/rend | docs/conf.py | 1,946 | Python
import time
import neopixel
import pytest
from resetMachine import *
@pytest.fixture()
def tenPixelStrand():
pin = machine.Pin(5)
return neopixel.NeoPixel(pin, n=10)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
class TestNeoPixel:
pin = machine.Pin(5)
def test_canSetPixelColor(self, resetMachine, tenPixelStrand):
tenPixelStrand[0] = green
tenPixelStrand[1] = red
assert tenPixelStrand[0] == green
assert tenPixelStrand[1] == red
def test_mustCallWriteToDisplay(self, resetMachine, tenPixelStrand):
tenPixelStrand[0] = green
tenPixelStrand[1] = red
assert len(tenPixelStrand.writesForTesting) == 0
tenPixelStrand.write()
assert len(tenPixelStrand.writesForTesting) == 1
def test_fill(self, resetMachine, tenPixelStrand):
tenPixelStrand.fill(green)
assert _allPixelsAreColor(tenPixelStrand, green)
def test_recordsWrites(self, resetMachine, tenPixelStrand):
delayTime = 300
tenPixelStrand.fill(green)
tenPixelStrand.write()
time.sleep(delayTime / 1000)
tenPixelStrand.fill(red)
tenPixelStrand.write()
writeHistory = tenPixelStrand.writesForTesting
assert len(writeHistory) == 2
assert _allPixelsAreColor(writeHistory[0], green)
assert writeHistory[0].timeFromFirstWrite == 0
assert _allPixelsAreColor(writeHistory[1], red)
assert _approximately(writeHistory[1].timeFromFirstWrite) == delayTime
def test_writeUpdatesPixels(self, resetMachine, tenPixelStrand):
tenPixelStrand[0] = green
tenPixelStrand[1] = red
tenPixelStrand.write()
assert len(tenPixelStrand.writesForTesting) == 1
writtenStrand = tenPixelStrand.writesForTesting[0]
assert writtenStrand[0] == green
assert writtenStrand[1] == red
assert writtenStrand.timeFromFirstWrite == 0
def test_initWithDefaults(self, resetMachine):
np = neopixel.NeoPixel(self.pin, n=10)
assert np.pin == self.pin
assert np.n == 10
assert np.bpp == 3
assert np.timing == 1
def test_initWithOverrides(self, resetMachine):
np = neopixel.NeoPixel(self.pin, n=10, bpp=4, timing=2)
assert np.bpp == 4
assert np.timing == 2
def test_invalid_bytes_per_pixel(self, resetMachine):
try:
neopixel.NeoPixel(self.pin, n=10, bpp=5, timing=2)
assert 0
except OSError:
pass
def _approximately(exactMilliSeconds):
return int(exactMilliSeconds / 10) * 10
def _allPixelsAreColor(strand, color):
pixelCount = strand.n
for i in range(pixelCount):
if strand[i] != color:
return False
return True
| 29.827957 | 78 | 0.658616 | ["MIT"] | fovallesp/esp32-python | test/test_neopixel.py | 2,774 | Python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Circulation Loan resolvers."""
from invenio_circulation.proxies import current_circulation
from invenio_pidstore.errors import PIDDeletedError
from invenio_app_ils.circulation.utils import resolve_item_from_loan
from invenio_app_ils.jsonresolver.api import \
get_field_value_for_record as get_field_value
from invenio_app_ils.jsonresolver.api import get_pid_or_default, pick
from invenio_app_ils.proxies import current_app_ils
from invenio_app_ils.records.resolver.resolver import get_patron
def item_resolver(loan_pid):
"""Resolve an Item given a Loan PID."""
Loan = current_circulation.loan_record_cls
loan = Loan.get_record_by_pid(loan_pid)
if not loan.get("item_pid"):
return {}
try:
# can resolve to an Item or BorrowingRequest
item = resolve_item_from_loan(loan["item_pid"])
except PIDDeletedError:
item = {}
else:
item = pick(
item,
"barcode", # not set in BorrowingRequest
"description",
"document_pid",
"medium", # not set in BorrowingRequest
"pid",
)
return item
@get_pid_or_default(default_value=dict())
def loan_patron_resolver(loan_pid):
"""Resolve a Patron given a Loan PID."""
Loan = current_circulation.loan_record_cls
try:
patron_pid = get_field_value(Loan, loan_pid, "patron_pid")
except KeyError:
return {}
return get_patron(patron_pid)
@get_pid_or_default(default_value=dict())
def document_resolver(loan_pid):
"""Resolve a Document given a Loan PID."""
Loan = current_circulation.loan_record_cls
try:
document_pid = get_field_value(Loan, loan_pid, "document_pid")
except KeyError:
return {}
Document = current_app_ils.document_record_cls
try:
document = Document.get_record_by_pid(document_pid)
except PIDDeletedError:
obj = {}
else:
obj = pick(
document,
"authors",
"edition",
"document_type",
"pid",
"title",
# TODO: add the imprint year here
)
return obj
| 28.190476 | 76 | 0.668074 | ["MIT"] | equadon/invenio-app-ils | invenio_app_ils/circulation/jsonresolvers/loan.py | 2,368 | Python
"""Tests for functions defined in the floodsystem/geo module
"""
from floodsystem import geo
from floodsystem.station import MonitoringStation
from floodsystem.stationdata import build_station_list
stations = build_station_list()
# define arbitrary stations for the tests
station_id1 = "test station id 1"
measure_id1 = "test measure id 1"
label1 = "TS1"
coord1 = (1.0, 4.0)
typical_range1 = (-2, 5)
river1 = "River Cam"
town1 = "Town 1"
TestStation1 = MonitoringStation(station_id1, measure_id1, label1, coord1, typical_range1, river1, town1)
station_id2 = "test station id 2"
measure_id2 = "test measure id 2"
label2 = "TS2"
coord2 = (0.0, 1.0)
typical_range2 = (-2, 2)
river2 = "River Cam"
town2 = "Town 2"
TestStation2 = MonitoringStation(station_id2, measure_id2, label2, coord2, typical_range2, river2, town2)
station_id3 = "test station id 3"
measure_id3 = "test measure id 3"
label3 = "TS3"
coord3 = (1.0, 1.0)
typical_range3 = (-2, 3)
river3 = "River Thames"
town3 = "Town 3"
TestStation3 = MonitoringStation(station_id3, measure_id3, label3, coord3, typical_range3, river3, town3)
test_stations = [TestStation1, TestStation2, TestStation3]
def test_stations_within_radius():
centre = (52.2053, 0.1218)
# check that no stations are at a negative distance from the centre
assert geo.stations_within_radius(stations, centre, 0) == []
# check that all stations are within 10000km of the centre
assert len(geo.stations_within_radius(stations, centre, 10000)) == len(stations)
def test_rivers_by_station_number():
lst = geo.rivers_by_station_number(stations, 2)
# check that the number of stations is greater (or equal to the second one) for the first river.
assert lst[0][1] >= lst[1][1]
def test_stations_by_distance():
test = geo.stations_by_distance(test_stations, (0,0))
# check that the results are in the right order based on the test stations provided above
assert (test[0][0], test[1][0], test[2][0]) == (TestStation2, TestStation3, TestStation1)
def test_rivers_with_station():
# check that the results are River Cam and River Thames as per the test stations provided above
assert geo.rivers_with_station(test_stations) == ['River Cam', 'River Thames']
def test_stations_by_river():
# check that the two stations on the River Cam are TestStation1 and TestStation2
assert sorted([x.name for x in geo.stations_by_river(test_stations)['River Cam']]) == [TestStation1.name, TestStation2.name]
| 34.943662 | 128 | 0.742846 | ["MIT"] | negsrahimi/lahsheesh | test_geo.py | 2,481 | Python
import pyopencl as cl
class DeviceInfo(object):
def __init__(self, device):
self.compute_units = device.get_info(cl.device_info.MAX_COMPUTE_UNITS)
self.maxShared = device.get_info(cl.device_info.LOCAL_MEM_SIZE) // 1024
self.compute_capability = (
device.get_info(cl.device_info.COMPUTE_CAPABILITY_MAJOR_NV),
device.get_info(cl.device_info.COMPUTE_CAPABILITY_MINOR_NV)
)
self.deviceName = device.get_info(cl.device_info.NAME)
self.deviceSimpleName = self.deviceName.replace(
'GeForce', '').replace('GTX', '').strip().replace(' ', '').lower()
print('deviceName', self.deviceName, 'compute capability', self.compute_capability)
print('compute units', self.compute_units, 'max shared memory', self.maxShared)
self.shared_memory_per_sm = None
# data comes from http://developer.download.nvidia.com/compute/cuda/CUDA_Occupancy_calculator.xls
if self.compute_capability[0] == 5:
if self.compute_capability[1] == 0:
self.shared_memory_per_sm = 65536
elif self.compute_capability[1] == 2:
self.shared_memory_per_sm = 98304
else:
                raise Exception('compute capability %s not recognized' % self.compute_capability)
else:
            raise Exception('compute capability %s not recognized' % self.compute_capability)
assert self.shared_memory_per_sm is not None
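if __name__ == '__main__':
    # Usage sketch: inspect the first device of the first OpenCL platform.
    # Assumes an NVIDIA GPU is visible to OpenCL; the indices are illustrative.
    _device = cl.get_platforms()[0].get_devices()[0]
    _info = DeviceInfo(_device)
    print(_info.deviceSimpleName, _info.shared_memory_per_sm)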
| 45.9375 | 105 | 0.666667 | ["BSD-2-Clause"] | hughperkins/gpu-experiments | gpuexperiments/deviceinfo.py | 1,470 | Python
import inspect
import os
import re
import subprocess
from collections import Counter
from io import StringIO
import pandas as pd
from numpy import unique
file_sep = os.path.sep
def imports_in_module(module):
"""
Get a list of strings showing what is imported in a module.
    :param module: An actual module object or the file of the module (as given by inspect.getfile(module))
:return: A list of strings showing the imported objects (modules, functions, variables, classes...)
Note: Requires having snakefood installed:
http://furius.ca/snakefood/doc/snakefood-doc.html#installation
You may want to use ``imports_in_py_content(py_content)`` on the actual string content itself.
# >>> print('\\n'.join(imports_in_module(__file__))) # doctest: +SKIP
# StringIO.StringIO
# collections.Counter
# inspect
# numpy.unique
# os
# pandas
# re
# subprocess
# ut.pfile.iter.get_filepath_iterator
# ut.util.code.packages.get_module_name
# ut.util.code.packages.read_requirements
"""
if not isinstance(module, str):
module = inspect.getfile(module)
if module.endswith('c'):
module = module[:-1] # remove the 'c' of '.pyc'
    t = subprocess.check_output(['sfood-imports', '-u', module]).decode()
return [x for x in t.split('\n') if len(x) > 0]
def base_modules_used_in_module(module):
"""
Get a list of strings showing what base modules that are imported in a module.
    :param module: An actual module object or the file of the module (as given by inspect.getfile(module))
:return: A list of strings showing the imported base modules (i.e. the X of import X.Y.Z or from X.Y import Z).
Note: Requires having snakefood installed:
http://furius.ca/snakefood/doc/snakefood-doc.html#installation
>>> base_modules_used_in_module(__file__) # doctest: +SKIP
['StringIO', 'collections', 'inspect', 'numpy', 'os', 'pandas', 're', 'subprocess', 'ut']
"""
return list(unique([re.compile('\w+').findall(x)[0] for x in imports_in_module(module)]))
def base_module_imports_in_module_recursive(module):
"""
Get a list of strings showing what base modules that are imported in a module, recursively.
It's the recursive version of the base_modules_used_in_module function.
Recursive in the sense that if module is a package module (i.e. containing a __init__.py and further submodules),
the base_modules_used_in_module function will be applied to all .py files under the mother folder.
Function returns a count (Counter object) of the number of modules where each base module was found.
    :param module: An actual module object or the file of the module (as given by inspect.getfile(module))
:param module_names: Modules to filter for.
None: Will grab all modules
A list or tuple: Of modules to grab
If not will assume module_names is a regex to apply to find module names
:return:
"""
# if module_names is None:
# module_names = any_module_import_regex
# elif isinstance(module_names, (tuple, list)):
# module_names = mk_multiple_package_import_regex(module_names)
if inspect.ismodule(module):
module = inspect.getsourcefile(module)
if module.endswith('__init__.py'):
module = os.path.dirname(module)
if os.path.isdir(module):
c = Counter()
it = get_filepath_iterator(module, pattern='.py$')
next(it) # to skip the seed module itself, and not get into an infinite loop
for _module in it:
try:
c.update(base_module_imports_in_module_recursive(_module))
except Exception as e:
if 'sfood-imports' in e.args[1]:
raise RuntimeError("You don't have sfood-imports installed (snakefood), so I can't do my job")
else:
print(("Error with module {}: {}".format(_module, e)))
return c
elif not os.path.isfile(module):
raise ValueError("module file not found: {}".format(module))
return Counter(base_modules_used_in_module(module))
# with open(module) as fp:
# module_contents = fp.read()
# return Counter(map(lambda x: x[1:], unique(module_names.findall(module_contents))))
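# Usage sketch (hypothetical package path; requires snakefood's `sfood-imports` on the PATH):
#   counts = base_module_imports_in_module_recursive('/path/to/my_package')
#   counts.most_common(5)  # e.g. [('os', 12), ('re', 9), ...]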
def requirements_packages_in_module(module, requirements=None):
if requirements is None:
requirements = list(pip_licenses_df(include_module_name=False)['package_name'])
elif isinstance(requirements, str) and os.path.isfile(requirements):
with open(requirements) as fp:
requirements = fp.read().splitlines()
p = re.compile('^[^=]+')
module_names = list()
for x in requirements:
try:
xx = p.findall(x)
if xx:
module_name = get_module_name(xx[0])
module_names.append(module_name)
except Exception as e:
print(("Error with {}\n {}".format(x, e)))
    counts = base_module_imports_in_module_recursive(module)
    return Counter(
        {name: count for name, count in counts.items() if name in module_names})
word_or_letter_p = re.compile('\w')
at_least_two_spaces_p = re.compile('\s{2,}')
def pip_licenses_df(package_names=None, include_module_name=True, on_module_search_error=None):
"""
Get a dataframe of pip packages and licences
:return:
"""
    pip_licenses_output = subprocess.check_output(['pip-licenses']).decode()
t = list(map(str.strip,
list(filter(word_or_letter_p.search,
pip_licenses_output.split('\n')))))
t = [at_least_two_spaces_p.sub('\t', x) for x in t]
t = '\n'.join(t)
df = pd.read_csv(StringIO(t), sep='\t')
df = df.rename(columns={'Name': 'package_name', 'Version': 'version', 'License': 'license'})
if include_module_name:
df['module'] = [get_module_name(x, on_error=on_module_search_error) for x in df['package_name']]
df = df[['module', 'package_name', 'version', 'license']] # reorder
if package_names is not None:
df = df[df['package_name'].isin(package_names)]
return df
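# Example (requires the `pip-licenses` command-line tool; illustrative):
#   licenses_df = pip_licenses_df(package_names=['pandas', 'numpy'])
#   licenses_df[['package_name', 'license']]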
def get_filepath_iterator(root_folder,
pattern='',
return_full_path=True,
apply_pattern_to_full_path=False):
if apply_pattern_to_full_path:
return recursive_file_walk_iterator_with_name_filter(root_folder, pattern, return_full_path)
else:
return recursive_file_walk_iterator_with_filepath_filter(root_folder, pattern, return_full_path)
def iter_relative_files_and_folder(root_folder):
from glob import iglob
if not root_folder.endswith(file_sep):
root_folder += file_sep
return map(lambda x: x.replace(root_folder, ''), iglob(root_folder + '*'))
def pattern_filter(pattern):
pattern = re.compile(pattern)
def _pattern_filter(s):
return pattern.search(s) is not None
return _pattern_filter
def recursive_file_walk_iterator_with_name_filter(root_folder, filt='', return_full_path=True):
if isinstance(filt, str):
filt = pattern_filter(filt)
# if isinstance(pattern, basestring):
# pattern = re.compile(pattern)
for name in iter_relative_files_and_folder(root_folder):
full_path = os.path.join(root_folder, name)
if os.path.isdir(full_path):
for entry in recursive_file_walk_iterator_with_name_filter(full_path, filt, return_full_path):
yield entry
else:
if os.path.isfile(full_path):
if filt(name):
if return_full_path:
yield full_path
else:
yield name
def recursive_file_walk_iterator_with_filepath_filter(root_folder, filt='', return_full_path=True):
if isinstance(filt, str):
filt = pattern_filter(filt)
for name in iter_relative_files_and_folder(root_folder):
full_path = os.path.join(root_folder, name)
if os.path.isdir(full_path):
for entry in recursive_file_walk_iterator_with_filepath_filter(full_path, filt, return_full_path):
yield entry
else:
if os.path.isfile(full_path):
if filt(full_path):
if return_full_path:
yield full_path
else:
yield name
| 38.638889 | 117 | 0.659358 | ["Apache-2.0"] | thorwhalen/tec | tec/snake_food_import_counting.py | 8,346 | Python
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
# -----------------------------------------------------------------------------
# System
# -----------------------------------------------------------------------------
_C.SYSTEM = CN()
_C.SYSTEM.NUM_GPUS = 4
_C.SYSTEM.NUM_CPUS = 4
# -----------------------------------------------------------------------------
# Model
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# Model architectures defined in the package: unet_super, super, fpn, unet_residual_3d
_C.MODEL.ARCHITECTURE = 'unet_residual_3d'
# Number of filters per unet block
_C.MODEL.FILTERS = [28, 36, 48, 64, 80]
_C.MODEL.TARGET_OPT = ['0']
_C.MODEL.WEIGHT_OPT = [['1']]
# Choose the right loss function for each target:
# 'WeightedMSE', 'WeightedBCE', 'JaccardLoss', 'DiceLoss'
_C.MODEL.LOSS_OPTION = [['WeightedBCE']]
# Weight for each loss function
_C.MODEL.LOSS_WEIGHT = [[1.0]]
# Define the number of input channels. Usually EM images are
# single-channel gray-scale images.
_C.MODEL.IN_PLANES = 1
# Define the number of output channels.
_C.MODEL.OUT_PLANES = 1
# Padding mode, possible options: 'zeros','circular', 'rep'
_C.MODEL.PAD_MODE = 'rep'
# Normalization mode, possible options: 'bn', 'abn', 'in', 'bin'
_C.MODEL.NORM_MODE = 'bn'
# Activation mode, possible options: 'relu', 'elu', 'leaky'
_C.MODEL.ACT_MODE = 'elu'
# If MODEL.EMBEDDING = 1 will do embedding
_C.MODEL.EMBEDDING = 1
# Last decoder head depth
_C.MODEL.HEAD_DEPTH = 1
_C.MODEL.INPUT_SIZE = [8, 256, 256]
_C.MODEL.OUTPUT_SIZE = [8, 256, 256]
_C.MODEL.REGU_OPT = []
_C.MODEL.REGU_WEIGHT = []
# Fine-tune suffix for model saving
_C.MODEL.FINETUNE = ''
# Exact matching: the weight shapes in the pretrained model and the current model are identical
_C.MODEL.EXACT = True
_C.MODEL.SIZE_MATCH = True
_C.MODEL.PRE_MODEL = ''
_C.MODEL.PRE_MODEL_LAYER = ['']
_C.MODEL.PRE_MODEL_ITER = 0
_C.MODEL.PRE_MODEL_LAYER_SELECT = [-1]
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASET = CN()
# Scale ratio of the input data for different resolutions.
# Using a DATA_SCALE of [1., 0.5, 0.5] will downsample the
# original image by two times (e.g., 4nm -> 8nm).
_C.DATASET.DATA_SCALE = [1., 1., 1.]
# Scaling factor for super resolution
_C.DATASET.SCALE_FACTOR = [2, 3, 3]
# Specify the data path in the *.yaml files for different experiments.
_C.DATASET.IMAGE_NAME = ''
_C.DATASET.LABEL_NAME = ''
_C.DATASET.INPUT_PATH = ''
_C.DATASET.OUTPUT_PATH = ''
# Padding size for the input volumes
_C.DATASET.PAD_SIZE = [2, 64, 64]
# Half Patch size for 2D label erosion
_C.DATASET.LABEL_EROSION = 0
# If it's a binary label
_C.DATASET.LABEL_BINARY = False
_C.DATASET.LABEL_MAG = 0
# Data in tile format or not.
_C.DATASET.DO_CHUNK_TITLE = 0
# Chunk parameters for tile format: chunk_num (z,y,x), chunk_stride
_C.DATASET.DATA_CHUNK_NUM = [1, 1, 1]
# Predefined data chunk to iterate through
_C.DATASET.DATA_CHUNK_NUM_IND = []
# Boolean variable, equal to 'int(args.data_chunk_num[-1:])==1'
_C.DATASET.DATA_CHUNK_STRIDE = True
# Chunk parameters for tile format: chunk_iter_num
_C.DATASET.DATA_CHUNK_ITER = 1000
# Number of voxels to exceed for a valid sample
_C.DATASET.DATA_INVALID_THRES = [0., 0.]
_C.DATASET.PRE_LOAD_DATA = [None, None, None]
# Reject sampling
_C.DATASET.REJECT_SIZE_THRES = 100
_C.DATASET.REJECT_P = 0.95
# -----------------------------------------------------------------------------
# Augmentor
# -----------------------------------------------------------------------------
_C.AUGMENTOR = CN()
_C.AUGMENTOR.ROTATE = True
# Probability of applying the rotation augmentation
_C.AUGMENTOR.ROTATE_P = 0.1
_C.AUGMENTOR.RESCALE = True
# Probability of applying the rescale augmentation
_C.AUGMENTOR.RESCALE_P = 0.5
_C.AUGMENTOR.FLIP = True
# Probability of applying the flip augmentation
_C.AUGMENTOR.FLIP_P = 1.0
# Conduct x-z and y-z flips only when the dataset is isotropic.
_C.AUGMENTOR.FLIP_DO_ZTRANS = 0
_C.AUGMENTOR.ELASTIC = True
# Maximum pixel-moving distance of elastic transformation
_C.AUGMENTOR.ELASTIC_ALPHA = 12.0
# Standard deviation of the Gaussian filter
_C.AUGMENTOR.ELASTIC_SIGMA = 4.0
# Probability of applying the elastic augmentation
_C.AUGMENTOR.ELASTIC_P = 0.75
_C.AUGMENTOR.GRAYSCALE = True
# Probability of applying the grayscale augmentation
_C.AUGMENTOR.GRAYSCALE_P = 0.75
_C.AUGMENTOR.MISSINGPARTS = True
# Probability of applying the missingparts augmentation
_C.AUGMENTOR.MISSINGPARTS_P = 0.9
_C.AUGMENTOR.MISSINGSECTION = True
# Probability of applying the missingsection augmentation
_C.AUGMENTOR.MISSINGSECTION_P = 0.5
_C.AUGMENTOR.MISALIGNMENT = True
# Probability of applying the misalignment augmentation
_C.AUGMENTOR.MISALIGNMENT_P = 1.0
# Maximum pixel displacement in each direction (x and y) (int)
_C.AUGMENTOR.MISALIGNMENT_DISPLACEMENT = 16
# -----------------------------------------------------------------------------
# Solver
# -----------------------------------------------------------------------------
_C.SOLVER = CN()
# Specify the learning rate scheduler.
_C.SOLVER.LR_SCHEDULER_NAME = "MultiStepLR"
_C.SOLVER.ITERATION_STEP = 1
_C.SOLVER.ITERATION_SAVE = 5000
_C.SOLVER.ITERATION_TOTAL = 40000
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.BIAS_LR_FACTOR = 1.0
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.0
_C.SOLVER.MOMENTUM = 0.9
# The weight decay that's applied to parameters of normalization layers
# (typically the affine transformation)
_C.SOLVER.WEIGHT_DECAY = 0.0001
_C.SOLVER.WEIGHT_DECAY_NORM = 0.0
# The iteration number to decrease learning rate by GAMMA
_C.SOLVER.GAMMA = 0.1
# should be a tuple like (30000,)
_C.SOLVER.STEPS = (30000, 35000)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
_C.SOLVER.WARMUP_ITERS = 1000
_C.SOLVER.WARMUP_METHOD = "linear"
# Save a checkpoint after this many iterations
_C.SOLVER.CHECKPOINT_PERIOD = 5000
# Number of samples per batch across all machines.
# If we have 16 GPUs and IMS_PER_BATCH = 32,
# each GPU will see 2 images per batch.
_C.SOLVER.SAMPLES_PER_BATCH = 16
# -----------------------------------------------------------------------------
# Monitor
# -----------------------------------------------------------------------------
_C.MONITOR = CN()
_C.MONITOR.LOG_OPT = [1, 1, 0]
_C.MONITOR.VIS_OPT = [0, 8]
_C.MONITOR.ITERATION_NUM = [10, 50]
# -----------------------------------------------------------------------------
# Inference
# -----------------------------------------------------------------------------
_C.INFERENCE = CN()
_C.INFERENCE.INPUT_SIZE = [8, 256, 256]
_C.INFERENCE.OUTPUT_SIZE = [8, 256, 256]
_C.INFERENCE.IMAGE_NAME = ''
_C.INFERENCE.OUTPUT_PATH = ''
_C.INFERENCE.OUTPUT_NAME = 'result.h5'
_C.INFERENCE.PAD_SIZE = [8, 64, 64]
_C.INFERENCE.STRIDE = [1, 192, 192]
_C.INFERENCE.AUG_MODE = 'mean'
_C.INFERENCE.AUG_NUM = 4
_C.INFERENCE.DO_EVAL = True
_C.INFERENCE.DO_3D = True
# If not None then select channel of output
_C.INFERENCE.MODEL_OUTPUT_ID = [None]
# Number of test workers
_C.INFERENCE.TEST_NUM = 1
# Test worker id
_C.INFERENCE.TEST_ID = 0
# Batchsize for inference
_C.INFERENCE.SAMPLES_PER_BATCH = 32
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _C.clone()
def save_all_cfg(cfg, output_dir):
"""Save configs in the output directory."""
# Save config.yaml in the experiment directory after combine all
# non-default configurations from yaml file and command line.
path = os.path.join(output_dir, "config.yaml")
with open(path, "w") as f:
f.write(cfg.dump())
print("Full config saved to {}".format(path))
| 25.639241 | 86 | 0.619477 | [
"MIT"
] | divyam-goel/pytorch_connectomics | connectomics/config/config.py | 8,102 | Python |
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
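    # Evaluate each candidate factor at ``v`` (and at integer sample points for
    # any symbolic coefficients) with increasing precision, keeping only the
    # factors that are numerically zero, until a single factor survives.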
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
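    # ``mp1a`` vanishes at ``x = op(ex1, y)`` (``mp1(x - y)`` for Add,
    # ``y**deg1 * mp1(x/y)`` for Mul); the resultant with ``mp2(y)`` then
    # eliminates ``y``, giving a polynomial that vanishes at ``op(ex1, ex2)``.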
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
# for a = pi*p/q, cos(q*a) =T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
# x**(2*q) = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
If ``compose``, the minimal polynomial of the subexpressions
of ``ex`` are computed, then the arithmetic operations on them are
performed using the resultant and factorization. If ``groebner``,
a bottom-up algorithm, using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
                         'polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
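    # ``mapping`` associates every algebraic subexpression with a defining
    # polynomial relation in a fresh symbol; a lexicographic Groebner basis of
    # these relations together with ``d*x - n`` eliminates the auxiliary symbols.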
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
raise RuntimeError('run out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
| 30.618005 | 88 | 0.54458 | [
"BSD-3-Clause"
] | diofant/diofant | diofant/polys/numberfields.py | 25,170 | Python |
print('\033[1;33m--' * 10)
print('\033[1;32m EXERCISE 002')
print('\033[1;33m--\033[m' * 10)
nome = input('\033[1;34mEnter your name: ')
print(f'It is a pleasure to meet you, \033[1;33m{nome}!')
| 31.833333 | 52 | 0.633508 | [
"MIT"
] | Dobravoski/Exercicios-Python | Exercicios/ex002.py | 193 | Python |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login required for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'[email protected]',
'password'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for authenticated user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
Tag.objects.create(user=user2, name='Tasty')
tag = Tag.objects.create(user=self.user, name='Just Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag_successful(self):
"""Test creating a new tag"""
payload = {'name': 'Simple'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
"""Test creating a new tag with invalid payload"""
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
"""Test filtering tags by those assigned to recipes"""
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=10,
price=5.00,
user=self.user,
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_tags_assigned_unique(self):
"""Test filtering tags by assigned returns unique items"""
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(
title='Pancakes',
time_minutes=5,
price=3.00,
user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title='Porridge',
time_minutes=3,
price=2.00,
user=self.user
)
recipe2.tags.add(tag)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
| 31.555556 | 71 | 0.629527 | [
"MIT"
] | deborahoni/recipe-app-api | app/recipe/tests/test_tags_api.py | 3,976 | Python |
from pipeline import app
if __name__ == "__main__":
app.run(debug=True)
| 15.4 | 26 | 0.701299 | [
"Apache-2.0"
] | HeqetLabs/pipeline | debug.py | 77 | Python |
from pyxform.tests_v1.pyxform_test_case import PyxformTestCase
class InvalidSurveyColumnsTests(PyxformTestCase):
def test_missing_name(self):
"""
every question needs a name (or alias of name)
"""
self.assertPyxformXform(
name='invalidcols',
ss_structure={'survey': [{'type': 'text',
'label': 'label'}]},
errored=True,
error__contains=['no name'],
)
def test_missing_name_but_has_alias_of_name(self):
self.assertPyxformXform(
name='invalidcols',
ss_structure={'survey': [{'value': 'q1',
'type': 'text',
'label': 'label'}]},
errored=False,
)
def test_missing_label(self):
self.assertPyxformXform(
name="invalidcols",
ss_structure={'survey': [{'type': 'text',
'name': 'q1'}]},
errored=True,
error__contains=['no label or hint'],
)
def test_column_case(self):
"""
        Ensure that column names are case insensitive
"""
self.assertPyxformXform(
name="mixedcasecolumns",
md="""
| Survey | | | |
| | Type | name | Label |
| | text | Name | the name |
| | integer | age | the age |
| | text | gender | the gender |
""",
errored=False,
debug=True
)
class InvalidChoiceSheetColumnsTests(PyxformTestCase):
def _simple_choice_ss(self, choice_sheet=None):
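        """Return a survey with one select_one question over list ``l1`` and the given choices sheet."""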
if choice_sheet is None:
choice_sheet = []
return {'survey': [{'type': 'select_one l1',
'name': 'l1choice',
'label': 'select one from list l1'}],
'choices': choice_sheet}
def test_valid_choices_sheet_passes(self):
self.assertPyxformXform(
name='valid_choices',
ss_structure=self._simple_choice_ss([
{'list_name': 'l1',
'name': 'c1',
'label': 'choice 1'},
{'list_name': 'l1',
'name': 'c2',
'label': 'choice 2'}]),
errored=False,
)
def test_invalid_choices_sheet_fails(self):
self.assertPyxformXform(
name='missing_name',
ss_structure=self._simple_choice_ss([
{'list_name': 'l1',
'label': 'choice 1'},
{'list_name': 'l1',
'label': 'choice 2'},
]),
errored=True,
error__contains=['option with no name'],
)
def test_missing_list_name(self):
self.assertPyxformXform(
name='missing_list_name',
ss_structure=self._simple_choice_ss([
{'bad_column': 'l1',
'name': 'l1c1',
'label': 'choice 1'},
{'bad_column': 'l1',
'name': 'l1c1',
'label': 'choice 2'},
]),
debug=True,
errored=True,
# some basic keywords that should be in the error:
error__contains=[
'choices',
'name',
'list name',
])
class AliasesTests(PyxformTestCase):
def test_value_and_name(self):
'''
confirm that both 'name' and 'value' columns of choice list work
'''
for name_alias in ['name', 'value']:
self.assertPyxformXform(
name="aliases",
md="""
| survey | | | |
| | type | name | label |
| | select_one yn | q1 | Question 1 |
| choices | | | |
| | list name | %(name_alias)s | label |
| | yn | yes | Yes |
| | yn | no | No |
""" % ({
u'name_alias': name_alias
}),
instance__contains=[
'<q1/>',
],
model__contains=[
'<bind nodeset="/aliases/q1" type="select1"/>',
],
xml__contains=[
'<select1 ref="/aliases/q1">',
'<value>yes</value>',
'<value>no</value>',
'</select1>',
])
''' # uncomment when re-implemented
# TODO: test that this fails for the correct reason
def test_conflicting_aliased_values_raises_error(self):
# example:
# an xlsform has {"name": "q_name", "value": "q_value"}
# should not compile because "name" and "value" columns are aliases
self.assertPyxformXform(
# debug=True,
name="aliases",
md="""
| survey | | | | |
| | type | name | value | label |
| | text | q_name | q_value | Question 1 |
""",
errored=True,
)
'''
| 35.367089 | 75 | 0.411059 | [
"BSD-2-Clause"
] | medic/pyxform | pyxform/tests_v1/test_sheet_columns.py | 5,588 | Python |
from mayan.apps.documents.tests.base import GenericDocumentViewTestCase
from mayan.apps.testing.tests.base import GenericViewTestCase
from ..events import event_workflow_template_edited
from ..models import WorkflowTransition
from ..permissions import (
permission_workflow_template_edit, permission_workflow_template_view
)
from .literals import TEST_WORKFLOW_TEMPLATE_TRANSITION_LABEL
from .mixins.workflow_instance_mixins import WorkflowInstanceViewTestMixin
from .mixins.workflow_template_mixins import (
WorkflowTemplateTestMixin, WorkflowTemplateViewTestMixin
)
from .mixins.workflow_template_transition_mixins import (
WorkflowTransitionEventViewTestMixin, WorkflowTransitionFieldTestMixin,
WorkflowTransitionFieldViewTestMixin, WorkflowTransitionViewTestMixin
)
class WorkflowTransitionViewTestCase(
WorkflowTemplateTestMixin, WorkflowTemplateViewTestMixin,
WorkflowTransitionViewTestMixin, GenericViewTestCase
):
def setUp(self):
super().setUp()
self._create_test_workflow_template()
self._create_test_workflow_template_state()
self._create_test_workflow_template_state()
def test_workflow_template_transition_create_view_no_permission(self):
self._clear_events()
response = self._request_test_workflow_template_transition_create_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(WorkflowTransition.objects.count(), 0)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_template_transition_create_view_with_access(self):
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_edit
)
self._clear_events()
response = self._request_test_workflow_template_transition_create_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(WorkflowTransition.objects.count(), 1)
self.assertEqual(
WorkflowTransition.objects.all()[0].label,
TEST_WORKFLOW_TEMPLATE_TRANSITION_LABEL
)
self.assertEqual(
WorkflowTransition.objects.all()[0].origin_state,
self.test_workflow_template_states[0]
)
self.assertEqual(
WorkflowTransition.objects.all()[0].destination_state,
self.test_workflow_template_states[1]
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(
events[0].action_object, self.test_workflow_template_transition
)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_workflow_template)
self.assertEqual(events[0].verb, event_workflow_template_edited.id)
def test_workflow_template_transition_delete_view_no_permission(self):
self._create_test_workflow_template_transition()
self._clear_events()
response = self._request_test_workflow_template_transition_delete_view()
self.assertEqual(response.status_code, 404)
self.assertTrue(
self.test_workflow_template_transition in WorkflowTransition.objects.all()
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_template_transition_delete_view_with_access(self):
self._create_test_workflow_template_transition()
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_edit
)
self._clear_events()
response = self._request_test_workflow_template_transition_delete_view()
self.assertEqual(response.status_code, 302)
self.assertFalse(
self.test_workflow_template_transition in WorkflowTransition.objects.all()
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_workflow_template)
self.assertEqual(events[0].verb, event_workflow_template_edited.id)
def test_workflow_template_transition_edit_view_no_permission(self):
self._create_test_workflow_template_transition()
test_workflow_template_transition_label = self.test_workflow_template_transition.label
self._clear_events()
response = self._request_test_workflow_template_transition_edit_view()
self.assertEqual(response.status_code, 404)
self.test_workflow_template_transition.refresh_from_db()
self.assertEqual(
self.test_workflow_template_transition.label,
test_workflow_template_transition_label
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_template_transition_edit_view_with_access(self):
self._create_test_workflow_template_transition()
test_workflow_template_transition_label = self.test_workflow_template_transition.label
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_edit
)
self._clear_events()
response = self._request_test_workflow_template_transition_edit_view()
self.assertEqual(response.status_code, 302)
self.test_workflow_template_transition.refresh_from_db()
self.assertNotEqual(
self.test_workflow_template_transition.label,
test_workflow_template_transition_label
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(
events[0].action_object, self.test_workflow_template_transition
)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_workflow_template)
self.assertEqual(events[0].verb, event_workflow_template_edited.id)
def test_workflow_template_transition_list_view_no_permission(self):
self._create_test_workflow_template_transition()
self._clear_events()
response = self._request_test_workflow_template_transition_list_view()
self.assertNotContains(
response=response,
text=self.test_workflow_template_transition.label,
status_code=404
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_template_transition_list_view_with_access(self):
self._create_test_workflow_template_transition()
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_view
)
self._clear_events()
response = self._request_test_workflow_template_transition_list_view()
self.assertContains(
response=response,
text=self.test_workflow_template_transition.label,
status_code=200
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
class WorkflowTransitionEventViewTestCase(
WorkflowInstanceViewTestMixin, WorkflowTemplateTestMixin,
WorkflowTransitionEventViewTestMixin, GenericDocumentViewTestCase
):
auto_upload_test_document = False
def setUp(self):
super().setUp()
self._create_test_workflow_template()
self.test_workflow_template.document_types.add(
self.test_document_type
)
self._create_test_workflow_template_state()
self._create_test_workflow_template_state()
self._create_test_workflow_template_transition()
self._create_test_workflow_template_transition()
self._create_test_document_stub()
self.test_workflow_instance = self.test_document.workflows.first()
def test_workflow_template_transition_event_list_view_no_permission(self):
self._clear_events()
response = self._request_test_workflow_template_transition_event_list_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_template_transition_event_list_view_with_access(self):
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_edit
)
self._clear_events()
response = self._request_test_workflow_template_transition_event_list_view()
self.assertEqual(response.status_code, 200)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
class WorkflowTransitionFieldViewTestCase(
WorkflowTemplateTestMixin, WorkflowTransitionFieldTestMixin,
WorkflowTransitionFieldViewTestMixin, WorkflowTransitionViewTestMixin,
GenericViewTestCase
):
def setUp(self):
super().setUp()
self._create_test_workflow_template()
self._create_test_workflow_template_state()
self._create_test_workflow_template_state()
self._create_test_workflow_template_transition()
def test_workflow_template_transition_field_create_view_no_permission(self):
workflow_template_transition_field_count = self.test_workflow_template_transition.fields.count()
self._clear_events()
response = self._request_workflow_template_transition_field_create_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_workflow_template_transition.fields.count(),
workflow_template_transition_field_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_template_transition_field_create_view_with_access(self):
workflow_template_transition_field_count = self.test_workflow_template_transition.fields.count()
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_edit
)
self._clear_events()
response = self._request_workflow_template_transition_field_create_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_workflow_template_transition.fields.count(),
workflow_template_transition_field_count + 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(
events[0].action_object,
self.test_workflow_template_transition_field
)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_workflow_template)
self.assertEqual(events[0].verb, event_workflow_template_edited.id)
def test_workflow_template_transition_field_delete_view_no_permission(self):
self._create_test_workflow_template_transition_field()
workflow_template_transition_field_count = self.test_workflow_template_transition.fields.count()
self._clear_events()
response = self._request_workflow_template_transition_field_delete_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_workflow_template_transition.fields.count(),
workflow_template_transition_field_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_template_transition_field_delete_view_with_access(self):
self._create_test_workflow_template_transition_field()
workflow_template_transition_field_count = self.test_workflow_template_transition.fields.count()
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_edit
)
self._clear_events()
response = self._request_workflow_template_transition_field_delete_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_workflow_template_transition.fields.count(),
workflow_template_transition_field_count - 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_workflow_template)
self.assertEqual(events[0].verb, event_workflow_template_edited.id)
def test_workflow_template_transition_field_edit_view_no_permission(self):
self._create_test_workflow_template_transition_field()
workflow_template_transition_field_label = self.test_workflow_template_transition_field.label
self._clear_events()
response = self._request_workflow_template_transition_field_edit_view()
self.assertEqual(response.status_code, 404)
self.test_workflow_template_transition_field.refresh_from_db()
self.assertEqual(
workflow_template_transition_field_label,
self.test_workflow_template_transition_field.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_template_transition_field_edit_view_with_access(self):
self._create_test_workflow_template_transition_field()
workflow_template_transition_field_label = self.test_workflow_template_transition_field.label
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_edit
)
self._clear_events()
response = self._request_workflow_template_transition_field_edit_view()
self.assertEqual(response.status_code, 302)
self.test_workflow_template_transition_field.refresh_from_db()
self.assertNotEqual(
workflow_template_transition_field_label,
self.test_workflow_template_transition_field.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(
events[0].action_object,
self.test_workflow_template_transition_field
)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_workflow_template)
self.assertEqual(events[0].verb, event_workflow_template_edited.id)
def test_workflow_template_transition_field_list_view_no_permission(self):
self._create_test_workflow_template_transition_field()
self._clear_events()
response = self._request_test_workflow_template_transition_field_list_view()
self.assertNotContains(
response=response,
text=self.test_workflow_template_transition_field.label,
status_code=404
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_template_transition_field_list_view_with_access(self):
self._create_test_workflow_template_transition_field()
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_edit
)
self._clear_events()
response = self._request_test_workflow_template_transition_field_list_view()
self.assertContains(
response=response,
text=self.test_workflow_template_transition_field.label,
status_code=200
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
| 38.21327 | 105 | 0.708483 | [
"Apache-2.0"
] | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | mayan/apps/document_states/tests/test_workflow_transition_views.py | 16,126 | Python |
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from {{cookiecutter.app_name}}.config import app_config
from {{cookiecutter.app_name}}.models import db, bcrypt
from {{cookiecutter.app_name}}.resources import Login, Register
from {{cookiecutter.app_name}}.schemas import ma
def create_app(env_name):
"""
Create app
"""
    # app initialization
app = Flask(__name__)
CORS(app)
app.config.from_object(app_config[env_name])
    # initializing the bcrypt, db and marshmallow extensions
bcrypt.init_app(app)
db.init_app(app)
ma.init_app(app)
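    # wire up Flask-Migrate and expose its commands through a Flask-Script manager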
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
# Route
api = Api(app)
# user endpoint
api.add_resource(Login, '/auth/login')
api.add_resource(Register, '/auth/register')
return app
| 22.930233 | 63 | 0.704868 | [
"MIT"
] | opatua/cookiecutter-flask-api | {{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py | 986 | Python |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import LMContextWindowDataset
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_scorer import SequenceScorer
from fairseq import distributed_utils
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger('fairseq_cli.eval_lm')
class WordStat(object):
def __init__(self, word, is_bpe):
self.word = word
self.is_bpe = is_bpe
self.log_prob = 0
self.next_word_prob = 0
self.count = 0
self.missing_next_words = 0
def add(self, log_prob, next_word_prob):
""" increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
        or it might not be counted because it is not an ending subword unit,
also keeps track of how many of those we have seen """
if next_word_prob is not None:
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1
def __str__(self):
return '{}\t{}\t{}\t{}\t{}\t{}'.format(self.word, self.count, self.log_prob, self.is_bpe,
self.next_word_prob, self.count - self.missing_next_words)
def main(parsed_args, **unused_kwargs):
assert parsed_args.path is not None, '--path required for evaluation!'
if torch.cuda.is_available() and not parsed_args.cpu:
torch.cuda.set_device(parsed_args.device_id)
utils.import_user_module(parsed_args)
logger.info(parsed_args)
if parsed_args.ipex:
import intel_pytorch_extension as ipex
        if parsed_args.dnnl:
ipex.core.enable_auto_dnnl()
else:
ipex.core.disable_auto_dnnl()
        if parsed_args.mix_precision:
ipex.core.enable_mix_bf16_fp32()
use_cuda = torch.cuda.is_available() and not parsed_args.cpu
task = tasks.setup_task(parsed_args)
# Load ensemble
logger.info('loading model(s) from {}'.format(parsed_args.path))
models, args = checkpoint_utils.load_model_ensemble(
parsed_args.path.split(os.pathsep),
arg_overrides=eval(parsed_args.model_overrides),
task=task,
suffix=getattr(parsed_args, "checkpoint_suffix", ""),
)
for arg in vars(parsed_args).keys():
if arg not in {
'self_target', 'future_target', 'past_target', 'tokens_per_sample',
'output_size_dictionary', 'add_bos_token',
}:
setattr(args, arg, getattr(parsed_args, arg))
# reduce tokens per sample by the required context window size
args.tokens_per_sample -= args.context_window
task = tasks.setup_task(args)
# Load dataset splits
task.load_dataset(args.gen_subset)
dataset = task.dataset(args.gen_subset)
if args.context_window > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=args.tokens_per_sample,
context_window=args.context_window,
pad_idx=task.source_dictionary.pad(),
)
logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
# Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
for model in models:
model.prepare_for_inference_(args)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
if args.ipex:
            model = model.to(device=ipex.DEVICE)
assert len(models) > 0
logger.info('num. model params: {}'.format(sum(p.numel() for p in models[0].parameters())))
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens or 36000,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(*[
model.max_positions() for model in models
]),
ignore_invalid_inputs=True,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
)
gen_timer = StopwatchMeter()
scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)
score_sum = 0.
count = 0
if args.remove_bpe is not None:
if args.remove_bpe == 'sentencepiece':
raise NotImplementedError
else:
bpe_cont = args.remove_bpe.rstrip()
bpe_toks = {
i
for i in range(len(task.source_dictionary))
if task.source_dictionary[i].endswith(bpe_cont)
}
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
word_stats = dict()
wps_meter = TimeMeter()
for sample in progress:
if 'net_input' not in sample:
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
sample = utils.move_to_ipex(sample) if args.ipex else sample
gen_timer.start()
hypos = scorer.generate(models, sample)
gen_timer.stop(sample['ntokens'])
for i, hypos_i in enumerate(hypos):
hypo = hypos_i[0]
sample_id = sample['id'][i]
tokens = hypo['tokens']
tgt_len = tokens.numel()
pos_scores = hypo['positional_scores'].float()
if args.add_bos_token:
assert hypo['tokens'][0].item() == task.target_dictionary.bos()
tokens = tokens[1:]
pos_scores = pos_scores[1:]
skipped_toks = 0
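            # Fold the score of each BPE continuation token into the following
            # token (and count it as skipped) so scores are reported per word.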
if bpe_toks is not None:
for i in range(tgt_len - 1):
if tokens[i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
if inf_scores.any():
logger.info(
                    'skipping tokens with inf scores: %s',
task.target_dictionary.string(tokens[inf_scores.nonzero()])
)
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum().cpu()
count += pos_scores.numel() - skipped_toks
if args.output_word_probs or args.output_word_stats:
w = ''
word_prob = []
is_bpe = False
for i in range(len(tokens)):
w_ind = tokens[i].item()
w += task.source_dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
next_prob = None
ind = i + 1
while ind < len(tokens):
if pos_scores[ind].item() != 0:
next_prob = pos_scores[ind]
break
ind += 1
word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)
is_bpe = False
w = ''
if args.output_word_probs:
logger.info(
str(int(sample_id)) + " "
+ ('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))
)
wps_meter.update(sample['ntokens'])
progress.log({'wps': round(wps_meter.avg)})
avg_nll_loss = -score_sum / count / math.log(2) # convert to base 2
logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(
gen_timer.n, gen_timer.sum, 1. / gen_timer.avg
))
logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(
avg_nll_loss, 2**avg_nll_loss
))
if args.output_word_stats:
for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
logger.info(ws)
def cli_main():
parser = options.get_eval_lm_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(args, main)
if __name__ == '__main__':
cli_main()
| 34.119403 | 112 | 0.585302 | [
"MIT"
] | liangan1/fairseq | fairseq_cli/eval_lm.py | 9,144 | Python |
BACKSLASH = '\\'
class MiniString(object):
"""
Create a representation of a string object
:param str string: The string to minify
"""
def __init__(self, string, quote="'"):
self._s = string
self.safe_mode = False
self.quote = quote
def __str__(self):
"""
The smallest python literal representation of a string
:rtype: str
"""
if self._s == '':
return ''
if len(self.quote) == 1:
s = self.to_short()
else:
s = self.to_long()
try:
eval(self.quote + s + self.quote)
except UnicodeDecodeError:
            if self.safe_mode:
                raise
            # Rebuild the literal with safe_mode enabled so that non-ASCII
            # characters are written as \u/\U escapes instead of raw characters.
            self.safe_mode = True
            return str(self)
assert eval(self.quote + s + self.quote) == self._s
return s
def to_short(self):
s = ''
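        # Escape sequences for characters that should not appear literally
        # inside a short (one-character quote) string literal.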
escaped = {
'\n': BACKSLASH + 'n',
'\\': BACKSLASH + BACKSLASH,
'\a': BACKSLASH + 'a',
'\b': BACKSLASH + 'b',
'\f': BACKSLASH + 'f',
'\r': BACKSLASH + 'r',
'\t': BACKSLASH + 't',
'\v': BACKSLASH + 'v',
'\0': BACKSLASH + 'x00',
self.quote: BACKSLASH + self.quote,
}
for c in self._s:
if c in escaped.keys():
s += escaped[c]
else:
if self.safe_mode:
unicode_value = ord(c)
if unicode_value <= 0x7F:
s += c
elif unicode_value <= 0xFFFF:
s += BACKSLASH + 'u' + format(unicode_value, '04x')
else:
s += BACKSLASH + 'U' + format(unicode_value, '08x')
else:
s += c
return s
def to_long(self):
s = ''
escaped = {
'\\': BACKSLASH + BACKSLASH,
'\a': BACKSLASH + 'a',
'\b': BACKSLASH + 'b',
'\f': BACKSLASH + 'f',
'\r': BACKSLASH + 'r',
'\t': BACKSLASH + 't',
'\v': BACKSLASH + 'v',
'\0': BACKSLASH + 'x00',
self.quote[0]: BACKSLASH + self.quote[0],
}
for c in self._s:
if c in escaped.keys():
s += escaped[c]
else:
if self.safe_mode:
unicode_value = ord(c)
if unicode_value <= 0x7F:
s += c
elif unicode_value <= 0xFFFF:
s += BACKSLASH + 'u' + format(unicode_value, '04x')
else:
s += BACKSLASH + 'U' + format(unicode_value, '08x')
else:
s += c
return s
class MiniBytes(object):
"""
Create a representation of a bytes object
:param bytes string: The string to minify
"""
def __init__(self, string, quote="'"):
self._b = string
self.quote = quote
def __str__(self):
"""
The smallest python literal representation of a string
:rtype: str
"""
if self._b == b'':
return ''
if len(self.quote) == 1:
s = self.to_short()
else:
s = self.to_long()
assert eval('b' + self.quote + s + self.quote) == self._b
return s
    def to_short(self):
        b = ''
        # Iterating over a bytes object yields ints, so compare byte values.
        for c in self._b:
            if c == ord('\\'):
                b += BACKSLASH + BACKSLASH
            elif c == ord('\n'):
                b += BACKSLASH + 'n'
            elif c == ord(self.quote):
                b += BACKSLASH + self.quote
            else:
                if c >= 128:
                    # Bytes literals may only contain ASCII, so escape as \xNN.
                    b += BACKSLASH + 'x' + format(c, '02x')
                else:
                    b += chr(c)
        return b
    def to_long(self):
        b = ''
        # Iterating over a bytes object yields ints, so compare byte values.
        for c in self._b:
            if c == ord('\\'):
                b += BACKSLASH + BACKSLASH
            elif c == ord(self.quote[0]):
                b += BACKSLASH + self.quote[0]
            else:
                if c >= 128:
                    # Bytes literals may only contain ASCII, so escape as \xNN.
                    b += BACKSLASH + 'x' + format(c, '02x')
                else:
                    b += chr(c)
        return b
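# Minimal usage sketch (not part of the original module): MiniString yields the
# escaped body of a literal and the caller supplies the surrounding quotes.
if __name__ == '__main__':
    body = str(MiniString("it's\na test", quote="'"))
    literal = "'" + body + "'"
    assert eval(literal) == "it's\na test"
    print(literal)  # 'it\'s\na test'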
| 24.017045 | 75 | 0.399574 | [
"MIT"
] | clbarnes/python-minifier | src/python_minifier/ministring.py | 4,227 | Python |
from django.test import TestCase
from django.urls import reverse
from accounts.factories import UserFactory
from transmittals.factories import create_transmittal
ack_button = '<a id="action-ack-transmittal"'
class TransmittalActionTests(TestCase):
def setUp(self):
self.trs = create_transmittal()
self.doc = self.trs.document
self.category = self.doc.category
self.url = reverse(
"document_detail",
args=[
self.category.organisation.slug,
self.category.slug,
self.doc.document_key,
],
)
self.user = UserFactory(
name="User", password="pass", is_superuser=True, category=self.category
)
self.client.login(username=self.user.email, password="pass")
def test_internal_user_cannot_ack_transmittal(self):
self.assertIsNone(self.trs.ack_of_receipt_date)
self.assertFalse(self.user.is_external)
res = self.client.get(self.url)
self.assertNotContains(res, ack_button)
def test_external_user_can_ack_transmittal(self):
self.user.is_external = True
self.user.save()
res = self.client.get(self.url)
self.assertContains(res, ack_button)
def test_transmittal_cannot_be_acked_twice(self):
self.user.is_external = True
self.trs.ack_receipt(self.user)
self.assertIsNotNone(self.trs.ack_of_receipt_date)
res = self.client.get(self.url)
self.assertNotContains(res, ack_button)
| 30.411765 | 83 | 0.661509 | [
"MIT"
] | PhaseDMS/phase | src/transmittals/tests/test_templates.py | 1,551 | Python |
def path_hack():
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
# print('path added:', sys.path[0])
path_hack()
import traceback
import sys
import urllib.request
from urllib.request import urlopen
import json
from apis import utilities
try:
from apis import my_token
API_TUTOR_TOKEN = my_token.API_TUTOR_TOKEN
except:
title = 'IMPORTANT: You Need an Access Token!'
error_message = '\n\n\n' + '*' * len(title) + '\n' + \
title + '\n' + '*' * len(title) + \
'\nPlease download the the my_token.py file and save it in your apis directory.\n\n'
raise Exception(error_message)
def get_token(url):
try:
response = urlopen(url + '?auth_manager_token=' + API_TUTOR_TOKEN)
data = response.read()
results = data.decode('utf-8', 'ignore')
return json.loads(results)['token']
except urllib.error.HTTPError as e:
# give a good error message:
error = utilities.get_error_message(e, url)
raise Exception(error) | 30.289474 | 92 | 0.667246 | [
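# Hypothetical usage sketch (the URL below is a placeholder, not a real endpoint):
#   token = get_token('https://example.org/spotify/auth_manager')
#   print(token)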
"MIT"
] | n-ryan/spotify-genius | apis/authentication.py | 1,151 | Python |
"""QuizSubmissionFiles API Tests for Version 1.0.
This is a testing template for the generated QuizSubmissionFilesAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.quiz_submission_files import QuizSubmissionFilesAPI
class TestQuizSubmissionFilesAPI(unittest.TestCase):
"""Tests for the QuizSubmissionFilesAPI."""
def setUp(self):
self.client = QuizSubmissionFilesAPI(
secrets.instance_address, secrets.access_token
)
def test_upload_file(self):
"""Integration test for the QuizSubmissionFilesAPI.upload_file method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
| 32.347826 | 126 | 0.752688 | [
"MIT"
] | tylerclair/py3canvas | py3canvas/tests/quiz_submission_files.py | 744 | Python |
from __future__ import absolute_import
import pickle
from kombu.utils.functional import lazy
from celery.five import THREAD_TIMEOUT_MAX, items, range, nextfun
from celery.utils.functional import (
LRUCache,
firstmethod,
first,
mlazy,
padlist,
maybe_list,
)
from celery.tests.case import Case
class test_LRUCache(Case):
def test_expires(self):
limit = 100
x = LRUCache(limit=limit)
slots = list(range(limit * 2))
for i in slots:
x[i] = i
self.assertListEqual(list(x.keys()), list(slots[limit:]))
self.assertTrue(x.items())
self.assertTrue(x.values())
def test_is_pickleable(self):
x = LRUCache(limit=10)
x.update(luke=1, leia=2)
y = pickle.loads(pickle.dumps(x))
        self.assertEqual(y.limit, x.limit)
self.assertEqual(y, x)
def test_update_expires(self):
limit = 100
x = LRUCache(limit=limit)
slots = list(range(limit * 2))
for i in slots:
x.update({i: i})
self.assertListEqual(list(x.keys()), list(slots[limit:]))
def test_least_recently_used(self):
x = LRUCache(3)
x[1], x[2], x[3] = 1, 2, 3
self.assertEqual(list(x.keys()), [1, 2, 3])
x[4], x[5] = 4, 5
self.assertEqual(list(x.keys()), [3, 4, 5])
# access 3, which makes it the last used key.
x[3]
x[6] = 6
self.assertEqual(list(x.keys()), [5, 3, 6])
x[7] = 7
self.assertEqual(list(x.keys()), [3, 6, 7])
def assertSafeIter(self, method, interval=0.01, size=10000):
from threading import Thread, Event
from time import sleep
x = LRUCache(size)
x.update(zip(range(size), range(size)))
class Burglar(Thread):
def __init__(self, cache):
self.cache = cache
self.__is_shutdown = Event()
self.__is_stopped = Event()
Thread.__init__(self)
def run(self):
while not self.__is_shutdown.isSet():
try:
self.cache.data.popitem(last=False)
except KeyError:
break
self.__is_stopped.set()
def stop(self):
self.__is_shutdown.set()
self.__is_stopped.wait()
self.join(THREAD_TIMEOUT_MAX)
burglar = Burglar(x)
burglar.start()
try:
for _ in getattr(x, method)():
sleep(0.0001)
finally:
burglar.stop()
def test_safe_to_remove_while_iteritems(self):
self.assertSafeIter('iteritems')
def test_safe_to_remove_while_keys(self):
self.assertSafeIter('keys')
def test_safe_to_remove_while_itervalues(self):
self.assertSafeIter('itervalues')
def test_items(self):
c = LRUCache()
c.update(a=1, b=2, c=3)
self.assertTrue(list(items(c)))
class test_utils(Case):
def test_padlist(self):
self.assertListEqual(
padlist(['George', 'Costanza', 'NYC'], 3),
['George', 'Costanza', 'NYC'],
)
self.assertListEqual(
padlist(['George', 'Costanza'], 3),
['George', 'Costanza', None],
)
self.assertListEqual(
padlist(['George', 'Costanza', 'NYC'], 4, default='Earth'),
['George', 'Costanza', 'NYC', 'Earth'],
)
def test_firstmethod_AttributeError(self):
self.assertIsNone(firstmethod('foo')([object()]))
def test_firstmethod_handles_lazy(self):
class A(object):
def __init__(self, value=None):
self.value = value
def m(self):
return self.value
self.assertEqual('four', firstmethod('m')([
A(), A(), A(), A('four'), A('five')]))
self.assertEqual('four', firstmethod('m')([
A(), A(), A(), lazy(lambda: A('four')), A('five')]))
def test_first(self):
iterations = [0]
def predicate(value):
iterations[0] += 1
if value == 5:
return True
return False
self.assertEqual(5, first(predicate, range(10)))
self.assertEqual(iterations[0], 6)
iterations[0] = 0
self.assertIsNone(first(predicate, range(10, 20)))
self.assertEqual(iterations[0], 10)
def test_maybe_list(self):
self.assertEqual(maybe_list(1), [1])
self.assertEqual(maybe_list([1]), [1])
self.assertIsNone(maybe_list(None))
class test_mlazy(Case):
def test_is_memoized(self):
it = iter(range(20, 30))
p = mlazy(nextfun(it))
self.assertEqual(p(), 20)
self.assertTrue(p.evaluated)
self.assertEqual(p(), 20)
self.assertEqual(repr(p), '20')
| 27.082873 | 71 | 0.5459 | [
"Apache-2.0"
] | suntao789/Aclsm | site-packages/celery/tests/utils/test_functional.py | 4,902 | Python |
import pytest
import responses
def test_url(client):
assert client.url == "http://0.0.0.0:8580/jsonrpc"
@pytest.mark.parametrize('method', ['status', 'poll', 'kill', 'cli_args'])
def test_default_request(client, method):
expected = {
"jsonrpc": client.jsonrpc_version,
"method": method,
"params": {}
}
resp = client._default_request(method=method)
assert resp["jsonrpc"] == expected["jsonrpc"]
assert resp["method"] == expected["method"]
assert resp["params"] == expected["params"]
def test_selection(client):
expected = {
"models": "@model_1 +model_2+ model_3+",
"select": "snapshot_1 snapshot_2 snapshot_3",
"exclude": "model_4+"
}
data = client._selection(models=["@model_1", "+model_2+", "model_3+", "model_3+"],
select=["snapshot_1", "snapshot_2", "snapshot_3"],
exclude=["model_4+"])
assert set(data["models"].split(' ')) == set(expected["models"].split(' '))
assert set(data["select"].split(' ')) == set(expected["select"].split(' '))
assert set(data["exclude"].split(' ')) == set(expected["exclude"].split(' '))
def test_status(client):
with responses.RequestsMock() as rsps:
expected = {
"result": {
"status": "ready",
"error": "null",
"logs": [],
"timestamp": "2019-10-07T16:30:09.875534Z",
"pid": 76715
},
"id": "2db9a2fe-9a39-41ef-828c-25e04dd6b07d",
"jsonrpc": client.jsonrpc_version
}
rsps.add(responses.POST, client.url,
json=expected, status=202)
resp = client.status()
assert resp.json() == expected
def test_poll(client):
with responses.RequestsMock() as rsps:
expected = {
"result": {
"results": [],
"generated_at": "2019-10-11T18:25:22.477203Z",
"elapsed_time": 0.8381369113922119,
"logs": [],
"tags": {
"command": "run --models my_model",
"branch": "abc123"
},
"status": "success"
},
"id": "2db9a2fe-9a39-41ef-828c-25e04dd6b07d",
"jsonrpc": client.jsonrpc_version
}
rsps.add(responses.POST, client.url,
json=expected, status=202)
resp = client.poll(request_token="f86926fa-6535-4891-8d24-2cfc65d2a347")
assert resp.json() == expected
| 32.607595 | 86 | 0.527562 | [
"MIT"
] | goodeggs/dbt-rpc-client | tests/test_client.py | 2,576 | Python |
from config.configure import Configure
conf = Configure()
conf.model_name = 'vgg16.h5'
conf.classes = ['no_breads', 'breads']
conf.no_breads_path = './dataset/data/pool/no_breads/*'
conf.breads_path = './dataset/data/pool/breads/*'
# conf.baked_breads_path = './dataset/data/pool/breads/*'
conf.lr = 1e-4
conf.momentum = 0.9
conf.batch_size = 20
conf.epochs = 20
conf.image_size = 224
| 24.25 | 57 | 0.726804 | [
"MIT"
] | jphacks/FK_1908 | server/recognition/config/__init__.py | 388 | Python |
import copy
import json
from abc import ABC
from datetime import datetime
from typing import Any
from cyber_sdk.util.converter import to_isoformat
def to_data(x: Any) -> Any:
if "to_data" in dir(x):
return x.to_data()
if isinstance(x, list):
return [to_data(g) for g in x]
if isinstance(x, dict):
return dict_to_data(x)
return x
def to_amino(x: Any) -> Any:
if "to_amino" in dir(x):
return x.to_amino()
if isinstance(x, list):
return [to_data(g) for g in x]
if isinstance(x, dict):
return dict_to_amino(x)
if isinstance(x, int):
return str(x)
    if isinstance(x, datetime):
        return to_isoformat(x)
    # Fall back to the raw value (e.g. str) so it is not silently dropped.
    return x
def dict_to_amino(d: dict):
return {key: to_amino(d[key]) for key in d}
def dict_to_data(d: dict) -> dict:
"""Recursively calls to_data on dict"""
return {key: to_data(d[key]) for key in d}
class JSONSerializable(ABC):
def to_data(self) -> Any:
"""Converts the object to its JSON-serializable Python data representation."""
return dict_to_data(copy.deepcopy(self.__dict__))
def to_json(self) -> str:
"""Marshals the object into a stringified JSON serialization. Keys are first sorted
and the JSON rendered removes all unnecessary whitespace.
Returns:
str: JSON string representation
"""
return json.dumps(self.to_data(), sort_keys=True, separators=(",", ":"))
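# Minimal usage sketch (hypothetical _Coin class, not part of the SDK): any object
# whose attributes are plain data can reuse JSONSerializable.to_json() as-is.
if __name__ == '__main__':
    class _Coin(JSONSerializable):
        def __init__(self, denom, amount):
            self.denom = denom
            self.amount = amount
    print(_Coin('boot', 5).to_json())  # {"amount":5,"denom":"boot"}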
| 26.436364 | 91 | 0.644429 | [
"MIT"
] | SaveTheAles/cyber.py | cyber_sdk/util/json.py | 1,454 | Python |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
lines = open("for_james.csv").read().splitlines()
data = [[float(x) for x in lines[i].split(", ")] for i in range(len(lines))]
# each item in data is a list of floats that can be passed to plt.hist
for i in range(9):
plt.hist(data[i], bins=np.logspace(1, 3, 20))
plt.title(f'Precipitating Energy Distribution at t = {i+0.5} sec')
plt.xscale("log"); plt.yscale("log"); plt.xlabel('Energy (KeV)'); plt.ylabel('Number of Particles')
plt.ylim(10,600); plt.xlim(10,1000)
plt.savefig(f'results/plots/preciphist{i}.png')
plt.clf() | 40.1875 | 103 | 0.682737 | [
"MIT"
] | ethantsai/nlwhistlers | dataPlotter.py | 643 | Python |
#! /usr/bin/python
#
# This is the answer code for the course "Learning from Data" on edX.org
# https://www.edx.org/course/caltechx/cs1156x/learning-data/1120
#
# The software is intended for course usage, no guarantee whatsoever.
# Date: 10/4/2013
# Created by: kirbs
# See notes at bottom for further details.
import sys
import os
import random
import pylab
import scipy
import numpy as np
#############################################################################
#############################################################################
# Returns a list of points with y (indicating 1/-1) as the last element
# and the x,y coordinates for the two points separating line.
# Returns a list of points; each point is a list in the following format.
# [x0, x1, x2, y] i.e. [dummy 1 to represent threshold, x1 value, x2 value, sample points correct sign (+1/-1)]
def generatePoints(numberOfPoints):
## random.seed(1) # used for testing
x1 = random.uniform(-1, 1)
y1 = random.uniform(-1, 1)
x2 = random.uniform(-1, 1)
y2 = random.uniform(-1, 1)
points = []
for i in range (0,numberOfPoints - 1):
## random.seed(1) # used for testing
x = random.uniform (-1, 1)
y = random.uniform (-1, 1)
points.append([1, x, y, targetFunction(x1, y1, x2, y2, x, y)]) # add 1/-1 indicator to the end of each point list
return x1, y1, x2, y2, points
# This function determines the cross product between a line and a given point.
# Returns 1 if above the line and -1 if below the line.
def targetFunction(x1,y1,x2,y2,x3,y3):
u = (x2-x1)*(y3-y1) - (y2-y1)*(x3-x1)
if u >= 0:
return 1
elif u < 0:
return -1
# Simple sign function
def sign(y):
if y >= 0:
return 1
elif y < 0:
return -1
# a.k.a dot product
def perceptronCalc(x, w):
return x[0]*w[0] + x[1]*w[1] + x[2]*w[2]
def train(training_points, iterationLimit):
w = [0.0,0.0,0.0] # initialize weights for w[0], w[1], w[2]
learned = False
iterations = 0 # keep track of the iteration count
# This method is the primary PLA implentation.
# It returns True when all sample points are corectly classfied by the hypothesis.
# Returns False if there was a misclassified point and the weight vector changed.
def updateWeights():
random.shuffle(training_points) # randomize training points
for point in training_points:
result = sign(perceptronCalc(point,w)) # caclulate point and determine its sign.
if point[3] != result: # does sample point's result match our calculated result?
# Use line below to watch the perceptron's weights change
# print str(iterations) + " " + str(w) + " " + str(result) + " " + str(point) + " " + str(perceptronCalc(point))
# if not update weights by sample point's result
w[0] += point[0]*point[3]
w[1] += point[1]*point[3]
w[2] += point[2]*point[3]
return False # break out of loop and return
return True # if the loop reaches this point all calculated points in the training points match their expected y's
while not learned:
iterations += 1
noErrors = updateWeights()
if iterations == iterationLimit or noErrors:
learned = True
break
return iterations, w
# Calculates approximate probability of hypothesis function returns a result
# that is different from the target function.
def findErrorProbability(x1,y1,x2,y2, weights, numberOfPointsToTest):
numberOfErrors = 0
for i in range(0, numberOfPointsToTest-1):
#generate random test points
x = random.uniform(-1,1)
y = random.uniform(-1,1)
#compare results from target function and hypothesis function
if targetFunction(x1,y1,x2,y2,x,y) != sign(perceptronCalc([1,x,y], weights)):
numberOfErrors += 1 # keep track of errors
return numberOfErrors/float(numberOfPointsToTest)
# Runs runTrial specified number of times.
# Returns average iterations, average error probability, and a histogram of trial iteration count.
def runSimulation(numberOfTrials, numberOfTestPoints, iterationLimit):
interations = []
probability = []
for t in range(1,numberOfTrials+1):
iteration_count, w, error_probability = runTrial(numberOfTestPoints, iterationLimit)
interations.append(iteration_count)
probability.append(error_probability)
print "Avg. iterations: " + str(np.mean(interations)) + " : Avg. error probability: " + str(np.mean(probability))
pylab.hist(interations)
pylab.show()
# Runs one trial based on the number of test points desired and an iteration limit to cap run time.
# If showChart is set to True, this function with also return a chart of the points, target function and hypothesis.
# Returns the number of iterations perceptron took to converge, final weights, and the error probability.
def runTrial(numberOfTestPoints, iterationLimit, showChart = False):
x1, y1, x2, y2, points = generatePoints(numberOfTestPoints)
iterations, w = train(points, iterationLimit)
errorProb = findErrorProbability(x1,y1,x2,y2,w, 10000)
if showChart:
if iterations == iterationLimit:
print "No solution found in " + str(iterations) + " iterations!"
print "Iterations: " + str(iterations) + ' | Weights: ' + str(w)
# plot points above(green) and below(blue) the target function.
green_x = []
green_y = []
blue_x = []
blue_y = []
for x in points:
if x[3] == 1:
green_x.append(x[1])
green_y.append(x[2])
else:
blue_x.append(x[1])
blue_y.append(x[2])
pylab.plot(green_x, green_y, 'go')
pylab.plot(blue_x, blue_y, 'bo')
# plot target function(black) and hypothesis function(red) lines
x = np.array( [-1,1] )
slope = (y2-y1)/(x2-x1)
intercept = y2 - slope * x2
pylab.plot(x, slope*x + intercept, 'k--')
pylab.plot( x, -w[1]/w[2] * x - w[0] / w[2] , 'r' ) # this will throw an error if w[2] == 0
pylab.ylim([-1,1])
pylab.xlim([-1,1])
pylab.show()
return iterations, w, errorProb
########################################################################
############################----NOTES----###############################
########################################################################
# Uncomment one line below and reload the script in your favorite Python
# environment. Or load the script and type the method with requireed
# paramaters you want to execute.
########################################################################
########################################################################
# runSimulation takes 3 arguments, number of trials to run, number of test points, and interation limit.
# The higher you set each parameter, the longer this method takes to run.
# This will return the average number of iterations the perceptron took to converge
# and the average error probability.
# Question 7/8
# runSimulation(1000, 10, 100)
# Question 9/10
# runSimulation(1000, 100, 1000)
#########################################################################
#########################################################################
# runTrial takes 3 arguments, number of points, iteration limit, and boolean if a chart should be shown.
# This method returns the number of iteration perceptron took to converge, the final
# weights vector, and the error probability.
# runTrial(10, 100, True) # Show graph of one trial with points, hypothesis (red line), and target funtion (black line).
# runTrial(10, 100) # No chart
# runTrial(10, 100, False) # No chart
| 41.173469 | 129 | 0.584634 | [
"Apache-2.0"
] | freeernest/edX-Learning-From-Data-Solutions | Homework_1/Python/homework_1_by_kirbs.py | 8,070 | Python |
# -*- coding: utf-8 -*-
import sys
from contextlib import contextmanager
from shutil import rmtree as _rmtree
from tempfile import template, mkdtemp, _exists
from cms.apphook_pool import apphook_pool
from django.contrib.auth import get_user_model
from django.utils.six.moves import StringIO
from django.utils.translation import get_language, activate
class NULL:
pass
class StdOverride(object):
def __init__(self, std='out', buffer=None):
self.std = std
self.buffer = buffer or StringIO()
def __enter__(self):
setattr(sys, 'std%s' % self.std, self.buffer)
return self.buffer
def __exit__(self, type, value, traceback):
setattr(sys, 'std%s' % self.std, getattr(sys, '__std%s__' % self.std))
class StdoutOverride(StdOverride):
"""
This overrides Python's the standard output and redirects it to a StringIO
object, so that on can test the output of the program.
example:
lines = None
with StdoutOverride() as buffer:
# print stuff
lines = buffer.getvalue()
"""
def __init__(self, buffer=None):
super(StdoutOverride, self).__init__('out', buffer)
class LanguageOverride(object):
def __init__(self, language):
self.newlang = language
def __enter__(self):
self.oldlang = get_language()
activate(self.newlang)
def __exit__(self, type, value, traceback):
activate(self.oldlang)
class TemporaryDirectory:
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
    Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
def __enter__(self):
return self.name
def cleanup(self):
if _exists(self.name):
_rmtree(self.name)
def __exit__(self, exc, value, tb):
self.cleanup()
class UserLoginContext(object):
def __init__(self, testcase, user):
self.testcase = testcase
self.user = user
def __enter__(self):
loginok = self.testcase.client.login(username=getattr(self.user, get_user_model().USERNAME_FIELD),
password=getattr(self.user, get_user_model().USERNAME_FIELD))
self.old_user = getattr(self.testcase, 'user', None)
self.testcase.user = self.user
self.testcase.assertTrue(loginok)
def __exit__(self, exc, value, tb):
self.testcase.user = self.old_user
if not self.testcase.user:
delattr(self.testcase, 'user')
self.testcase.client.logout()
class ChangeModel(object):
"""
Changes attributes on a model while within the context.
These changes *ARE* saved to the database for the context!
"""
def __init__(self, instance, **overrides):
self.instance = instance
self.overrides = overrides
def __enter__(self):
self.old = {}
for key, value in self.overrides.items():
self.old[key] = getattr(self.instance, key, NULL)
setattr(self.instance, key, value)
self.instance.save()
def __exit__(self, exc, value, tb):
for key in self.overrides.keys():
old_value = self.old[key]
if old_value is NULL:
delattr(self.instance, key)
else:
setattr(self.instance, key, old_value)
self.instance.save()
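# Hypothetical usage sketch (a saved `page` model instance is assumed):
#   with ChangeModel(page, title='Temporary title'):
#       ...  # page.title is 'Temporary title' and persisted inside the block
#   # on exit the original title is restored and saved again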
@contextmanager
def disable_logger(logger):
old = logger.disabled
logger.disabled = True
yield
logger.disabled = old
@contextmanager
def apphooks(*hooks):
_apphooks = apphook_pool.apphooks
_apps = apphook_pool.apps
_discovered = apphook_pool.discovered
apphook_pool.clear()
for hook in hooks:
apphook_pool.register(hook)
try:
yield
finally:
apphook_pool.apphooks = _apphooks
apphook_pool.apps = _apps
apphook_pool.discovered = _discovered
@contextmanager
def signal_tester(*signals):
env = SignalTester()
for signal in signals:
signal.connect(env)
try:
yield env
finally:
for signal in signals:
signal.disconnect(env)
class SignalTester(object):
def __init__(self):
self.call_count = 0
self.calls = []
def __call__(self, *args, **kwargs):
self.call_count += 1
self.calls.append((args, kwargs))
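# Hypothetical usage sketch (the model instance `obj` is assumed):
#   from django.db.models.signals import post_save
#   with signal_tester(post_save) as env:
#       obj.save()
#   assert env.call_count == 1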
| 26.32 | 106 | 0.640035 | [
"BSD-3-Clause"
] | CamelotVG/django-cms | cms/test_utils/util/context_managers.py | 4,606 | Python |
# Copyright 2016-2021 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# [email protected]
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multiplex segmentation application.
Deprecated in favor of ``deepcell.applications.Mesmer`` instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deepcell.applications.mesmer import Mesmer as MultiplexSegmentation
| 42.861111 | 80 | 0.747894 | [
"Apache-2.0"
] | GuillaumeMougeot/deepcell-tf | deepcell/applications/multiplex_segmentation.py | 1,543 | Python |
#ERIMX Made By Paradox4280 aka c2FI, x2Fi, RG9t
import discord, base64, codecs, requests, urllib.parse, datetime, asyncio, sys, praw
import random, aiohttp, io, json, os, string, platform, time, bs4, colorama
from discord.ext import (
commands
)
from discord.voice_client import VoiceClient
# from discord.ext.commands import bot
from bs4 import BeautifulSoup as bs4
from colorama import Fore, Style
from discord import Permissions
from discord.utils import get
from discord import User
from os import system
with open('config.json') as f:
config = json.load(f)
def get_prefix(paradox, message):
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)
    # Return the per-guild prefix; '=' matches the default set in on_guild_join.
    return prefixes.get(str(message.guild.id), '=')
paradox = commands.Bot(command_prefix = get_prefix, case_insensitive = True)
[paradox.load_extension(f"cogs.{cog[:-3]}") for cog in os.listdir("cogs") if cog.endswith(".py")]
@paradox.event
async def on_ready():
    await paradox.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="Her"))
print(f'\n{Fore.GREEN}[>] {Fore.RESET}{Fore.CYAN}Logged in as{Fore.RESET} {Fore.YELLOW}{paradox.user.name}#{paradox.user.discriminator}\n')
print(f'\n{Fore.GREEN}[>]{Fore.RESET} {Fore.CYAN}User ID:{Fore.RESET} {Fore.YELLOW}{paradox.user.id}\n')
print(f'\n{Fore.GREEN}[>]{Fore.RESET} {Fore.CYAN}Version:{Fore.RESET} {Fore.YELLOW}{discord.__version__}\n')
@paradox.event
async def on_command_error(ctx, error):
embed = discord.Embed(description=f'Error. Try =help ({error})', color = 16202876)
await ctx.send(embed = embed)
@paradox.event
async def on_guild_join(guild):
with open('prefixes.json', 'r') as f:
prefixes = json.load(f)
prefixes[str(guild.id)] = '='
with open('prefixes.json', 'w') as f:
json.dump(prefixes, f, indent=4)
@paradox.event
async def on_guild_remove(guild):
with open('prefixes.json', 'r') as f:
prefixes = json.load(f)
prefixes.pop(str(guild.id))
with open('prefixes.json', 'w') as f:
json.dump(prefixes, f, indent=4)
@paradox.command()
async def changeprefix(ctx, prefix):
with open('prefixes.json', 'r') as f:
prefixes = json.load(f)
prefixes[str(ctx.guild.id)] = prefix
with open('prefixes.json', 'w') as f:
json.dump(prefixes, f, indent=4)
embed = discord.Embed(description = f'prefix changed to: {prefix}', color = 16202876)
await ctx.send(embed = embed)
paradox.run(os.getenv('BOT_TOKEN'))
| 33.76 | 144 | 0.669431 | [
"MIT"
] | paradox4280/Erimx | src/bot.py | 2,532 | Python |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Importer definition."""
from typing import Any, Dict, List, Optional, Type, Union
import absl
from tfx import types
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import channel_utils
from tfx.utils import doc_controls
from ml_metadata.proto import metadata_store_pb2
# Constant to access importer importing result from importer output dict.
IMPORT_RESULT_KEY = 'result'
# Constant to access artifact uri from importer exec_properties dict.
SOURCE_URI_KEY = 'artifact_uri'
# Constant to access re-import option from importer exec_properties dict.
REIMPORT_OPTION_KEY = 'reimport'
def _set_artifact_properties(artifact: types.Artifact,
properties: Optional[Dict[str, Any]],
custom_properties: Optional[Dict[str, Any]]):
"""Sets properties and custom_properties to the given artifact."""
if properties is not None:
for key, value in properties.items():
setattr(artifact, key, value)
if custom_properties is not None:
for key, value in custom_properties.items():
if isinstance(value, int):
artifact.set_int_custom_property(key, value)
elif isinstance(value, (str, bytes)):
artifact.set_string_custom_property(key, value)
else:
raise NotImplementedError(
f'Unexpected custom_property value type:{type(value)}')
def _prepare_artifact(
metadata_handler: metadata.Metadata,
uri: str,
properties: Dict[str, Any],
custom_properties: Dict[str, Any],
reimport: bool, output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]
) -> types.Artifact:
"""Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
An Artifact object representing the imported artifact.
"""
absl.logging.info(
'Processing source uri: %s, properties: %s, custom_properties: %s' %
(uri, properties, custom_properties))
# Check types of custom properties.
for key, value in custom_properties.items():
if not isinstance(value, (int, str, bytes)):
raise ValueError(
('Custom property value for key %r must be a string or integer '
'(got %r instead)') % (key, value))
unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(
uri)
# Only consider previous artifacts as candidates to reuse, if the properties
# of the imported artifact match those of the existing artifact.
previous_artifacts = []
for candidate_mlmd_artifact in unfiltered_previous_artifacts:
is_candidate = True
candidate_artifact = output_artifact_class(mlmd_artifact_type)
candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
for key, value in properties.items():
if getattr(candidate_artifact, key) != value:
is_candidate = False
break
for key, value in custom_properties.items():
if isinstance(value, int):
if candidate_artifact.get_int_custom_property(key) != value:
is_candidate = False
break
elif isinstance(value, (str, bytes)):
if candidate_artifact.get_string_custom_property(key) != value:
is_candidate = False
break
if is_candidate:
previous_artifacts.append(candidate_mlmd_artifact)
result = output_artifact_class(mlmd_artifact_type)
result.uri = uri
_set_artifact_properties(result, properties, custom_properties)
# If a registered artifact has the same uri and properties and the user does
# not explicitly ask for reimport, reuse that artifact.
if bool(previous_artifacts) and not reimport:
absl.logging.info('Reusing existing artifact')
result.set_mlmd_artifact(max(previous_artifacts, key=lambda m: m.id))
return result
def generate_output_dict(
metadata_handler: metadata.Metadata,
uri: str,
properties: Dict[str, Any],
custom_properties: Dict[str, Any],
reimport: bool,
output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType] = None
) -> Dict[str, List[types.Artifact]]:
"""Generates importer's output dict.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
a dictionary with the only key `result` whose value is the Artifact.
"""
return {
IMPORT_RESULT_KEY: [
_prepare_artifact(
metadata_handler,
uri=uri,
properties=properties,
custom_properties=custom_properties,
output_artifact_class=output_artifact_class,
mlmd_artifact_type=mlmd_artifact_type,
reimport=reimport)
]
}
class ImporterDriver(base_driver.BaseDriver):
"""Driver for Importer."""
def pre_execution(
self,
input_dict: Dict[str, types.Channel],
output_dict: Dict[str, types.Channel],
exec_properties: Dict[str, Any],
driver_args: data_types.DriverArgs,
pipeline_info: data_types.PipelineInfo,
component_info: data_types.ComponentInfo,
) -> data_types.ExecutionDecision:
# Registers contexts and execution.
contexts = self._metadata_handler.register_pipeline_contexts_if_not_exists(
pipeline_info)
execution = self._metadata_handler.register_execution(
exec_properties=exec_properties,
pipeline_info=pipeline_info,
component_info=component_info,
contexts=contexts)
# Create imported artifacts.
output_channel = output_dict[IMPORT_RESULT_KEY]
output_artifacts = generate_output_dict(
self._metadata_handler,
uri=exec_properties[SOURCE_URI_KEY],
properties=output_channel.additional_properties,
custom_properties=output_channel.additional_custom_properties,
reimport=exec_properties[REIMPORT_OPTION_KEY],
output_artifact_class=output_channel.type)
# Update execution with imported artifacts.
self._metadata_handler.update_execution(
execution=execution,
component_info=component_info,
output_artifacts=output_artifacts,
execution_state=metadata.EXECUTION_STATE_CACHED,
contexts=contexts)
output_dict[IMPORT_RESULT_KEY] = channel_utils.as_channel(
output_artifacts[IMPORT_RESULT_KEY])
return data_types.ExecutionDecision(
input_dict={},
output_dict=output_artifacts,
exec_properties=exec_properties,
execution_id=execution.id,
use_cached_results=False)
class Importer(base_node.BaseNode):
"""Definition for TFX Importer.
The Importer is a special TFX node which registers an external resource into
MLMD so that downstream nodes can use the registered artifact as an input.
Here is an example to use the Importer:
```
importer = Importer(
source_uri='uri/to/schema',
artifact_type=standard_artifacts.Schema,
reimport=False).with_id('import_schema')
schema_gen = SchemaGen(
fixed_schema=importer.outputs['result'],
examples=...)
```
"""
def __init__(self,
source_uri: str,
artifact_type: Type[types.Artifact],
reimport: Optional[bool] = False,
properties: Optional[Dict[str, Union[str, int]]] = None,
custom_properties: Optional[Dict[str, Union[str, int]]] = None):
"""Init function for the Importer.
Args:
source_uri: the URI of the resource that needs to be registered.
artifact_type: the type of the artifact to import.
reimport: whether or not to re-import as a new artifact if the URI has
been imported in before.
properties: Dictionary of properties for the imported Artifact. These
properties should be ones declared for the given artifact_type (see the
PROPERTIES attribute of the definition of the type for details).
custom_properties: Dictionary of custom properties for the imported
Artifact. These properties should be of type Text or int.
"""
self._source_uri = source_uri
self._reimport = reimport
artifact = artifact_type()
_set_artifact_properties(artifact, properties, custom_properties)
# TODO(b/161490287): remove static artifacts.
self._output_dict = {
IMPORT_RESULT_KEY:
types.Channel(
type=artifact_type,
additional_properties=properties,
additional_custom_properties=custom_properties).set_artifacts(
[artifact])
}
super().__init__(driver_class=ImporterDriver)
@property
@doc_controls.do_not_generate_docs
def inputs(self) -> Dict[str, Any]:
return {}
@property
def outputs(self) -> Dict[str, Any]:
"""Output Channel dict that contains imported artifacts."""
return self._output_dict
@property
@doc_controls.do_not_generate_docs
def exec_properties(self) -> Dict[str, Any]:
return {
SOURCE_URI_KEY: self._source_uri,
REIMPORT_OPTION_KEY: int(self._reimport),
}
| 37.607973 | 79 | 0.716343 | [
"Apache-2.0"
] | SunitRoy2703/tfx | tfx/dsl/components/common/importer.py | 11,320 | Python |
from datetime import timedelta
from os import path
from re import sub as regex_sub
from shutil import rmtree
import uuid
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch.dispatcher import receiver
from django.utils import timezone
from validator.models import DatasetConfiguration, User, CopiedValidations
from django.db.models import Q, ExpressionWrapper, F, BooleanField
class ValidationRun(models.Model):
# scaling methods
MIN_MAX = 'min_max'
LINREG = 'linreg'
MEAN_STD = 'mean_std'
NO_SCALING = 'none'
BETA_SCALING = 'cdf_beta_match'
SCALING_METHODS = (
(NO_SCALING, 'No scaling'),
(MIN_MAX, 'Min/Max'),
(LINREG, 'Linear regression'),
(MEAN_STD, 'Mean/standard deviation'),
(BETA_SCALING, 'CDF matching with beta distribution fitting'),
)
# scale to
SCALE_TO_REF = 'ref'
SCALE_TO_DATA = 'data'
SCALE_TO_OPTIONS = (
(SCALE_TO_REF, 'Scale to reference'),
(SCALE_TO_DATA, 'Scale to data')
)
# anomalies
MOVING_AVG_35_D = "moving_avg_35_d"
CLIMATOLOGY = "climatology"
NO_ANOM = "none"
ANOMALIES_METHODS = (
(NO_ANOM, 'Do not calculate'),
(MOVING_AVG_35_D, '35 day moving average'),
(CLIMATOLOGY, 'Climatology'),
)
# upscaling options
NO_UPSCALE = "none"
AVERAGE = "average"
UPSCALING_METHODS = (
(NO_UPSCALE, 'Do not upscale point measurements'),
(AVERAGE, 'Average point measurements'),
)
# fields
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name_tag = models.CharField(max_length=80, blank=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
start_time = models.DateTimeField('started')
end_time = models.DateTimeField('finished', null=True)
total_points = models.IntegerField(default=0)
error_points = models.IntegerField(default=0)
ok_points = models.IntegerField(default=0)
progress = models.IntegerField(default=0)
reference_configuration = models.ForeignKey(to=DatasetConfiguration, on_delete=models.SET_NULL,
related_name='ref_validation_run', null=True)
scaling_ref = models.ForeignKey(to=DatasetConfiguration, on_delete=models.SET_NULL,
related_name='scaling_ref_validation_run', null=True)
scaling_method = models.CharField(max_length=20, choices=SCALING_METHODS, default=MEAN_STD)
interval_from = models.DateTimeField(null=True)
interval_to = models.DateTimeField(null=True)
anomalies = models.CharField(max_length=20, choices=ANOMALIES_METHODS, default=NO_ANOM)
min_lat = models.FloatField(null=True, blank=True, validators=[MinValueValidator(-90.0), MaxValueValidator(90.0)])
min_lon = models.FloatField(null=True, blank=True)
max_lat = models.FloatField(null=True, blank=True, validators=[MinValueValidator(-90.0), MaxValueValidator(90.0)])
max_lon = models.FloatField(null=True, blank=True)
# only applicable if anomalies with climatology is selected
anomalies_from = models.DateTimeField(null=True, blank=True)
anomalies_to = models.DateTimeField(null=True, blank=True)
# upscaling of ISMN point measurements
upscaling_method = models.CharField(max_length=50, choices=UPSCALING_METHODS, default=NO_UPSCALE, blank=True)
temporal_stability = models.BooleanField(default=False)
output_file = models.FileField(null=True, max_length=250, blank=True)
is_archived = models.BooleanField(default=False)
last_extended = models.DateTimeField(null=True, blank=True)
expiry_notified = models.BooleanField(default=False)
doi = models.CharField(max_length=255, blank=True)
publishing_in_progress = models.BooleanField(default=False)
tcol = models.BooleanField(default=False)
bootstrap_tcol_cis = models.BooleanField(default=False)
used_by = models.ManyToManyField(User, through=CopiedValidations, through_fields=('original_run', 'used_by_user'),
related_name='copied_runs')
# many-to-one relationships coming from other models:
# dataset_configurations from DatasetConfiguration
# celery_tasks from CeleryTask
@property
def expiry_date(self):
if (self.is_archived or (self.end_time is None)) and (self.progress != -1):
return None
if self.progress == -1:
initial_date = self.start_time
else:
initial_date = self.last_extended if self.last_extended else self.end_time
return initial_date + timedelta(days=settings.VALIDATION_EXPIRY_DAYS)
@property
def is_expired(self):
e = self.expiry_date
return (e is not None) and (timezone.now() > e)
@property
def is_near_expiry(self):
e = self.expiry_date
return (e is not None) and (timezone.now() > e - timedelta(days=settings.VALIDATION_EXPIRY_WARNING_DAYS))
@property
def is_unpublished(self):
return not self.doi
def archive(self, unarchive=False, commit=True):
if unarchive:
self.extend_lifespan(commit=False)
self.is_archived = False
else:
self.is_archived = True
if commit:
self.save()
def extend_lifespan(self, commit=True):
self.last_extended = timezone.now()
self.expiry_notified = False
if commit:
self.save()
def clean(self):
super(ValidationRun, self).clean()
if self.interval_from is None and self.interval_to is not None:
raise ValidationError({'interval_from': 'What has an end must have a beginning.', })
if self.interval_from is not None and self.interval_to is None:
raise ValidationError({'interval_to': 'What has a beginning must have an end.', })
if self.interval_from is not None and self.interval_to is not None and self.interval_from > self.interval_to:
raise ValidationError({'interval_from': 'From must be before To',
'interval_to': 'From must be before To', })
if self.anomalies == self.CLIMATOLOGY:
if self.anomalies_from is None or self.anomalies_to is None:
raise ValidationError({'anomalies': 'Need valid time period to calculate climatology from.', })
if self.anomalies_from > self.anomalies_to:
raise ValidationError({'anomalies_from': 'Start of climatology period must be before end.',
'anomalies_to': 'Start of climatology period must be before end.', })
else:
if self.anomalies_from is not None or self.anomalies_to is not None:
raise ValidationError(
{'anomalies': 'Time period makes no sense for anomalies calculation without climatology.', })
box = {'min_lat': self.min_lat, 'min_lon': self.min_lon, 'max_lat': self.max_lat, 'max_lon': self.max_lon}
if any(x is None for x in box.values()) and any(x is not None for x in box.values()):
affected_fields = {}
for key, value in box.items():
if value is None:
affected_fields[key] = 'For spatial subsetting, please set all bounding box coordinates.'
raise ValidationError(affected_fields)
def __str__(self):
return "id: {}, user: {}, start: {} )".format(self.id, self.user, self.start_time)
@property
def output_dir_url(self):
if bool(self.output_file) is False:
return None
url = regex_sub('[^/]+$', '', self.output_file.url)
return url
@property
def output_file_name(self):
if bool(self.output_file) is False:
return None
name = self.output_file.name.split('/')[1]
return name
@property
def is_a_copy(self):
copied_runs = CopiedValidations.objects.filter(copied_run_id=self.id)\
.annotate(is_copied=ExpressionWrapper(~Q(copied_run=F('original_run')), output_field=BooleanField())) \
.filter(is_copied=True)
return len(copied_runs) != 0
# delete model output directory on disk when model is deleted
@receiver(post_delete, sender=ValidationRun)
def auto_delete_file_on_delete(sender, instance, **kwargs):
if instance.output_file:
rundir = path.dirname(instance.output_file.path)
if path.isdir(rundir):
rmtree(rundir)
| 40.018349 | 118 | 0.673315 | [
"MIT"
] | awst-austria/qa4sm | validator/models/validation_run.py | 8,724 | Python |
# Redo exercise 009: show the multiplication table of a number chosen by the user, using FOR.
print('=-='*3)
print('TABUADA')
print('=-='*3)
m = 0
n = int(input('Digite o número que deseja saber a tabuada: '))
for c in range(1, 11):
m = n * c
print('{} x {} = {}.'.format(n, c, m))
| 24.666667 | 97 | 0.611486 | [
"MIT"
] | PedroMunizdeMatos/Estudos-e-Projetos | Python 3 - Curso completo/exercicio049.py | 300 | Python |
from functools import lru_cache
import sqlalchemy
class lru_cache_in_transaction: # noqa: N801
"""
Decorator to wrap a function with a memoizing callable that saves up to
the `maxsize` most recent calls. The underlying cache is automatically
cleared at the end of the database transaction.
Since a dictionary is used to cache results, the positional and keyword
arguments to the function must be hashable.
For documentation of the `maxsize` and `typed` arguments, see the
documentation of :py:func:`functools.lru_cache`.
Example::
@lru_cache_in_transaction(session)
def fetch_user(userid):
return session.query(models.User).filter_by(userid=userid).one_or_none()
fetch_user('acct:[email protected]') # => executes a query
fetch_user('acct:[email protected]') # => returns cached value
fetch_user('acct:[email protected]') # => executes a query
session.commit()
fetch_user('acct:[email protected]') # => executes a query
"""
def __init__(self, session, maxsize=128, typed=False):
self._session = session
self._maxsize = maxsize
self._typed = typed
def __call__(self, func):
decorator = lru_cache(maxsize=self._maxsize, typed=self._typed)
wrapped = decorator(func)
on_transaction_end(self._session)(wrapped.cache_clear)
return wrapped
def on_transaction_end(session):
"""
Decorator for a function which should run after a top-level transaction ended.
Transactions that are either implicitly or explicitly committed or rolled back will be
closed at the end of a Pyramid view. This is here for cleaning up caches so that
code after the view, exception views for example, will not be able to access
detached instances.
Example usage:
.. code-block:: python
@util.db.on_transaction_end(session)
def flush_cache():
self._cache = {}
"""
def decorate(func):
def _handler(_, transaction):
# We only clear the cache when the top-level transaction finishes.
if transaction.parent is None:
func()
sqlalchemy.event.listen(session, "after_transaction_end", _handler)
return func
return decorate
| 31.135135 | 90 | 0.676215 | [
"BSD-2-Clause"
] | Brahim109/h | h/util/db.py | 2,304 | Python |
"""
django:
https://docs.djangoproject.com/en/3.0/topics/http/middleware/
https://docs.djangoproject.com/en/3.0/ref/settings/#middleware
"""
MIDDLEWARE = (
"django_prometheus.middleware.PrometheusBeforeMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django_prometheus.middleware.PrometheusAfterMiddleware",
)
| 38.222222 | 66 | 0.77907 | [
"MIT"
] | fabiommendes/capacidade_hospitalar | hcap/settings/general/middleware.py | 688 | Python |
# Copyright (C) 2020-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import pytest
from openvino.tools.pot.configs.config import Config
from .utils.path import TOOL_CONFIG_PATH
ALGORITHM_SETTINGS = {
'wrong_preset': (
{
'name': 'MinMaxQuantization',
'params': {
'perset': 'accuracy',
'stat_subset_size': 1
}
},
'Algorithm MinMaxQuantization. Unknown parameter: perset'
),
'wrong_stats_subset_size': (
{
'name': 'DefaultQuantization',
'params': {
'preset': 'accuracy',
'stats_subset_size': 1
}
},
'Algorithm DefaultQuantization. Unknown parameter: stats_subset_size'
),
'wrong_weights': (
{
'name': 'DefaultQuantization',
'params': {
'activations': {
'bits': 8,
'mode': 'symmetric',
'granularity': 'pertensor',
'range_estimator': {
'preset': 'quantile'
}
},
'weight': {
'bits': 8,
'level_low': -127,
'level_high': 127
},
'stat_subset_size': 1
}
},
'Algorithm DefaultQuantization. Unknown parameter: weight'
),
'wrong_mode': (
{
'name': 'DefaultQuantization',
'params': {
'activations': {
'bits': 8,
'type': 'symmetric',
'granularity': 'pertensor',
'range_estimator': {
'preset': 'quantile'
}
},
'weights': {
'bits': 8,
'level_low': -127,
'level_high': 127
},
'stat_subset_size': 1
}
},
'Algorithm DefaultQuantization. Unknown parameter: type'
),
'wrong_outlier_prob': (
{
'name': 'AccuracyAwareQuantization',
'params': {
'metric_subset_ratio': 0.5,
'ranking_subset_size': 300,
'max_iter_num': 10,
'maximal_drop': 0.005,
'drop_type': 'absolute',
'use_prev_if_drop_increase': False,
'base_algorithm': 'DefaultQuantization',
'activations': {
'bits': 8,
'mode': 'symmetric',
'granularity': 'pertensor',
'range_estimator': {
'preset': 'quantile'
}
},
'weights': {
'bits': 8,
'level_low': -127,
'level_high': 127,
'range_estimator': {
'max': {
'type': 'quantile',
'outlier': 0.0001
}
}
},
'stat_subset_size': 1
}
},
'Algorithm AccuracyAwareQuantization. Unknown parameter: outlier'
),
'wrong_maximal_drop': (
{
'name': 'AccuracyAwareQuantization',
'params': {
'metric_subset_ratio': 0.5,
'ranking_subset_size': 300,
'max_iter_num': 10,
'max_drop': 0.005,
'drop_type': 'absolute',
'use_prev_if_drop_increase': False,
'base_algorithm': 'DefaultQuantization',
'activations': {
'bits': 8,
'mode': 'symmetric',
'granularity': 'pertensor',
'range_estimator': {
'preset': 'quantile'
}
},
'weights': {
'bits': 8,
'level_low': -127,
'level_high': 127,
'range_estimator': {
'max': {
'type': 'quantile',
'outlier_prob': 0.0001
}
}
},
'stat_subset_size': 1
}
},
'Algorithm AccuracyAwareQuantization. Unknown parameter: max_drop'
)
}
@pytest.mark.parametrize(
'algorithm_settings', ALGORITHM_SETTINGS.items(),
ids=['{}_config'.format(os.path.splitext(c)[0]) for c in ALGORITHM_SETTINGS]
)
def test_algo_params_validation(algorithm_settings):
tool_config_path = TOOL_CONFIG_PATH.joinpath('mobilenet-v2-pytorch_single_dataset.json').as_posix()
config = Config.read_config(tool_config_path)
config['compression']['algorithms'][0] = algorithm_settings[1][0]
config_error = algorithm_settings[1][1]
with pytest.raises(RuntimeError, match=config_error):
config.validate_algo_config()
| 32.26875 | 103 | 0.427077 | [
"Apache-2.0"
] | 3Demonica/openvino | tools/pot/tests/test_wrong_config.py | 5,163 | Python |
stevilo = int(input("Select a number between 1 and 100:"))
x = 1
if stevilo >= 1 and stevilo <= 100:
while x <= stevilo:
ostanek_1 = x % 3
ostanek_2 = x % 5
        if ostanek_1 == 0 and ostanek_2 == 0:
            print('fizzbuzz')
        elif ostanek_1 == 0:
            print("fizz")
        elif ostanek_2 == 0:
            print("buzz")
else:
print(x)
x += 1
| 24.882353 | 59 | 0.434988 | [
"MIT"
] | anzpia/FizzBuzz | fizzbuzz.py | 423 | Python |
from backports import tempfile
import numpy as np
import os
import dill
import tensorflow as tf
import zipfile
import baselines.common.tf_util as U
from build_graph import build_act, build_train
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
class ActWrapper(object):
def __init__(self, act, act_params):
self._act = act
self._act_params = act_params
@staticmethod
def load(path, num_cpu=16):
with open(path, "rb") as f:
model_data, act_params = dill.load(f)
act = build_act(**act_params)
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
U.load_state(os.path.join(td, "model"))
return ActWrapper(act, act_params)
def __call__(self, *args, **kwargs):
return self._act(*args, **kwargs)
def save(self, path):
"""Save model to a pickle located at `path`"""
with tempfile.TemporaryDirectory() as td:
U.save_state(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
dill.dump((model_data, self._act_params), f)
def load(path, num_cpu=16):
"""Load act function that was returned by learn function.
Parameters
----------
path: str
path to the act function pickle
num_cpu: int
number of cpus to use for executing the policy
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions.
"""
return ActWrapper.load(path, num_cpu=num_cpu)
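# Hypothetical usage sketch (environment and file name are assumptions, not from this file):
#   env = gym.make("CartPole-v0")
#   act = load("cartpole_model.pkl", num_cpu=4)
#   obs, done = env.reset(), False
#   while not done:
#       obs, rew, done, _ = env.step(act(obs[None])[0])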
def learn(env,
q_func,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=1,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
num_cpu=16,
callback=None):
"""Train a deepq model.
Parameters
-------
env : gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
max_timesteps: int
number of env steps to optimizer for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batched sampled from replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: True
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
num_cpu: int
number of cpus to use for training
callback: (locals, globals) -> None
function called at every steps with state of the algorithm.
If callback returns true training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
def make_obs_ph(name):
return U.BatchInput(env.observation_space.shape, name=name)
act, train, update_target, debug = build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join(td, "model")
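        # Main loop: act in the environment, store transitions, and periodically optimize and sync the target network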
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
action = act(np.array(obs)[None], update_eps=exploration.value(t))[0]
new_obs, rew, done, _ = env.step(action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
U.load_state(model_file)
return ActWrapper(act, act_params)
| 39.182836 | 100 | 0.622322 | [
"MIT"
] | hyperdo/python2-baselines | baselines/deepq/simple.py | 10,501 | Python |
import io
import os.path
_DUMPS = os.path.join(os.path.dirname(__file__), 'dumps')
def load_dump(fname):
with open(os.path.join(_DUMPS, fname), 'rb') as f:
return io.BytesIO(f.read())
| 18.181818 | 57 | 0.665 | [
"MIT"
] | popravich/rdbtools3 | tests/util.py | 200 | Python |
import pytest
from exchange_calendars.exchange_calendar_xshg import XSHGExchangeCalendar
from .test_exchange_calendar import ExchangeCalendarTestBase
from .test_utils import T
class TestXSHGCalendar(ExchangeCalendarTestBase):
@pytest.fixture(scope="class")
def calendar_cls(self):
yield XSHGExchangeCalendar
@pytest.fixture
def max_session_hours(self):
# Shanghai stock exchange is open from 9:30 am to 3pm
yield 5.5
@pytest.fixture
def start_bound(self):
yield T("1999-01-01")
@pytest.fixture
def end_bound(self):
yield T("2025-12-31")
@pytest.fixture
def regular_holidays_sample(self):
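        # Expected Shanghai exchange closures: New Year, Chinese New Year, Qingming, Labour Day, Dragon Boat and National Day holidays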
yield [
# 2017
"2017-01-02",
"2017-01-27",
"2017-01-30",
"2017-01-31",
"2017-02-01",
"2017-02-02",
"2017-04-03",
"2017-04-04",
"2017-05-01",
"2017-05-29",
"2017-05-30",
"2017-10-02",
"2017-10-03",
"2017-10-04",
"2017-10-05",
"2017-10-06",
# 2020
"2020-01-31"
]
| 23.62 | 74 | 0.538527 | [
"Apache-2.0"
] | gerrymanoim/trading_calendars | tests/test_xshg_calendar.py | 1,181 | Python |
# Generated by Django 4.0.3 on 2022-04-06 17:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('records', '0006_alter_records_phasesday'),
]
operations = [
migrations.RenameField(
model_name='records',
old_name='date',
new_name='created_date',
),
]
| 19.631579 | 52 | 0.595174 | [
"MIT"
] | Glucemy/Glucemy-back | records/migrations/0007_rename_date_records_created_date.py | 373 | Python |
import datetime
from typing import Dict, Tuple, Any
import boto3
from botocore.stub import Stubber
from dateutil.tz import tzutc
from dassana.common.aws_client import LambdaTestContext
from json import dumps
import pytest
@pytest.fixture()
def input_s3_with_website(s3_public_bucket_with_website, region):
return {
'bucketName': s3_public_bucket_with_website,
'region': region
}
@pytest.fixture()
def iam_policy():
return {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"ec2:GetDefaultCreditSpecification",
"ec2:GetEbsEncryptionByDefault",
"ec2:ExportClientVpnClientConfiguration",
"ec2:GetCapacityReservationUsage",
"ec2:DescribeVolumesModifications",
"ec2:GetHostReservationPurchasePreview",
"ec2:GetSubnetCidrReservations",
"ec2:GetConsoleScreenshot",
"ec2:GetConsoleOutput",
"ec2:ExportClientVpnClientCertificateRevocationList",
"ec2:GetLaunchTemplateData",
"ec2:GetSerialConsoleAccessStatus",
"ec2:GetFlowLogsIntegrationTemplate",
"ec2:DescribeScheduledInstanceAvailability",
"ec2:GetEbsDefaultKmsKeyId",
"ec2:GetManagedPrefixListEntries",
"ec2:DescribeVpnConnections",
"ec2:DescribeTags",
"ec2:GetCoipPoolUsage",
"ec2:DescribeFastSnapshotRestores",
"ec2:GetReservedInstancesExchangeQuote",
"ec2:GetAssociatedEnclaveCertificateIamRoles",
"ec2:GetPasswordData",
"ec2:GetAssociatedIpv6PoolCidrs",
"ec2:DescribeScheduledInstances",
"ec2:GetManagedPrefixListAssociations",
"ec2:DescribeElasticGpus"
],
"Resource": "*"
}
]
}
@pytest.fixture()
def iam_role_name():
return 'ec2-iam-role'
@pytest.fixture()
def instance_profile_name():
return 'ec2-instance-profile-role'
@pytest.fixture()
def iam_role_arn(iam_client, iam_policy, iam_role_name, instance_profile_name) -> Tuple[Any, Dict[str, Any]]:
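    # Create an IAM role, attach it to a new instance profile, and return (role_arn, instance_profile_info)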
resp = iam_client.create_role(RoleName=iam_role_name, AssumeRolePolicyDocument=dumps(iam_policy))
instance_profile_resp = iam_client.create_instance_profile(
InstanceProfileName=instance_profile_name
)
iam_client.add_role_to_instance_profile(
InstanceProfileName=instance_profile_name,
RoleName=iam_role_name
)
instance_profile_resp = instance_profile_resp.get('InstanceProfile')
return resp['Role']['Arn'], {
'Name': instance_profile_resp.get('InstanceProfileName'),
'Arn': instance_profile_resp.get('Arn')
}
@pytest.fixture()
def ec2_instance_with_role(ec2_client, iam_role_arn, instance_profile_name):
instances = ec2_client.run_instances(ImageId='ami-1234',
MinCount=1,
MaxCount=1,
InstanceType='t2.micro',
IamInstanceProfile=iam_role_arn[1])
instance_id = instances.get('Instances')[0].get('InstanceId')
assoc_resp = ec2_client.associate_iam_instance_profile(IamInstanceProfile=iam_role_arn[1], InstanceId=instance_id)
return instance_id
@pytest.fixture()
def ec2_instance_without_role(ec2_client):
ec2_client.run_instances(ImageId='ami-1234-foobar',
MinCount=1,
MaxCount=1)
instances = ec2_client.describe_instances(
Filters=[
{
'Name': 'image-id',
'Values': ['ami-1234-foobar']
}
]
)['Reservations'][0]['Instances']
return instances[0]['InstanceId']
def test_ec2_instance_with_role(ec2_instance_with_role, iam_role_arn, region):
from handler_ec2_role import handle
result: Dict = handle({'instanceId': ec2_instance_with_role, 'region': region},
LambdaTestContext('foobar', env={},
custom={}))
assert result.get('result').get('roleName') == iam_role_arn[1].get('Name')
assert str.split(result.get('result').get('roleArn'), ':role/') == str.split(iam_role_arn[1].get(
'Arn'), ':instance-profile/')
def test_ec2_instance_without_role(ec2_instance_without_role, region):
from handler_ec2_role import handle
result: Dict = handle({'instanceId': ec2_instance_without_role, 'region': region},
LambdaTestContext('foobar', env={}, custom={}))
assert result.get('result').get('roleArn') == ''
assert result.get('result').get('roleName') == ''
def test_ec2_instance_does_not_exist(ec2_instance_without_role, region):
from handler_ec2_role import handle
result: Dict = handle({'instanceId': 'i-abcd', 'region': region},
LambdaTestContext('foobar', env={}, custom={}))
assert result.get('result').get('roleArn') == ''
assert result.get('result').get('roleName') == ''
| 37.358621 | 118 | 0.603101 | [
"Apache-2.0"
] | XxxSetItOffxxX/dassana | content/actions/what-is-ec2-role/test_ec2_role_handler.py | 5,417 | Python |
# Unit PYG02: Pygame Wall Ball Game version 3 (keyboard-controlled)
import pygame,sys
pygame.init()
vINFO=pygame.display.Info()
print(vINFO)
size = width, height = vINFO.current_w,vINFO.current_h
speed = [1,1]
BLACK = 0, 0, 0
screen = pygame.display.set_mode(size,pygame.FULLSCREEN)
icon=pygame.image.load("1.png")
pygame.display.set_icon(icon)
pygame.display.set_caption("Pygame壁球")
ball = pygame.image.load("PYG02-ball.gif")
ballrect = ball.get_rect()
fps = 300
fclock = pygame.time.Clock()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
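            # LEFT/RIGHT shrink/grow the horizontal speed, DOWN/UP shrink/grow the vertical speed; 'e' exits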
if event.key == pygame.K_LEFT:
speed[0] = speed[0] if speed[0] == 0 else (abs(speed[0]) - 1)*int(speed[0]/abs(speed[0]))
elif event.key == pygame.K_RIGHT:
speed[0] = speed[0] + 1 if speed[0] > 0 else speed[0] - 1
elif event.key == pygame.K_UP:
speed[1] = speed[1] + 1 if speed[1] > 0 else speed[1] - 1
elif event.key == pygame.K_DOWN:
speed[1] = speed[1] if speed[1] == 0 else (abs(speed[1]) - 1)*int(speed[1]/abs(speed[1]))
elif event.key==pygame.K_e:
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
print(repr(event))
ballrect = ballrect.move(speed)
if ballrect.left < 0 or ballrect.right > width:
speed[0] = - speed[0]
if ballrect.top < 0 or ballrect.bottom > height:
speed[1] = - speed[1]
screen.fill(BLACK)
screen.blit(ball, ballrect)
pygame.display.update()
fclock.tick(fps)
| 32.096154 | 105 | 0.59257 | [
"MIT"
] | LZY2006/pygame-small-games | 壁球/壁球游戏2.0/main.py | 1,679 | Python |
import asyncio
import os
from pstats import Stats
from tempfile import NamedTemporaryFile
from aiomisc.service.profiler import Profiler
async def test_profiler_start_stop():
profiler = Profiler(interval=0.1, top_results=10)
try:
await profiler.start()
await asyncio.sleep(0.5)
finally:
await profiler.stop()
async def test_profiler_dump():
profiler = None
fl = NamedTemporaryFile(delete=False)
path = NamedTemporaryFile(delete=False).name
fl.close()
try:
profiler = Profiler(
interval=0.1, top_results=10,
path=path
)
await profiler.start()
# Get first update
await asyncio.sleep(0.01)
stats1 = Stats(path)
# Not enough sleep till next update
await asyncio.sleep(0.01)
stats2 = Stats(path)
# Getting the same dump
assert stats1.stats == stats2.stats
# Enough sleep till next update
await asyncio.sleep(0.2)
stats3 = Stats(path)
# Getting updated dump
assert stats2.stats != stats3.stats
finally:
if profiler:
await profiler.stop()
os.remove(path)
| 23.038462 | 53 | 0.6202 | [
"MIT"
] | MrPainter/aiomisc | tests/test_profiler.py | 1,198 | Python |
# Use legacy numpy printing. This fix is made to keep doctests functional.
# For more info, see https://github.com/scikit-image/scikit-image/pull/2935 .
# TODO: remove this workaround once minimal required numpy is set to 1.14.0
from distutils.version import LooseVersion as Version
import numpy as np
if Version(np.__version__) >= Version('1.14'):
np.set_printoptions(legacy='1.13')
# List of files that pytest should ignore
collect_ignore = ["io/_plugins",]
try:
import visvis
except ImportError:
collect_ignore.append("measure/mc_meta/visual_test.py")
| 35.5625 | 77 | 0.760984 | [
"MIT"
] | 997Yi/Flask-web | venv/Lib/site-packages/skimage/conftest.py | 569 | Python |
import streamlit as st
def render():
st.write("You are in Search a song page")
| 16.8 | 45 | 0.690476 | [
"MIT"
] | AlexCaranha/QueryByHumming | src/search_a_song_page.py | 84 | Python |
from model.contact import Contact
from model.group import Group
import random
def test_del_contact_from_group(app, db):
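    # Make sure a contact and a group exist, then remove a random contact from a random group (if any) and compare UI vs DB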
if app.contact.count() == 0:
app.contact.create_new(Contact(firstname="Contact for deletion", middlename="some middlename", lastname="some last name"))
if len(app.group.get_group_list()) == 0:
app.group.create(Group(name="Group for deletion"))
group_id = app.group.get_random_group_id()
contacts_in_group = app.contact.get_contacts_in_group(group_id)
if len(contacts_in_group) > 0:
contact = random.choice(contacts_in_group)
app.contact.remove_from_group(contact.id, group_id)
contact_ui = app.contact.get_contacts_in_group(group_id)
contact_db = db.get_contacts_in_group(group_id)
print()
print(contact_db)
print(contact_ui)
assert contact_db == contact_ui
    else:
        pass  # the chosen group has no contacts, so there is nothing to remove or verify
#
# contact = app.contact.get_contacts_in_group(group_id)
#
# contacts = db.get_contact_list()
#
# contact = random.choice(contacts)
# app.contact.add_contact_to_group(contact.id, group_id)
#
# contact_db = db.get_contacts_in_group(group_id)
# assert contact_db == contact_ui | 36.848485 | 130 | 0.697368 | [
"Apache-2.0"
] | vatanov/python_training | test/test_del_contact_from_group.py | 1,216 | Python |
from .config import AppConfig
from .registry import apps
__all__ = ['AppConfig', 'apps']
| 18 | 31 | 0.744444 | [
"MIT"
] | 0724654276/Awwards | venv/lib/python3.8/site-packages/django/apps/__init__.py | 90 | Python |
from asyncio.log import logger
from django.apps import AppConfig
class NamesConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "derbot.names"
def ready(self):
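        # Importing the signals module registers the model signal handlers as a side effect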
import derbot.names.signals
logger.info(f"Signals loaded: {derbot.names.signals}")
| 21.642857 | 62 | 0.716172 | [
"MIT"
] | bdunnette/derbot-docker | derbot/names/apps.py | 303 | Python |
# coding: utf-8
from __future__ import unicode_literals
import unittest
import os
import shutil
import numpy as np
from monty.json import MontyDecoder
from pymatgen.io.vasp.sets import MITVaspInputSet, MITHSEVaspInputSet, \
MPVaspInputSet, MITGGAVaspInputSet, MITNEBVaspInputSet,\
MPStaticVaspInputSet, MPNonSCFVaspInputSet, MITMDVaspInputSet,\
MPHSEVaspInputSet, MPBSHSEVaspInputSet, MPStaticDielectricDFPTVaspInputSet,\
MPOpticsNonSCFVaspInputSet
from pymatgen.io.vasp.inputs import Poscar, Incar
from pymatgen import Specie, Lattice, Structure
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
dec = MontyDecoder()
class MITMPVaspInputSetTest(unittest.TestCase):
def setUp(self):
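        # Point pymatgen at the bundled test files when no pseudopotential directory is configured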
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitparamset = MITVaspInputSet()
self.mitparamset_unsorted = MITVaspInputSet(sort_structure=False)
self.mithseparamset = MITHSEVaspInputSet()
self.paramset = MPVaspInputSet()
self.userparamset = MPVaspInputSet(
user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
)
self.mitggaparam = MITGGAVaspInputSet()
self.mpstaticparamset = MPStaticVaspInputSet()
self.mpnscfparamsetu = MPNonSCFVaspInputSet(
{"NBANDS": 50}, mode="Uniform")
self.mpnscfparamsetl = MPNonSCFVaspInputSet(
{"NBANDS": 60}, mode="Line")
self.mphseparamset = MPHSEVaspInputSet()
self.mpbshseparamsetl = MPBSHSEVaspInputSet(mode="Line")
self.mpbshseparamsetu = MPBSHSEVaspInputSet(
mode="Uniform", added_kpoints=[[0.5, 0.5, 0.0]])
self.mpdielparamset = MPStaticDielectricDFPTVaspInputSet()
def test_get_poscar(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Fe", "Mn"], coords)
s_unsorted = self.mitparamset_unsorted.get_poscar(struct).structure
s_sorted = self.mitparamset.get_poscar(struct).structure
self.assertEqual(s_unsorted[0].specie.symbol, 'Fe')
self.assertEqual(s_sorted[0].specie.symbol, 'Mn')
def test_get_potcar_symbols(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe", "O"], coords)
syms = self.paramset.get_potcar_symbols(struct)
self.assertEqual(syms, ['Fe_pv', 'P', 'O'])
syms = MPVaspInputSet(sort_structure=False).get_potcar_symbols(struct)
self.assertEqual(syms, ['P', 'Fe_pv', 'O'])
def test_false_potcar_hash(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe", "O"], coords)
self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe_pv'
self.assertRaises(ValueError, self.mitparamset.get_potcar, struct, check_hash=True)
self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe'
def test_lda_potcar(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe"], coords)
p = MITVaspInputSet(potcar_functional="LDA").get_potcar(struct)
self.assertEqual(p.functional, 'LDA')
def test_get_nelect(self):
coords = [[0]*3, [0.5]*3, [0.75]*3]
lattice = Lattice.cubic(4)
s = Structure(lattice, ['Si', 'Si', 'Fe'], coords)
self.assertAlmostEqual(MITVaspInputSet().get_nelect(s), 16)
def test_get_incar(self):
incar = self.paramset.get_incar(self.struct)
self.assertEqual(incar['LDAUU'], [5.3, 0, 0])
self.assertAlmostEqual(incar['EDIFF'], 0.0012)
incar = self.mitparamset.get_incar(self.struct)
self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
self.assertAlmostEqual(incar['EDIFF'], 0.0012)
incar_gga = self.mitggaparam.get_incar(self.struct)
self.assertNotIn("LDAU", incar_gga)
incar_static = self.mpstaticparamset.get_incar(self.struct)
self.assertEqual(incar_static["NSW"], 0)
incar_nscfl = self.mpnscfparamsetl.get_incar(self.struct)
self.assertEqual(incar_nscfl["NBANDS"], 60)
incar_nscfu = self.mpnscfparamsetu.get_incar(self.struct)
self.assertEqual(incar_nscfu["ISYM"], 0)
incar_hse = self.mphseparamset.get_incar(self.struct)
self.assertEqual(incar_hse['LHFCALC'], True)
self.assertEqual(incar_hse['HFSCREEN'], 0.2)
incar_hse_bsl = self.mpbshseparamsetl.get_incar(self.struct)
self.assertEqual(incar_hse_bsl['LHFCALC'], True)
self.assertEqual(incar_hse_bsl['HFSCREEN'], 0.2)
self.assertEqual(incar_hse_bsl['NSW'], 0)
incar_hse_bsu = self.mpbshseparamsetu.get_incar(self.struct)
self.assertEqual(incar_hse_bsu['LHFCALC'], True)
self.assertEqual(incar_hse_bsu['HFSCREEN'], 0.2)
self.assertEqual(incar_hse_bsu['NSW'], 0)
incar_diel = self.mpdielparamset.get_incar(self.struct)
self.assertEqual(incar_diel['IBRION'], 8)
self.assertEqual(incar_diel['LEPSILON'], True)
si = 14
coords = list()
coords.append(np.array([0, 0, 0]))
coords.append(np.array([0.75, 0.5, 0.75]))
#Silicon structure for testing.
latt = Lattice(np.array([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]))
struct = Structure(latt, [si, si], coords)
incar = self.paramset.get_incar(struct)
self.assertNotIn("LDAU", incar)
incar = self.mithseparamset.get_incar(self.struct)
self.assertTrue(incar['LHFCALC'])
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Fe", "Mn"], coords)
incar = self.paramset.get_incar(struct)
self.assertNotIn('LDAU', incar)
#check fluorides
struct = Structure(lattice, ["Fe", "F"], coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [5.3, 0])
self.assertEqual(incar['MAGMOM'], [5, 0.6])
struct = Structure(lattice, ["Fe", "F"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [4.0, 0])
#Make sure this works with species.
struct = Structure(lattice, ["Fe2+", "O2-"], coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [5.3, 0])
struct = Structure(lattice, ["Fe", "Mn"], coords,
site_properties={'magmom': (5.2, -4.5)})
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
incar = self.mpstaticparamset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
incar = self.mitparamset_unsorted.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [5.2, -4.5])
struct = Structure(lattice, [Specie("Fe", 2, {'spin': 4.1}), "Mn"],
coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [5, 4.1])
incar = self.mpnscfparamsetl.get_incar(struct)
self.assertEqual(incar.get('MAGMOM', None), None)
struct = Structure(lattice, ["Mn3+", "Mn4+"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [4, 3])
incar = self.mpnscfparamsetu.get_incar(struct)
self.assertEqual(incar.get('MAGMOM', None), None)
self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
[100, 0.6])
#sulfide vs sulfate test
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.25, 0.5, 0])
struct = Structure(lattice, ["Fe", "Fe", "S"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [1.9, 0])
#Make sure Matproject sulfides are ok.
self.assertNotIn('LDAUU', self.paramset.get_incar(struct))
self.assertNotIn('LDAUU', self.mpstaticparamset.get_incar(struct))
struct = Structure(lattice, ["Fe", "S", "O"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
#Make sure Matproject sulfates are ok.
self.assertEqual(self.paramset.get_incar(struct)['LDAUU'], [5.3, 0, 0])
self.assertEqual(self.mpnscfparamsetl.get_incar(struct)['LDAUU'],
[5.3, 0, 0])
self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
[10, -5, 0.6])
def test_optics(self):
self.mpopticsparamset = MPOpticsNonSCFVaspInputSet.from_previous_vasp_run(
'{}/static_silicon'.format(test_dir), output_dir='optics_test_dir',
nedos=1145)
self.assertTrue(os.path.exists('optics_test_dir/CHGCAR'))
incar = Incar.from_file('optics_test_dir/INCAR')
self.assertTrue(incar['LOPTICS'])
self.assertEqual(incar['NEDOS'], 1145)
#Remove the directory in which the inputs have been created
shutil.rmtree('optics_test_dir')
def test_get_kpoints(self):
kpoints = self.paramset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mitparamset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mpstaticparamset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[6, 6, 4]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mpnscfparamsetl.get_kpoints(self.struct)
self.assertEqual(kpoints.num_kpts, 140)
self.assertEqual(kpoints.style, 'Reciprocal')
kpoints = self.mpnscfparamsetu.get_kpoints(self.struct)
self.assertEqual(kpoints.num_kpts, 168)
kpoints = self.mpbshseparamsetl.get_kpoints(self.struct)
self.assertAlmostEqual(kpoints.num_kpts, 164)
self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)
self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)
self.assertAlmostEqual(kpoints.kpts[26][0], 0.0714285714286)
self.assertAlmostEqual(kpoints.kpts[26][1], 0.0)
self.assertAlmostEqual(kpoints.kpts[26][2], 0.0)
self.assertAlmostEqual(kpoints.kpts[-1][0], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][2], 0.5)
kpoints = self.mpbshseparamsetu.get_kpoints(self.struct)
self.assertAlmostEqual(kpoints.num_kpts, 25)
self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)
self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)
self.assertAlmostEqual(kpoints.kpts[-1][0], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][2], 0.0)
def test_get_all_vasp_input(self):
d = self.mitparamset.get_all_vasp_input(self.struct)
self.assertEqual(d["INCAR"]["ISMEAR"], -5)
self.struct.make_supercell(4)
d = self.mitparamset.get_all_vasp_input(self.struct)
self.assertEqual(d["INCAR"]["ISMEAR"], 0)
def test_to_from_dict(self):
self.mitparamset = MITVaspInputSet()
self.mithseparamset = MITHSEVaspInputSet()
self.paramset = MPVaspInputSet()
self.userparamset = MPVaspInputSet(
user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
)
d = self.mitparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 4)
d = self.mitggaparam.as_dict()
v = dec.process_decoded(d)
self.assertNotIn("LDAUU", v.incar_settings)
d = self.mithseparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LHFCALC"], True)
d = self.mphseparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LHFCALC"], True)
d = self.paramset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 5.3)
d = self.userparamset.as_dict()
v = dec.process_decoded(d)
#self.assertEqual(type(v), MPVaspInputSet)
self.assertEqual(v.incar_settings["MAGMOM"],
{"Fe": 10, "S": -5, "Mn3+": 100})
class MITMDVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitmdparam = MITMDVaspInputSet(300, 1200, 10000)
def test_get_potcar_symbols(self):
syms = self.mitmdparam.get_potcar_symbols(self.struct)
self.assertEqual(syms, ['Fe', 'P', 'O'])
def test_get_incar(self):
incar = self.mitmdparam.get_incar(self.struct)
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar['EDIFF'], 2.4e-5)
def test_get_kpoints(self):
kpoints = self.mitmdparam.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [(1, 1, 1)])
self.assertEqual(kpoints.style, 'Gamma')
def test_to_from_dict(self):
d = self.mitmdparam.as_dict()
v = dec.process_decoded(d)
self.assertEqual(type(v), MITMDVaspInputSet)
self.assertEqual(v.incar_settings["TEBEG"], 300)
class MITNEBVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.vis = MITNEBVaspInputSet(nimages=10, hubbard_off=True)
def test_get_potcar_symbols(self):
syms = self.vis.get_potcar_symbols(self.struct)
self.assertEqual(syms, ['Fe', 'P', 'O'])
def test_get_incar(self):
incar = self.vis.get_incar(self.struct)
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar['EDIFF'], 0.00005)
def test_get_kpoints(self):
kpoints = self.vis.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
def test_to_from_dict(self):
d = self.vis.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["IMAGES"], 10)
def test_write_inputs(self):
c1 = [[0.5] * 3, [0.9] * 3]
c2 = [[0.5] * 3, [0.9, 0.1, 0.1]]
s1 = Structure(Lattice.cubic(5), ['Si', 'Si'], c1)
s2 = Structure(Lattice.cubic(5), ['Si', 'Si'], c2)
structs = []
for s in s1.interpolate(s2, 3, pbc=True):
structs.append(Structure.from_sites(s.sites,
to_unit_cell=True))
fc = self.vis._process_structures(structs)[2].frac_coords
self.assertTrue(np.allclose(fc, [[0.5]*3,[0.9, 1.033333, 1.0333333]]))
if __name__ == '__main__':
unittest.main()
| 39.940048 | 91 | 0.616331 | [
"MIT"
] | rousseab/pymatgen | pymatgen/io/vasp/tests/test_sets.py | 16,655 | Python |
import logging
import configparser
import os
from utils import bool_query
class BreakRule(object):
def __init__(self, settings):
self.settings = settings
self.rules_record = configparser.ConfigParser()
self.rules_record.read("{}/tms/breakrules.ini".format(os.getcwd()))
self.rules = {}
for rule_id in self.rules_record.sections():
self.rules[rule_id] = self.rules_record.get(rule_id, "Description")
def _check_rule_exists(self, rule_id):
if self.rules.get(rule_id, None) is None:
logging.warning("Rule {} doesn't exist".format(rule_id))
return False
else:
logging.debug("Rule {} exists".format(rule_id))
return True
def _update_break_rule(self, rule_id):
self.settings.set("Settings", "BreakRule", rule_id)
with open("{}/tms/settings.ini".format(os.getcwd()), 'w') as configfile:
self.settings.write(configfile)
logging.info("Break rule changed to rule {}".format(self.settings.get("Settings", "BreakRule")))
def print_rules(self):
logging.info("Break Rules: ")
for rule_id in self.rules:
logging.info(' [{}] {}'.format(rule_id, self.rules[rule_id]))
def get_break_rule(self, desired_rule_id=None):
if not desired_rule_id: desired_rule_id = self.settings.get("Settings", "BreakRule")
if self._check_rule_exists(desired_rule_id):
for rule_id in self.rules:
if rule_id == desired_rule_id:
logging.info(' [{}] {}'.format(rule_id, self.rules[desired_rule_id]))
def cmd_update_break_rule(self):
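        # Prompt until the user enters a valid numeric rule ID, then confirm and apply it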
self.print_rules()
selection_query = None
while selection_query is None:
logging.info('Please enter the ID of the rule to be used...')
selection = input()
try:
int(selection)
except ValueError:
logging.warning('WARNING: Please enter a numeric value corresponding to a rule ID.')
else:
if self._check_rule_exists(selection):
                    selection_query = bool_query('Select Rule "{}" for use?'.format(selection), default="y")
self._update_break_rule(selection)
| 37.57377 | 108 | 0.616492 | [
"MIT"
] | marmstr93ng/TimeManagementSystem | tms/breakrule.py | 2,292 | Python |