ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 7df736cfbfa2302306d3cd24b8d8cc95a7c820ec | #%%
'''
<reference>
re-implementation of https://github.com/zhixuhao/unet
- binary classification only
'''
#%%
# import tensorflow as tf
# tf.keras.preprocessing.image.ImageDataGenerator
import tensorflow.keras as keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import *
from keras.layers import *
from keras.optimizers import *
# from keras.callbacks import ModelCheckpoint, LearningRateScheduler
# from keras import backend as keras
#%%
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
import skimage.io as io
import skimage.transform as trans
from skimage import img_as_ubyte
os.chdir('/home/jeon/Desktop/an/image_segmentation')
#%%
'''image input normalization'''
def normalize(img, mask):
img = img / 255.0
mask = mask / 255.0
mask[mask > 0.5] = 1.0
mask[mask <= 0.5] = 0.0
return (img, mask)
#%%
'''augmentation'''
data_gen_args = dict(rotation_range=0.2,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
horizontal_flip=True,
fill_mode='nearest')
#%%
batch_size = 20
train_path = 'data/membrane/train'
image_folder = 'image'
mask_folder = 'label'
aug_dict = data_gen_args
image_color_mode = "grayscale"
mask_color_mode = "grayscale"
image_save_prefix = "image"
mask_save_prefix = "mask"
save_to_dir = "data/membrane/train/aug"
target_size = (256, 256)
seed = 1
#%%
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
image_generator = image_datagen.flow_from_directory(
train_path,
classes = [image_folder],
class_mode = None,
color_mode = image_color_mode,
target_size = target_size,
batch_size = batch_size,
save_to_dir = save_to_dir,
save_prefix = image_save_prefix,
seed = seed)
mask_generator = mask_datagen.flow_from_directory(
train_path,
classes = [mask_folder],
class_mode = None,
color_mode = mask_color_mode,
target_size = target_size,
batch_size = batch_size,
save_to_dir = save_to_dir,
save_prefix = mask_save_prefix,
seed = seed)
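# Because both generators share the same seed, the random transforms applied to
# images and masks stay synchronized, so zipping them yields matching (image, mask) batches.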
traingenerator = zip(image_generator, mask_generator)
# save the image and mask every time augmentation is performed
sampleimg, samplemask = next(iter(traingenerator))
print(sampleimg.shape)
print(samplemask.shape)
#%%
del image_datagen
del mask_datagen
del traingenerator
#%%
def BuildTrainGenerator(batch_size,
train_path,
image_folder,
mask_folder,
aug_dict,
image_color_mode = "grayscale",
mask_color_mode = "grayscale",
image_save_prefix = "image",
mask_save_prefix = "mask",
save_to_dir = None,
target_size = (256, 256), # reshape image 512x512 -> 256x256
seed = 1):
image_datagen = ImageDataGenerator(**aug_dict)
mask_datagen = ImageDataGenerator(**aug_dict)
image_generator = image_datagen.flow_from_directory(
train_path,
classes = [image_folder],
class_mode = None,
color_mode = image_color_mode,
target_size = target_size,
batch_size = batch_size,
save_to_dir = save_to_dir,
save_prefix = image_save_prefix,
seed = seed)
mask_generator = mask_datagen.flow_from_directory(
train_path,
classes = [mask_folder],
class_mode = None,
color_mode = mask_color_mode,
target_size = target_size,
batch_size = batch_size,
save_to_dir = save_to_dir,
save_prefix = mask_save_prefix,
seed = seed)
train_generator = zip(image_generator, mask_generator)
for (img, mask) in train_generator:
img, mask = normalize(img, mask)
yield (img, mask) # generate image on demand
#%%
traingenerator = BuildTrainGenerator(10,
'data/membrane/train',
'image',
'label',
data_gen_args)
#%%
'''
model architecture
(# of parameters: 31,031,685)
'''
def BuildUnet(input_size = (256, 256, 1)):
# input_size = (256, 256, 1)
'''contracting path'''
inputs = Input(input_size)
conv1 = Conv2D(filters = 64, kernel_size = 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1) # 256x256x64
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) # 128x128x64
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2) # 128x128x128
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) # 64x64x128
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3) # 64x64x256
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) # 32x32x256
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4) # 32x32x512
drop4 = Dropout(0.5)(conv4) # 32x32x512, implicit augmentation
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4) # 16x16x512
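# Each encoder block doubles the channel count while every 2x2 max-pooling halves the
# spatial resolution: 256 -> 128 -> 64 -> 32 -> 16.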
'''bottle-neck'''
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5) # 16x16x1024, implicit augmentation
'''expanding path'''
updrop5 = UpSampling2D(size = (2, 2))(drop5) # 32x32x1024
up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(updrop5) # 32x32x512
merge6 = concatenate([drop4, up6], axis = 3) # skip connection
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
upconv6 = UpSampling2D(size = (2, 2))(conv6) # 64x64x512
up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(upconv6) #64x64x256
merge7 = concatenate([conv3, up7], axis = 3)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
upconv7 = UpSampling2D(size = (2, 2))(conv7) # 128x128x256
up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(upconv7) # 128x128x128
merge8 = concatenate([conv2, up8], axis = 3)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
upconv8 = UpSampling2D(size = (2, 2))(conv8) # 256x256x128
up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(upconv8) # 256x256x64
merge9 = concatenate([conv1, up9], axis = 3)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9) # 256x256x2, final feature map
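# 2-channel feature map kept from the reference implementation; the 1x1 sigmoid
# convolution below collapses it into a single-channel probability mask.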
'''output layer'''
conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = Model(inputs, conv10)
model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])
model.summary()
return model
#%%
'''train'''
model = BuildUnet()
#%%
# model_checkpoint = ModelCheckpoint('./assets/unet_membrane.hdf5', monitor='loss', verbose=1, save_best_only=True)
# model.fit(traingenerator, steps_per_epoch=10, epochs=1, callbacks=[model_checkpoint])
model.fit(traingenerator, steps_per_epoch=4000, epochs=5) # no callbacks
# last accuracy: 0.9791
#%%
# '''last feature map'''
# model2 = Model(inputs, conv9)
# testgenerator = BuildTestGenerator("data/membrane/test")
# results = model2.predict(testgenerator, 30, verbose=1)
# print(np.all(results >= 0))
# plt.figure(figsize=(10, 10))
# plt.imshow(results[0][:, :, 0])
# plt.figure(figsize=(10, 10))
# plt.imshow(results[0][:, :, 1])
#%%
model.save_weights('./assets/weights')
#%%
imported = BuildUnet()
imported.load_weights('./assets/weights').expect_partial()
#%%
def BuildTestGenerator(test_path,
num_image = 30,
target_size = (256, 256),
as_gray = True):
for i in range(num_image):
img = io.imread(os.path.join(test_path, "{}.png".format(i)), as_gray = as_gray)
img = img / 255.0
img = trans.resize(img, target_size)
img = np.reshape(img, img.shape + (1,))
img = np.reshape(img, (1,)+img.shape)
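# the two reshapes add a channel axis and then a batch axis, giving shape (1, 256, 256, 1)
# as the model expects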
yield img
#%%
def saveResult(save_path, npyfile):
for i, item in enumerate(npyfile):
img = item[:, :, 0]
io.imsave(os.path.join(save_path, "predict_{}.png".format(i)), img_as_ubyte(img))
#%%
'''test result'''
testgenerator = BuildTestGenerator("data/membrane/test")
results = imported.predict(testgenerator, 30, verbose=1)
saveResult("data/membrane/test", results)
#%%
'''test result'''
testgenerator = BuildTestGenerator("data/membrane/test")
for i, testimg in enumerate(testgenerator):
fig, axes = plt.subplots(1, 2, figsize=(6, 3))
axes.flatten()[0].imshow(trans.resize(testimg[0, ...], (256, 256, 1)), 'gray')
axes.flatten()[1].imshow(results[i, ...], 'gray')
plt.tight_layout()
plt.show()
plt.close()
if (i >= 4): break
#%% |
py | 7df737cafd79bc491f44262fb9df78eb441028cf | from extutils.logger import LoggerSkeleton
logger = LoggerSkeleton("mongo.main", logger_name_env="MONGO_UTILS")
|
py | 7df73804a0bb6251a735d3fa73f05fd9d8a8aeb0 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
from waflib import Utils
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_sxx(conf):
v=conf.env
cc=conf.find_program(['CC','c++'],var='CXX')
try:
conf.cmd_and_log(cc+['-flags'])
except Exception:
conf.fatal('%r is not a Sun compiler'%cc)
v.CXX_NAME='sun'
conf.get_suncc_version(cc)
@conf
def sxx_common_flags(conf):
v=conf.env
v['CXX_SRC_F']=[]
v['CXX_TGT_F']=['-c','-o']
if not v['LINK_CXX']:v['LINK_CXX']=v['CXX']
v['CXXLNK_SRC_F']=[]
v['CXXLNK_TGT_F']=['-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['SONAME_ST']='-Wl,-h,%s'
v['SHLIB_MARKER']='-Bdynamic'
v['STLIB_MARKER']='-Bstatic'
v['cxxprogram_PATTERN']='%s'
v['CXXFLAGS_cxxshlib']=['-Kpic','-DPIC']
v['LINKFLAGS_cxxshlib']=['-G']
v['cxxshlib_PATTERN']='lib%s.so'
v['LINKFLAGS_cxxstlib']=['-Bstatic']
v['cxxstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_sxx()
conf.find_ar()
conf.sxx_common_flags()
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
|
py | 7df738196eb5d5d3583292d061d683a4bda81ca6 | #!/usr/bin/python3
from PIL import Image
import numpy as np
import h5py
import argparse
import random
import os
from subprocess import call
from typing import *
'''
Squash a list of images into HDF5 without any label, unlike util.imageset.py
'''
## Parse command line
parser = argparse.ArgumentParser()
parser.add_argument('-l', type=str, action='store', dest='list',
required=True, help='image list txt file')
parser.add_argument('-o', type=str, action='store', dest='output',
default=__file__+'.h5', help='output hdf5 path')
parser.add_argument('-p', type=int, action='store', dest='pixels',
required=True, help='output image size')
parser.add_argument('-c', action='store_true', dest='c',
default=False, help='use HDF5 compression?')
parser.add_argument('-f', action='store_true', dest='force',
default=False, help='force overwrite the destination')
parser.add_argument('-v', action='store_true', dest='view',
default=False, help='show example image')
args = parser.parse_args()
## Configure
compargs = {'compression':'gzip', 'compression_opts':6} if args.c else {}
## Helpers
def readlist(_fpath):
# -> list[ list[ str(path) ] ]
with open(_fpath, 'r') as f:
l = [ l.strip() for l in f.readlines() ]
return l
def fillhdf5(_h5, _list, _group):
for i, line in enumerate(_list, 1):
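# \0337 / \0338 are the DEC save/restore-cursor escape sequences, so the progress line
# below is redrawn in place on terminals that support them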
print('\0337* {:3.1f}% | {}\0338'.format(i*100/len(_list), line), end='')
if i < 10: print(repr(line))
image = Image.open(line).resize((args.pixels, args.pixels), Image.BILINEAR)
image = image.convert('RGB') # RGBA/... -> RGB
if i == 1 and args.view: image.show()
if i < 10: print('\t', image)
# image -> [0,255], H,W,C
image = np.asarray(image) # Image -> Numpy
# HWC -> CHW
image = image.transpose((2,0,1)) #image.swapaxes(0,2) roration:left:pi/4
_h5[_group+'/images'][i-1,:,:,:] = image
if i == 1 and args.view:
Image.fromarray(_h5[_group+'/images'][i-1,:,:,:].transpose((1,2,0)), mode='RGB').show()
def createdsets(_h5, _list, _impath):
# Chunks is crucial to compression performance
# https://stackoverflow.com/questions/41771992/hdf5-adding-numpy-arrays-slow
# https://stackoverflow.com/questions/16786428/compression-performance-related-to-chunk-size-in-hdf5-files
# https://support.hdfgroup.org/HDF5/doc/Advanced/Chunking/Chunking_Tutorial_EOS13_2009.pdf
_h5.create_dataset(_impath, # N x C x H x W, np.ubyte (not np.byte! that will cause problem)
(len(_list), 3, args.pixels, args.pixels), dtype=np.ubyte,
chunks=(1, 3, args.pixels, args.pixels), **compargs)
# Read list files
imagelist = readlist(args.list)
print('-> Found {} images'.format(len(imagelist)))
# Create output file
if os.path.exists(args.output):
if not args.force: raise SystemExit('HDF5 file {} already exists!'.format(args.output))
h5 = h5py.File(args.output, 'w')
# Fill HDF5
createdsets(h5, imagelist, '/images')
fillhdf5(h5, imagelist, '')
print(' *> processed {} images'.format(len(imagelist)))
# Write to disk
h5.close()
print('-> Dataset saved as {}'.format(args.output))
call(['sleep', '0.1'])
call(['h5ls', '-rv', args.output ])
|
py | 7df7381bcc3b9b768c71057064e225444db9d8ec | import os
import sys
import traceback
workers = 4
worker_connections = 256
errorlog = "/home/vcap/logs/gunicorn_error.log"
bind = "0.0.0.0:{}".format(os.getenv("PORT", "5000"))
def on_starting(server):
server.log.info("Starting Team Metrics")
def worker_abort(worker):
worker.log.info("worker received ABORT {}".format(worker.pid))
for thread_id, stack in sys._current_frames().items():
worker.log.error(''.join(traceback.format_stack(stack)))
def on_exit(server):
server.log.info("Stopping Team Metrics")
def worker_int(worker):
worker.log.info("worker: received SIGINT {}".format(worker.pid))
|
py | 7df73858d7837b199bc56889a840e1bde37820e5 | import json
import pytz
from datetime import datetime
from dal import autocomplete
from django.conf import settings
from django.http import Http404
from django.db.models import Q
from django.shortcuts import render, get_object_or_404, redirect
from django.core.urlresolvers import reverse_lazy, reverse
from django.views.generic import FormView, TemplateView
from django.utils import timezone
from .forms import ApplicationForm
from .helpers import is_valid_email_address
from .models import Airport, Draft, Application, Institution, Organization, Country, Reference
from . import constants
# todo: find better solution for prefilling fields!
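# Serializes a Django QueryDict into JSON, keeping every value of multi-valued fields
# (checkboxes, multi-selects) by using getlist() instead of plain indexing.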
def post_to_json(post):
dictionary = {}
for key in post.keys():
data = post.getlist(key)
dictionary[key] = data
return json.dumps(dictionary)
from .models import MULTIFIELD_NAMES
def load_json_to_initial(data):
global MULTIFIELD_NAMES
MULTIFIELD_NAMES += ['skills_0', 'gender_0', 'how_did_you_find_out_0', ]
for key in data.keys():
if key not in MULTIFIELD_NAMES:
data[key] = data[key][0]
data['skills'] = [data.get('skills_0', ''), data.get('skills_1', '')]
data['gender'] = [data.get('gender_0', ''), data.get('gender_1', '')]
data['how_did_you_find_out'] = [data.get('how_did_you_find_out_0', ''), data.get('how_did_you_find_out_1', '')]
return data
class ApplicationView(FormView):
"""View shows the application itself"""
form_class = ApplicationForm
template_name = 'application/application_form.html'
def get_referral(self, referral=None):
if referral is None or not Reference.objects.filter(key=referral).exists():
return {}
reference = Reference.objects.get(key=referral)
return {
'image': reference.image,
'name': reference.name,
'text': reference.text,
}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'action_url': self.request.get_full_path(),
'organization': self.get_referral(self.kwargs.get('referral')),
})
return context
def get_success_url(self):
"""Get the success url with the right primary key so a customized thank you message can be shown"""
application_pk = self.application.pk
return reverse('application:thank_you', kwargs={'pk': application_pk})
def remove_form_errors(self, form):
"""Removes the errors from the form except for email and acknowledgement which is required for draft as well"""
for field in form:
if field.name in ['email', 'acknowledgements']:
continue
form.errors[field.name] = form.error_class()
return form
def save_draft(self, form):
"""
Tries to save the draft. Checks whether the email and acknowledgement is valid.
Returns the whether the draft was saved, the form itself and the the draft if it was created.
"""
form = self.remove_form_errors(form)
email = form.data['email']
acknowledgements = form.data.getlist('acknowledgements')
if is_valid_email_address(email) and len(acknowledgements) == 4:
draft, created = Draft.objects.get_or_create(email=email)
if created:
draft.data = post_to_json(self.request.POST)
draft.save()
return True, form, draft
form.add_error('email', 'A draft application associated with your e-mail address has '
'already been saved on our servers. If you cannot access it, contact us. ')
return False, form, None
def is_after_deadline(self):
deadline_unaware = datetime.strptime(constants.APPLICATION_DEADLINE, '%Y/%m/%d %H:%M')
deadline = pytz.utc.localize(deadline_unaware)
referral = self.kwargs.get('referral', '')
try:
reference = Reference.objects.get(key=referral)
if reference.deadline:
deadline = reference.deadline
except Reference.DoesNotExist:
pass
return timezone.now() > deadline
def form_invalid(self, form):
"""
Handles the form when it's invalid. For save,
it tries to save a draft for submit it invokes super().form_invalid()
"""
if '_save' in self.request.POST:
valid, form, draft = self.save_draft(form)
if valid:
return render(self.request, 'application/form_saved.html', {'draft': draft})
return super().form_invalid(form)
def form_valid(self, form):
"""
If the form was saved, it saves a draft and renders a info message.
If the form was submitted, it saves it and set a inactive flag for draft.
"""
if '_save' in self.request.POST:
_, _, draft = self.save_draft(form)
return render(self.request, 'application/form_saved.html', {'draft': draft})
elif '_submit' in self.request.POST:
self.application = form.save()
email = form.data['email']
if Draft.objects.filter(email=email).exists():
draft = Draft.objects.get(email=email)
draft.inactive = True
draft.save()
return super().form_valid(form)
def dispatch(self, request, *args, **kwargs):
if self.is_after_deadline():
return redirect(settings.REDIRECT_URL)
return super().dispatch(request, *args, **kwargs)
class PreFilledApplicationView(ApplicationView):
"""View for handling the application with prefilled data from draft"""
def get_draft(self):
"""Gets the draft based on uuid and raises a 404 if the draft does not exists"""
try:
draft_uuid = self.kwargs.get('uuid')
draft = Draft.all_objects.get(uuid=draft_uuid)
return draft
except (ValueError, Draft.DoesNotExist):
raise Http404
def save_draft(self, form):
"""Saves the draft and makes sure the email wasn't changed"""
form = self.remove_form_errors(form)
draft_uuid = self.kwargs.get('uuid')
draft = Draft.objects.get(uuid=draft_uuid)
# Do not change email doesn't matter what
mutable = self.request.POST._mutable
self.request.POST._mutable = True
self.request.POST['email'] = draft.email
self.request.POST._mutable = mutable
draft.data = post_to_json(self.request.POST)
draft.save()
return True, form, draft
def get_initial(self):
"""Loads the initial data from draft"""
draft = self.get_draft()
draft_data = json.loads(draft.data)
return load_json_to_initial(draft_data)
def get(self, request, uuid, *args, **kwargs):
draft = self.get_draft()
if draft.inactive:
return render(self.request, 'application/already_submitted.html', {})
return super().get(request, *args, **kwargs)
def dispatch(self, request, *args, **kwargs):
data = json.loads(self.get_draft().data)
referral = data.get('referred_by', [''])[0]
self.kwargs['referral'] = referral
return super().dispatch(request, *args, **kwargs)
class ReferralApplicationView(ApplicationView):
"""View shows the application with referral code"""
def get_initial(self):
referral = self.kwargs.get('referral')
return {'referred_by': referral}
def check_email(request, email):
if Application.objects.filter(email=email).exists():
context = {}
return render(request, 'application/popup_already_submitted.html', context)
if Draft.objects.filter(email=email).exists():
context = {'draft': Draft.objects.get(email=email)}
return render(request, 'application/popup_saved_draft.html', context)
context = {}
return render(request, 'application/popup_alright.html', context)
def send_email(request, email):
draft = get_object_or_404(Draft, email=email)
status = draft.send_access()
if status:
template_name = 'application/access_sent.html'
else:
template_name = 'application/access_not_sent.html'
return render(request, template_name, {})
class AirportAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
qs = Airport.objects.all()
if self.q:
qs = qs.filter(Q(name__icontains=self.q) | Q(iata_code__istartswith=self.q))
return qs
class InstitutionAutocomplete(autocomplete.Select2QuerySetView):
def has_add_permission(self, request):
return True
def get_queryset(self):
qs = Institution.objects.filter(show=True)
if self.q:
qs = qs.filter(Q(name__icontains=self.q))
return qs
class OrganizationAutocomplete(autocomplete.Select2QuerySetView):
def has_add_permission(self, request):
return True
def get_queryset(self):
qs = Organization.objects.filter(show=True)
if self.q:
qs = qs.filter(Q(name__icontains=self.q))
return qs
class CountryAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
qs = Country.objects.all()
if self.q:
qs = qs.filter(Q(name__icontains=self.q))
return qs
class ThankYou(TemplateView):
template_name = 'application/thank_you.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({'application': self.application})
return context
def get(self, request, pk, *args, **kwargs):
self.application = get_object_or_404(Application, pk=pk)
return redirect('http://www.opencon2016.org/thank_you?referral=' + str(self.application.my_referral))
# return super().get(request, *args, **kwargs)
|
py | 7df7395cd1291956be4df1896d01bae6ed4acf1c | import json
import logging
import traceback
from enum import Enum
from typing import Union, Iterable, List, TYPE_CHECKING, Dict, Set, Mapping
from typing_extensions import Literal
from multidict import MultiDict
from schematics.exceptions import DataError, ConversionError
from schematics.types import BaseType, ListType
from slim.base.const import ERR_TEXT_ROGUE_FIELD, ERR_TEXT_COLUMN_IS_NOT_FOREIGN_KEY
from slim.base.types.func_meta import get_meta
from slim.utils.schematics_ext import schematics_model_merge
from ..utils import BlobParser, JSONParser, is_py36, dict_filter, dict_filter_inplace, BoolParser
from ..exception import SyntaxException, ResourceException, InvalidParams, \
PermissionDenied, ColumnNotFound, ColumnIsNotForeignKey, SQLOperatorInvalid, InvalidRole, SlimException, \
InvalidPostData, TableNotFound
if TYPE_CHECKING:
from .view import AbstractSQLView
from .permission import Ability
from .user import BaseUser
logger = logging.getLogger(__name__)
class NamedObject:
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Named Object: %s>' % self.name
PRIMARY_KEY = NamedObject('Primary Key') # for add condition
ALL_COLUMNS = NamedObject('All Columns')
class DataRecord:
def __init__(self, table_name, val):
self.table = table_name
self.val = val
self.selected = ALL_COLUMNS
self.available_columns = ALL_COLUMNS
self._cache = None
def _to_dict(self) -> Dict:
raise NotImplementedError()
def set_info(self, info: "SQLQueryInfo", ability: "Ability", user: "BaseUser"):
from .permission import A
if info:
self.selected = info.select
# note: self.keys() is actually read here, so the cache has already been built; just call reserve directly
self.available_columns = ability.can_with_record(user, A.READ, self, available=info.select if info else None)
self.reserve(self.available_columns)
return self.available_columns
@property
def cache(self) -> Dict:
if self._cache is None:
self._cache = self._to_dict()
return self._cache
def get(self, key, default=None):
return self.cache.get(key, default)
def to_dict(self):
return self.cache.copy()
def keys(self):
return self.cache.keys()
def pop(self, key):
return self.cache.pop(key)
def reserve(self, keys):
cache_keys = set(self.keys())
for k in cache_keys - set(keys):
del self.cache[k]
def __getitem__(self, item):
return self.get(item)
def __setitem__(self, key, value):
self.cache[key] = value
def __delitem__(self, key):
self.pop(key)
def __repr__(self):
return self.to_dict().__repr__()
class SQLForeignKey:
def __init__(self, rel_table: str, rel_field: str, is_soft_key=False):
self.rel_table = rel_table # the related table
self.rel_field = rel_field # the related column
# self.rel_type = rel_type # type of the related column
# rel_type: SQL_TYPE,
self.is_soft_key = is_soft_key
class SQLQueryOrder:
def __init__(self, column, order):
assert order in ('asc', 'desc')
self.column = column
self.order = order
def __eq__(self, other):
if isinstance(other, SQLQueryOrder):
return self.column == other.column and self.order == other.order
return False
def __repr__(self):
return '<SQLQueryOrder %r.%s>' % (self.column, self.order)
class SQL_OP(Enum):
EQ = ('eq', '==', '=')
NE = ('ne', '!=', '<>')
LT = ('lt', '<')
LE = ('le', '<=')
GE = ('ge', '>=')
GT = ('gt', '>')
IN = ('in',)
NOT_IN = ('notin', 'not in')
IS = ('is',)
IS_NOT = ('isnot', 'is not')
AND = ('and',)
OR = ('or',)
PREFIX = ('prefix',) # string like only
CONTAINS = ('contains',) # ArrayField only
CONTAINS_ANY = ('contains_any',) # ArrayField only
LIKE = ('like',)
ILIKE = ('ilike',)
_COMMON = EQ + NE + LT + LE + GE + GT + IN + IS + IS_NOT + PREFIX + CONTAINS + CONTAINS_ANY
_ALL = _COMMON + LIKE + ILIKE
SQL_OP.COMMON = SQL_OP._COMMON.value
SQL_OP.ALL = SQL_OP._ALL.value
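# build a reverse lookup so every textual alias (e.g. 'eq', '==', '=') resolves to its SQL_OP member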
SQL_OP.txt2op = {}
for i in SQL_OP:
if i == SQL_OP._COMMON: continue
if i == SQL_OP._ALL: continue
for opval in i.value:
SQL_OP.txt2op[opval] = i
class QueryConditions(list):
"""
Query conditions; part of SQLQueryInfo. Practically the same as a list, kept as a separate type only so it can be distinguished from a plain list
i[0]: str
i[1]: SQL_OP
i[2]: Any
"""
def __contains__(self, item):
for i in self:
if i[0] == item:
return True
def find(self, column):
ret = []
for i in self:
if i[0] == column:
ret.append(i)
return ret
def map(self, key, func):
for i in self:
if i[0] == key:
i[:] = func(i)
class SQLQueryInfo:
""" SQL查询参数。"""
def __init__(self, params=None, view=None):
self.select: Union[Set[str], Literal[ALL_COLUMNS]] = ALL_COLUMNS
self.select_exclude: Set[str] = set()
self.conditions = QueryConditions()
self.orders: List[SQLQueryOrder] = []
self.loadfk: Dict[str, List[Dict[str, object]]] = {}
if params: self.parse(params)
if view: self.bind(view)
def set_orders(self, orders: List[SQLQueryOrder]):
assert isinstance(orders, list)
for i in orders:
assert isinstance(i, SQLQueryOrder)
self.orders = orders.copy()
@staticmethod
def parse_order(text):
"""
:param text: order=id.desc, xxx.asc
:return: [
[<column>, asc|desc|default],
[<column2>, asc|desc|default],
]
"""
orders = []
for i in map(str.strip, text.split(',')):
items = i.split('.', 2)
if len(items) == 1: column, order = items[0], 'default'
elif len(items) == 2: column, order = items
else: raise InvalidParams("Invalid order syntax")
order = order.lower()
if order not in ('asc', 'desc', 'default'):
raise InvalidParams('Invalid order mode: %s' % order)
if order != 'default':
orders.append(SQLQueryOrder(column, order))
return orders
def set_select(self, items):
if items == ALL_COLUMNS:
self.select = ALL_COLUMNS
elif isinstance(items, Iterable):
for i in items:
assert isinstance(i, str)
self.select = set(items)
else:
raise InvalidParams('Invalid select')
@classmethod
def parse_select(cls, text: str) -> Union[Set, Literal[ALL_COLUMNS]]:
"""
get columns from select text
:param text: col1, col2
:return: ALL_COLUMNS or ['col1', 'col2']
"""
if text == '*':
return ALL_COLUMNS # None means ALL
selected_columns = set(filter(lambda x: x, map(str.strip, text.split(','))))
if not selected_columns:
raise InvalidParams("No column(s) selected")
return selected_columns
@classmethod
def parse_load_fk(cls, data: Dict[str, List[Dict[str, object]]]) -> Dict[str, List[Dict[str, object]]]:
"""
:param data:{
<column>: role,
<column2>: role,
<column>: {
'role': role,
'loadfk': { ... },
},
:return: {
<column>: {
'role': role,
},
...
<column3>: {
'role': role,
'loadfk': { ... },
},
}
"""
default_value_dict = {'role': None, 'as': None, 'table': None, 'loadfk': None}
def value_normalize_dict(value):
def check(k, v):
if k == 'role': return isinstance(v, str)
if k == 'as': return isinstance(v, str)
if k == 'table': return isinstance(v, str)
if k == 'loadfk': return isinstance(v, dict)
valid = {k: v for k, v in value.items() if check(k, v)}
if not valid: return default_value_dict.copy()
if 'loadfk' in valid and valid['loadfk']:
valid['loadfk'] = cls.parse_load_fk(valid['loadfk'])
for k, v in default_value_dict.items():
valid.setdefault(k, v)
return valid
def value_normalize(value, no_list=True):
if value is None:
return default_value_dict.copy()
elif not no_list and isinstance(value, List):
# <column>: [value1, value2, ...]
return list(map(value_normalize, value))
elif isinstance(value, str):
# <column>: role
val = default_value_dict.copy()
val['role'] = value
return val
elif isinstance(value, Mapping):
# {'role': <str>, 'as': <str>, ...}
return value_normalize_dict(value)
else:
raise InvalidParams('Invalid syntax for "loadfk": %s' % value)
# check every item
new_data = {}
if not isinstance(data, dict):
raise InvalidParams('Invalid syntax for "loadfk": %s' % data)
for k, v in data.items():
nv = value_normalize(v, False)
new_data[k] = nv if isinstance(nv, List) else [nv]
return new_data
@classmethod
def check_condition_and_format(cls, val: Iterable) -> List:
"""
Check a condition clause and format it into a usable form
:param val:
:return:
"""
field_name, op, value = val
if not isinstance(op, SQL_OP):
if op not in SQL_OP.txt2op:
raise SQLOperatorInvalid(op, 'The condition is illegal: %s' % val)
else:
op = SQL_OP.txt2op.get(op)
return [field_name, op, value]
def add_condition(self, field_name, op, value):
"""
Add a query condition and validate it.
raise ParamsException if failed.
self.view required
:param field_name:
:param op:
:param value:
:return: None
"""
cond = self.check_condition_and_format((field_name, op, value))
self.conditions.append(cond)
def parse_then_add_condition(self, field_name, op_name, value):
if op_name not in SQL_OP.txt2op:
raise SQLOperatorInvalid(op_name)
op = SQL_OP.txt2op.get(op_name)
if op in (SQL_OP.IN, SQL_OP.NOT_IN, SQL_OP.CONTAINS, SQL_OP.CONTAINS_ANY):
try:
# force json.loads() on the right-hand value, which matches how parameters are usually passed
value = json.loads(value)
except (TypeError, json.JSONDecodeError):
raise InvalidParams('The right value of "in" condition must be serialized json string: %s' % value)
self.add_condition(field_name, op, value)
def clear_condition(self):
self.conditions.clear()
def parse(self, params):
for key, value in params.items():
# xxx.{op}
info = key.split('.', 1)
field_name = info[0]
if field_name.startswith('$'):
continue
elif field_name == 'order':
self.orders = self.parse_order(value)
continue
elif field_name == 'select':
self.select = self.parse_select(value)
continue
elif field_name == '-select':
self.select_exclude = self.parse_select(value)
continue
elif field_name == 'loadfk':
try:
value = json.loads(value) # [List, Dict[str, str]]
except (json.JSONDecodeError, TypeError):
raise InvalidParams('Invalid json syntax for "loadfk": %s' % value)
self.loadfk = self.parse_load_fk(value)
continue
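# remaining keys look like "<field>" or "<field>.<op>"; without a suffix the operator defaults to 'eq'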
op = info[1] if len(info) > 1 else 'eq'
self.parse_then_add_condition(field_name, op, value)
def check_query_permission(self, view: "AbstractSQLView"):
user = view.current_user if view.can_get_user else None
self.check_query_permission_full(user, view.table_name, view.ability, view)
def check_query_permission_full(self, user: "BaseUser", table: str, ability: "Ability", view: "AbstractSQLView", ignore_error=True):
from .permission import A
# QUERY permission check
# QUERY is special in that data is returned even when no query conditions are given
checking_columns = set()
checking_columns_qex = set()
if self.conditions:
# by the current design, condition permissions are only checked when conditions exist
is_qex_cond = lambda x: x[1] == SQL_OP.ILIKE or x[1] == SQL_OP.LIKE
is_q_cond = lambda x: not is_qex_cond(x)
for c in self.conditions:
field_name, op, value = c
if is_qex_cond(c):
checking_columns_qex.add(field_name)
else:
checking_columns.add(field_name)
def condition_filter(available_columns: Set, skip_func):
new_conditions = []
for i in self.conditions:
if skip_func(i):
# if the column is not one of those being checked, keep the condition as-is
new_conditions.append(i)
else:
# otherwise, drop any condition whose column is not in the permitted set
if i[0] in available_columns:
new_conditions.append(i)
self.conditions[:] = new_conditions
def do_check(cs: Set, skip_func, action):
if cs:
new_columns = ability.can_with_columns(user, action, table, cs)
if not ignore_error:
if len(cs) != len(new_columns):
raise PermissionDenied("These columns has no permission to %s: %r of %r" % (action, cs - new_columns, table))
condition_filter(new_columns, skip_func)
do_check(checking_columns, is_qex_cond, A.QUERY)
do_check(checking_columns_qex, is_q_cond, A.QUERY_EX)
# having every query condition stripped by the permission system is treated as unexpected, so raise
# otherwise the user would receive data from an unconditional query.
if not self.conditions:
raise PermissionDenied("No column had permission to %s: %r of %r" % (
A.QUERY, checking_columns.union(checking_columns_qex), table))
# READ permission check; anything that fails is filtered out
checking_columns = self.loadfk.keys() # foreign key filter
new_loadfk = ability.can_with_columns(user, A.READ, table, checking_columns)
self.loadfk = dict_filter(self.loadfk, new_loadfk)
new_select = ability.can_with_columns(user, A.READ, table, self.select) # filter select
self.set_select(new_select)
# apply extra query conditions
ability.setup_extra_query_conditions(user, table, self, view)
def bind(self, view: "AbstractSQLView"):
def check_column_exists(column):
if column is PRIMARY_KEY:
return
if column not in view.fields:
raise ColumnNotFound({column: [ERR_TEXT_ROGUE_FIELD]})
# select check
def show_select(s: Union[Literal[ALL_COLUMNS], Set]) -> Set:
if s is ALL_COLUMNS:
return view.fields.keys()
else:
for field_name in s:
check_column_exists(field_name)
if PRIMARY_KEY in s:
s.remove(PRIMARY_KEY)
s.add(view.primary_key)
return s
# select = normal select - reverse select
self.select = show_select(self.select) - show_select(self.select_exclude)
# where convert
for i in self.conditions:
field_name, op, value = i
check_column_exists(field_name)
if field_name == PRIMARY_KEY:
i[0] = field_name = view.primary_key
# permission check
# should there be a separate order permission?
if view.ability:
self.check_query_permission(view)
# where check
for i in self.conditions:
field_name, op, value = i
check_column_exists(field_name)
if field_name == PRIMARY_KEY:
i[0] = field_name = view.primary_key
field_type: BaseType = view.fields[field_name]
# type conversion and validation happen here
# a key point: the callbacks invoked earlier by check_query_permission usually receive values already in their final type,
# yet the raw values may come from parameters, which are text! That causes the two related issues in the TODOs below
def conv(x):
nonlocal field_type
if op in (SQL_OP.CONTAINS, SQL_OP.CONTAINS_ANY):
assert isinstance(field_type, ListType), 'contains only works with ArrayField'
field_type2 = field_type.field
else:
field_type2 = field_type
# TODO: the handling of null here feels questionable; should the string "null" and a real null be distinguished explicitly?
if x in ('null', None):
return None
else:
return field_type2.validate(x)
try:
# note: a foreign key column takes the type of the column it points to, so no extra handling is needed here
# TODO: is Iterable really a reliable type to check against? Is this the right idea?
if op in (SQL_OP.CONTAINS, SQL_OP.CONTAINS_ANY):
assert isinstance(field_type, ListType), 'contains only works with ArrayField'
if op in (SQL_OP.IN, SQL_OP.NOT_IN, SQL_OP.CONTAINS, SQL_OP.CONTAINS_ANY):
assert isinstance(value, Iterable)
i[2] = list(map(conv, value))
else:
i[2] = conv(value)
except ConversionError as e:
raise InvalidParams({field_name: e.to_primitive()})
except Exception as e:
# a condition name was originally planned here, but it felt like it would hurt overall consistency, so it is unused
# cond_name = '%s.%s' % (field_name, op.value[0])
raise InvalidParams({field_name: ["Can not convert to data type of the field"]})
# order check
for i, od in enumerate(self.orders):
check_column_exists(od.column)
if od.column == PRIMARY_KEY:
self.orders[i] = view.primary_key
# foreign key check
app = view.app
def check_loadfk_data(the_view, data):
if PRIMARY_KEY in data:
data[the_view.primary_key] = data[PRIMARY_KEY]
del data[PRIMARY_KEY]
for field_name, values_lst in data.items():
# field_name: [{'role': role, 'loadfk': {...}}]
# field_name: [{'as': 's24h', 'table': 's24', 'role': role}]
# check that the column exists
if field_name not in the_view.fields:
raise ColumnNotFound({field_name: [ERR_TEXT_ROGUE_FIELD]})
# check that the column is a valid foreign key column
fks = the_view.foreign_keys.get(field_name, None)
if not fks: raise ColumnIsNotForeignKey({field_name: [ERR_TEXT_COLUMN_IS_NOT_FOREIGN_KEY]})
for values in values_lst:
# check whether an alias maps this foreign key to a specific table
if values['table']:
if values['table'] not in the_view.foreign_keys_table_alias:
raise ResourceException('Foreign key not match the table: %r -> %r' % (field_name, values['table']))
fk = the_view.foreign_keys_table_alias[values['table']]
values['table'] = fk.rel_table
else:
fk = fks[0] # take the first result (the default foreign key)
values['table'] = fk.rel_table
# check that the referenced table exists
if fk.rel_table not in app.tables:
raise TableNotFound("Foreign key refer to a table not exists: %r -> %r" % (field_name, fk.rel_table))
# check that the role exists for the referenced table
if values['role'] not in app.permission.roles:
raise InvalidRole('%s of %s' % (values['role'], fk.rel_table))
# recurse into nested foreign-key loads
if values['loadfk']:
check_loadfk_data(app.tables[fk.rel_table], values['loadfk'])
if self.loadfk:
check_loadfk_data(view, self.loadfk)
class SQLValuesToWrite(dict):
def __init__(self, raw_data=None, view: 'AbstractSQLView'=None, action=None, records=None):
super().__init__()
self.returning = False
self.view = view
# design of incr/desc:
# 1. incr/desc/normal_set can't be appear in the same time
# 2. incr/desc use self to store data
self.incr_fields = set()
self.decr_fields = set()
self.set_add_fields = set()
self.set_remove_fields = set()
self.array_append = set()
self.array_remove = set()
if raw_data:
assert isinstance(raw_data, Mapping)
self.parse(raw_data)
if view: self.bind(view, action, records)
def parse(self, post_data: MultiDict):
self.clear()
if isinstance(post_data, dict):
post_data = MultiDict(post_data)
for k, v in post_data.items():
# submitting the same key several times is equivalent to submitting an array (for formdata and urlencoded bodies)
v_all = post_data.getall(k)
if len(v_all) > 1:
v = v_all
if k.startswith('$'):
continue
elif k == 'returning':
self.returning = True
continue
elif '.' in k:
# TODO: disallow incr and a plain assignment appearing at the same time
k, op = k.rsplit('.', 1)
if op == 'incr':
self.incr_fields.add(k)
elif op == 'decr':
self.decr_fields.add(k)
elif op == 'set_add':
self.set_add_fields.add(k)
elif op == 'set_remove':
self.set_remove_fields.add(k)
# elif op == 'array_append':
# self.array_append.add(k)
# elif op == 'array_remove':
# self.array_remove.add(k)
self[k] = v
def check_insert_permission(self, user: "BaseUser", table: str, ability: "Ability"):
from .permission import A
columns = self.keys()
logger.debug('request permission as %r: [%s] of table %r, columns: %s' % (ability.role, A.CREATE, table, columns))
is_empty_input = not columns
# if the insert payload is empty, the user must have insert permission on at least one column
if is_empty_input:
if self.view:
columns = self.view.fields.keys()
available = ability.can_with_columns(user, A.CREATE, table, columns)
if not available: raise PermissionDenied()
dict_filter_inplace(self, available)
valid = ability.can_with_columns(user, A.CREATE, table, available)
if is_empty_input:
if len(valid) <= 0:
logger.debug("request permission failed as %r. request / valid: %r, %r" % (ability.role, list(self.keys()), valid))
raise PermissionDenied()
else:
if len(valid) != len(self):
logger.debug("request permission failed as %r. request / valid: %r, %r" % (ability.role, list(self.keys()), valid))
raise PermissionDenied()
logger.debug("request permission successed as %r: %r" % (ability.role, list(self.keys())))
def check_update_permission(self, user: "BaseUser", table: str, ability: "Ability", records):
from .permission import A
columns = self.keys()
logger.debug('request permission as %r: [%s] of table %r, columns: %s' % (ability.role, A.WRITE, table, columns))
available = ability.can_with_columns(user, A.WRITE, table, columns)
if not available:
raise PermissionDenied()
dict_filter_inplace(self, available)
for record in records:
valid = ability.can_with_record(user, A.WRITE, record, available=available)
if len(valid) != len(self):
logger.debug("request permission failed as %r. request / valid: %r, %r" % (ability.role, list(self.keys()), valid))
raise PermissionDenied()
logger.debug("request permission successed as %r: %r" % (ability.role, list(self.keys())))
def check_write_permission(self, view: "AbstractSQLView", action, records=None):
from .permission import A
user = view.current_user if view.can_get_user else None
if action == A.WRITE:
self.check_update_permission(user, view.table_name, view.ability, records)
elif action == A.CREATE:
self.check_insert_permission(user, view.table_name, view.ability)
else:
raise SlimException("Invalid action to write: %r" % action)
def bind(self, view: "AbstractSQLView", action=None, records=None):
"""
Bind the values to be written to the view.
Since a before_insert / before_update step still follows, this avoids raising where possible and simply filters out non-conforming values before they are loaded
:param view:
:param action:
:param records:
:return:
"""
from .permission import Ability, A
# 1. merge the validators of before_update / before_insert and run one filtering pass
if action == A.WRITE:
func = view.before_update
else:
func = view.before_insert
meta = get_meta(func)
model_cls = schematics_model_merge(view.data_model, *meta.va_write_value_lst)
try:
# the first bind should always happen before before_update / before_insert
# so validate with partial=True (i.e. ignore required=True fields, since they may still be filled in later)
m = model_cls(self, strict=False, validate=True, partial=True)
data = m.to_native()
for k in self:
self[k] = data.get(k)
self.incr_fields.intersection_update(self.keys())
self.decr_fields.intersection_update(self.keys())
self.set_add_fields.intersection_update(self.keys())
self.set_remove_fields.intersection_update(self.keys())
self.array_append.intersection_update(self.keys())
self.array_remove.intersection_update(self.keys())
except DataError as e:
raise InvalidPostData(e.to_primitive())
# TypeError is not caught
dict_filter_inplace(self, view.fields.keys())
# an empty post after filtering is not meaningless, because values may still be added in before_insert
# if len(self) == 0:
# raise InvalidPostData('Invalid post values for table: %s' % view.table_name)
# likewise, empty values are not checked here, since that would raise a permission-denied error
if action and len(self):
self.check_write_permission(view, action, records)
def validate_before_execute_insert(self, view: "AbstractSQLView"):
# before executing the insert, validate that the record is complete (all required fields present)
try:
view.data_model(self, strict=False, validate=True, partial=False)
except DataError as e:
raise InvalidPostData(e.to_primitive())
|
py | 7df739c643b937c682988ebc7fb4079262c83a09 | import pandas as pd
import geopandas as gpd
from shapely import wkb
from ....utils.utils import check_package
from ....utils.logger import log
def variable_describe(data):
if not data or not data.get('stats'):
return
stats = dict(data.get('stats'))
stats.update(data.get('quantiles'))
return pd.Series(stats)
def dataset_describe(variables):
describe = dict()
for variable in variables:
if variable.describe() is None:
continue
describe[variable.column_name] = variable.describe()
return pd.DataFrame.from_dict(describe)
def head(cls, data):
from .dataset import Dataset
from .variable import Variable
if not data:
return
if cls == Variable:
head = pd.Series(data['head'])
elif cls == Dataset:
head = pd.DataFrame(data['glimpses']['head'])
return head
def tail(cls, data):
from .dataset import Dataset
from .variable import Variable
if not data:
return
if cls == Variable:
tail = pd.Series(data['tail'])
elif cls == Dataset:
tail = pd.DataFrame(data['glimpses']['tail'])
return tail
def counts(data):
if not data:
return
return pd.Series(data['counts'])
def quantiles(data):
if not data:
return
return pd.Series(data['quantiles'])
def top_values(data):
check_package('matplotlib', is_optional=True)
import matplotlib.pyplot as plt
if not data:
return
top_values = pd.DataFrame(data['top_values'])
position = list(reversed(range(top_values.shape[0])))
plt.barh(position, top_values['count'], align='center', alpha=0.5)
plt.yticks(position, top_values['value'])
plt.xlabel('Count')
plt.ylabel('Value')
plt.title('Top values')
plt.show()
def fields_by_type(data):
if not data:
return
return pd.Series(data['fields_by_type'])
def geom_coverage(geography_id):
from .geography import Geography
from ....viz import Map, Layer
geography = Geography.get(geography_id)
if geography.geom_coverage:
geom_coverage = wkb.loads(geography.geom_coverage, hex=True)
geom_coverage_gdf = gpd.GeoDataFrame({'geometry': [geom_coverage]}, geometry='geometry')
return Map(Layer(geom_coverage_gdf))
else:
log.info('Geometry coverage not available')
def histogram(data):
check_package('matplotlib', is_optional=True)
import matplotlib.pyplot as plt
range_element = [round(element['min_range'], 2) for element in data['histogram']]
count = [element['count'] for element in data['histogram']]
count_normalized = [element/sum(count) for element in count]
position = list(range(len(range_element)))
plt.figure(figsize=(12, 7))
plt.bar(position, count_normalized, align='center', alpha=0.5, width=abs(position[1] - position[0]))
plt.xticks(position, range_element)
plt.title('Histogram')
plt.xticks(rotation=60)
plt.show()
|
py | 7df73b9a8d77d30c2b246551a4d4335659dc526a | from django.shortcuts import get_object_or_404, render
from django.http import Http404
from django.db.models import Avg
from .models import Book
# Create your views here.
def index(request):
books = Book.objects.all().order_by("-rating")
num_books = books.count()
avg_rating = books.aggregate(Avg("rating")) # rating__avg, rating__min
return render(request, "book_outlet/index.html", {
"books": books,
"total_number_of_books": num_books,
"average_rating": avg_rating
})
def book_detail(request, slug):
# try:
# book = Book.objects.get(pk=id)
# except:
# raise Http404()
book = get_object_or_404(Book, slug=slug)
return render(request, "book_outlet/book_detail.html", {
"title": book.title,
"author": book.author,
"rating": book.rating,
"is_bestseller": book.is_bestselling
}) |
py | 7df73bceb48a6999638db06476a0dda1c8f692d2 | from OpenTCLFile import *
def recorder_types(TCLFile):
recorder_dispInfo = OpenSeesTclRead(TCLFile, 'recorder Node -file Node_displacements.out', 4)
recorder_rotInfo = OpenSeesTclRead(TCLFile, 'recorder Node -file Node_rotations.out', 4)
recorder_forceInfo = OpenSeesTclRead(TCLFile, 'recorder Node -file Node_forceReactions.out', 4)
recorder_momentInfo = OpenSeesTclRead(TCLFile, 'recorder Node -file Node_momentReactions.out', 4)
recorder_accelInfo = OpenSeesTclRead(TCLFile, 'recorder Node -file Node_accelerations.out', 4)
recorder_vellInfo = OpenSeesTclRead(TCLFile, 'recorder Node -file Node_velocities.out', 4)
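# each flag below is 1 if the matching "recorder Node -file ..." line exists in the TCL file, otherwise 0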
if (recorder_dispInfo.size>0):
recorder_disp=1
else :
recorder_disp=0
if (recorder_rotInfo.size>0):
recorder_rot=1
else:
recorder_rot = 0
if (recorder_forceInfo.size>0):
recorder_force=1
else:
recorder_force = 0
if (recorder_momentInfo.size>0):
recorder_moment=1
else:
recorder_moment = 0
if (recorder_accelInfo.size>0):
recorder_accel=1
else:
recorder_accel = 0
if (recorder_vellInfo.size>0):
recorder_vel=1
else:
recorder_vel = 0
return recorder_disp, recorder_rot, recorder_force, recorder_moment, recorder_accel, recorder_vel |
py | 7df73c9d7513bd333a83a80fe95e543597eea404 | # MIT licensed
# Copyright (c) 2013-2017 lilydjwg <[email protected]>, et al.
import re
import sre_constants
import structlog
from . import session
logger = structlog.get_logger(logger_name=__name__)
async def get_version(name, conf, **kwargs):
try:
regex = re.compile(conf['regex'])
except sre_constants.error:
logger.warning('bad regex, skipped.', name=name, exc_info=True)
return
encoding = conf.get('encoding', 'latin1')
kwargs = {}
headers = {}
if conf.get('proxy'):
kwargs["proxy"] = conf.get("proxy")
if conf.get('user_agent'):
headers['User-Agent'] = conf['user_agent']
async with session.get(conf['url'], headers=headers, **kwargs) as res:
body = (await res.read()).decode(encoding)
try:
version = regex.findall(body)
except ValueError:
version = None
if not conf.getboolean('missing_ok', False):
logger.error('version string not found.', name=name)
return version
|
py | 7df73cdf69152e43e1924e3b80a97330e77e33c7 | import os
from holoniq.utils import config, log
from random import randrange
from cachetools import TTLCache
from .template_mailer import TemplateMailer
from flask_login import LoginManager, UserMixin, login_user, logout_user, current_user
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy_utils import database_exists
VERIFICATION_TEMPLATE = """
Hi {{name}}
You recently requested to create a Dash/SPA account. In order to complete
the registration process please enter the following code:
{{code}}
This code is only valid for the next 30 minutes.
If you did not request an account, please ignore this
email or reply to let us know.
"""
FORGOT_TEMPLATE = """
You recently requested to reset the password on your Dash/SPA account. In order
to complete the reset process please enter the following code:
{{code}}
This code is only valid for the next 30 minutes.
If you did not request a password reset, please ignore this
email or reply to let us know.
"""
mail_options = config.get('mail_options')
user_db = config.get('user_db')
class VerificationRecord:
def __init__(self, name, email, password):
self.code = randomCode()
self.name = name
self.email = email
self.password = password
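# chr(randrange(10) + 65) picks a letter from 'A'..'J', so the default code is four
# characters drawn from that ten-letter range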
def randomCode(length=4):
return ''.join([chr(randrange(10) + 65) for n in range(length)])
class AdminLoginManager(LoginManager):
def __init__(self, app=None, add_context_processor=True):
super().__init__(app, add_context_processor)
self.app = app
self.verification_cache = TTLCache(maxsize=1000, ttl=30*60)
self.test_mode = os.environ.get("FLASK_ENV", "production") == "test"
app.config['SQLALCHEMY_DATABASE_URI'] = user_db.database_uri
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
self.db = SQLAlchemy()
self.db.init_app(app)
self.User = self.user_model(self.db)
self.user_loader(self.load_user)
if not database_exists(user_db.database_uri):
app.app_context().push()
self.db.create_all()
self.db.session.commit()
# self.add_user("admin", "[email protected]", "passme99")
def flask_context(self, fn):
def _wrapper(*args, **kwargs):
ctx = None
try:
ctx = self.app.app_context()
ctx.push()
return fn(*args, **kwargs)
finally:
ctx.pop()
return _wrapper
def database_uri(self):
return self.app.config['SQLALCHEMY_DATABASE_URI']
def is_test(self):
return self.test_mode
def isAdmin(self):
try:
return 'admin' in current_user.role
except Exception:
pass
return False
def delete_user(self, email):
@self.flask_context
def _delete_user():
user = self.User.query.filter_by(email=email).first()
if not user:
raise Exception('Invalid user')
self.db.session.delete(user)
self.db.session.commit()
return _delete_user()
def add_user(self, name, email, password, role=[]):
@self.flask_context
def _add_user():
roles = ','.join(role) if isinstance(role, list) else role
new_user = self.User(email=email, name=name, password=generate_password_hash(password, method='sha256'), role=roles)
self.db.session.add(new_user)
self.db.session.commit()
return True
return _add_user()
def update_user(self, id, name, email, password, role=[]):
@self.flask_context
def _update_user():
user = self.User.query.filter_by(id=id).first()
if not user:
raise Exception('Invalid user')
user.role = ','.join(role) if isinstance(role, list) else role
user.password = generate_password_hash(password, method='sha256')
user.name = name
user.email = email
self.db.session.add(user)
self.db.session.commit()
return True
return _update_user()
def register(self, name, email, password, terms):
log.info('register [name: %s, email: %s, password: %s, terms: %s]', name, email, password, terms)
user = self.User.query.filter_by(email=email).first() # if this returns a user, then the email already exists in database
if user:
return False
verification_record = VerificationRecord(name, email, password)
self.verification_cache[email] = verification_record
mailer = TemplateMailer(VERIFICATION_TEMPLATE, {'name' : name, 'code': verification_record.code})
mailer.send(mail_options.sender, email, 'Password verification', self.is_test())
return True
def validate(self, email, code):
if not email in self.verification_cache:
return False
vrec = self.verification_cache[email]
if vrec.code == code:
new_user = self.User(email=vrec.email, name=vrec.name, password=generate_password_hash(vrec.password, method='sha256'))
self.db.session.add(new_user)
self.db.session.commit()
self.verification_cache[email] = None
return True
return False
def login(self, email, password, remember):
log.info('login [email: %s, password: %s, remember: %s]', email, password, remember)
user = self.User.query.filter_by(email=email).first()
if not user or not check_password_hash(user.password, password):
return False
login_user(user, remember=remember)
return True
def change_password(self, email, password):
user = self.User.query.filter_by(email=email).first()
if user:
user.password = generate_password_hash(password, method='sha256')
self.db.session.add(user)
self.db.session.commit()
return True
return False
def forgot(self, email):
log.info('forgot [email: %s]', email)
user = self.User.query.filter_by(email=email).first() # if this returns a user, then the email already exists in database
if user is None:
return False
self.code = randomCode(4)
self.email = email
log.info('code: %s', self.code)
mailer = TemplateMailer(FORGOT_TEMPLATE, {'code': self.code})
mailer.send(mail_options.sender, email, 'Password verification', self.is_test())
return True
def forgot_code_valid(self, code, email=None):
if email and email != self.email:
return False
return self.code == code.upper()
def get_email(self):
return self.email
def reload_user(self, user=None):
# log.info('reload_user user=%s', user)
super().reload_user(user)
return current_user
def load_user(self, user_id):
user = self.User.query.get(int(user_id))
# log.info('load_user: id=%s, email=%s', user_id, user.email)
return user
def logout_user(self):
log.info('logout_user')
logout_user()
def user_count(self):
@self.flask_context
def _user_count():
return self.User.query.count()
return _user_count()
def users(self):
@self.flask_context
def _users():
return self.User.query.all()
return _users()
def user_model(self, db):
class _User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True) # primary keys are required by SQLAlchemy
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
name = db.Column(db.String(1000))
role = db.Column(db.String(100))
return _User
|
py | 7df73ce4cbced2d2056149e5d2ef5710066cea29 |
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteAbort(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
def inferior_abort_received(self):
procs = self.prep_debug_monitor_and_inferior(inferior_args=["abort"])
self.assertIsNotNone(procs)
self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",
{"direction": "send",
"regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",
"capture": {1: "hex_exit_code"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
hex_exit_code = context.get("hex_exit_code")
self.assertIsNotNone(hex_exit_code)
self.assertEqual(int(hex_exit_code, 16),
lldbutil.get_signal_number('SIGABRT'))
@debugserver_test
def test_inferior_abort_received_debugserver(self):
self.build()
self.inferior_abort_received()
@skipIfWindows # No signal is sent on Windows.
@llgs_test
# std::abort() on <= API 16 raises SIGSEGV - b.android.com/179836
@expectedFailureAndroid(api_levels=list(range(16 + 1)))
def test_inferior_abort_received_llgs(self):
self.build()
self.inferior_abort_received()
|
py | 7df73d2f712093c962be2462847e40864d595046 | # -*- coding: utf-8 -*-
import json
from common.type_validation import TypeValidator
from project import (
ACCOUNT_PREFIX, DEFAULT_ACCOUNTS_COUNT, ECHO_INITIAL_BALANCE, INITIAL_ACCOUNTS_COUNT, INITIAL_ACCOUNTS_NAMES,
INITIAL_COMMITTEE_ETH_ADDRESSES, MAIN_TEST_ACCOUNT_COUNT, NATHAN_PK, ROPSTEN, WALLETS
)
BALANCE_TO_ACCOUNT = int(ECHO_INITIAL_BALANCE / (INITIAL_ACCOUNTS_COUNT + MAIN_TEST_ACCOUNT_COUNT))
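# split the initial ECHO balance evenly between the initial accounts and the main test accounts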
def make_all_default_accounts_echo_holders(base_test, nathan_id, database_api):
list_operations = []
for i in range(1, DEFAULT_ACCOUNTS_COUNT):
to_account_id = get_account_id(get_account(base_test, ACCOUNT_PREFIX + str(i), database_api))
operation = base_test.echo_ops.get_transfer_operation(
base_test.echo, nathan_id, to_account_id, 1, signer=NATHAN_PK
)
collected_operation = base_test.collect_operations(operation, database_api)
list_operations.append(collected_operation)
broadcast_result = base_test.echo_ops.broadcast(
echo=base_test.echo, list_operations=list_operations, log_broadcast=False
)
return base_test.is_operation_completed(broadcast_result, expected_static_variant=0)
def add_balance_to_main_test_account(base_test, nathan_id, database_api):
to_account_id = get_account_id(get_account(base_test, base_test.accounts[0], database_api))
operation = base_test.echo_ops.get_transfer_operation(
base_test.echo, nathan_id, to_account_id, BALANCE_TO_ACCOUNT, signer=NATHAN_PK
)
collected_operation = base_test.collect_operations(operation, database_api)
broadcast_result = base_test.echo_ops.broadcast(
echo=base_test.echo, list_operations=collected_operation, log_broadcast=False
)
return base_test.is_operation_completed(broadcast_result, expected_static_variant=0)
def get_account_count(base_test, database_api):
response_id = base_test.send_request(base_test.get_request("get_account_count"), database_api)
return base_test.get_response(response_id)["result"]
def register_default_accounts(base_test, database_api):
main_account_count = get_account_count(base_test, database_api)
list_operations = []
for i in range(DEFAULT_ACCOUNTS_COUNT):
names = ACCOUNT_PREFIX + str(i)
public_key = base_test.store_new_account(names)
operation = base_test.echo_ops.get_account_create_operation(
base_test.echo, names, public_key, public_key, signer=NATHAN_PK
)
collected_operation = base_test.collect_operations(operation, database_api)
list_operations.append(collected_operation)
broadcast_result = base_test.echo_ops.broadcast(
echo=base_test.echo, list_operations=list_operations, log_broadcast=False
)
if not base_test.is_operation_completed(broadcast_result, expected_static_variant=1):
raise Exception("Default accounts are not created")
for i in range(DEFAULT_ACCOUNTS_COUNT):
account_id = broadcast_result.get("trx").get("operation_results")[i][1]
names = ACCOUNT_PREFIX + str(i)
with open(WALLETS, "r") as file:
data = json.load(file)
data[names].update({"id": account_id})
with open(WALLETS, "w") as new_file:
new_file.write(json.dumps(data))
after_creation_count = get_account_count(base_test, database_api)
return (after_creation_count - main_account_count) == DEFAULT_ACCOUNTS_COUNT
def distribute_balance_between_main_accounts(base_test, nathan_id, database_api):
list_operations = []
for i in range(INITIAL_ACCOUNTS_COUNT):
if INITIAL_ACCOUNTS_NAMES[i] != "nathan":
to_account_id = get_account_id(get_account(base_test, INITIAL_ACCOUNTS_NAMES[i], database_api))
operation = base_test.echo_ops.get_transfer_operation(
base_test.echo, nathan_id, to_account_id, BALANCE_TO_ACCOUNT, signer=NATHAN_PK
)
collected_operation = base_test.collect_operations(operation, database_api)
list_operations.append(collected_operation)
broadcast_result = base_test.echo_ops.broadcast(
echo=base_test.echo, list_operations=list_operations, log_broadcast=False
)
return base_test.is_operation_completed(broadcast_result, expected_static_variant=0)
def distribute_balance_between_committee_addresses(base_test):
eth_account_address = base_test.get_default_ethereum_account().address
default_account_balance = base_test.eth_trx.get_address_balance_in_eth_network(base_test.web3, eth_account_address)
balance_to_transfer = int('{:.0f}'.format(default_account_balance / 100 * 5))
for eth_address in INITIAL_COMMITTEE_ETH_ADDRESSES:
transaction = base_test.eth_trx.get_transfer_transaction(
web3=base_test.web3, _from=eth_account_address, _to=eth_address, value=balance_to_transfer
)
broadcast_result = base_test.eth_trx.broadcast(
web3=base_test.web3, transaction=transaction, log_transaction=True
)
if broadcast_result is None:
return False
return True
def get_public_key(account):
return account["active"]["key_auths"][0][0]
def get_account_id(account):
return account["id"]
def get_account(base_test, account_name, database_api):
type_validator = TypeValidator()
if type_validator.is_account_name(account_name):
response_id = base_test.send_request(base_test.get_request("get_account_by_name", [account_name]), database_api)
result = base_test.get_response(response_id)["result"]
elif type_validator.is_account_id(account_name):
response_id = base_test.send_request(base_test.get_request("get_accounts", [[account_name]]), database_api)
result = base_test.get_response(response_id)["result"][0]
return result
def import_balance_to_nathan(base_test, nathan_id, nathan_public_key, database_api):
operation = base_test.echo_ops.get_balance_claim_operation(
base_test.echo, nathan_id, nathan_public_key, ECHO_INITIAL_BALANCE, NATHAN_PK
)
collected_operation = base_test.collect_operations(operation, database_api)
broadcast_result = base_test.echo_ops.broadcast(
echo=base_test.echo, list_operations=collected_operation, log_broadcast=False
)
return base_test.is_operation_completed(broadcast_result, expected_static_variant=0)
def pre_deploy_echo(base_test, database_api, lcc):
nathan = get_account(base_test, "nathan", database_api)
nathan_id = get_account_id(nathan)
nathan_public_key = get_public_key(nathan)
if not ROPSTEN:
if not distribute_balance_between_committee_addresses(base_test):
raise Exception("Ethereum balance is not distributed")
lcc.log_info("Ethereum balance distributed between committee addresses successfully")
if not import_balance_to_nathan(base_test, nathan_id, nathan_public_key, database_api):
raise Exception("Broadcast failed")
lcc.log_info("Balance to nathan imported successfully")
if not distribute_balance_between_main_accounts(base_test, nathan_id, database_api):
raise Exception("Balance is not distributed")
lcc.log_info("Balance distributed between main accounts successfully")
if not register_default_accounts(base_test, database_api):
raise Exception("Default accounts are not created")
lcc.log_info("Default accounts created successfully. Accounts count: '{}'".format(DEFAULT_ACCOUNTS_COUNT))
if not add_balance_to_main_test_account(base_test, nathan_id, database_api):
raise Exception("Balance to main test account is not credited")
lcc.log_info("Balance added to main test account ({}) successfully".format(base_test.accounts[0]))
if not make_all_default_accounts_echo_holders(base_test, nathan_id, database_api):
raise Exception("Default accounts did not become asset echo holders")
lcc.log_info("All default accounts became echo holders successfully")
|
py | 7df73de536f80abe5a1990dfb096a8e89486bb0b | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from mock import Mock
from nose.tools import (
assert_equal,
assert_count_equal,
assert_raises,
)
from pyfakefs import fake_filesystem_unittest
from streamalert.shared.config import (
_validate_config,
load_config,
parse_lambda_arn,
ConfigError,
)
from tests.unit.helpers.config import basic_streamalert_config
def get_mock_lambda_context(func_name, milliseconds=100):
"""Helper function to create a fake context object using Mock"""
arn = 'arn:aws:lambda:us-east-1:123456789012:function:{}:development'
context = Mock(
invoked_function_arn=(arn.format(func_name)),
function_name=func_name,
function_version='production',
get_remaining_time_in_millis=Mock(return_value=milliseconds)
)
return context
class TestConfigLoading(fake_filesystem_unittest.TestCase):
"""Test config loading logic with a mocked filesystem."""
# pylint: disable=protected-access
def setUp(self):
self.setUpPyfakefs()
config_data = basic_streamalert_config()
mock_cluster_contents = '{"data_sources": {}, "classifier_config": {"foo": "bar"}}'
# Add config files which should be loaded
self.fs.create_file('conf/clusters/prod.json', contents=mock_cluster_contents)
self.fs.create_file('conf/clusters/dev.json', contents=mock_cluster_contents)
self.fs.create_file('conf/global.json', contents='{}')
self.fs.create_file('conf/lambda.json', contents='{}')
self.fs.create_file('conf/logs.json', contents='{}')
self.fs.create_file('conf/outputs.json', contents='{}')
self.fs.create_file(
'conf/threat_intel.json',
contents=json.dumps(config_data['threat_intel'])
)
self.fs.create_file(
'conf/normalized_types.json',
contents=json.dumps(config_data['normalized_types'])
)
self.fs.create_file(
'conf/schemas/csv.json',
contents='{"csv_log2": {"schema": {"data": "string","uid": "integer"},"parser": "csv"}}'
)
# Create similar structure but with schemas folder instead of logs.json and 2 clusters.
self.fs.create_file('conf_schemas/clusters/prod.json', contents=mock_cluster_contents)
self.fs.create_file('conf_schemas/clusters/dev.json', contents=mock_cluster_contents)
self.fs.create_file('conf_schemas/global.json', contents='{}')
self.fs.create_file('conf_schemas/lambda.json', contents='{}')
self.fs.create_file('conf_schemas/outputs.json', contents='{}')
self.fs.create_file(
'conf_schemas/schemas/csv.json',
contents='{"csv_log": {"schema": {"data": "string","uid": "integer"},"parser": "csv"}}'
)
self.fs.create_file(
'conf_schemas/schemas/json.json',
contents='{"json_log": {"schema": {"name": "string"},"parser": "json"}}'
)
self.fs.create_file(
'conf_schemas/schemas/json_log_with_dots.json',
contents='{"json:log.with.dots": {"schema": {"name": "string"},"parser": "json"}}'
)
def test_load_invalid_file(self):
"""Shared - Config Loading - Bad JSON"""
self.fs.create_file('conf/clusters/bad.json', contents='test string')
assert_raises(ConfigError, load_config)
@staticmethod
def test_load_invalid_path():
"""Shared - Config Loading - Bad JSON"""
assert_raises(ConfigError, load_config, include={'foobar.json'})
@staticmethod
def test_load_all():
"""Shared - Config Loading - All"""
config = load_config()
expected_keys = {
'clusters',
'global',
'lambda',
'logs',
'outputs',
'threat_intel',
'normalized_types'
}
assert_equal(set(config), expected_keys)
@staticmethod
def test_load_exclude():
"""Shared - Config Loading - Exclude"""
config = load_config(exclude={'global.json', 'logs.json'})
expected_keys = {
'clusters',
'lambda',
'outputs',
'threat_intel',
'normalized_types'
}
assert_equal(set(config), expected_keys)
@staticmethod
def test_load_exclude_clusters():
"""Shared - Config Loading - Exclude Clusters"""
config = load_config(exclude={'clusters'})
expected_keys = {
'global',
'lambda',
'logs',
'outputs',
'threat_intel',
'normalized_types'
}
assert_equal(set(config), expected_keys)
@staticmethod
def test_load_exclude_schemas():
"""Shared - Config Loading - Exclude Clusters"""
config = load_config(conf_dir='conf_schemas', exclude={'schemas'})
expected_keys = {
'clusters',
'global',
'lambda',
'outputs',
}
assert_equal(set(config), expected_keys)
@staticmethod
def test_load_include():
"""Shared - Config Loading - Include"""
config = load_config(include={'clusters', 'logs.json'})
expected_keys = ['clusters', 'logs']
expected_clusters_keys = ['prod', 'dev']
assert_count_equal(list(config.keys()), expected_keys)
assert_count_equal(list(config['clusters'].keys()), expected_clusters_keys)
@staticmethod
def test_load_schemas():
"""Shared - Config Loading - Schemas"""
# Load from separate dir where logs.json doesn't exist
config = load_config(conf_dir='conf_schemas')
basic_config = basic_streamalert_config()
assert_equal(config['logs'], basic_config['logs'])
@staticmethod
def test_load_schemas_logs():
"""Shared - Config Loading - Schemas and Logs.json Exist"""
# Check if data was loaded from conf/logs.json or the schemas dir if both exist
config = load_config(conf_dir='conf')
# Logs.json is preferred over schemas for backwards compatibility.
assert_equal(config['logs'], {})
class TestConfigValidation:
"""Test config validation"""
# pylint: disable=no-self-use
def test_config_no_schema(self):
"""Shared - Config Validator - No Schema in Log"""
# Load a valid config
config = basic_streamalert_config()
# Remove the 'schema' keys from the config
config['logs']['json_log'].pop('schema')
config['logs']['csv_log'].pop('schema')
assert_raises(ConfigError, _validate_config, config)
def test_config_no_parsers(self):
"""Shared - Config Validator - No Parser in Log"""
# Load a valid config
config = basic_streamalert_config()
# Remove the 'parser' keys from the config
config['logs']['json_log'].pop('parser')
config['logs']['csv_log'].pop('parser')
assert_raises(ConfigError, _validate_config, config)
def test_config_no_logs_key(self):
"""Shared - Config Validator - No Logs Key in Source"""
# Load a valid config
config = basic_streamalert_config()
# Remove everything from the sources entry
config['clusters']['prod']['data_sources']['kinesis']['stream_1'] = {}
assert_raises(ConfigError, _validate_config, config)
def test_config_empty_logs_list(self):
"""Shared - Config Validator - Empty Logs List in Source"""
# Load a valid config
config = basic_streamalert_config()
# Set the logs key to an empty list
config['clusters']['prod']['data_sources']['kinesis']['stream_1'] = []
assert_raises(ConfigError, _validate_config, config)
def test_config_invalid_datasources(self):
"""Shared - Config Validator - Invalid Datasources"""
# Load a valid config
config = basic_streamalert_config()
# Set the sources value to contain an invalid data source ('sqs')
config['clusters']['prod']['data_sources'] = {'sqs': {'queue_1': {}}}
assert_raises(ConfigError, _validate_config, config)
def test_parse_lambda_arn(self):
"""Shared - Config - Parse Lambda ARN"""
func_name = 'corp-prefix_prod_streamalert_classifer'
context = get_mock_lambda_context(func_name)
env = parse_lambda_arn(context.invoked_function_arn)
assert_equal(env['region'], 'us-east-1')
assert_equal(env['account_id'], '123456789012')
assert_equal(env['function_name'], func_name)
assert_equal(env['qualifier'], 'development')
def test_missing_streamalert_module(self):
"""Shared - Config Validator, Missing streamalert Module"""
config = basic_streamalert_config()
del config['clusters']['prod']['classifier_config']
assert_raises(ConfigError, _validate_config, config)
def test_config_invalid_ioc_types(self):
"""Shared - Config Validator - IOC Types, Invalid"""
# Load a valid config
config = basic_streamalert_config()
        # Set an invalid IOC type ('foobar') in the threat intel config
config['threat_intel'] = {
'normalized_ioc_types': {'ip': ['foobar']}
}
config['normalized_types'] = {'log_type': {'sourceAddress': ['ip_address']}}
assert_raises(ConfigError, _validate_config, config)
def test_config_ioc_types_no_normalized_types(self):
"""Shared - Config Validator - IOC Types, Without Normalized Types"""
# Load a valid config
config = basic_streamalert_config()
        # Configure threat intel without a normalized_types section
config['threat_intel'] = {
'normalized_ioc_types': {'ip': ['foobar']}
}
if 'normalized_types' in config:
del config['normalized_types']
assert_raises(ConfigError, _validate_config, config)
def test_config_duplicate_sources(self):
"""Shared - Config Validator - Duplicate Data Sources in Cluster Configs"""
config = basic_streamalert_config()
config['clusters']['dev'] = config['clusters']['prod']
assert_raises(ConfigError, _validate_config, config)
|
py | 7df73e9ad49a40cd7617257a21e8776c0650bd18 | WSGI_APPLICATION = 'management.wsgi.application'
|
py | 7df73ef8b25b80b55395ca7fa29c755509079c70 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
try:
from external.nms import soft_nms
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import ctdet_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ctdet_post_process
from utils.debugger import Debugger
from .base_detector import BaseDetector
class CtdetDetector(BaseDetector):
def __init__(self, opt):
super(CtdetDetector, self).__init__(opt)
def process(self, images, return_time=False):
with torch.no_grad():
output = self.model(images)[-1]
hm = output['hm'].sigmoid_()
wh = output['wh']
reg = output['reg'] if self.opt.reg_offset else None
if self.opt.flip_test:
hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2
wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2
reg = reg[0:1] if reg is not None else None
torch.cuda.synchronize()
forward_time = time.time()
dets = ctdet_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy()
dets = dets.reshape(1, -1, dets.shape[2])
dets = ctdet_post_process(
dets.copy(), [meta['c']], [meta['s']],
meta['out_height'], meta['out_width'], self.opt.num_classes)
for j in range(1, self.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
dets[0][j][:, :4] /= scale
return dets[0]
def post_process_seg(self, seg, meta, scale=1):
seg = seg.detach().cpu().numpy()
# seg = ctdet_post_process_seg(seg.copy(), [meta['c']], [meta['s']], meta['out_height'], meta['out_width'], self.opt.num_classes)
return seg
def merge_outputs(self, detections):
results = {}
for j in range(1, self.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
if len(self.scales) > 1 or self.opt.nms:
soft_nms(results[j], Nt=0.5, method=2)
scores = np.hstack(
[results[j][:, 4] for j in range(1, self.num_classes + 1)])
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.num_classes + 1):
keep_inds = (results[j][:, 4] >= thresh)
results[j] = results[j][keep_inds]
return results
def debug(self, debugger, images, dets, output, scale=1):
detection = dets.detach().cpu().numpy().copy()
detection[:, :, :4] *= self.opt.down_ratio
for i in range(1):
img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale))
debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale))
for k in range(len(dets[i])):
if detection[i, k, 4] > self.opt.center_thresh:
debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
detection[i, k, 4],
img_id='out_pred_{:.1f}'.format(scale))
def show_results(self, debugger, image, results):
debugger.add_img(image, img_id='ctdet')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
debugger.show_all_imgs(pause=self.pause)
|
py | 7df73fadf2b561b0364d00eb7dc7047f10c5528b | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.consump import consump
def test_consump():
"""Test module consump.py by downloading
consump.csv and testing shape of
extracted data has 37 rows and 24 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = consump(test_path)
try:
assert x_train.shape == (37, 24)
except:
shutil.rmtree(test_path)
    raise
|
py | 7df73ff302211e97438d75cb9e121fe757bd2f24 | #Part 1: Terminology (15 points)
#1 1pt) What is the symbol "=" used for?
#The symbol "=" is an assignment opperator and it is use for defining the value of the function.
#
# +1 Assignment operator is clearly explained.
#2 3pts) Write a technical definition for 'function'
#A technical definition of 'function' is like a variable that the value has been put in already.
#
# -3 Calculation wasn't explained well.
#3 1pt) What does the keyword "return" do?
#The word "return" sets a variable to be a value from a function.
#
# +1 The definition of 'return' was clearly explained.
#4 5pts) We know 5 basic data types. Write the name for each one and provide two
# examples of each below
# 1:integer
# 2:float
# 3:string
# 4:boolean
# 5:len
# +2 Len isn't a data type, and no examples were given for any data type.
#5 2pts) What is the difference between a "function definition" and a
# "function call"?
#a "function definition" is a when we write "def" to define a function, or like to put a value in a variable, and "function call" is when you're telling the program to execute that function.
#
# +2 The definition is clearly explained.
#
#6 3pts) What are the 3 phases that every computer program has? What happens in
# each of them
# 1:input
# 2:output
# 3:process
# +1.5 No explanation of the definition.
#Part 2: Programming (25 points)
#Write a program that asks the user for the areas of 3 circles.
#It should then calculate the diameter of each and the sum of the diameters
#of the 3 circles.
#Finally, it should produce output like this:
#1 pt for header line
#3 pt for correct formula
#1 pt for return value
#1 pt for parameter name
#1 pt for function name
import math
def diameter(x):
return 2*(math.sqrt(x/math.pi))
def all_diameter(dia1,dia2,dia3):
return dia1+dia2+dia3
#1pt for header line
#1pt for parameter names
#1pt for return value
#1pt for correct output format
#3pt for correct use of format function
def output(dia1,dia2,dia3,total):
out="""
Circle Diameter
c1 {}
c2 {}
c3 {}
Totals {}
""".format(dia1,dia2,dia3,total)
return out
def main():
C1=raw_input("Area of cr1: ")
C2=raw_input("Area of cr2: ")
C3=raw_input("Area of cr3: ")
dia1=diameter(float(C1))
dia2=diameter(float(C2))
dia3=diameter(float(C3))
total=all_diameter(dia1,dia2,dia3)
out=output(dia1,dia2,dia3,total)
print out
main ()
#1pt header line
#0pt getting input
#0pt converting input
#0pt for calling output function
#2pt for correct diameter formula
#1pt for variable names
#1pt for calling main
#0pt explanatory comments
#1pt code format
#Circle Diameter
#c1 ...
#c2 ...
#c3 ...
#TOTALS ...
# Hint: Radius is the square root of the area divided by pi
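# Quick numeric check of the hint: an area of ~78.54 gives
# r = sqrt(78.54 / pi) ~= 5.0, so diameter(78.54) returns 2 * 5.0 = 10.0,
# and all_diameter() simply sums the three diameters for the Totals row.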
|
py | 7df7409cc64f5fe2b5a1969f00573f4b56570133 | """Methods to use for the communication with gitlab."""
from tornado.options import options
from gitlab import Gitlab
def get_commit_data(project_id, git_hash):
"""
Get commit information from the git hash.
Parameters
----------
project_id : int
Id of project in gitlab.
git_hash : str
Githash of commit to look up.
Returns
-------
commit object
requested commit
"""
if git_hash.endswith("-dirty"):
git_hash = git_hash.rstrip("-dirty")
client = Gitlab(options.gitlab_url, options.gitlab_private_token)
commit = client.projects.get(project_id).commits.get(git_hash)
return commit
def get_user_access_level(user_mail):
"""
From email, find if user is either in the authenticated group or in the authenticated project.
Parameters
----------
    user_mail : str
Email to search for.
Returns
-------
int
integer corresponding to gitlab access level (0: no, < 15: read, > 15: write, > 45: delete)
"""
client = Gitlab(options.gitlab_url, options.gitlab_private_token)
group_id = "integer"
group_users = client.groups.get(group_id).members.list(query=user_mail)
project_users = client.projects.get(
options.gitlab_project_ids["scip"]).members.list(query=user_mail)
# x.username -> user name in gitlab
# x.access_level -> user access level in gitlab
if ((len(group_users) > 1 or len(project_users) > 1) or (
len(group_users) == 0 and len(project_users) == 0)):
return 0
if len(group_users + project_users) == 2:
group_id = group_users[0].id
project_id = project_users[0].id
if not group_id == project_id:
return 0
access_level = group_users[0].access_level
project_access_level = project_users[0].access_level
access_level = max(access_level, project_access_level)
else:
if len(group_users) == 1:
access_level = group_users[0].access_level
else: # if len(project_users) = 1:
access_level = project_users[0].access_level
return access_level
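# A minimal sketch (hypothetical helper, not part of python-gitlab) showing how
# the thresholds documented above could be mapped to a coarse permission label;
# the handling of the exact boundary values is an assumption.
def describe_access_level(level):
    """Map a gitlab access level integer to the coarse permission used here."""
    if level <= 0:
        return "none"
    if level < 15:
        return "read"
    if level <= 45:
        return "write"
    return "delete"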
def get_username(query_string):
"""
Get the gitlab/internal username from a search term (full name, email, etc).
Parameters
----------
query_string : str
String to search for.
Returns
-------
str
username
"""
client = Gitlab(options.gitlab_url, options.gitlab_private_token)
# here gitlab needs the "search" keyword, "query" will not work
authors = client.users.list(search=query_string)
if len(authors) < 1:
return query_string
elif len(authors) > 1:
return authors[0].username
return authors[0].username
|
py | 7df7429d2146617d719c1e8e33ada5779a33f620 | import pandas as pd
import boto3
import json
import configparser
import io
import os
import time
#get AWS parameter
config = configparser.ConfigParser()
config.read_file(open('dwh.cfg'))
KEY = config.get('AWS','KEY')
SECRET = config.get('AWS','SECRET')
REGION = config.get('AWS','REGION')
DWH_CLUSTER_IDENTIFIER = config.get("CLUSTER_CONFIG","DWH_CLUSTER_IDENTIFIER")
DWH_CLUSTER_TYPE = config.get("CLUSTER_CONFIG","DWH_CLUSTER_TYPE")
DWH_NUM_NODES = config.get("CLUSTER_CONFIG","DWH_NUM_NODES")
DWH_NODE_TYPE = config.get("CLUSTER_CONFIG","DWH_NODE_TYPE")
DWH_DB = config.get("CLUSTER","DB_NAME")
DWH_DB_USER = config.get("CLUSTER","DB_USER")
DWH_DB_PASSWORD = config.get("CLUSTER","DB_PASSWORD")
DWH_PORT = config.get("CLUSTER","DB_PORT")
DWH_IAM_ROLE_NAME = config.get("IAM_ROLE", "DWH_IAM_ROLE_NAME")
#create clients
ec2 = boto3.resource('ec2',region_name=REGION,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
s3 = boto3.resource('s3',region_name=REGION,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
iam = boto3.client('iam',region_name=REGION,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
redshift = boto3.client('redshift',region_name=REGION,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
#Create Iam ROLE &attach policy
try:
dwhRole =iam.create_role(Description = "Allows Redshift clusters to call AWS services on your behalf.",
RoleName=DWH_IAM_ROLE_NAME,
AssumeRolePolicyDocument=json.dumps(
{'Statement': [{'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {'Service': 'redshift.amazonaws.com'}}],
'Version': '2012-10-17'})
)
except Exception as e:
print(e)
try:
iam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,
PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")['ResponseMetadata']['HTTPStatusCode']
except Exception as e:
print(e)
try:
roleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']
except Exception as e:
print(e)
#Create Redshift cluster
try:
response = redshift.create_cluster(
# HW
ClusterType=DWH_CLUSTER_TYPE ,
NodeType=DWH_NODE_TYPE,
NumberOfNodes=int(DWH_NUM_NODES),
#identifiers & credentials
ClusterIdentifier=DWH_CLUSTER_IDENTIFIER ,
DBName=DWH_DB,
MasterUsername=DWH_DB_USER,
MasterUserPassword=DWH_DB_PASSWORD,
#parameter
IamRoles=[roleArn]
)
except Exception as e:
print(e)
#wait till cluster is available
status=redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]['ClusterStatus']
while status !="available" :
time.sleep(10)
status=redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]['ClusterStatus']
#get endpoint,ARN
DWH_ENDPOINT = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]['Endpoint']['Address']
DWH_ROLE_ARN = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]['IamRoles'][0]['IamRoleArn']
#open an incoming TCP port to access the cluster endpoint
try:
vpc = ec2.Vpc(id=redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]['VpcId'])
defaultSg = list(vpc.security_groups.all())[1]
defaultSg.authorize_ingress(
GroupName= defaultSg.group_name,
CidrIp='0.0.0.0/0',
IpProtocol='TCP',
FromPort=int(DWH_PORT),
ToPort=int(DWH_PORT)
)
except Exception as e:
print(e)
#write missing elements
config = configparser.ConfigParser()
config.read_file(open('dwh.cfg'))
config['CLUSTER']['HOST']=DWH_ENDPOINT
config['IAM_ROLE']['ARN']=DWH_ROLE_ARN
config.write(open('dwh.cfg','w'))
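#optional sanity check (a sketch; assumes psycopg2 is installed, which the rest of this script does not require)
try:
    import psycopg2
    conn = psycopg2.connect(host=DWH_ENDPOINT, dbname=DWH_DB, user=DWH_DB_USER,
                            password=DWH_DB_PASSWORD, port=DWH_PORT)
    conn.close()
    print('Cluster endpoint {} accepts connections'.format(DWH_ENDPOINT))
except Exception as e:
    print(e)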
|
py | 7df742cfc68a5216bb5f47fd3d2d15c74f7840b4 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import copy
import threading
import time
import numpy as np
import tensorlayer as tl
import scipy
import scipy.ndimage as ndi
from scipy import linalg
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
import skimage
from skimage import exposure
from skimage import transform
from skimage.morphology import disk
from skimage.morphology import erosion as _erosion
from skimage.morphology import binary_dilation as _binary_dilation
from skimage.morphology import binary_erosion as _binary_erosion
from six.moves import range
import PIL
# linalg https://docs.scipy.org/doc/scipy/reference/linalg.html
# ndimage https://docs.scipy.org/doc/scipy/reference/ndimage.html
__all__ = [
'threading_data',
'rotation',
'rotation_multi',
'crop',
'crop_multi',
'flip_axis',
'flip_axis_multi',
'shift',
'shift_multi',
'shear',
'shear_multi',
'shear2',
'shear_multi2',
'swirl',
'swirl_multi',
'elastic_transform',
'elastic_transform_multi',
'zoom',
'zoom_multi',
'brightness',
'brightness_multi',
'illumination',
'rgb_to_hsv',
'hsv_to_rgb',
'adjust_hue',
'imresize',
'pixel_value_scale',
'samplewise_norm',
'featurewise_norm',
'get_zca_whitening_principal_components_img',
'zca_whitening',
'channel_shift',
'channel_shift_multi',
'drop',
'transform_matrix_offset_center',
'apply_transform',
'projective_transform_by_points',
'array_to_img',
'find_contours',
'pt2map',
'binary_dilation',
'dilation',
'binary_erosion',
'erosion',
'obj_box_coords_rescale',
'obj_box_coord_rescale',
'obj_box_coord_scale_to_pixelunit',
'obj_box_coord_centroid_to_upleft_butright',
'obj_box_coord_upleft_butright_to_centroid',
'obj_box_coord_centroid_to_upleft',
'obj_box_coord_upleft_to_centroid',
'parse_darknet_ann_str_to_list',
'parse_darknet_ann_list_to_cls_box',
'obj_box_left_right_flip',
'obj_box_imresize',
'obj_box_crop',
'obj_box_shift',
'obj_box_zoom',
'pad_sequences',
'remove_pad_sequences',
'process_sequences',
'sequences_add_start_id',
'sequences_add_end_id',
'sequences_add_end_id_after_pad',
'sequences_get_mask',
]
def threading_data(data=None, fn=None, thread_count=None, **kwargs):
"""Process a batch of data by given function by threading.
Usually be used for data augmentation.
Parameters
-----------
data : numpy.array or others
The data to be processed.
thread_count : int
The number of threads to use.
fn : function
The function for data processing.
more args : the args for `fn`
        See Examples below.
Examples
--------
Process images.
>>> images, _, _, _ = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))
>>> images = tl.prepro.threading_data(images[0:32], tl.prepro.zoom, zoom_range=[0.5, 1])
Customized image preprocessing function.
>>> def distort_img(x):
>>> x = tl.prepro.flip_axis(x, axis=0, is_random=True)
>>> x = tl.prepro.flip_axis(x, axis=1, is_random=True)
>>> x = tl.prepro.crop(x, 100, 100, is_random=True)
>>> return x
>>> images = tl.prepro.threading_data(images, distort_img)
Process images and masks together (Usually be used for image segmentation).
>>> X, Y --> [batch_size, row, col, 1]
>>> data = tl.prepro.threading_data([_ for _ in zip(X, Y)], tl.prepro.zoom_multi, zoom_range=[0.5, 1], is_random=True)
data --> [batch_size, 2, row, col, 1]
>>> X_, Y_ = data.transpose((1,0,2,3,4))
X_, Y_ --> [batch_size, row, col, 1]
>>> tl.vis.save_image(X_, 'images.png')
>>> tl.vis.save_image(Y_, 'masks.png')
Process images and masks together by using ``thread_count``.
>>> X, Y --> [batch_size, row, col, 1]
>>> data = tl.prepro.threading_data(X, tl.prepro.zoom_multi, 8, zoom_range=[0.5, 1], is_random=True)
data --> [batch_size, 2, row, col, 1]
>>> X_, Y_ = data.transpose((1,0,2,3,4))
X_, Y_ --> [batch_size, row, col, 1]
>>> tl.vis.save_image(X_, 'after.png')
>>> tl.vis.save_image(Y_, 'before.png')
Customized function for processing images and masks together.
>>> def distort_img(data):
>>> x, y = data
>>> x, y = tl.prepro.flip_axis_multi([x, y], axis=0, is_random=True)
>>> x, y = tl.prepro.flip_axis_multi([x, y], axis=1, is_random=True)
>>> x, y = tl.prepro.crop_multi([x, y], 100, 100, is_random=True)
>>> return x, y
>>> X, Y --> [batch_size, row, col, channel]
>>> data = tl.prepro.threading_data([_ for _ in zip(X, Y)], distort_img)
>>> X_, Y_ = data.transpose((1,0,2,3,4))
Returns
-------
    list or numpy.array
The processed results.
References
----------
- `python queue <https://pymotw.com/2/Queue/index.html#module-Queue>`__
- `run with limited queue <http://effbot.org/librarybook/queue.htm>`__
"""
def apply_fn(results, i, data, kwargs):
results[i] = fn(data, **kwargs)
if thread_count is None:
results = [None] * len(data)
threads = []
# for i in range(len(data)):
# t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, data[i], kwargs))
for i, d in enumerate(data):
t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, d, kwargs))
t.start()
threads.append(t)
else:
divs = np.linspace(0, len(data), thread_count + 1)
divs = np.round(divs).astype(int)
results = [None] * thread_count
threads = []
for i in range(thread_count):
t = threading.Thread(
name='threading_and_return', target=apply_fn, args=(results, i, data[divs[i]:divs[i + 1]], kwargs)
)
t.start()
threads.append(t)
for t in threads:
t.join()
if thread_count is None:
try:
return np.asarray(results)
except Exception:
return results
else:
return np.concatenate(results)
def rotation(
x, rg=20, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1
):
"""Rotate an image randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
rg : int or float
Degree to rotate, usually 0 ~ 180.
is_random : boolean
If True, randomly rotate. Default is False
row_index col_index and channel_index : int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode=`constant`. Default is 0.0
order : int
The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
Examples
---------
>>> x --> [row, col, 1]
>>> x = tl.prepro.rotation(x, rg=40, is_random=False)
>>> tl.vis.save_image(x, 'im.png')
"""
if is_random:
theta = np.pi / 180 * np.random.uniform(-rg, rg)
else:
theta = np.pi / 180 * rg
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x
def rotation_multi(
x, rg=20, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1
):
"""Rotate multiple images with the same arguments, randomly or non-randomly.
Usually be used for image segmentation which x=[X, Y], X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.rotation``.
Returns
-------
numpy.array
A list of processed images.
Examples
--------
>>> x, y --> [row, col, 1] greyscale
>>> x, y = tl.prepro.rotation_multi([x, y], rg=90, is_random=False)
"""
if is_random:
theta = np.pi / 180 * np.random.uniform(-rg, rg)
else:
theta = np.pi / 180 * rg
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
h, w = x[0].shape[row_index], x[0].shape[col_index]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
results = []
for data in x:
results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results)
# crop
def crop(x, wrg, hrg, is_random=False, row_index=0, col_index=1):
"""Randomly or centrally crop an image.
Parameters
----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
wrg : int
Size of width.
hrg : int
Size of height.
is_random : boolean,
If True, randomly crop, else central crop. Default is False.
row_index: int
index of row.
col_index: int
index of column.
Returns
-------
numpy.array
A processed image.
"""
h, w = x.shape[row_index], x.shape[col_index]
if (h <= hrg) or (w <= wrg):
raise AssertionError("The size of cropping should smaller than the original image")
if is_random:
h_offset = int(np.random.uniform(0, h - hrg) - 1)
w_offset = int(np.random.uniform(0, w - wrg) - 1)
# tl.logging.info(h_offset, w_offset, x[h_offset: hrg+h_offset ,w_offset: wrg+w_offset].shape)
return x[h_offset:hrg + h_offset, w_offset:wrg + w_offset]
else: # central crop
h_offset = int(np.floor((h - hrg) / 2.))
w_offset = int(np.floor((w - wrg) / 2.))
h_end = h_offset + hrg
w_end = w_offset + wrg
return x[h_offset:h_end, w_offset:w_end]
# old implementation
# h_offset = (h - hrg)/2
# w_offset = (w - wrg)/2
# tl.logging.info(x[h_offset: h-h_offset ,w_offset: w-w_offset].shape)
# return x[h_offset: h-h_offset ,w_offset: w-w_offset]
# central crop
def crop_multi(x, wrg, hrg, is_random=False, row_index=0, col_index=1):
"""Randomly or centrally crop multiple images.
Parameters
----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.crop``.
Returns
-------
numpy.array
A list of processed images.
"""
h, w = x[0].shape[row_index], x[0].shape[col_index]
if (h <= hrg) or (w <= wrg):
raise AssertionError("The size of cropping should smaller than the original image")
if is_random:
h_offset = int(np.random.uniform(0, h - hrg) - 1)
w_offset = int(np.random.uniform(0, w - wrg) - 1)
results = []
for data in x:
results.append(data[h_offset:hrg + h_offset, w_offset:wrg + w_offset])
return np.asarray(results)
else:
# central crop
        h_offset = int(np.floor((h - hrg) / 2.))
        w_offset = int(np.floor((w - wrg) / 2.))
        results = []
        for data in x:
            results.append(data[h_offset:h_offset + hrg, w_offset:w_offset + wrg])
return np.asarray(results)
# flip
def flip_axis(x, axis=1, is_random=False):
"""Flip the axis of an image, such as flip left and right, up and down, randomly or non-randomly,
Parameters
----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
axis : int
Which axis to flip.
- 0, flip up and down
- 1, flip left and right
- 2, flip channel
is_random : boolean
If True, randomly flip. Default is False.
Returns
-------
numpy.array
A processed image.
"""
if is_random:
factor = np.random.uniform(-1, 1)
if factor > 0:
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
else:
return x
else:
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def flip_axis_multi(x, axis, is_random=False):
"""Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly,
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.flip_axis``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random:
factor = np.random.uniform(-1, 1)
if factor > 0:
# x = np.asarray(x).swapaxes(axis, 0)
# x = x[::-1, ...]
# x = x.swapaxes(0, axis)
# return x
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::-1, ...]
data = data.swapaxes(0, axis)
results.append(data)
return np.asarray(results)
else:
return np.asarray(x)
else:
# x = np.asarray(x).swapaxes(axis, 0)
# x = x[::-1, ...]
# x = x.swapaxes(0, axis)
# return x
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::-1, ...]
data = data.swapaxes(0, axis)
results.append(data)
return np.asarray(results)
# shift
def shift(
x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shift an image randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
wrg : float
Percentage of shift in axis x, usually -0.25 ~ 0.25.
hrg : float
Percentage of shift in axis y, usually -0.25 ~ 0.25.
is_random : boolean
If True, randomly shift. Default is False.
row_index col_index and channel_index : int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.
order : int
The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
"""
h, w = x.shape[row_index], x.shape[col_index]
if is_random:
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
else:
tx, ty = hrg * h, wrg * w
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x
def shift_multi(
x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shift images with the same arguments, randomly or non-randomly.
Usually be used for image segmentation which x=[X, Y], X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shift``.
Returns
-------
numpy.array
A list of processed images.
"""
h, w = x[0].shape[row_index], x[0].shape[col_index]
if is_random:
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
else:
tx, ty = hrg * h, wrg * w
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
results = []
for data in x:
results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results)
# shear
def shear(
x, intensity=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shear an image randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
intensity : float
Percentage of shear, usually -0.5 ~ 0.5 (is_random==True), 0 ~ 0.5 (is_random==False),
you can have a quick try by shear(X, 1).
is_random : boolean
If True, randomly shear. Default is False.
row_index col_index and channel_index : int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.
order : int
The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
References
-----------
- `Affine transformation <https://uk.mathworks.com/discovery/affine-transformation.html>`__
"""
if is_random:
shear = np.random.uniform(-intensity, intensity)
else:
shear = intensity
shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x
def shear_multi(
x, intensity=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shear images with the same arguments, randomly or non-randomly.
Usually be used for image segmentation which x=[X, Y], X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shear``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random:
shear = np.random.uniform(-intensity, intensity)
else:
shear = intensity
shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]])
h, w = x[0].shape[row_index], x[0].shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
results = []
for data in x:
results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results)
def shear2(
x, shear=(0.1, 0.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shear an image randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
shear : tuple of two floats
Percentage of shear for height and width direction (0, 1).
is_random : boolean
If True, randomly shear. Default is False.
row_index col_index and channel_index : int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.
order : int
The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
References
-----------
- `Affine transformation <https://uk.mathworks.com/discovery/affine-transformation.html>`__
"""
if len(shear) != 2:
raise AssertionError(
"shear should be tuple of 2 floats, or you want to use tl.prepro.shear rather than tl.prepro.shear2 ?"
)
if is_random:
        # build a new tuple instead of mutating in place: the default argument is an immutable tuple
        shear = (np.random.uniform(-shear[0], shear[0]), np.random.uniform(-shear[1], shear[1]))
shear_matrix = np.array([[1, shear[0], 0], [shear[1], 1, 0], [0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x
def shear_multi2(
x, shear=(0.1, 0.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shear images with the same arguments, randomly or non-randomly.
Usually be used for image segmentation which x=[X, Y], X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shear2``.
Returns
-------
numpy.array
A list of processed images.
"""
if len(shear) != 2:
raise AssertionError(
"shear should be tuple of 2 floats, or you want to use tl.prepro.shear_multi rather than tl.prepro.shear_multi2 ?"
)
if is_random:
        # build a new tuple instead of mutating in place: the default argument is an immutable tuple
        shear = (np.random.uniform(-shear[0], shear[0]), np.random.uniform(-shear[1], shear[1]))
shear_matrix = np.array([[1, shear[0], 0], [shear[1], 1, 0], [0, 0, 1]])
h, w = x[0].shape[row_index], x[0].shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
results = []
for data in x:
results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results)
# swirl
def swirl(
x, center=None, strength=1, radius=100, rotation=0, output_shape=None, order=1, mode='constant', cval=0,
clip=True, preserve_range=False, is_random=False
):
"""Swirl an image randomly or non-randomly, see `scikit-image swirl API <http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.swirl>`__
and `example <http://scikit-image.org/docs/dev/auto_examples/plot_swirl.html>`__.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
center : tuple or 2 int or None
Center coordinate of transformation (optional).
strength : float
The amount of swirling applied.
radius : float
The extent of the swirl in pixels. The effect dies out rapidly beyond radius.
rotation : float
Additional rotation applied to the image, usually [0, 360], relates to center.
output_shape : tuple of 2 int or None
Shape of the output image generated (height, width). By default the shape of the input image is preserved.
order : int, optional
The order of the spline interpolation, default is 1. The order has to be in the range 0-5. See skimage.transform.warp for detail.
mode : str
One of `constant` (default), `edge`, `symmetric` `reflect` and `wrap`.
Points outside the boundaries of the input are filled according to the given mode, with `constant` used as the default. Modes match the behaviour of numpy.pad.
cval : float
Used in conjunction with mode `constant`, the value outside the image boundaries.
clip : boolean
Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range.
preserve_range : boolean
Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float.
is_random : boolean,
If True, random swirl. Default is False.
- random center = [(0 ~ x.shape[0]), (0 ~ x.shape[1])]
- random strength = [0, strength]
- random radius = [1e-10, radius]
- random rotation = [-rotation, rotation]
Returns
-------
numpy.array
A processed image.
Examples
---------
>>> x --> [row, col, 1] greyscale
>>> x = tl.prepro.swirl(x, strength=4, radius=100)
"""
if radius == 0:
raise AssertionError("Invalid radius value")
rotation = np.pi / 180 * rotation
if is_random:
center_h = int(np.random.uniform(0, x.shape[0]))
center_w = int(np.random.uniform(0, x.shape[1]))
center = (center_h, center_w)
strength = np.random.uniform(0, strength)
radius = np.random.uniform(1e-10, radius)
rotation = np.random.uniform(-rotation, rotation)
max_v = np.max(x)
if max_v > 1: # Note: the input of this fn should be [-1, 1], rescale is required.
x = x / max_v
swirled = skimage.transform.swirl(
x, center=center, strength=strength, radius=radius, rotation=rotation, output_shape=output_shape, order=order,
mode=mode, cval=cval, clip=clip, preserve_range=preserve_range
)
if max_v > 1:
swirled = swirled * max_v
return swirled
def swirl_multi(
x, center=None, strength=1, radius=100, rotation=0, output_shape=None, order=1, mode='constant', cval=0,
clip=True, preserve_range=False, is_random=False
):
"""Swirl multiple images with the same arguments, randomly or non-randomly.
Usually be used for image segmentation which x=[X, Y], X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.swirl``.
Returns
-------
numpy.array
A list of processed images.
"""
if radius == 0:
raise AssertionError("Invalid radius value")
rotation = np.pi / 180 * rotation
if is_random:
center_h = int(np.random.uniform(0, x[0].shape[0]))
center_w = int(np.random.uniform(0, x[0].shape[1]))
center = (center_h, center_w)
strength = np.random.uniform(0, strength)
radius = np.random.uniform(1e-10, radius)
rotation = np.random.uniform(-rotation, rotation)
results = []
for data in x:
max_v = np.max(data)
if max_v > 1: # Note: the input of this fn should be [-1, 1], rescale is required.
data = data / max_v
swirled = skimage.transform.swirl(
data, center=center, strength=strength, radius=radius, rotation=rotation, output_shape=output_shape,
order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range
)
if max_v > 1:
swirled = swirled * max_v
results.append(swirled)
return np.asarray(results)
# elastic_transform
def elastic_transform(x, alpha, sigma, mode="constant", cval=0, is_random=False):
"""Elastic transformation for image as described in `[Simard2003] <http://deeplearning.cs.cmu.edu/pdfs/Simard.pdf>`__.
Parameters
-----------
x : numpy.array
A greyscale image.
alpha : float
Alpha value for elastic transformation.
sigma : float or sequence of float
The smaller the sigma, the more transformation. Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.
mode : str
See `scipy.ndimage.filters.gaussian_filter <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.filters.gaussian_filter.html>`__. Default is `constant`.
cval : float,
Used in conjunction with `mode` of `constant`, the value outside the image boundaries.
is_random : boolean
Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
>>> x = tl.prepro.elastic_transform(x, alpha=x.shape[1]*3, sigma=x.shape[1]*0.07)
References
------------
- `Github <https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a>`__.
- `Kaggle <https://www.kaggle.com/pscion/ultrasound-nerve-segmentation/elastic-transform-for-data-augmentation-0878921a>`__
"""
if is_random is False:
random_state = np.random.RandomState(None)
else:
random_state = np.random.RandomState(int(time.time()))
#
is_3d = False
if len(x.shape) == 3 and x.shape[-1] == 1:
x = x[:, :, 0]
is_3d = True
elif len(x.shape) == 3 and x.shape[-1] != 1:
raise Exception("Only support greyscale image")
if len(x.shape) != 2:
raise AssertionError("input should be grey-scale image")
shape = x.shape
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=mode, cval=cval) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=mode, cval=cval) * alpha
x_, y_ = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x_ + dx, (-1, 1)), np.reshape(y_ + dy, (-1, 1))
if is_3d:
return map_coordinates(x, indices, order=1).reshape((shape[0], shape[1], 1))
else:
return map_coordinates(x, indices, order=1).reshape(shape)
def elastic_transform_multi(x, alpha, sigma, mode="constant", cval=0, is_random=False):
"""Elastic transformation for images as described in `[Simard2003] <http://deeplearning.cs.cmu.edu/pdfs/Simard.pdf>`__.
Parameters
-----------
x : list of numpy.array
List of greyscale images.
others : args
See ``tl.prepro.elastic_transform``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random is False:
random_state = np.random.RandomState(None)
else:
random_state = np.random.RandomState(int(time.time()))
shape = x[0].shape
if len(shape) == 3:
shape = (shape[0], shape[1])
new_shape = random_state.rand(*shape)
results = []
for data in x:
is_3d = False
if len(data.shape) == 3 and data.shape[-1] == 1:
data = data[:, :, 0]
is_3d = True
elif len(data.shape) == 3 and data.shape[-1] != 1:
raise Exception("Only support greyscale image")
if len(data.shape) != 2:
raise AssertionError("input should be grey-scale image")
dx = gaussian_filter((new_shape * 2 - 1), sigma, mode=mode, cval=cval) * alpha
dy = gaussian_filter((new_shape * 2 - 1), sigma, mode=mode, cval=cval) * alpha
x_, y_ = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x_ + dx, (-1, 1)), np.reshape(y_ + dy, (-1, 1))
# tl.logging.info(data.shape)
if is_3d:
results.append(map_coordinates(data, indices, order=1).reshape((shape[0], shape[1], 1)))
else:
results.append(map_coordinates(data, indices, order=1).reshape(shape))
return np.asarray(results)
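# A minimal usage sketch for elastic_transform_multi, assuming `x_im` and `x_mask`
# (hypothetical names) are matched greyscale arrays of shape (H, W) or (H, W, 1):
# applying the transform to the pair keeps image and segmentation mask aligned,
# because both share the same random displacement field.
# x_im, x_mask = elastic_transform_multi(
#     [x_im, x_mask], alpha=x_im.shape[1] * 3, sigma=x_im.shape[1] * 0.07)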
# zoom
def zoom(
x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
cval=0., order=1
):
"""Zoom in and out of a single image, randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
zoom_range : list or tuple
Zoom range for height and width.
        - If is_random=False, (h, w) are the fixed zoom factors for the row and column axes; a factor smaller than one zooms in.
        - If is_random=True, (h, w) give the (min, max) range from which different random zoom factors are drawn for x and y, e.g. (0.5, 1) zooms in by 1~2 times.
is_random : boolean
If True, randomly zoom. Default is False.
row_index col_index and channel_index : int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.
order : int
The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
"""
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. ' 'Received arg: ', zoom_range)
if is_random:
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
tl.logging.info(" random_zoom : not zoom in/out")
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
else:
zx, zy = zoom_range
# tl.logging.info(zx, zy)
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x
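# A minimal usage sketch for zoom, assuming `x` is an array of shape [row, col, channel]:
# a fixed zoom uses the (h, w) factors directly, while is_random=True samples the
# factors from the given range on every call.
# x_fixed = zoom(x, zoom_range=(0.8, 0.8), is_random=False)  # fixed factor of 0.8
# x_rand = zoom(x, zoom_range=(0.9, 1.1), is_random=True)    # random factor per call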
def zoom_multi(
x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
cval=0., order=1
):
"""Zoom in and out of images with the same arguments, randomly or non-randomly.
    Usually used for image segmentation where x = [X, Y], and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.zoom``.
Returns
-------
numpy.array
A list of processed images.
"""
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. ' 'Received arg: ', zoom_range)
if is_random:
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
tl.logging.info(" random_zoom : not zoom in/out")
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
else:
zx, zy = zoom_range
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
h, w = x[0].shape[row_index], x[0].shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
# x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
# return x
results = []
for data in x:
results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results)
# image = tf.image.random_brightness(image, max_delta=32. / 255.)
# image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
# image = tf.image.random_hue(image, max_delta=0.032)
# image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
def brightness(x, gamma=1, gain=1, is_random=False):
"""Change the brightness of a single image, randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
gamma : float
Non negative real number. Default value is 1.
        - Smaller than 1 means brighter.
        - If `is_random` is True, gamma is drawn from the range (1-gamma, 1+gamma).
gain : float
The constant multiplier. Default value is 1.
is_random : boolean
If True, randomly change brightness. Default is False.
Returns
-------
numpy.array
A processed image.
References
-----------
- `skimage.exposure.adjust_gamma <http://scikit-image.org/docs/dev/api/skimage.exposure.html>`__
- `chinese blog <http://www.cnblogs.com/denny402/p/5124402.html>`__
"""
if is_random:
gamma = np.random.uniform(1 - gamma, 1 + gamma)
x = exposure.adjust_gamma(x, gamma, gain)
return x
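# A minimal usage sketch for brightness, assuming `x` is an image array:
# gamma < 1 brightens, gamma > 1 darkens; with is_random=True the gamma is drawn
# from (1 - gamma, 1 + gamma).
# x_bright = brightness(x, gamma=0.5, gain=1, is_random=False)
# x_rand = brightness(x, gamma=0.5, gain=1, is_random=True)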
def brightness_multi(x, gamma=1, gain=1, is_random=False):
    """Change the brightness of multiple images, randomly or non-randomly.
    Usually used for image segmentation where x = [X, Y], and X and Y should be matched.
    Parameters
    -----------
    x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.brightness``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random:
gamma = np.random.uniform(1 - gamma, 1 + gamma)
results = []
for data in x:
results.append(exposure.adjust_gamma(data, gamma, gain))
return np.asarray(results)
def illumination(x, gamma=1., contrast=1., saturation=1., is_random=False):
"""Perform illumination augmentation for a single image, randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
gamma : float
Change brightness (the same with ``tl.prepro.brightness``)
        - if is_random=False, one float; smaller than one means brighter, greater than one means darker.
        - if is_random=True, tuple of two floats, (min, max).
    contrast : float
        Change contrast.
        - if is_random=False, one float; smaller than one means lower contrast.
        - if is_random=True, tuple of two floats, (min, max).
    saturation : float
        Change saturation.
        - if is_random=False, one float; smaller than one means less saturated.
- if is_random=True, tuple of two float numbers, (min, max).
is_random : boolean
If True, randomly change illumination. Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
Random
>>> x = tl.prepro.illumination(x, gamma=(0.5, 5.0), contrast=(0.3, 1.0), saturation=(0.7, 1.0), is_random=True)
Non-random
>>> x = tl.prepro.illumination(x, 0.5, 0.6, 0.8, is_random=False)
"""
if is_random:
if not (len(gamma) == len(contrast) == len(saturation) == 2):
raise AssertionError("if is_random = True, the arguments are (min, max)")
## random change brightness # small --> brighter
illum_settings = np.random.randint(0, 3) # 0-brighter, 1-darker, 2 keep normal
if illum_settings == 0: # brighter
gamma = np.random.uniform(gamma[0], 1.0) # (.5, 1.0)
elif illum_settings == 1: # darker
gamma = np.random.uniform(1.0, gamma[1]) # (1.0, 5.0)
else:
gamma = 1
im_ = brightness(x, gamma=gamma, gain=1, is_random=False)
# tl.logging.info("using contrast and saturation")
image = PIL.Image.fromarray(im_) # array -> PIL
contrast_adjust = PIL.ImageEnhance.Contrast(image)
image = contrast_adjust.enhance(np.random.uniform(contrast[0], contrast[1])) #0.3,0.9))
saturation_adjust = PIL.ImageEnhance.Color(image)
image = saturation_adjust.enhance(np.random.uniform(saturation[0], saturation[1])) # (0.7,1.0))
im_ = np.array(image) # PIL -> array
else:
im_ = brightness(x, gamma=gamma, gain=1, is_random=False)
image = PIL.Image.fromarray(im_) # array -> PIL
contrast_adjust = PIL.ImageEnhance.Contrast(image)
image = contrast_adjust.enhance(contrast)
saturation_adjust = PIL.ImageEnhance.Color(image)
image = saturation_adjust.enhance(saturation)
im_ = np.array(image) # PIL -> array
return np.asarray(im_)
def rgb_to_hsv(rgb):
"""Input RGB image [0~255] return HSV image [0~1].
Parameters
------------
rgb : numpy.array
An image with values between 0 and 255.
Returns
-------
numpy.array
A processed image.
"""
# Translated from source of colorsys.rgb_to_hsv
# r,g,b should be a numpy arrays with values between 0 and 255
# rgb_to_hsv returns an array of floats between 0.0 and 1.0.
rgb = rgb.astype('float')
hsv = np.zeros_like(rgb)
# in case an RGBA array was passed, just copy the A channel
hsv[..., 3:] = rgb[..., 3:]
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
maxc = np.max(rgb[..., :3], axis=-1)
minc = np.min(rgb[..., :3], axis=-1)
hsv[..., 2] = maxc
mask = maxc != minc
hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]
rc = np.zeros_like(r)
gc = np.zeros_like(g)
bc = np.zeros_like(b)
rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]
gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]
bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]
hsv[..., 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)
hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
return hsv
def hsv_to_rgb(hsv):
"""Input HSV image [0~1] return RGB image [0~255].
Parameters
-------------
hsv : numpy.array
An image with values between 0.0 and 1.0
Returns
-------
numpy.array
A processed image.
"""
# Translated from source of colorsys.hsv_to_rgb
# h,s should be a numpy arrays with values between 0.0 and 1.0
# v should be a numpy array with values between 0.0 and 255.0
# hsv_to_rgb returns an array of uints between 0 and 255.
rgb = np.empty_like(hsv)
rgb[..., 3:] = hsv[..., 3:]
h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]
i = (h * 6.0).astype('uint8')
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
i = i % 6
conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5]
rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v)
rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t)
rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p)
return rgb.astype('uint8')
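# A minimal round-trip sketch, assuming `im` is an RGB uint8 array in [0, 255]:
# rgb_to_hsv maps it to HSV floats in [0, 1]; hsv_to_rgb maps back to uint8 RGB.
# This is also how adjust_hue below manipulates the hue channel.
# hsv = rgb_to_hsv(im)
# hsv[..., 0] = (hsv[..., 0] + 0.1) % 1.0  # shift the hue by a constant offset
# im_shifted = hsv_to_rgb(hsv)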
def adjust_hue(im, hout=0.66, is_offset=True, is_clip=True, is_random=False):
"""Adjust hue of an RGB image.
This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type.
For TF, see `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.and `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
Parameters
-----------
im : numpy.array
An image with values between 0 and 255.
hout : float
The scale value for adjusting hue.
- If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue.
- If is_offset is True, add this value as the offset to the hue channel.
is_offset : boolean
Whether `hout` is added on HSV as offset or not. Default is True.
is_clip : boolean
If HSV value smaller than 0, set to 0. Default is True.
is_random : boolean
If True, randomly change hue. Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
Random, add a random value between -0.2 and 0.2 as the offset to every hue values.
>>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False)
Non-random, make all hue to green.
>>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False)
References
-----------
- `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
- `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.
- `StackOverflow: Changing image hue with python PIL <https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil>`__.
"""
hsv = rgb_to_hsv(im)
if is_random:
hout = np.random.uniform(-hout, hout)
if is_offset:
hsv[..., 0] += hout
else:
hsv[..., 0] = hout
if is_clip:
hsv[..., 0] = np.clip(hsv[..., 0], 0, np.inf) # Hao : can remove green dots
rgb = hsv_to_rgb(hsv)
return rgb
# # contrast
# def constant(x, cutoff=0.5, gain=10, inv=False, is_random=False):
# # TODO
# x = exposure.adjust_sigmoid(x, cutoff=cutoff, gain=gain, inv=inv)
# return x
#
# def constant_multi():
# #TODO
# pass
def imresize(x, size=None, interp='bicubic', mode=None):
"""Resize an image by given output size and method.
Warning, this function will rescale the value to [0, 255].
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
size : list of 2 int or None
For height and width.
interp : str
Interpolation method for re-sizing (`nearest`, `lanczos`, `bilinear`, `bicubic` (default) or `cubic`).
mode : str
The PIL image mode (`P`, `L`, etc.) to convert arr before resizing.
Returns
-------
numpy.array
A processed image.
References
------------
- `scipy.misc.imresize <https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html>`__
"""
if size is None:
size = [100, 100]
if x.shape[-1] == 1:
# greyscale
x = scipy.misc.imresize(x[:, :, 0], size, interp=interp, mode=mode)
return x[:, :, np.newaxis]
elif x.shape[-1] == 3:
# rgb, bgr ..
return scipy.misc.imresize(x, size, interp=interp, mode=mode)
else:
raise Exception("Unsupported channel %d" % x.shape[-1])
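# A minimal usage sketch for imresize as defined above, assuming `x` is a
# [row, col, channel] array with 1 or 3 channels; note the warning in the
# docstring that values are rescaled to [0, 255].
# x_small = imresize(x, size=[64, 64], interp='bicubic')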
# value scale
def pixel_value_scale(im, val=0.9, clip=None, is_random=False):
"""Scales each value in the pixels of the image.
Parameters
-----------
im : numpy.array
An image.
val : float
The scale value for changing pixel value.
- If is_random=False, multiply this value with all pixels.
- If is_random=True, multiply a value between [1-val, 1+val] with all pixels.
clip : tuple of 2 numbers
The minimum and maximum value.
is_random : boolean
If True, see ``val``.
Returns
-------
numpy.array
A processed image.
Examples
----------
Random
>>> im = pixel_value_scale(im, 0.1, [0, 255], is_random=True)
Non-random
>>> im = pixel_value_scale(im, 0.9, [0, 255], is_random=False)
"""
clip = clip if clip is not None else (-np.inf, np.inf)
if is_random:
scale = 1 + np.random.uniform(-val, val)
im = im * scale
else:
im = im * val
if len(clip) == 2:
im = np.clip(im, clip[0], clip[1])
else:
raise Exception("clip : tuple of 2 numbers")
return im
# normalization
def samplewise_norm(
x, rescale=None, samplewise_center=False, samplewise_std_normalization=False, channel_index=2, epsilon=1e-7
):
    """Normalize an image by rescaling, samplewise centering and samplewise std normalization, in that order.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
rescale : float
Rescaling factor. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation)
samplewise_center : boolean
If True, set each sample mean to 0.
samplewise_std_normalization : boolean
If True, divide each input by its std.
epsilon : float
        A small positive value added to the standard deviation to avoid division by zero.
Returns
-------
numpy.array
A processed image.
Examples
--------
>>> x = samplewise_norm(x, samplewise_center=True, samplewise_std_normalization=True)
>>> print(x.shape, np.mean(x), np.std(x))
(160, 176, 1), 0.0, 1.0
Notes
------
    When samplewise_center and samplewise_std_normalization are True:
    - For a greyscale image, every pixel is centered and scaled by the mean and std of the whole image.
    - For an RGB image, every pixel is centered and scaled by the mean and std over its own channels, i.e. each pixel then has mean 0 and std 1 across the channel axis.
"""
if rescale:
x *= rescale
if x.shape[channel_index] == 1:
# greyscale
if samplewise_center:
x = x - np.mean(x)
if samplewise_std_normalization:
x = x / np.std(x)
return x
elif x.shape[channel_index] == 3:
# rgb
if samplewise_center:
x = x - np.mean(x, axis=channel_index, keepdims=True)
if samplewise_std_normalization:
x = x / (np.std(x, axis=channel_index, keepdims=True) + epsilon)
return x
else:
raise Exception("Unsupported channels %d" % x.shape[channel_index])
def featurewise_norm(x, mean=None, std=None, epsilon=1e-7):
    """Normalize every pixel by the same given mean and std, which are usually
    computed from all examples.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
mean : float
Value for subtraction.
std : float
Value for division.
epsilon : float
        A small positive value added to the standard deviation to avoid division by zero.
Returns
-------
numpy.array
A processed image.
"""
if mean:
x = x - mean
if std:
x = x / (std + epsilon)
return x
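# A minimal usage sketch for featurewise_norm, assuming `X_train` (hypothetical
# name) is a batch of images: the mean and std are computed once over the whole
# training set and then reused for every sample.
# mean, std = np.mean(X_train), np.std(X_train)
# X_train = [featurewise_norm(x, mean=mean, std=std) for x in X_train]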
# whitening
def get_zca_whitening_principal_components_img(X):
"""Return the ZCA whitening principal components matrix.
Parameters
-----------
x : numpy.array
Batch of images with dimension of [n_example, row, col, channel] (default).
Returns
-------
numpy.array
A processed image.
"""
flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
tl.logging.info("zca : computing sigma ..")
sigma = np.dot(flatX.T, flatX) / flatX.shape[0]
tl.logging.info("zca : computing U, S and V ..")
U, S, _ = linalg.svd(sigma) # USV
tl.logging.info("zca : computing principal components ..")
principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
return principal_components
def zca_whitening(x, principal_components):
"""Apply ZCA whitening on an image by given principal components matrix.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
principal_components : matrix
Matrix from ``get_zca_whitening_principal_components_img``.
Returns
-------
numpy.array
A processed image.
"""
flatx = np.reshape(x, (x.size))
# tl.logging.info(principal_components.shape, x.shape) # ((28160, 28160), (160, 176, 1))
# flatx = np.reshape(x, (x.shape))
# flatx = np.reshape(x, (x.shape[0], ))
# tl.logging.info(flatx.shape) # (160, 176, 1)
whitex = np.dot(flatx, principal_components)
x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
return x
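# A minimal usage sketch for ZCA whitening, assuming `X_train` (hypothetical name)
# is a batch with shape [n_example, row, col, channel]: the principal components
# are computed once and then applied image by image.
# pc = get_zca_whitening_principal_components_img(X_train)
# X_white = [zca_whitening(x, pc) for x in X_train]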
# developing
# def barrel_transform(x, intensity):
# # https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py
# # TODO
# pass
#
# def barrel_transform_multi(x, intensity):
# # https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py
# # TODO
# pass
# channel shift
def channel_shift(x, intensity, is_random=False, channel_index=2):
"""Shift the channels of an image, randomly or non-randomly, see `numpy.rollaxis <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rollaxis.html>`__.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
intensity : float
Intensity of shifting.
is_random : boolean
If True, randomly shift. Default is False.
channel_index : int
Index of channel. Default is 2.
Returns
-------
numpy.array
A processed image.
"""
if is_random:
factor = np.random.uniform(-intensity, intensity)
else:
factor = intensity
x = np.rollaxis(x, channel_index, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + factor, min_x, max_x) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index + 1)
return x
# x = np.rollaxis(x, channel_index, 0)
# min_x, max_x = np.min(x), np.max(x)
# channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
# for x_channel in x]
# x = np.stack(channel_images, axis=0)
# x = np.rollaxis(x, 0, channel_index+1)
# return x
def channel_shift_multi(x, intensity, is_random=False, channel_index=2):
"""Shift the channels of images with the same arguments, randomly or non-randomly, see `numpy.rollaxis <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rollaxis.html>`__.
    Usually used for image segmentation where x = [X, Y], and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.channel_shift``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random:
factor = np.random.uniform(-intensity, intensity)
else:
factor = intensity
results = []
for data in x:
data = np.rollaxis(data, channel_index, 0)
min_x, max_x = np.min(data), np.max(data)
        channel_images = [np.clip(x_channel + factor, min_x, max_x) for x_channel in data]
        data = np.stack(channel_images, axis=0)
        data = np.rollaxis(data, 0, channel_index + 1)
results.append(data)
return np.asarray(results)
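# A minimal usage sketch for channel_shift / channel_shift_multi, assuming `x` is a
# [row, col, channel] image and `y` a matched array, with pixel values roughly in
# [0, 1]: the same random factor is added to every channel and the result is
# clipped back to the original value range.
# x_shift = channel_shift(x, intensity=0.05, is_random=True)
# x_shift, y_shift = channel_shift_multi([x, y], intensity=0.05, is_random=True)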
# noise
def drop(x, keep=0.5):
"""Randomly set some pixels to zero by a given keeping probability.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] or [row, col].
keep : float
        The keeping probability in (0, 1); the lower it is, the more values are set to zero.
Returns
-------
numpy.array
A processed image.
"""
if len(x.shape) == 3:
if x.shape[-1] == 3: # color
img_size = x.shape
mask = np.random.binomial(n=1, p=keep, size=x.shape[:-1])
for i in range(3):
x[:, :, i] = np.multiply(x[:, :, i], mask)
elif x.shape[-1] == 1: # greyscale image
img_size = x.shape
x = np.multiply(x, np.random.binomial(n=1, p=keep, size=img_size))
else:
raise Exception("Unsupported shape {}".format(x.shape))
    elif len(x.shape) in (2, 1):  # greyscale matrix (image) or vector
img_size = x.shape
x = np.multiply(x, np.random.binomial(n=1, p=keep, size=img_size))
else:
raise Exception("Unsupported shape {}".format(x.shape))
return x
# x = np.asarray([[1,2,3,4,5,6,7,8,9,10],[1,2,3,4,5,6,7,8,9,10]])
# x = np.asarray([x,x,x,x,x,x])
# x.shape = 10, 4, 3
# tl.logging.info(x)
# # exit()
# tl.logging.info(x.shape)
# # exit()
# tl.logging.info(drop(x, keep=1.))
# exit()
# manual transform
def transform_matrix_offset_center(matrix, x, y):
"""Return transform matrix offset center.
Parameters
----------
matrix : numpy.array
Transform matrix.
x and y : 2 int
Size of image.
Returns
-------
numpy.array
The transform matrix.
Examples
--------
- See ``tl.prepro.rotation``, ``tl.prepro.shear``, ``tl.prepro.zoom``.
"""
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x, transform_matrix, channel_index=2, fill_mode='nearest', cval=0., order=1):
"""Return transformed images by given ``transform_matrix`` from ``transform_matrix_offset_center``.
Parameters
----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
transform_matrix : numpy.array
Transform matrix (offset center), can be generated by ``transform_matrix_offset_center``
channel_index : int
Index of channel, default 2.
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0
order : int
The order of interpolation. The order has to be in the range 0-5:
- 0 Nearest-neighbor
- 1 Bi-linear (default)
- 2 Bi-quadratic
- 3 Bi-cubic
- 4 Bi-quartic
- 5 Bi-quintic
- `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
Examples
--------
- See ``tl.prepro.rotation``, ``tl.prepro.shift``, ``tl.prepro.shear``, ``tl.prepro.zoom``.
"""
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [
ndi.interpolation.affine_transform(
x_channel, final_affine_matrix, final_offset, order=order, mode=fill_mode, cval=cval
) for x_channel in x
]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index + 1)
return x
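# A minimal sketch of the manual-transform pair, assuming `x` has shape
# [row, col, channel]: build an affine matrix, recenter it with
# transform_matrix_offset_center, then warp the image with apply_transform.
# zoom_matrix = np.array([[0.8, 0, 0], [0, 0.8, 0], [0, 0, 1]])
# h, w = x.shape[0], x.shape[1]
# matrix = transform_matrix_offset_center(zoom_matrix, h, w)
# x_new = apply_transform(x, matrix, channel_index=2, fill_mode='nearest')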
def projective_transform_by_points(
x, src, dst, map_args=None, output_shape=None, order=1, mode='constant', cval=0.0, clip=True,
preserve_range=False
):
"""Projective transform by given coordinates, usually 4 coordinates.
see `scikit-image <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
src : list or numpy
The original coordinates, usually 4 coordinates of (width, height).
dst : list or numpy
The coordinates after transformation, the number of coordinates is the same with src.
map_args : dictionary or None
Keyword arguments passed to inverse map.
output_shape : tuple of 2 int
Shape of the output image generated. By default the shape of the input image is preserved. Note that, even for multi-band images, only rows and columns need to be specified.
order : int
The order of interpolation. The order has to be in the range 0-5:
- 0 Nearest-neighbor
- 1 Bi-linear (default)
- 2 Bi-quadratic
- 3 Bi-cubic
- 4 Bi-quartic
- 5 Bi-quintic
mode : str
One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`.
Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad.
cval : float
Used in conjunction with mode `constant`, the value outside the image boundaries.
clip : boolean
Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range.
preserve_range : boolean
Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float.
Returns
-------
numpy.array
A processed image.
Examples
--------
Assume X is an image from CIFAR-10, i.e. shape == (32, 32, 3)
>>> src = [[0,0],[0,32],[32,0],[32,32]] # [w, h]
>>> dst = [[10,10],[0,32],[32,0],[32,32]]
>>> x = tl.prepro.projective_transform_by_points(X, src, dst)
References
-----------
- `scikit-image : geometric transformations <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__
- `scikit-image : examples <http://scikit-image.org/docs/dev/auto_examples/index.html>`__
"""
if map_args is None:
map_args = {}
# if type(src) is list:
if isinstance(src, list): # convert to numpy
src = np.array(src)
# if type(dst) is list:
if isinstance(dst, list):
dst = np.array(dst)
if np.max(x) > 1: # convert to [0, 1]
x = x / 255
m = transform.ProjectiveTransform()
m.estimate(dst, src)
warped = transform.warp(
x, m, map_args=map_args, output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range
)
return warped
# Numpy and PIL
def array_to_img(x, dim_ordering=(0, 1, 2), scale=True):
"""Converts a numpy array to PIL image object (uint8 format).
Parameters
----------
x : numpy.array
An image with dimension of 3 and channels of 1 or 3.
dim_ordering : tuple of 3 int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
scale : boolean
If True, converts image to [0, 255] from any range of value like [-1, 2]. Default is True.
Returns
-------
PIL.image
An image.
References
-----------
`PIL Image.fromarray <http://pillow.readthedocs.io/en/3.1.x/reference/Image.html?highlight=fromarray>`__
"""
# if dim_ordering == 'default':
# dim_ordering = K.image_dim_ordering()
# if dim_ordering == 'th': # theano
# x = x.transpose(1, 2, 0)
x = x.transpose(dim_ordering)
if scale:
x += max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
# tl.logging.info(x_max)
# x /= x_max
x = x / x_max
x *= 255
if x.shape[2] == 3:
# RGB
return PIL.Image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return PIL.Image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise Exception('Unsupported channel number: ', x.shape[2])
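# A minimal usage sketch for array_to_img, assuming `x` is a float array with
# 1 or 3 channels: scale=True maps whatever value range it has onto [0, 255]
# before building the PIL image.
# img = array_to_img(x, dim_ordering=(0, 1, 2), scale=True)
# img.save('out.png')  # hypothetical output path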
def find_contours(x, level=0.8, fully_connected='low', positive_orientation='low'):
"""Find iso-valued contours in a 2D array for a given level value, returns list of (n, 2)-ndarrays
see `skimage.measure.find_contours <http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.find_contours>`__.
Parameters
------------
x : 2D ndarray of double.
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str
Either `low` or `high`. Indicates whether array elements below the given level value are to be considered fully-connected (and hence elements above the value will only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : str
Either `low` or `high`. Indicates whether the output contours will produce positively-oriented polygons around islands of low- or high-valued elements. If `low` then contours will wind counter-clockwise around elements below the iso-value. Alternately, this means that low-valued elements are always on the left of the contour.
Returns
--------
list of (n,2)-ndarrays
Each contour is an ndarray of shape (n, 2), consisting of n (row, column) coordinates along the contour.
"""
return skimage.measure.find_contours(
x, level, fully_connected=fully_connected, positive_orientation=positive_orientation
)
def pt2map(list_points=None, size=(100, 100), val=1):
"""Inputs a list of points, return a 2D image.
Parameters
--------------
    list_points : list of list of points
        Groups of point coordinates, e.g. the contours returned by ``find_contours``, each group being [[x, y], [x, y], ...].
size : tuple of 2 int
(w, h) for output size.
val : float or int
For the contour value.
Returns
-------
numpy.array
An image.
"""
if list_points is None:
raise Exception("list_points : list of 2 int")
i_m = np.zeros(size)
if len(list_points) == 0:
return i_m
for xx in list_points:
for x in xx:
# tl.logging.info(x)
i_m[int(np.round(x[0]))][int(np.round(x[1]))] = val
return i_m
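# A minimal usage sketch chaining find_contours and pt2map, assuming `x` is a 2D
# array (e.g. a probability map): the contours found at the given level are
# rasterised back into an image of the same size.
# contours = find_contours(x, level=0.8)
# contour_map = pt2map(contours, size=x.shape, val=1)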
def binary_dilation(x, radius=3):
"""Return fast binary morphological dilation of an image.
see `skimage.morphology.binary_dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_dilation>`__.
Parameters
-----------
x : 2D array
A binary image.
radius : int
For the radius of mask.
Returns
-------
numpy.array
A processed binary image.
"""
mask = disk(radius)
x = _binary_dilation(x, selem=mask)
return x
def dilation(x, radius=3):
"""Return greyscale morphological dilation of an image,
see `skimage.morphology.dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.dilation>`__.
Parameters
-----------
x : 2D array
An greyscale image.
radius : int
For the radius of mask.
Returns
-------
numpy.array
A processed greyscale image.
"""
mask = disk(radius)
    x = _dilation(x, selem=mask)
return x
def binary_erosion(x, radius=3):
"""Return binary morphological erosion of an image,
see `skimage.morphology.binary_erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_erosion>`__.
Parameters
-----------
x : 2D array
A binary image.
radius : int
For the radius of mask.
Returns
-------
numpy.array
A processed binary image.
"""
mask = disk(radius)
x = _binary_erosion(x, selem=mask)
return x
def erosion(x, radius=3):
"""Return greyscale morphological erosion of an image,
see `skimage.morphology.erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.erosion>`__.
Parameters
-----------
x : 2D array
A greyscale image.
radius : int
For the radius of mask.
Returns
-------
numpy.array
A processed greyscale image.
"""
mask = disk(radius)
x = _erosion(x, selem=mask)
return x
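# A minimal usage sketch for the morphology helpers, assuming `mask_bin` is a 2D
# binary mask and `img_grey` a 2D greyscale image (hypothetical names).
# mask_fat = binary_dilation(mask_bin, radius=3)   # grow binary regions
# mask_thin = binary_erosion(mask_bin, radius=3)   # shrink binary regions
# img_max = dilation(img_grey, radius=3)           # greyscale dilation
# img_min = erosion(img_grey, radius=3)            # greyscale erosion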
def obj_box_coords_rescale(coords=None, shape=None):
"""Scale down a list of coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1].
Parameters
------------
coords : list of list of 4 ints or None
        Coordinates of more than one image, e.g. [[x, y, w, h], [x, y, w, h], ...].
    shape : list of 2 int or None
        [height, width].
Returns
-------
list of list of 4 numbers
A list of new bounding boxes.
Examples
---------
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])
>>> print(coords)
[[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])
>>> print(coords)
[[0.3, 0.8, 0.5, 1.0]]
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
>>> print(coords)
[[0.15, 0.4, 0.25, 0.5]]
"""
if coords is None:
coords = []
if shape is None:
shape = [100, 200]
imh, imw = shape[0], shape[1]
imh = imh * 1.0 # * 1.0 for python2 : force division to be float point
imw = imw * 1.0
coords_new = list()
for coord in coords:
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
x = coord[0] / imw
y = coord[1] / imh
w = coord[2] / imw
h = coord[3] / imh
coords_new.append([x, y, w, h])
return coords_new
def obj_box_coord_rescale(coord=None, shape=None):
"""Scale down one coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1].
It is the reverse process of ``obj_box_coord_scale_to_pixelunit``.
Parameters
------------
    coord : list of 4 int or None
        One coordinate of one image, e.g. [x, y, w, h].
shape : list of 2 int or None
For [height, width].
Returns
-------
list of 4 numbers
New bounding box.
Examples
---------
>>> coord = tl.prepro.obj_box_coord_rescale(coord=[30, 40, 50, 50], shape=[100, 100])
[0.3, 0.4, 0.5, 0.5]
"""
if coord is None:
coord = []
if shape is None:
shape = [100, 200]
return obj_box_coords_rescale(coords=[coord], shape=shape)[0]
def obj_box_coord_scale_to_pixelunit(coord, shape=None):
"""Convert one coordinate [x, y, w (or x2), h (or y2)] in ratio format to image coordinate format.
It is the reverse process of ``obj_box_coord_rescale``.
Parameters
-----------
coord : list of 4 float
One coordinate of one image [x, y, w (or x2), h (or y2)] in ratio format, i.e value range [0~1].
shape : tuple of 2 or None
For [height, width].
Returns
-------
list of 4 numbers
New bounding box.
Examples
---------
>>> x, y, x2, y2 = tl.prepro.obj_box_coord_scale_to_pixelunit([0.2, 0.3, 0.5, 0.7], shape=(100, 200, 3))
[40, 30, 100, 70]
"""
if shape is None:
shape = [100, 100]
imh, imw = shape[0:2]
x = int(coord[0] * imw)
x2 = int(coord[2] * imw)
y = int(coord[1] * imh)
y2 = int(coord[3] * imh)
return [x, y, x2, y2]
# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])
# tl.logging.info(coords)
# # [[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]
# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])
# tl.logging.info(coords)
# # [[0.3, 0.8, 0.5, 1.0]]
# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
# tl.logging.info(coords)
# # [[0.15, 0.4, 0.25, 0.5]]
# exit()
def obj_box_coord_centroid_to_upleft_butright(coord, to_int=False):
    """Convert one coordinate [x_center, y_center, w, h] to [x1, y1, x2, y2] in up-left and bottom-right format.
Parameters
------------
coord : list of 4 int/float
One coordinate.
to_int : boolean
Whether to convert output as integer.
Returns
-------
list of 4 numbers
New bounding box.
Examples
---------
>>> coord = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20])
[20, 30, 40, 50]
"""
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
x_center, y_center, w, h = coord
x = x_center - w / 2.
y = y_center - h / 2.
x2 = x + w
y2 = y + h
if to_int:
return [int(x), int(y), int(x2), int(y2)]
else:
return [x, y, x2, y2]
# coord = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20])
# tl.logging.info(coord) [20, 30, 40, 50]
# exit()
def obj_box_coord_upleft_butright_to_centroid(coord):
"""Convert one coordinate [x1, y1, x2, y2] to [x_center, y_center, w, h].
It is the reverse process of ``obj_box_coord_centroid_to_upleft_butright``.
Parameters
------------
coord : list of 4 int/float
One coordinate.
Returns
-------
list of 4 numbers
New bounding box.
"""
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x1, y1, x2, y2]")
x1, y1, x2, y2 = coord
w = x2 - x1
h = y2 - y1
x_c = x1 + w / 2.
y_c = y1 + h / 2.
return [x_c, y_c, w, h]
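# A minimal round-trip sketch for the box-format converters: darknet-style
# [x_center, y_center, w, h] to corner format [x1, y1, x2, y2] and back.
# box = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20])  # [20.0, 30.0, 40.0, 50.0]
# back = obj_box_coord_upleft_butright_to_centroid(box)              # [30.0, 40.0, 20.0, 20.0]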
def obj_box_coord_centroid_to_upleft(coord):
"""Convert one coordinate [x_center, y_center, w, h] to [x, y, w, h].
It is the reverse process of ``obj_box_coord_upleft_to_centroid``.
Parameters
------------
coord : list of 4 int/float
One coordinate.
Returns
-------
list of 4 numbers
New bounding box.
"""
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
x_center, y_center, w, h = coord
x = x_center - w / 2.
y = y_center - h / 2.
return [x, y, w, h]
def obj_box_coord_upleft_to_centroid(coord):
"""Convert one coordinate [x, y, w, h] to [x_center, y_center, w, h].
It is the reverse process of ``obj_box_coord_centroid_to_upleft``.
Parameters
------------
coord : list of 4 int/float
One coordinate.
Returns
-------
list of 4 numbers
New bounding box.
"""
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
x, y, w, h = coord
x_center = x + w / 2.
y_center = y + h / 2.
return [x_center, y_center, w, h]
def parse_darknet_ann_str_to_list(annotations):
r"""Input string format of class, x, y, w, h, return list of list format.
Parameters
-----------
annotations : str
        The annotations in darknet format "class, x, y, w, h ...." separated by "\\n".
Returns
-------
list of list of 4 numbers
List of bounding box.
"""
annotations = annotations.split("\n")
ann = []
for a in annotations:
a = a.split()
if len(a) == 5:
for i, _v in enumerate(a):
if i == 0:
a[i] = int(a[i])
else:
a[i] = float(a[i])
ann.append(a)
return ann
def parse_darknet_ann_list_to_cls_box(annotations):
"""Parse darknet annotation format into two lists for class and bounding box.
Input list of [[class, x, y, w, h], ...], return two list of [class ...] and [[x, y, w, h], ...].
Parameters
------------
annotations : list of list
A list of class and bounding boxes of images e.g. [[class, x, y, w, h], ...]
Returns
-------
list of int
List of class labels.
list of list of 4 numbers
List of bounding box.
"""
class_list = []
bbox_list = []
for ann in annotations:
class_list.append(ann[0])
bbox_list.append(ann[1:])
return class_list, bbox_list
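# A minimal usage sketch for the darknet annotation helpers, assuming `ann_str`
# (hypothetical example string) holds one "class x y w h" line per box.
# ann_str = "0 0.5 0.5 0.2 0.3\n1 0.1 0.2 0.05 0.05"
# ann_list = parse_darknet_ann_str_to_list(ann_str)
# classes, bboxes = parse_darknet_ann_list_to_cls_box(ann_list)
# # classes == [0, 1]; bboxes == [[0.5, 0.5, 0.2, 0.3], [0.1, 0.2, 0.05, 0.05]]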
def obj_box_horizontal_flip(im, coords=None, is_rescale=False, is_center=False, is_random=False):
"""Left-right flip the image and coordinates for object detection.
Parameters
----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...].
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean
Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.
is_random : boolean
If True, randomly flip. Default is False.
Returns
-------
numpy.array
A processed image
list of list of 4 numbers
A list of new bounding boxes.
Examples
--------
>>> im = np.zeros([80, 100]) # as an image with shape width=100, height=80
>>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False)
>>> print(coords)
[[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]]
>>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False)
>>> print(coords)
[[0.5, 0.4, 0.3, 0.3]]
>>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False)
>>> print(coords)
[[80, 40, 30, 30]]
>>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False)
>>> print(coords)
[[50, 40, 30, 30]]
"""
if coords is None:
coords = []
def _flip(im, coords):
im = flip_axis(im, axis=1, is_random=False)
coords_new = list()
for coord in coords:
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
if is_rescale:
if is_center:
# x_center' = 1 - x
x = 1. - coord[0]
else:
# x_center' = 1 - x - w
x = 1. - coord[0] - coord[2]
else:
if is_center:
# x' = im.width - x
x = im.shape[1] - coord[0]
else:
# x' = im.width - x - w
x = im.shape[1] - coord[0] - coord[2]
coords_new.append([x, coord[1], coord[2], coord[3]])
return im, coords_new
if is_random:
factor = np.random.uniform(-1, 1)
if factor > 0:
return _flip(im, coords)
else:
return im, coords
else:
return _flip(im, coords)
obj_box_left_right_flip = obj_box_horizontal_flip
# im = np.zeros([80, 100]) # as an image with shape width=100, height=80
# im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False)
# tl.logging.info(coords)
# # [[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]]
# im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False)
# tl.logging.info(coords)
# # [[0.5, 0.4, 0.3, 0.3]]
# im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False)
# tl.logging.info(coords)
# # [[80, 40, 30, 30]]
# im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False)
# tl.logging.info(coords)
# # [[50, 40, 30, 30]]
# exit()
def obj_box_imresize(im, coords=None, size=None, interp='bicubic', mode=None, is_rescale=False):
"""Resize an image, and compute the new bounding box coordinates.
Parameters
-------------
im : numpy.array
An image with dimension of [row, col, channel] (default).
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...]
size interp and mode : args
See ``tl.prepro.imresize``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1], then return the original coordinates. Default is False.
Returns
-------
numpy.array
A processed image
list of list of 4 numbers
A list of new bounding boxes.
Examples
--------
>>> im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80
>>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False)
>>> print(coords)
[[40, 80, 60, 60], [20, 40, 40, 40]]
>>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False)
>>> print(coords)
[[20, 20, 30, 15]]
>>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False)
>>> print(coords)
[[30, 30, 45, 22]]
>>> im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True)
>>> print(coords, im2.shape)
[[0.2, 0.4, 0.3, 0.3]] (160, 200, 3)
"""
if coords is None:
coords = []
if size is None:
size = [100, 100]
imh, imw = im.shape[0:2]
imh = imh * 1.0 # * 1.0 for python2 : force division to be float point
imw = imw * 1.0
im = imresize(im, size=size, interp=interp, mode=mode)
if is_rescale is False:
coords_new = list()
for coord in coords:
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
# x' = x * (imw'/imw)
x = int(coord[0] * (size[1] / imw))
# y' = y * (imh'/imh)
# tl.logging.info('>>', coord[1], size[0], imh)
y = int(coord[1] * (size[0] / imh))
# w' = w * (imw'/imw)
w = int(coord[2] * (size[1] / imw))
# h' = h * (imh'/imh)
h = int(coord[3] * (size[0] / imh))
coords_new.append([x, y, w, h])
return im, coords_new
else:
return im, coords
# im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80
# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False)
# tl.logging.info(coords)
# # [[40, 80, 60, 60], [20, 40, 40, 40]]
# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False)
# tl.logging.info(coords)
# # [20, 20, 30, 15]
# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False)
# tl.logging.info(coords)
# # [30, 30, 45, 22]
# im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True)
# tl.logging.info(coords, im2.shape)
# # [0.2, 0.4, 0.3, 0.3] (160, 200, 3)
# exit()
def obj_box_crop(
im, classes=None, coords=None, wrg=100, hrg=100, is_rescale=False, is_center=False, is_random=False,
thresh_wh=0.02, thresh_wh2=12.
):
"""Randomly or centrally crop an image, and compute the new bounding box coordinates.
Objects outside the cropped image will be removed.
Parameters
-----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
classes : list of int or None
Class IDs.
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...]
wrg hrg and is_random : args
See ``tl.prepro.crop``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean, default False
Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.
thresh_wh : float
        Threshold; remove the box if the ratio of its width (or height) to the image size is less than the threshold.
    thresh_wh2 : float
        Threshold; remove the box if the ratio of its width to height (or vice versa) is higher than the threshold.
Returns
-------
numpy.array
A processed image
list of int
A list of classes
list of list of 4 numbers
A list of new bounding boxes.
"""
if classes is None:
classes = []
if coords is None:
coords = []
h, w = im.shape[0], im.shape[1]
if (h <= hrg) or (w <= wrg):
        raise AssertionError("The crop size should be smaller than the original image")
if is_random:
h_offset = int(np.random.uniform(0, h - hrg) - 1)
w_offset = int(np.random.uniform(0, w - wrg) - 1)
h_end = hrg + h_offset
w_end = wrg + w_offset
im_new = im[h_offset:h_end, w_offset:w_end]
else: # central crop
h_offset = int(np.floor((h - hrg) / 2.))
w_offset = int(np.floor((w - wrg) / 2.))
h_end = h_offset + hrg
w_end = w_offset + wrg
im_new = im[h_offset:h_end, w_offset:w_end]
# w
# _____________________________
# | h/w offset |
# | ------- |
# h | | | |
# | | | |
# | ------- |
# | h/w end |
# |___________________________|
def _get_coord(coord):
"""Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
before getting the new coordinates.
Boxes outsides the cropped image will be removed.
"""
if is_center:
coord = obj_box_coord_centroid_to_upleft(coord)
##======= pixel unit format and upleft, w, h ==========##
# x = np.clip( coord[0] - w_offset, 0, w_end - w_offset)
# y = np.clip( coord[1] - h_offset, 0, h_end - h_offset)
# w = np.clip( coord[2] , 0, w_end - w_offset)
# h = np.clip( coord[3] , 0, h_end - h_offset)
x = coord[0] - w_offset
y = coord[1] - h_offset
w = coord[2]
h = coord[3]
if x < 0:
if x + w <= 0:
return None
w = w + x
x = 0
elif x > im_new.shape[1]: # object outside the cropped image
return None
if y < 0:
if y + h <= 0:
return None
h = h + y
y = 0
elif y > im_new.shape[0]: # object outside the cropped image
return None
if (x is not None) and (x + w > im_new.shape[1]): # box outside the cropped image
w = im_new.shape[1] - x
if (y is not None) and (y + h > im_new.shape[0]): # box outside the cropped image
h = im_new.shape[0] - y
if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow
# tl.logging.info('xx', w, h)
return None
if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
thresh_wh): # object shape strange: too narrow
# tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
return None
coord = [x, y, w, h]
## convert back if input format is center.
if is_center:
coord = obj_box_coord_upleft_to_centroid(coord)
return coord
coords_new = list()
classes_new = list()
for i, _ in enumerate(coords):
coord = coords[i]
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
if is_rescale:
# for scaled coord, upscaled before process and scale back in the end.
coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
coord = _get_coord(coord)
if coord is not None:
coord = obj_box_coord_rescale(coord, im_new.shape)
coords_new.append(coord)
classes_new.append(classes[i])
else:
coord = _get_coord(coord)
if coord is not None:
coords_new.append(coord)
classes_new.append(classes[i])
return im_new, classes_new, coords_new
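# A minimal usage sketch for obj_box_crop, assuming `im` is an image larger than
# the crop size and the boxes (hypothetical values) are darknet-style rescaled
# centroids: boxes falling outside the crop are dropped together with their class IDs.
# im_new, classes_new, coords_new = obj_box_crop(
#     im, classes=[0, 1], coords=[[0.5, 0.5, 0.2, 0.3], [0.1, 0.2, 0.1, 0.1]],
#     wrg=200, hrg=200, is_rescale=True, is_center=True, is_random=True)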
def obj_box_shift(
im, classes=None, coords=None, wrg=0.1, hrg=0.1, row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
cval=0., order=1, is_rescale=False, is_center=False, is_random=False, thresh_wh=0.02, thresh_wh2=12.
):
"""Shift an image randomly or non-randomly, and compute the new bounding box coordinates.
Objects outside the cropped image will be removed.
Parameters
-----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
classes : list of int or None
Class IDs.
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...]
    wrg hrg row_index col_index channel_index is_random fill_mode cval and order : args
        See ``tl.prepro.shift``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean
Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.
thresh_wh : float
        Threshold; remove the box if the ratio of its width (or height) to the image size is less than the threshold.
    thresh_wh2 : float
        Threshold; remove the box if the ratio of its width to height (or vice versa) is higher than the threshold.
Returns
-------
numpy.array
A processed image
list of int
A list of classes
list of list of 4 numbers
A list of new bounding boxes.
"""
if classes is None:
classes = []
if coords is None:
coords = []
imh, imw = im.shape[row_index], im.shape[col_index]
    if (hrg >= 1.0) or (hrg <= 0.) or (wrg >= 1.0) or (wrg <= 0.):
raise AssertionError("shift range should be (0, 1)")
if is_random:
tx = np.random.uniform(-hrg, hrg) * imh
ty = np.random.uniform(-wrg, wrg) * imw
else:
tx, ty = hrg * imh, wrg * imw
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
im_new = apply_transform(im, transform_matrix, channel_index, fill_mode, cval, order)
# modified from obj_box_crop
def _get_coord(coord):
"""Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
before getting the new coordinates.
Boxes outsides the cropped image will be removed.
"""
if is_center:
coord = obj_box_coord_centroid_to_upleft(coord)
##======= pixel unit format and upleft, w, h ==========##
x = coord[0] - ty # only change this
y = coord[1] - tx # only change this
w = coord[2]
h = coord[3]
if x < 0:
if x + w <= 0:
return None
w = w + x
x = 0
elif x > im_new.shape[1]: # object outside the cropped image
return None
if y < 0:
if y + h <= 0:
return None
h = h + y
y = 0
elif y > im_new.shape[0]: # object outside the cropped image
return None
if (x is not None) and (x + w > im_new.shape[1]): # box outside the cropped image
w = im_new.shape[1] - x
if (y is not None) and (y + h > im_new.shape[0]): # box outside the cropped image
h = im_new.shape[0] - y
if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow
# tl.logging.info('xx', w, h)
return None
if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
thresh_wh): # object shape strange: too narrow
# tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
return None
coord = [x, y, w, h]
## convert back if input format is center.
if is_center:
coord = obj_box_coord_upleft_to_centroid(coord)
return coord
coords_new = list()
classes_new = list()
for i, _ in enumerate(coords):
coord = coords[i]
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
if is_rescale:
# for scaled coord, upscaled before process and scale back in the end.
coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
coord = _get_coord(coord)
if coord is not None:
coord = obj_box_coord_rescale(coord, im_new.shape)
coords_new.append(coord)
classes_new.append(classes[i])
else:
coord = _get_coord(coord)
if coord is not None:
coords_new.append(coord)
classes_new.append(classes[i])
return im_new, classes_new, coords_new
def obj_box_zoom(
im, classes=None, coords=None, zoom_range=(0.9,
1.1), row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
cval=0., order=1, is_rescale=False, is_center=False, is_random=False, thresh_wh=0.02, thresh_wh2=12.
):
"""Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates.
Objects outside the cropped image will be removed.
Parameters
-----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
classes : list of int or None
Class IDs.
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...].
    zoom_range row_index col_index channel_index is_random fill_mode cval and order : args
        See ``tl.prepro.zoom``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean
Set to True, if the x and y of coordinates are the centroid. (i.e. darknet format). Default is False.
thresh_wh : float
        Threshold; remove the box if the ratio of its width (or height) to the image size is less than the threshold.
    thresh_wh2 : float
        Threshold; remove the box if the ratio of its width to height (or vice versa) is higher than the threshold.
Returns
-------
numpy.array
A processed image
list of int
A list of classes
list of list of 4 numbers
A list of new bounding boxes.
"""
if classes is None:
classes = []
if coords is None:
coords = []
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. ' 'Received arg: ', zoom_range)
if is_random:
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
tl.logging.info(" random_zoom : not zoom in/out")
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
else:
zx, zy = zoom_range
# tl.logging.info(zx, zy)
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
h, w = im.shape[row_index], im.shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
im_new = apply_transform(im, transform_matrix, channel_index, fill_mode, cval, order)
# modified from obj_box_crop
def _get_coord(coord):
"""Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
before getting the new coordinates.
Boxes outsides the cropped image will be removed.
"""
if is_center:
coord = obj_box_coord_centroid_to_upleft(coord)
# ======= pixel unit format and upleft, w, h ==========
x = (coord[0] - im.shape[1] / 2) / zy + im.shape[1] / 2 # only change this
y = (coord[1] - im.shape[0] / 2) / zx + im.shape[0] / 2 # only change this
w = coord[2] / zy # only change this
        h = coord[3] / zx  # only change this
if x < 0:
if x + w <= 0:
return None
w = w + x
x = 0
elif x > im_new.shape[1]: # object outside the cropped image
return None
if y < 0:
if y + h <= 0:
return None
h = h + y
y = 0
elif y > im_new.shape[0]: # object outside the cropped image
return None
if (x is not None) and (x + w > im_new.shape[1]): # box outside the cropped image
w = im_new.shape[1] - x
if (y is not None) and (y + h > im_new.shape[0]): # box outside the cropped image
h = im_new.shape[0] - y
if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow
# tl.logging.info('xx', w, h)
return None
if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
thresh_wh): # object shape strange: too narrow
# tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
return None
coord = [x, y, w, h]
# convert back if input format is center.
if is_center:
coord = obj_box_coord_upleft_to_centroid(coord)
return coord
coords_new = list()
classes_new = list()
for i, _ in enumerate(coords):
coord = coords[i]
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
if is_rescale:
# for scaled coord, upscaled before process and scale back in the end.
coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
coord = _get_coord(coord)
if coord is not None:
coord = obj_box_coord_rescale(coord, im_new.shape)
coords_new.append(coord)
classes_new.append(classes[i])
else:
coord = _get_coord(coord)
if coord is not None:
coords_new.append(coord)
classes_new.append(classes[i])
return im_new, classes_new, coords_new
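# A minimal usage sketch for obj_box_zoom, same conventions as obj_box_crop above:
# the image is zoomed and every surviving box is re-expressed in the new frame.
# im_new, classes_new, coords_new = obj_box_zoom(
#     im, classes=[0], coords=[[0.5, 0.5, 0.2, 0.3]], zoom_range=(0.8, 1.2),
#     is_rescale=True, is_center=True, is_random=True)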
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncating='pre', value=0.):
"""Pads each sequence to the same length:
the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen.
Truncation happens off either the beginning (default) or
the end of the sequence.
Supports post-padding and pre-padding (default).
Parameters
----------
sequences : list of list of int
All sequences where each row is a sequence.
maxlen : int
Maximum length.
dtype : numpy.dtype or str
Data type to cast the resulting sequence.
padding : str
Either 'pre' or 'post', pad either before or after each sequence.
truncating : str
Either 'pre' or 'post', remove values from sequences larger than maxlen either in the beginning or in the end of the sequence
value : float
Value to pad the sequences to the desired value.
Returns
----------
x : numpy.array
With dimensions (number_of_sequences, maxlen)
Examples
----------
>>> sequences = [[1,1,1,1,1],[2,2,2],[3,3]]
>>> sequences = pad_sequences(sequences, maxlen=None, dtype='int32',
... padding='post', truncating='pre', value=0.)
[[1 1 1 1 1]
[2 2 2 0 0]
[3 3 0 0 0]]
"""
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError(
'Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape)
)
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x.tolist()
def remove_pad_sequences(sequences, pad_id=0):
"""Remove padding.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
pad_id : int
The pad ID.
Returns
----------
list of list of int
The processed sequences.
Examples
----------
>>> sequences = [[2,3,4,0,0], [5,1,2,3,4,0,0,0], [4,5,0,2,4,0,0,0]]
>>> print(remove_pad_sequences(sequences, pad_id=0))
[[2, 3, 4], [5, 1, 2, 3, 4], [4, 5, 0, 2, 4]]
"""
sequences_out = copy.deepcopy(sequences)
for i, _ in enumerate(sequences):
# for j in range(len(sequences[i])):
# if sequences[i][j] == pad_id:
# sequences_out[i] = sequences_out[i][:j]
# break
for j in range(1, len(sequences[i])):
if sequences[i][-j] != pad_id:
sequences_out[i] = sequences_out[i][0:-j + 1]
break
return sequences_out
def process_sequences(sequences, end_id=0, pad_val=0, is_shorten=True, remain_end_id=False):
"""Set all tokens(ids) after END token to the padding value, and then shorten (option) it to the maximum sequence length in this batch.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
end_id : int
The special token for END.
pad_val : int
Replace the `end_id` and the IDs after `end_id` to this value.
is_shorten : boolean
Shorten the sequences. Default is True.
remain_end_id : boolean
Keep an `end_id` in the end. Default is False.
Returns
----------
list of list of int
The processed sequences.
Examples
---------
>>> sentences_ids = [[4, 3, 5, 3, 2, 2, 2, 2], <-- end_id is 2
... [5, 3, 9, 4, 9, 2, 2, 3]] <-- end_id is 2
    >>> sentences_ids = process_sequences(sentences_ids, end_id=vocab.end_id, pad_val=0, is_shorten=True)
[[4, 3, 5, 3, 0], [5, 3, 9, 4, 9]]
"""
max_length = 0
for _, seq in enumerate(sequences):
is_end = False
for i_w, n in enumerate(seq):
if n == end_id and is_end == False: # 1st time to see end_id
is_end = True
if max_length < i_w:
max_length = i_w
if remain_end_id is False:
seq[i_w] = pad_val # set end_id to pad_val
elif is_end ==True:
seq[i_w] = pad_val
if remain_end_id is True:
max_length += 1
if is_shorten:
for i, seq in enumerate(sequences):
sequences[i] = seq[:max_length]
return sequences
def sequences_add_start_id(sequences, start_id=0, remove_last=False):
"""Add special start token(id) in the beginning of each sequence.
Parameters
------------
sequences : list of list of int
All sequences where each row is a sequence.
start_id : int
The start ID.
remove_last : boolean
Remove the last value of each sequences. Usually be used for removing the end ID.
Returns
----------
list of list of int
The processed sequences.
Examples
---------
>>> sentences_ids = [[4,3,5,3,2,2,2,2], [5,3,9,4,9,2,2,3]]
>>> sentences_ids = sequences_add_start_id(sentences_ids, start_id=2)
[[2, 4, 3, 5, 3, 2, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2, 3]]
>>> sentences_ids = sequences_add_start_id(sentences_ids, start_id=2, remove_last=True)
[[2, 4, 3, 5, 3, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2]]
For Seq2seq
>>> input = [a, b, c]
>>> target = [x, y, z]
>>> decode_seq = [start_id, a, b] <-- sequences_add_start_id(input, start_id, True)
"""
sequences_out = [[] for _ in range(len(sequences))] #[[]] * len(sequences)
for i, _ in enumerate(sequences):
if remove_last:
sequences_out[i] = [start_id] + sequences[i][:-1]
else:
sequences_out[i] = [start_id] + sequences[i]
return sequences_out
def sequences_add_end_id(sequences, end_id=888):
"""Add special end token(id) in the end of each sequence.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
end_id : int
The end ID.
Returns
----------
list of list of int
The processed sequences.
Examples
---------
>>> sequences = [[1,2,3],[4,5,6,7]]
>>> print(sequences_add_end_id(sequences, end_id=999))
[[1, 2, 3, 999], [4, 5, 6, 999]]
"""
sequences_out = [[] for _ in range(len(sequences))] #[[]] * len(sequences)
for i, _ in enumerate(sequences):
sequences_out[i] = sequences[i] + [end_id]
return sequences_out
def sequences_add_end_id_after_pad(sequences, end_id=888, pad_id=0):
"""Add special end token(id) in the end of each sequence.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
end_id : int
The end ID.
pad_id : int
The pad ID.
Returns
----------
list of list of int
The processed sequences.
Examples
---------
>>> sequences = [[1,2,0,0], [1,2,3,0], [1,2,3,4]]
>>> print(sequences_add_end_id_after_pad(sequences, end_id=99, pad_id=0))
[[1, 2, 99, 0], [1, 2, 3, 99], [1, 2, 3, 4]]
"""
# sequences_out = [[] for _ in range(len(sequences))]#[[]] * len(sequences)
sequences_out = copy.deepcopy(sequences)
# # add a pad to all
# for i in range(len(sequences)):
# for j in range(len(sequences[i])):
# sequences_out[i].append(pad_id)
# # pad -- > end
# max_len = 0
for i, v in enumerate(sequences):
for j, _v2 in enumerate(v):
if sequences[i][j] == pad_id:
sequences_out[i][j] = end_id
# if j > max_len:
# max_len = j
break
# # remove pad if too long
# for i in range(len(sequences)):
# for j in range(len(sequences[i])):
# sequences_out[i] = sequences_out[i][:max_len+1]
return sequences_out
def sequences_get_mask(sequences, pad_val=0):
"""Return mask for sequences.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
pad_val : int
The pad value.
Returns
----------
list of list of int
The mask.
Examples
---------
>>> sentences_ids = [[4, 0, 5, 3, 0, 0],
... [5, 3, 9, 4, 9, 0]]
>>> mask = sequences_get_mask(sentences_ids, pad_val=0)
[[1 1 1 1 0 0]
[1 1 1 1 1 0]]
"""
mask = np.ones_like(sequences)
for i, seq in enumerate(sequences):
for i_w in reversed(range(len(seq))):
if seq[i_w] == pad_val:
mask[i, i_w] = 0
else:
                break  # <-- exit the for loop, process the next sequence
return mask
|
py | 7df744b53ee1d43e29ee9c45de044a0010f9cbd7 | import gc
import glob
import random
import torch
from others.logging import logger
class Batch(object):
def _pad(self, data, pad_id, width=-1):
if (width == -1):
width = max(len(d) for d in data)
rtn_data = [d + [pad_id] * (width - len(d)) for d in data]
return rtn_data
def __init__(self, data=None, device=None, is_test=False):
"""Create a Batch from a list of examples."""
if data is not None:
# print("Data: ", len(data))
self.batch_size = len(data)
pre_src = [x[0] for x in data]
pre_labels = [x[1] for x in data]
pre_segs = [x[2] for x in data]
pre_clss = [x[3] for x in data]
src = torch.tensor(self._pad(pre_src, 0))
labels = torch.tensor(self._pad(pre_labels, 0))
segs = torch.tensor(self._pad(pre_segs, 0))
mask = ~(src == 0)
clss = torch.tensor(self._pad(pre_clss, -1))
mask_cls = ~(clss == -1)
clss[clss == -1] = 0
setattr(self, 'clss', clss.to(device))
setattr(self, 'mask_cls', mask_cls.to(device))
setattr(self, 'src', src.to(device))
setattr(self, 'labels', labels.to(device))
setattr(self, 'segs', segs.to(device))
setattr(self, 'mask', mask.to(device))
if (is_test):
src_str = [x[-2] for x in data]
setattr(self, 'src_str', src_str)
tgt_str = [x[-1] for x in data]
setattr(self, 'tgt_str', tgt_str)
def __len__(self):
return self.batch_size
def batch(data, batch_size):
"""Yield elements from data in chunks of batch_size."""
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = simple_batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], simple_batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def load_dataset(args, corpus_type, shuffle):
"""
Dataset generator. Don't do extra stuff here, like printing,
because they will be postponed to the first loading time.
Args:
corpus_type: 'train' or 'valid'
Returns:
A list of dataset, the dataset(s) are lazily loaded.
"""
assert corpus_type in ["train", "valid", "test"]
def _lazy_dataset_loader(pt_file, corpus_type):
dataset = torch.load(pt_file)
logger.info('Loading %s dataset from %s, number of examples: %d' %
(corpus_type, pt_file, len(dataset)))
return dataset
# Sort the glob output by file name (by increasing indexes).
pts = sorted(glob.glob(args.bert_data_path + '.' + corpus_type + '.[0-9]*.pt'))
if pts:
if (shuffle):
random.shuffle(pts)
for pt in pts:
yield _lazy_dataset_loader(pt, corpus_type)
else:
# Only one inputters.*Dataset, simple!
pt = args.bert_data_path + '.' + corpus_type + '.pt'
yield _lazy_dataset_loader(pt, corpus_type)
def simple_batch_size_fn(new, count):
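    # Batch-size heuristic used by batch() and DataIterator.batch_buffer(): track the
    # largest example length seen in the current minibatch (via the module-level
    # globals declared below) and return count * max_size, so `batch_size` acts as
    # an element budget rather than a fixed number of examples.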
src, labels = new[0], new[1]
global max_n_sents, max_n_tokens, max_size
if count == 1:
max_size = 0
max_n_sents=0
max_n_tokens=0
max_n_sents = max(max_n_sents, len(src))
max_size = max(max_size, max_n_sents)
src_elements = count * max_size
return src_elements
class Dataloader(object):
def __init__(self, args, datasets, batch_size,
device, shuffle, is_test):
self.args = args
self.datasets = datasets
self.batch_size = batch_size
self.device = device
self.shuffle = shuffle
self.is_test = is_test
self.cur_iter = self._next_dataset_iterator(datasets)
assert self.cur_iter is not None
def __iter__(self):
dataset_iter = (d for d in self.datasets)
while self.cur_iter is not None:
for batch in self.cur_iter:
yield batch
self.cur_iter = self._next_dataset_iterator(dataset_iter)
def _next_dataset_iterator(self, dataset_iter):
try:
# Drop the current dataset for decreasing memory
if hasattr(self, "cur_dataset"):
self.cur_dataset = None
gc.collect()
del self.cur_dataset
gc.collect()
self.cur_dataset = next(dataset_iter)
except StopIteration:
return None
return DataIterator(args = self.args,
dataset=self.cur_dataset, batch_size=self.batch_size,
device=self.device, shuffle=self.shuffle, is_test=self.is_test)
class DataIterator(object):
def __init__(self, args, dataset, batch_size, device=None, is_test=False,
shuffle=True):
self.args = args
self.batch_size, self.is_test, self.dataset = batch_size, is_test, dataset
self.iterations = 0
self.device = device
self.shuffle = shuffle
self.sort_key = lambda x: len(x[1])
self._iterations_this_epoch = 0
def data(self):
if self.shuffle:
random.shuffle(self.dataset)
xs = self.dataset
return xs
def preprocess(self, ex, is_test):
src = ex['src']
if('labels' in ex):
labels = ex['labels']
else:
labels = ex['src_sent_labels']
segs = ex['segs']
if(not self.args.use_interval):
segs=[0]*len(segs)
clss = ex['clss']
src_txt = ex['src_txt']
tgt_txt = ex['tgt_txt']
if(is_test):
return src,labels,segs, clss, src_txt, tgt_txt
else:
return src,labels,segs, clss
def batch_buffer(self, data, batch_size):
minibatch, size_so_far = [], 0
for ex in data:
if(len(ex['src'])==0):
continue
ex = self.preprocess(ex, self.is_test)
if(ex is None):
continue
minibatch.append(ex)
size_so_far = simple_batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], simple_batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def create_batches(self):
""" Create batches """
data = self.data()
for buffer in self.batch_buffer(data, self.batch_size * 50):
p_batch = sorted(buffer, key=lambda x: len(x[3]))
p_batch = batch(p_batch, self.batch_size)
p_batch = list(p_batch)
if (self.shuffle):
random.shuffle(p_batch)
for b in p_batch:
yield b
def __iter__(self):
while True:
self.batches = self.create_batches()
for idx, minibatch in enumerate(self.batches):
# fast-forward if loaded from state
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
batch = Batch(minibatch, self.device, self.is_test)
yield batch
return
|
py | 7df745c0b6a636cbe4f292f063f3229b08af9b2a | # Generated by Django 3.2.12 on 2022-02-23 11:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='TextContent',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(verbose_name='Text')),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('edited_at', models.DateTimeField(auto_created=True, blank=True, null=True, verbose_name='Edited at')),
('is_deleted', models.BooleanField(default=False, help_text='Is item deleted or not', verbose_name='Is deleted')),
('chat_id', models.BigIntegerField()),
('content_type', models.CharField(choices=[('text', 'Text'), ('image', 'Image'), ('video', 'Video'), ('file', 'File')], db_index=True, max_length=10, verbose_name='Content type')),
('content_id', models.BigIntegerField()),
('is_edited', models.BooleanField(default=False, verbose_name='Is edited')),
('sent_at', models.DateTimeField(auto_now_add=True, verbose_name='Sent at')),
('chat_content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='contenttypes.contenttype')),
('content_content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='contenttypes.contenttype')),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sent_messages', to=settings.AUTH_USER_MODEL, verbose_name='Sender')),
],
options={
'abstract': False,
},
),
]
|
py | 7df7466004426020c424914bfc26780a77be99d1 | """ Interacts with a Docker Daemon on a remote instance"""
import random
from typing import (
Any,
Dict,
List,
Optional
)
import docker
from requests.exceptions import ConnectionError, Timeout
from loadsbroker import logger
from loadsbroker.util import retry
StrDict = Dict[str, str]
DOCKER_RETRY_EXC = (ConnectionError, Timeout)
def split_container_name(container_name):
"""Pulls apart a container name from its tag"""
parts = container_name.split(":")
if len(parts) > 1:
return parts
else:
        return parts[0], None
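# Illustrative behaviour (a sketch based on the function above, not from the
# original project's docs):
#   split_container_name("bbangert/simpletest:dev") -> ["bbangert/simpletest", "dev"]
#   split_container_name("busybox")                 -> ("busybox", None)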
class DockerDaemon:
def __init__(self, host, timeout=5):
self.host = host
self.timeout = timeout
self.responded = False
self._client = docker.Client(base_url=host, timeout=timeout)
def get_containers(self, all=False):
"""Returns a list of containers
:param all: Whether to include **non-running** containers.
"""
return {cont['Id']: cont
for cont in self._client.containers(all=all)}
def _create_container(self, image, cmd=None):
"""creates a container
"""
name = 'loads_%d' % random.randint(1, 9999)
container = self._client.create_container(image, name=name,
command=cmd,
detach=True)
id = container['Id']
self._client.start(container=id, publish_all_ports=True)
return name, id
def run(self, commands, image):
"""Runs commands in a new container.
Sends back a blocking iterator on the log output.
"""
cmd = '/bin/sh -c "%s"' % ';'.join(commands)
cname, cid = self._create_container(image, cmd=cmd)
return cid, self._client.attach(cid, stream=True, logs=True)
def exec_run(self, cid: str, cmd: str) -> bytes:
"""Run a command in an existing container."""
execid = self._client.exec_create(cid, cmd)
return self._client.exec_start(execid['Id'])
def kill(self, cid):
"""Kills and remove a container.
"""
self._client.remove_container(cid, force=True)
def stop(self, cid, timeout=15, capture_stream=None):
"""Stops and removes a container."""
self._client.stop(cid, timeout)
self._client.wait(cid)
if capture_stream:
capture_stream.write(self._client.logs(cid, timestamps=True))
self._client.remove_container(cid)
def pull_container(self, container_name):
"""Pulls a container image from the repo/tag for the provided
container name"""
result = self._client.pull(container_name, stream=True)
return list(result)
def import_container(self, client, container_url):
"""Imports a container from a URL"""
stdin, stdout, stderr = client.exec_command(
'curl %s | docker load' % container_url)
# Wait for termination
output = stdout.channel.recv(4096)
stdin.close()
stdout.close()
stderr.close()
return output
@retry(on_exception=lambda exc: isinstance(exc, DOCKER_RETRY_EXC))
def has_image(self, container_name):
"""Indicates whether this instance already has the desired
container name/tag loaded.
Example of what the images command output looks like:
[{'Created': 1406605442,
'RepoTags': ['bbangert/simpletest:dev'],
'Id': '824823...31ae0d6fc69e6e666a4b44118b0a3',
'ParentId': 'da7b...ee6b9eb2ee47c2b1427eceb51d291a',
'Size': 0,
'VirtualSize': 1400958681}]
"""
name, tag = split_container_name(container_name)
images = self._client.images(all=True)
return any(container_name in image["RepoTags"] for image in images)
def run_container(self,
name: str,
command: Optional[str] = None,
env: Optional[StrDict] = None,
volumes: Optional[Dict[str, StrDict]] = None,
ports: Optional[Dict[Any, Any]] = None,
dns: Optional[List[str]] = None,
pid_mode: Optional[str] = None,
entrypoint: Optional[str] = None):
"""Run a container given the container name, env, command args, data
volumes, and port bindings."""
if volumes is None:
volumes = {}
        if dns is None:
            dns = []
        if ports is None:
            ports = {}
expose = []
port_bindings = {}
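        # Ports may be given either as a bare container port (e.g. 8080) or as a
        # (port, protocol) tuple such as (53, "udp"); build docker-py style
        # "port/proto" keys for the host port bindings accordingly.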
for port in ports.keys():
if isinstance(port, tuple):
proto = port[1] if len(port) == 2 else "tcp"
key = "%d/%s" % (port[0], proto)
else:
key = port
port_bindings[key] = ports[port]
expose.append(port)
result = self._client.create_container(
name, command=command, environment=env,
volumes=[volume['bind'] for volume in volumes.values()],
ports=expose,
entrypoint=entrypoint)
container = result["Id"]
result = self._client.start(container, binds=volumes,
port_bindings=port_bindings, dns=dns,
pid_mode=pid_mode)
response = self._client.inspect_container(container)
return response
def safe_run_container(self, name: str, *args, **kwargs) -> Any:
"""Call run_container until it succeeds
Max of 5 tries w/ attempts to stop potential zombie
containers.
"""
for i in range(5):
try:
return self.run_container(name, *args, **kwargs)
except Exception as exc:
logger.debug("Exception with run_container (%s)",
name, exc_info=True)
if i == 4:
logger.debug("Giving up on running container.")
raise
self.stop_container(name)
def containers_by_name(self, container_name):
"""Yields all containers that match the given name."""
containers = self._client.containers()
return (container for container in containers
if container_name in container["Image"])
def kill_container(self, container_name):
"""Locate the container of the given container_name and kill
it"""
for container in self.containers_by_name(container_name):
self.kill(container["Id"])
def stop_container(self,
container_name,
timeout=15,
capture_stream=None):
"""Locates and gracefully stops a container by name."""
for container in self.containers_by_name(container_name):
self.stop(container["Id"], timeout, capture_stream)
|
py | 7df746d861f457609e9cebe35c7833b1a49578f6 | from functools import partial
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from soundcloud.resource import wrapped_resource
from soundcloud.request import make_request
class Client(object):
"""A client for interacting with Soundcloud resources."""
use_ssl = True
host = 'api.soundcloud.com'
def __init__(self, **kwargs):
"""Create a client instance with the provided options. Options should
be passed in as kwargs.
"""
self.use_ssl = kwargs.get('use_ssl', self.use_ssl)
self.host = kwargs.get('host', self.host)
self.scheme = self.use_ssl and 'https://' or 'http://'
self.options = kwargs
self._authorize_url = None
self.client_id = kwargs.get('client_id')
if 'access_token' in kwargs:
self.access_token = kwargs.get('access_token')
return
if 'client_id' not in kwargs:
raise TypeError("At least a client_id must be provided.")
if 'scope' in kwargs:
self.scope = kwargs.get('scope')
# decide which protocol flow to follow based on the arguments
# provided by the caller.
if self._options_for_authorization_code_flow_present():
self._authorization_code_flow()
elif self._options_for_credentials_flow_present():
self._credentials_flow()
elif self._options_for_token_refresh_present():
self._refresh_token_flow()
def exchange_token(self, code):
"""Given the value of the code parameter, request an access token."""
url = '%s%s/oauth2/token' % (self.scheme, self.host)
options = {
'grant_type': 'authorization_code',
'redirect_uri': self._redirect_uri(),
'client_id': self.options.get('client_id'),
'client_secret': self.options.get('client_secret'),
'code': code,
}
options.update({
'verify_ssl': self.options.get('verify_ssl', True),
'proxies': self.options.get('proxies', None)
})
self.token = wrapped_resource(
make_request('post', url, options))
self.access_token = self.token.access_token
return self.token
def authorize_url(self):
"""Return the authorization URL for OAuth2 authorization code flow."""
return self._authorize_url
def _authorization_code_flow(self):
"""Build the the auth URL so the user can authorize the app."""
options = {
'scope': getattr(self, 'scope', 'non-expiring'),
'client_id': self.options.get('client_id'),
'response_type': 'code',
'redirect_uri': self._redirect_uri()
}
url = '%s%s/connect' % (self.scheme, self.host)
self._authorize_url = '%s?%s' % (url, urlencode(options))
def _refresh_token_flow(self):
"""Given a refresh token, obtain a new access token."""
url = '%s%s/oauth2/token' % (self.scheme, self.host)
options = {
'grant_type': 'refresh_token',
'client_id': self.options.get('client_id'),
'client_secret': self.options.get('client_secret'),
'refresh_token': self.options.get('refresh_token')
}
options.update({
'verify_ssl': self.options.get('verify_ssl', True),
'proxies': self.options.get('proxies', None)
})
self.token = wrapped_resource(
make_request('post', url, options))
self.access_token = self.token.access_token
def _credentials_flow(self):
"""Given a username and password, obtain an access token."""
url = '%s%s/oauth2/token' % (self.scheme, self.host)
options = {
'client_id': self.options.get('client_id'),
'client_secret': self.options.get('client_secret'),
'scope': getattr(self, 'scope', ''),
'grant_type': 'client_credentials'
}
options.update({
'verify_ssl': self.options.get('verify_ssl', True),
'proxies': self.options.get('proxies', None)
})
self.token = wrapped_resource(
make_request('post', url, options))
self.access_token = self.token.access_token
def _request(self, method, resource, **kwargs):
"""Given an HTTP method, a resource name and kwargs, construct a
request and return the response.
"""
url = self._resolve_resource_name(resource)
if hasattr(self, 'access_token'):
kwargs.update(dict(oauth_token=self.access_token))
if hasattr(self, 'client_id'):
kwargs.update(dict(client_id=self.client_id))
kwargs.update({
'verify_ssl': self.options.get('verify_ssl', True),
'proxies': self.options.get('proxies', None)
})
return wrapped_resource(make_request(method, url, kwargs))
def __getattr__(self, name, **kwargs):
"""Translate an HTTP verb into a request method."""
if name not in ('get', 'post', 'put', 'head', 'delete'):
raise AttributeError
return partial(self._request, name, **kwargs)
def _resolve_resource_name(self, name):
"""Convert a resource name (e.g. tracks) into a URI."""
if name[:4] == 'http': # already a url
return name
name = name.rstrip('/').lstrip('/')
return '%s%s/%s' % (self.scheme, self.host, name)
def _redirect_uri(self):
"""
Return the redirect uri. Checks for ``redirect_uri`` or common typo,
``redirect_url``
"""
return self.options.get(
'redirect_uri',
self.options.get('redirect_url', None))
# Helper functions for testing arguments provided to the constructor.
def _options_present(self, options, kwargs):
return all(map(lambda k: k in kwargs, options))
def _options_for_credentials_flow_present(self):
required = ('client_id', 'client_secret')
return self._options_present(required, self.options)
def _options_for_authorization_code_flow_present(self):
required = ('client_id', 'redirect_uri')
or_required = ('client_id', 'redirect_url')
return (self._options_present(required, self.options) or
self._options_present(or_required, self.options))
def _options_for_token_refresh_present(self):
required = ('client_id', 'client_secret', 'refresh_token')
return self._options_present(required, self.options)
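# Minimal usage sketch (illustrative only; the credentials are placeholders):
#   client = Client(client_id="YOUR_ID", client_secret="YOUR_SECRET")
#   tracks = client.get('/tracks', q='piano')   # dispatched via __getattr__ -> _request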
|
py | 7df746e3d44d5a3da77726b436147b566721958e | # Generated by Django 2.0 on 2019-10-03 13:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('post', '0011_remove_posts_createdat'),
]
operations = [
migrations.AlterField(
model_name='posts',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL),
),
]
|
py | 7df746fd918148e2218c908570f044913d7f4824 | #!/usr/bin/env python3
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch_ros.actions import Node
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
def generate_launch_description():
# RViZ2 settings
rviz2_config = os.path.join(
get_package_share_directory('ldlidar_sl_ros2'),
'rviz2',
'ldlidar.rviz'
)
rviz2_node = Node(
package='rviz2',
executable='rviz2',
name='rviz2_show_ld14',
arguments=['-d',rviz2_config],
output='screen'
)
#Include LDLidar launch file
ldlidar_launch = IncludeLaunchDescription(
launch_description_source=PythonLaunchDescriptionSource([
get_package_share_directory('ldlidar_sl_ros2'),
'/launch/ld14.launch.py'
])
)
# Define LaunchDescription variable
ld = LaunchDescription()
ld.add_action(ldlidar_launch)
ld.add_action(rviz2_node)
return ld |
py | 7df7487b4987e7daf25c8c5aff4930a472c542d6 | import os
import math
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
import plotly.offline as py
import plotly.graph_objects as go
import matplotlib.patches as mpatches
from colour import Color
from itertools import chain
from shapely import affinity
from shapely.geometry.point import Point
from scipy import interpolate
from matplotlib import gridspec
from collections import defaultdict
from rpy2.robjects import r, FloatVector
from rpy2.robjects.packages import importr, STAP
from rpy2.robjects.numpy2ri import numpy2rpy
from matplotlib.patches import Polygon
from scipy.signal import argrelextrema
from matplotlib.patches import Ellipse
from scipy.stats import ks_2samp, kstwobign
from matplotlib.ticker import MaxNLocator, MultipleLocator
from analysis.functions import read_hdf5, trim_data, calibrate_data, get_boxplots, parse_filename, subsampling
flatten = chain.from_iterable
class Metadata:
def __init__(self, pickle_folder=None, filename=None, sdict=None):
if sdict:
self.pdata = sdict
else:
if pickle_folder and filename:
self.pdata = self.unpickle_data(pickle_folder, filename)
else:
raise Exception(f"Check the filename '{filename}' and folder '{pickle_folder}'")
def unpickle_data(self, pickle_folder, filename):
filename = filename if ".pickle" in filename else f"{filename}.pickle"
with open(f"{pickle_folder}/{filename.replace('_E_', '_')}", 'rb') as handle:
return pickle.load(handle)
def get_rats_id(self, muscle='E'):
return self.pdata['rats_data'][muscle].keys()
def get_myograms(self, rat, muscle='E'):
return self.pdata['rats_data'][muscle][rat]['data']
def get_peak_times(self, rat, muscle='E', flat=False, unslice=False):
data = self.pdata['rats_data'][muscle][rat]['times']
if flat:
return np.array(list(flatten(flatten(data))))
if unslice:
return [list(flatten(d)) for d in data]
return data
def get_peak_durations(self, rat, muscle='E', flat=False, unslice=False):
data = self.pdata['rats_data'][muscle][rat]['durations']
if flat:
return np.array(list(flatten(flatten(data))))
if unslice:
return [list(flatten(d)) for d in data]
return data
def get_peak_ampls(self, rat, muscle='E', flat=False, unslice=False):
data = self.pdata['rats_data'][muscle][rat]['ampls']
if flat:
return np.array(list(flatten(flatten(data))))
if unslice:
return [list(flatten(d)) for d in data]
return data
def get_peak_slices(self, rat, muscle='E', flat=False, unslice=False):
data = self.pdata['rats_data'][muscle][rat]['slices']
if flat:
return np.array(list(flatten(flatten(data))))
if unslice:
return [list(flatten(d)) for d in data]
return data
def get_fMEP_count(self, rat, muscle='E'):
return len(self.pdata['rats_data'][muscle][rat]['data'])
def get_peak_counts(self, rat, border, muscle='E'):
times = self.get_peak_times(rat, muscle, flat=True) * self.pdata['dstep_to']
fMEPs = self.get_fMEP_count(rat, muscle)
if border == 'poly_tail':
# [0 - 3] and [8 - 25]
mask = (times <= 3) | (8 <= times)
else:
# [T1, T2]
mask = (border[0] <= times) & (times <= border[1])
return len(times[mask]) / fMEPs
def get_peak_median_height(self, rat, border, muscle='E'):
ampls = self.get_peak_ampls(rat, muscle, flat=True)
times = self.get_peak_times(rat, muscle, flat=True) * self.pdata['dstep_to']
if border == 'poly_tail':
# [0 - 3] and [8 - 25]
mask = (times <= 3) | (8 <= times)
else:
# [T1, T2]
mask = (border[0] <= times) & (times <= border[1])
return np.median(ampls[mask])
def get_latency_volume(self, rat, muscle='E'):
return self.pdata['rats_data'][muscle][rat]['latency_volume']
@property
def shortname(self):
return self.pdata['shortname']
@property
def rate(self):
return self.pdata['rate']
@property
def dstep_to(self):
return self.pdata['dstep_to']
@property
def dstep_from(self):
return self.pdata['dstep_from']
@property
def slice_in_ms(self):
return self.pdata['slice_in_ms']
@property
def slice_count(self):
return self.pdata['slices_count']
@property
def speed(self):
return self.pdata['speed']
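# Minimal usage sketch (illustrative only; the folder and file names are placeholders):
#   meta = Metadata("pickles", "bio_PLT_13.5cms_40hz_2ped.pickle")
#   for rat in meta.get_rats_id():
#       peak_times_ms = meta.get_peak_times(rat, flat=True) * meta.dstep_to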
class Analyzer:
def __init__(self, pickle_folder, debug=False):
self.pickle_folder = pickle_folder
self.plots_folder = f"{pickle_folder}/plots"
self.debug = debug
if not os.path.exists(self.pickle_folder):
os.makedirs(self.pickle_folder)
if not os.path.exists(self.plots_folder):
os.makedirs(self.plots_folder)
logging.basicConfig(format='[%(funcName)s]: %(message)s', level=logging.INFO)
self.log = logging.getLogger()
@staticmethod
def disable_log():
logging.disable(logging.CRITICAL)
@staticmethod
def _recolor(boxplot_elements, color, fill_color, fill_alpha=0.0):
"""
Add colors to bars (setup each element)
Args:
boxplot_elements (dict): components of the boxplot
color (str): HEX color of outside lines
fill_color (str): HEX color of filling
"""
assert 0 <= fill_alpha <= 1
hex_alpha = hex(int(fill_alpha * 255))[2:]
        # TODO: stop using plt here and bind the styling to the given axes instead
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
plt.setp(boxplot_elements[element], color=color, linewidth=2)
plt.setp(boxplot_elements["fliers"], markeredgecolor=color)
for patch in boxplot_elements['boxes']:
patch.set(facecolor=f"{fill_color}{hex_alpha}")
def KDE_plot(self, xy1, xy2, names, rats, border):
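        # Draw a 2D KDE comparison of the two point clouds xy1 and xy2 inside
        # `border` = (xmin, xmax, ymin, ymax), with marginal KDEs on the top and
        # right axes; note that `names` and `rats` are accepted but the legend
        # currently uses the hard-coded "bio" / "NEURON" labels below.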
plt.close()
gs = gridspec.GridSpec(2, 2, width_ratios=[3, 1], height_ratios=[1, 4])
plt.figure(figsize=(8, 6))
title_ax = plt.subplot(gs[0, 1])
for spine in ['top', 'right', 'left', 'bottom']:
title_ax.spines[spine].set_visible(False)
title_ax.set_xticks([])
title_ax.set_yticks([])
kde_ax = plt.subplot(gs[1, 0])
kde_ax.spines['top'].set_visible(False)
kde_ax.spines['right'].set_visible(False)
label_pathes = []
#
for (x, y), name, color in zip([xy1, xy2],
[f"bio", f"NEURON"],
['#124460', '#472650']):
self._contour_plot(x=x, y=y, color=color, ax=kde_ax, z_prev=[0],
borders=border, levels_num=15)
t, r = self.joint_plot(x, y, kde_ax, gs, **{"color": color}, borders=border, with_boxplot=False)
label_pathes.append(mpatches.Patch(color=color, label=f"{name}"))
# kde_ax.plot(x, y, '.', c=color)
t.set_xticklabels([])
r.set_yticklabels([])
# self.axis_article_style(ax, axis='x')
# r.yticks(fontsize=30)
t.set_xlim(border[0], border[1])
r.set_ylim(border[2], border[3])
kde_ax.legend(handles=label_pathes, fontsize=17)
self.axis_article_style(kde_ax, axis='both')
kde_ax.set_xlim(border[0], border[1])
kde_ax.set_ylim(border[2], border[3])
plt.tight_layout()
@staticmethod
def form_1d_kde(X, xmin, xmax):
xx = np.linspace(xmin, xmax, 1000)
dx = st.gaussian_kde(X)(xx)
return xx, dx
@staticmethod
def form_2d_kde(x, y, xmin, xmax, ymin, ymax):
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
# re-present grid in 1D and pair them as (x1, y1 ...)
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
# use a Gaussian KDE
a = st.gaussian_kde(values)(positions).T
# re-present grid back to 2D
z = np.reshape(a, xx.shape)
return xx, yy, z
@staticmethod
def overlap_density(d1, d2):
assert d1.size == d2.size
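        # Intersection-over-union of two discretised densities: the overlap is the
        # sum of the pointwise minima, normalised by the union (total area minus
        # the overlap).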
overlay = np.sum(d1[d1 <= d2]) + np.sum(d2[d2 < d1])
area1, area2 = np.sum(d1), np.sum(d2)
iou = overlay / (area1 + area2 - overlay)
return iou
def outside_compare(self, comb, border, axis, muscletype='E', per_step=False, get_iou=False, get_pvalue=False, plot=False, show=False):
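        # Pairwise comparison of two datasets: `comb` is
        # [(filename1, [rat ids]), (filename2, [rat ids])], `axis` picks two of
        # ('time', 'ampl', 'slice'); peaks are compared either merged over all
        # steps or step-by-step (per_step=True), via density IoU (get_iou) and/or
        # R KDE-test p-values (get_pvalue).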
if len(comb) != 2:
raise Exception("Only pairing analysis")
#
metadata1 = Metadata(self.pickle_folder, comb[0][0])
metadata2 = Metadata(self.pickle_folder, comb[1][0])
        # do not change the order!
        axis_names = ('time', 'ampl', 'slice')
axis_borders = ([8, 28] if border == 'poly_tail' else border,
(0, 1.2),
(0, min(metadata1.slice_count, metadata2.slice_count)))
#
slice_border = min(metadata1.slice_count, metadata2.slice_count)
ax1_index = axis_names.index(axis[0])
ax2_index = axis_names.index(axis[1])
meta_names = (metadata1.shortname, metadata2.shortname)
#
all_pval = []
iou_values = []
for rat1 in comb[0][1]:
for rat2 in comb[1][1]:
                # check whether a p-value file already exists, to save computation time
if per_step:
if get_iou:
times1 = metadata1.get_peak_times(rat=rat1, unslice=True, muscle=muscletype)
ampls1 = metadata1.get_peak_ampls(rat=rat1, unslice=True, muscle=muscletype)
slice1 = metadata1.get_peak_slices(rat=rat1, unslice=True, muscle=muscletype)
times2 = metadata2.get_peak_times(rat=rat2, unslice=True, muscle=muscletype)
ampls2 = metadata2.get_peak_ampls(rat=rat2, unslice=True, muscle=muscletype)
slice2 = metadata2.get_peak_slices(rat=rat2, unslice=True, muscle=muscletype)
t1, a1, s1 = [], [], []
t2, a2, s2 = [], [], []
# 1 rat
for time, ampl, sl in zip(times1, ampls1, slice1):
time = np.array(time) * metadata1.dstep_to
sl = np.array(sl)
ampl = np.array(ampl)
if border == 'poly_tail':
time[time <= 3] += 25
mask = time >= 8
else:
mask = (border[0] <= time) & (time <= border[1]) & (sl <= slice_border)
t1.append(time[mask])
a1.append(ampl[mask])
s1.append(sl[mask])
# 2 rat
for time, ampl, sl in zip(times2, ampls2, slice2):
time = np.array(time) * metadata2.dstep_to
sl = np.array(sl)
ampl = np.array(ampl)
if border == 'poly_tail':
time[time <= 3] += 25
mask = time >= 8
else:
mask = (border[0] <= time) & (time <= border[1]) & (sl <= slice_border)
t2.append(time[mask])
a2.append(ampl[mask])
s2.append(sl[mask])
data1 = [t1, a1, s1]
data2 = [t2, a2, s2]
X1, Y1 = data1[ax1_index], data1[ax2_index]
X2, Y2 = data2[ax1_index], data2[ax2_index]
xmin, xmax, ymin, ymax = *axis_borders[ax1_index], *axis_borders[ax2_index]
iou2d = []
for x1, y1 in zip(X1, Y1):
for x2, y2 in zip(X2, Y2):
# 1D x1 x2
# xx1, dx1 = self.form_1d_kde(X1, xmin=xmin, xmax=xmax)
# xx2, dx2 = self.form_1d_kde(X2, xmin=xmin, xmax=xmax)
# iou1d_x = self.overlap_density(dx1, dx2)
# 1D y1 y2
# yy1, dy1 = self.form_1d_kde(Y1, xmin=ymin, xmax=ymax)
# yy2, dy2 = self.form_1d_kde(Y2, xmin=ymin, xmax=ymax)
# iou1d_y = self.overlap_density(dy1, dy2)
# 2D
_, _, z1 = self.form_2d_kde(x1, y1, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
_, _, z2 = self.form_2d_kde(x2, y2, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
iou2d.append(self.overlap_density(z1, z2))
iou_values.append(iou2d)
if get_pvalue:
"""
Here p-value will be calculated for each step in R KDE test.
Convertation lists with different lengths to matrix is only way to pass the matrix to R.
Because of that the emptines filled by -9999 (trash) values, whtich will be filtered in R.
np.where is used instead of classic slice because of saving true shape of the matrix
"""
dataXY = []
for r, m in zip((rat1, rat2), (metadata1, metadata2)):
t = m.get_peak_times(rat=r, unslice=True, muscle=muscletype)
a = m.get_peak_ampls(rat=r, unslice=True, muscle=muscletype)
s = m.get_peak_slices(rat=r, unslice=True, muscle=muscletype)
maxlen = max(map(len, t))
t = np.array([d + [-9999] * (maxlen - len(d)) for d in t]) * m.dstep_to
a = np.array([d + [-9999] * (maxlen - len(d)) for d in a])
s = np.array([d + [-9999] * (maxlen - len(d)) for d in s])
shape = t.shape
if border == 'poly_tail':
t[t <= 3] += 25 # all peaks at [0, 3] becomes [25, 28]
mask = t >= 8
else:
mask = (border[0] <= t) & (t <= border[1]) & (s <= slice_border)
# pick the data which corresponds to the axis name with saving shape
data = (np.where(mask, t, -9999).reshape(shape),
np.where(mask, a, -9999).reshape(shape),
np.where(mask, s, -9999).reshape(shape))
X, Y = data[ax1_index], data[ax2_index]
                            # each step needs enough valid points for the KDE test; drop rows with fewer than 10
not_good_rows = [i for i, d in enumerate(X) if len(d[d >= 0]) < 10]
X = np.delete(X, not_good_rows, axis=0)
Y = np.delete(Y, not_good_rows, axis=0)
if len(X) == 0:
print('EMPTY')
break
dataXY.append((X, Y))
else:
# !
# todo about .T
pval_t, pval_a, pval_2d = self._multi_R_KDE_test(*dataXY[0], *dataXY[1]).T
print(f"{meta_names[0]} rat {rat1} vs {meta_names[1]} rat {rat2} muscle {muscletype}"
f"'{axis[0]} by step': {np.median(pval_t)}; "
f"'{axis[1]} by step': {np.median(pval_a)}; "
f"2D by step: {np.median(pval_2d)}")
all_pval.append(pval_2d)
# plot data if necessary
if plot:
kde_border = (*axis_borders[ax1_index], *axis_borders[ax2_index])
for xstep1, ystep1 in zip(*dataXY[0]):
for xstep2, ystep2 in zip(*dataXY[1]):
xy1 = xstep1[xstep1 >= 0], ystep1[ystep1 >= 0]
xy2 = xstep2[xstep2 >= 0], ystep2[ystep2 >= 0]
self.KDE_plot(xy1, xy2, meta_names, (rat1, rat2), kde_border)
filename = f"{meta_names[0]}_rat{rat1}_{meta_names[1]}_rat{rat2}_{muscletype}_merged"
plt.savefig(f'{self.plots_folder}/{filename}.pdf', format='pdf')
if show:
plt.show()
plt.close()
else:
"""
The simpliest way to compare data -- 1D representation.
That way all steps merged into one dataset.
P-value will critical differ from median p-value of step-by-step analysis
"""
dataXY = []
for r, m in zip((rat1, rat2), (metadata1, metadata2)):
t = m.get_peak_times(rat=r, flat=True, muscle=muscletype) * m.dstep_to
# s = 'm'
# for i in t:
# s = s + str(i) + ','
#
# print(s)
a = m.get_peak_ampls(rat=r, flat=True, muscle=muscletype)
s = m.get_peak_slices(rat=r, flat=True, muscle=muscletype)
if border == 'poly_tail':
t[t <= 3] += 25 # all peaks at [0, 3] becomes [25, 28]
mask = 8 <= t
else:
mask = (border[0] <= t) & (t <= border[1])
# pick the data which corresponds to the axis name
t = t[mask]
a = a[mask]
s = s[mask]
# d = (t[mask], a[mask], s[mask])
mask2 = (0 <= s) & (s <= slice_border)
d = (t[mask2], a[mask2], s[mask2])
X, Y = d[ax1_index], d[ax2_index]
dataXY.append((X, Y))
# plt.close()
# print(m.shortname, r)
# for si in range(0, max(s) + 1):
# plt.boxplot(a[s == si], positions=[si], whis=[5, 95])
# plt.ylim(0, 1.7)
# plt.show()
# calc the p-value by KDE test
pval_t, pval_a, pval_2d = self._multi_R_KDE_test(*dataXY[0], *dataXY[1])
print(f"{meta_names[0]} rat {rat1} vs {meta_names[1]} rat {rat2} - {muscletype} muscle"
f"'{axis[0]} merged': {pval_t}; "
f"'{axis[1]} merged': {pval_a}; "
f"2D merged: {pval_2d}")
# plot data if necessary
if plot:
kde_border = (*axis_borders[ax1_index], *axis_borders[ax2_index])
self.KDE_plot(dataXY[0], dataXY[1], meta_names, (rat1, rat2), kde_border)
filename = f"{meta_names[0]}_rat{rat1}_{meta_names[1]}_rat{rat2}_{muscletype}_merged"
plt.savefig(f'{self.plots_folder}/{filename}.pdf', format='pdf')
if show:
plt.show()
plt.close()
filename = f"{muscletype}_{meta_names[0]}_{meta_names[1]}_{ax1_index}_{ax2_index}"
fullname = f'{self.plots_folder}/{filename}_box.pdf'
if get_pvalue:
plt.boxplot(list(flatten(all_pval)))
if get_iou:
plt.boxplot(iou_values)
plt.ylim(0, 1)
plt.savefig(fullname, format='pdf')
def plot_cumulative(self, cmltv, border, order=None, pval_slices_peak=False):
"""
"""
data = []
wspace = 6
pos_dict = []
clr_height = '#472650'
clr_count = '#a6261d'
clr_volume = '#287a72'
global_significancies = defaultdict(list)
names = np.array(cmltv)[:, :2]
uniq_names = list(np.unique(names))
if order:
if sorted(uniq_names) != sorted(order):
raise Exception("Order is not contain all uniq names of 'cmltv' list")
uniq_names = order
def add_signific(source, target, value):
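            # Register a significance bracket between group positions `source` and
            # `target` (x-axis indices); brackets are grouped by span width so that
            # apply_signific() can stack them vertically without overlaps.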
#
if source > target:
source, target = target, source
#
top = max(pos_dict[source:target + 1]) + tickrisk
global_significancies[target - source].append([source, target, top, value])
global_significancies[target - source] = sorted(global_significancies[target - source], key=lambda d: d[2])
def apply_signific():
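            # Draw all registered significance brackets, lifting each one above the
            # previously drawn ones, and return the highest y level used so the
            # axis limits can be extended accordingly.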
last_max = 0
#
for key in sorted(global_significancies.keys()):
for d in global_significancies[key]:
if d[2] > last_max:
last_max = d[2]
else:
while d[2] <= last_max:
d[2] += 3 * tickrisk
last_max = d[2]
#
for meta in sum(global_significancies.values(), []):
xl, xr, yt, textval = meta
yt += 3 * tickrisk
ax1.plot(np.array((xl + 0.05, xl + 0.05, xr - 0.05, xr - 0.05)) * wspace,
(yt - 1.2 * tickrisk, yt, yt, yt - 1.2 * tickrisk), lw=2, c='k')
ax1.text(np.mean([xr, xl]) * wspace, yt + tickrisk, textval, c='k', ha='center', fontsize=15)
return last_max
#
for uniq_key in uniq_names:
filename = [f for f in os.listdir(self.pickle_folder) if uniq_key in f and f.endswith(".pickle")][0]
pdata = Metadata(self.pickle_folder, filename)
rats = pdata.get_rats_id(muscle='E')
counts = [pdata.get_peak_counts(rat, border=border) for rat in rats]
heights = [pdata.get_peak_median_height(rat, border=border) for rat in rats]
volumes = [pdata.get_latency_volume(rat) for rat in rats]
for rat_id, count, height, volume in zip(rats, counts, heights, volumes):
data.append([uniq_key, rat_id, count, height, volume])
grouped_height = {k: [] for k in uniq_names}
grouped_count = {k: [] for k in uniq_names}
grouped_volume = {k: [] for k in uniq_names}
for d in data:
grouped_count[d[0]].append(d[2])
grouped_height[d[0]].append(d[3])
grouped_volume[d[0]].append(d[4])
all_vals_ax1 = [d[2] for d in data]
all_vals_ax2 = [d[3] for d in data]
all_vals_ax3 = [d[4] for d in data]
ax1_min, ax1_max = min(all_vals_ax1), max(all_vals_ax1)
ax2_min, ax2_max = min(all_vals_ax2), max(all_vals_ax2)
ax3_min, ax3_max = min(all_vals_ax3), max(all_vals_ax3)
tickrisk = (ax1_max - ax1_min) * 0.02
fig, ax1 = plt.subplots(figsize=(16, 9))
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax3 = ax1.twinx()
ax3.spines["right"].set_position(("axes", 1.1))
i = 0
averagesX = []
averagesY = []
for name, c, h, v in zip(uniq_names, grouped_count.values(), grouped_height.values(), grouped_volume.values()):
i1, i2, i3 = i - 1, i, i + 1
# make the same data ratio
h = [(ax1_max - ax1_min) * (h_elem - ax2_min) / (ax2_max - ax2_min) + ax1_min for h_elem in h]
v = [(ax1_max - ax1_min) * (v_elem - ax3_min) / (ax3_max - ax3_min) + ax1_min for v_elem in v]
# for trendlines
averagesX.append((i1 - 0.3, i2 - 0.3, i3 - 0.3))
averagesY.append((np.mean(c), np.mean(h), np.mean(v)))
for ax, index, dat, color in zip([ax1, ax2, ax3], [i1, i2, i3],
[c, h, v], [clr_count, clr_height, clr_volume]):
ax.plot([index - 0.4, index - 0.2], [np.mean(dat)] * 2, color=color, lw=4)
ax.plot([index - 0.3] * 2, [max(dat), min(dat)], color=color, lw=1.5)
ax.plot([index] * len(dat), dat, '.', ms=15, color=color)
pos_dict.append(max(h + c + v))
i += wspace
for xx, yy, clr in zip(np.array(averagesX).T, np.array(averagesY).T, [clr_count, clr_height, clr_volume]):
ax.plot(xx, yy, c=clr, alpha=0.4)
if len(cmltv[0]) == 3:
for p1, p2, pval in cmltv:
logging.info(f"Pair {p1} and {p2}")
source = uniq_names.index(p1)
target = uniq_names.index(p2)
add_signific(source, target, value=f"{pval:.2f}")
else:
for p1, p2 in cmltv:
logging.info(f"Pair {p1} and {p2}")
source = uniq_names.index(p1)
target = uniq_names.index(p2)
filename1 = [f for f in os.listdir(self.pickle_folder) if p1 in f and f.endswith(".pickle")][0]
filename2 = [f for f in os.listdir(self.pickle_folder) if p2 in f and f.endswith(".pickle")][0]
pdata1 = self.get_pickle_data(filename1)
pdata2 = self.get_pickle_data(filename2)
dstep_to1 = pdata1['dstep_to']
dstep_to2 = pdata2['dstep_to']
pvalues = []
for rat1 in self.get_rats_id(pdata1):
for rat2 in self.get_rats_id(pdata2):
                        # check whether a p-value file already exists, to save computation time
pval_file = f"{self.pickle_folder}/pval_ampl_{p1}_{rat1}+{p2}_{rat2}"
if os.path.exists(pval_file) and os.path.getsize(pval_file) > 0:
with open(f"{self.pickle_folder}/pval_ampl_{p1}_{rat1}+{p2}_{rat2}") as file:
pval_x, pval_y, pval_2d = [], [], []
for line in file.readlines():
pval_x, pval_y, pval_2d = list(map(float, line.split("\t")))
else:
x1 = [(np.array(d) * dstep_to1).tolist() for d in self.get_peak_times(pdata1, rat=rat1, unslice=True)]
x2 = [(np.array(d) * dstep_to2).tolist() for d in self.get_peak_times(pdata2, rat=rat2, unslice=True)]
if pval_slices_peak:
y1 = self.get_peak_slices(pdata1, rat=rat1, unslice=True)
y2 = self.get_peak_slices(pdata2, rat=rat2, unslice=True)
else:
y1 = self.get_peak_ampls(pdata1, rat=rat1, unslice=True)
y2 = self.get_peak_ampls(pdata2, rat=rat2, unslice=True)
pval_x, pval_y, pval_2d = self._multi_R_KDE_test(x1, y1, x2, y2)
# save pvalues
with open(pval_file, 'w') as file:
for px, py, p2d in zip(pval_x, pval_y, pval_2d):
file.write(f"{px:.5f}\t{py:.5f}\t{p2d:.5f}\n")
#
ind = 0
for aa, bb in zip(x1, y1):
for cc, dd in zip(x2, y2):
kde_pval_t = pval_x[ind]
kde_pval_a = pval_y[ind]
kde_pval_2d = pval_2d[ind]
gs = gridspec.GridSpec(2, 2, width_ratios=[3, 1], height_ratios=[1, 4])
fig = plt.figure(figsize=(8, 6))
title_ax = plt.subplot(gs[0, 1])
title_ax.text(0.5, 0.5, f"T: {kde_pval_t:.3f}\nA: {kde_pval_a:.3f}\n2D: {kde_pval_2d:.3f}",
horizontalalignment='center',
verticalalignment='center',
fontsize=18, transform=title_ax.transAxes)
for spine in ['top', 'right', 'left', 'bottom']:
title_ax.spines[spine].set_visible(False)
title_ax.set_xticks([])
title_ax.set_yticks([])
kde_ax = plt.subplot(gs[1, 0])
kde_ax.spines['top'].set_visible(False)
kde_ax.spines['right'].set_visible(False)
# 2D joint plot
z = []
label_pathes = []
z_prev = np.zeros(1)
#
border = [8, 28, 0, 1.4]
for x, y, name, color in zip([np.array(aa) * dstep_to1, np.array(cc) * dstep_to2],
[bb, dd], [f"{p1} {rat1}", f"{p2} {rat2}"], ['#A6261D', '#472650']):
z_prev = self._contour_plot(x=x, y=y, color=color, ax=kde_ax, z_prev=z_prev, borders=border,
levels_num=15)
z.append(z_prev)
t, r = self.joint_plot(x, y, kde_ax, gs, **{"color": color}, borders=border, with_boxplot=False)
label_pathes.append(mpatches.Patch(color=color, label=f"{name}"))
t.set_xticklabels([])
r.set_yticklabels([])
t.set_xlim(border[0], border[1])
r.set_ylim(border[2], border[3])
kde_ax.plot(x, y, '.', color=color)
kde_ax.legend(handles=label_pathes, fontsize=17)
kde_ax.set_xlim(border[0], border[1])
kde_ax.set_ylim(border[2], border[3])
plt.tight_layout()
plt.show()
plt.close(fig)
ind += 1
# end if
pvalues.append((rat1, rat2, np.median(pval_2d)))
logging.info(f"{p1}_{rat1} vs {p2}_{rat2} {np.median(pval_x)} {np.median(pval_y)} {np.median(pval_2d)}")
# end rats for loop
pvals = [p[2] for p in pvalues]
add_signific(source, target, value=f"{np.median(pvals):.2f}")
# end p1, p2 for loop
# end if
max_val = apply_signific()
# make more readable
ax1.set_ylim(10, max_val + 5 * tickrisk)
ax2.set_ylim(10, max_val + 5 * tickrisk)
ax3.set_ylim(10, max_val + 5 * tickrisk)
# back to original tick labels
new_ax2_ticklabels = [(ax2_max - ax2_min) * (tick - ax1_min) / (ax1_max - ax1_min) + ax2_min for tick in
ax2.yaxis.get_major_locator()()]
new_ax3_ticklabels = [(ax3_max - ax3_min) * (tick - ax1_min) / (ax1_max - ax1_min) + ax3_min for tick in
ax3.yaxis.get_major_locator()()]
ax2.set_yticklabels(np.round(new_ax2_ticklabels, 2))
ax3.set_yticklabels(np.round(new_ax3_ticklabels, 2))
for ax, color, label in zip([ax1, ax2, ax3],
[clr_count, clr_height, clr_volume],
['Number of peaks per fMEP', 'Median peak height', 'Latency volume']):
ax.set_ylabel(label, color=color, fontsize=23)
ax.tick_params(axis='y', labelcolor=color, labelsize=20)
ax.tick_params(axis="x", labelsize=20)
ax.spines['top'].set_visible(False)
xticks = ["\n".join(xtick.split("_")[1:]) for xtick in uniq_names]
plt.xticks(np.arange(len(uniq_names)) * wspace, xticks, fontsize=15)
plt.tight_layout()
plt.savefig(f"{self.plots_folder}/cumulative.pdf", format='pdf')
plt.show()
def _lw_prepare_data(self, folder, muscle, metadata, fill_zeros, filter_val, hz_analysis=False):
"""
Args:
muscle:
metadata:
"""
# constant number of slices (mode, speed): (extensor, flexor))
slices_number_dict = {
("PLT", '21'): (6, 5),
("PLT", '13.5'): (12, 5),
("PLT", '6'): (30, 5),
("TOE", '21'): (4, 4),
("TOE", '13.5'): (8, 4),
("AIR", '13.5'): (5, 4),
("QPZ", '13.5'): (12, 5),
("STR", '21'): (6, 5),
("STR", '13.5'): (12, 5),
("STR", '6'): (30, 5),
}
print(slices_number_dict)
#
dstep_from = metadata['dstep_from']
filename = metadata['filename']
dstep_to = metadata['dstep_to']
source = metadata['source']
speed = metadata['speed']
mode = metadata['mode']
slise_in_ms = 1 / int(metadata['rate']) * 1000
if hz_analysis:
slise_in_ms = 50
if muscle == 'F':
filename = filename.replace('_E_', '_F_')
standard_slice_length_in_steps = int(25 / dstep_to)
e_slices_number, f_slices_number = slices_number_dict[(mode, speed)]
#
for rat_id in range(10):
abs_filename = f"{folder}/{filename}"
# read the raw data
dataset = read_hdf5(abs_filename, rat=rat_id)
if dataset is None:
continue
dataset = subsampling(dataset, dstep_from=dstep_from, dstep_to=dstep_to)
if source == "bio":
# get the size of the data which we are waiting for
if muscle == "E":
full_size = int(e_slices_number * 25 / dstep_to)
else:
full_size = int(f_slices_number * 25 / dstep_to)
#
if fill_zeros:
prepared_data = np.array([d + [0] * (full_size - len(d)) for d in calibrate_data(dataset, source)])
else:
prepared_data = np.array([d for d in calibrate_data(dataset, source) if len(d) == full_size])
else:
# extract data of extensor
if muscle == "E":
begin = 0 * standard_slice_length_in_steps
print(begin)
end = begin + e_slices_number * standard_slice_length_in_steps
full_size = int(e_slices_number * 25 / dstep_to)
# extract data of flexor
else:
begin = standard_slice_length_in_steps * (e_slices_number + 0)
full_size = int(f_slices_number * 25 / dstep_to)
if hz_analysis:
ees_int = int(1000 / metadata['rate'])
slice_len = 50#((50 // ees_int) * ees_int)
slice_frame = int(slice_len / dstep_to)
begin = slice_frame * 3
end = begin + (7 if metadata['pedal'] == "4" else 5) * standard_slice_length_in_steps
print(begin, end)
# trim redundant simulation data
dataset = trim_data(dataset, begin, end)
prepared_data = np.array([d + [0] * (full_size - len(d)) for d in calibrate_data(dataset, source)])
# print(len(prepared_data))
if hz_analysis:
if muscle == "E":
K = []
for i in range(len(prepared_data)):
stepdata = prepared_data[i]
ees_int = int(1000 / metadata['rate'])
print(f'interval EES {ees_int}')
slice_len = ((50 // ees_int) * ees_int)
print(f'slice length {slice_len}')
slice_frame = slice_len / dstep_to
if slice_frame == 0:
slice_frame = 50 / dstep_to
print(f'slice frame {slice_frame}')
slices_begin_indexes = range(0, len(stepdata) + 1, int(slice_frame))
for beg in slices_begin_indexes:
print(f'beginings {beg}')
splitted_per_slice = [stepdata[beg:(beg + int(50 / dstep_to))] for beg in slices_begin_indexes]
print(len(splitted_per_slice))
splitted_per_slice = splitted_per_slice[:6]
# remove tails
print(list(map(len, splitted_per_slice)))
K.append(np.array(splitted_per_slice))
else:
K = []
for i in range(len(prepared_data)):
stepdata = prepared_data[i]
ees_int = int(1000 / metadata['rate'])
slice_len = ((50 // ees_int) * ees_int)
print(f'slice length {slice_len}')
slice_frame = slice_len / dstep_to
if slice_frame == 0:
slice_frame = 50 / dstep_to
slices_begin_indexes = range(0, len(stepdata) + 1, int(slice_frame))
splitted_per_slice = [stepdata[beg:(beg + int(50 / dstep_to))] for beg in slices_begin_indexes]
print(len(splitted_per_slice))
splitted_per_slice = splitted_per_slice[:2]
# remove tails
print(list(map(len, splitted_per_slice)))
K.append(np.array(splitted_per_slice))
sliced_data = np.array(K)
else:
if muscle == "E":
sliced_data = [np.array_split(beg, e_slices_number) for beg in prepared_data]
else:
sliced_data = [np.array_split(beg, f_slices_number) for beg in prepared_data]
sliced_data = np.array(sliced_data)
print(sliced_data.shape)
if len(sliced_data) == 0:
metadata['rats_data'][muscle][rat_id] = dict(data=None,
times=None,
ampls=None,
slices=None,
latency_volume=None)
continue
#
# print(sliced_data)
sliced_time, sliced_ampls, sliced_index = self._get_peaks(sliced_data, dstep_to,
[0, slise_in_ms], filter_val, debug=self.debug)
metadata['rats_data'][muscle][rat_id] = dict(data=sliced_data,
times=sliced_time,
ampls=sliced_ampls,
slices=sliced_index)
# do not calculate volume for FLEXOR (is redundant)
if muscle == 'E':
latency_volume = self.plot_density_3D(source=metadata, rats=rat_id, factor=15, only_volume=True)[0]
else:
latency_volume = None#self.plot_density_3D(source=metadata, rats=rat_id, factor=15, only_volume=True)[0]
metadata['rats_data'][muscle][rat_id]['latency_volume'] = latency_volume
#
def prepare_data(self, folder, dstep_to=None, fill_zeros=True, filter_val=0.05, hz_analysis=False):
"""
Args:
folder:
dstep_to:
fill_zeros:
filter_val:
"""
# check each .hdf5 file in the folder
for filename in [f for f in os.listdir(folder) if f.endswith('.hdf5') and '_E_' in f]:
source, muscle, mode, speed, rate, pedal, dstep = parse_filename(filename)
shortname = f"{source}_{mode}_{speed}_{rate}hz_{pedal}ped"
print(shortname)
#
if dstep_to is None:
dstep_to = dstep
# prepare the metadata dict
metadata = {
'filename': filename,
'source': source,
'muscle': muscle,
'mode': mode,
'speed': speed,
'rate': rate,
'slice_in_ms': 1 / rate * 1000,
'pedal': pedal,
'dstep_from': dstep,
'dstep_to': dstep_to,
'shortname': shortname,
'rats_data': {
'E': {},
'F': {}
}
}
# fill the metadata for each muscle (number of peaks, median height and etc)
self._lw_prepare_data(folder, 'E', metadata, fill_zeros, filter_val, hz_analysis)
self._lw_prepare_data(folder, 'F', metadata, fill_zeros, filter_val, hz_analysis)
#
print(metadata['rats_data'][muscle].keys())
any_rat = list(metadata['rats_data'][muscle].keys())[0]
metadata['slices_count'] = len(metadata['rats_data']['E'][any_rat]['data'][0])
# save metadata as pickle object (dict)
pickle_save = f"{self.pickle_folder}/{os.path.splitext(filename.replace('_E_', '_'))[0]}.pickle"
with open(pickle_save, 'wb') as handle:
pickle.dump(metadata, handle)
logging.info(pickle_save)
del metadata
@staticmethod
def _form_ticklabels(ticks_length):
"""
        Form tick labels so that only four ticks are labelled: the first, 1/3, 2/3 and the last.
        Args:
            ticks_length (int): total number of ticks
        Returns:
            list: prepared ticklabels (None for unlabelled positions)
"""
ytick_labels = [None] * ticks_length
yticks_indices = range(1, ticks_length + 1)
for i in [0, -1, int(1 / 3 * ticks_length), int(2 / 3 * ticks_length)]:
ytick_labels[i] = yticks_indices[i]
return ytick_labels
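    # Illustrative worked example: _form_ticklabels(6) -> [1, None, 3, None, 5, 6]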
@staticmethod
def fft(myogram, sampling_interval, title):
min_Hz, max_Hz = 5, 250
sampling_frequency = 1000 / sampling_interval # frequency of the data [Hz]
sampling_size = len(myogram) # get size (length) of the data
# frequency domain representation
fourier_transform = np.fft.fft(myogram) / sampling_size # normalize amplitude
fourier_transform = abs(fourier_transform[range(int(sampling_size / 2))]) # exclude sampling frequency
# remove the mirrored part of the FFT
values = np.arange(int(sampling_size / 2))
time_period = sampling_size / sampling_frequency
frequencies = values / time_period
#
mask = (frequencies <= max_Hz) & (frequencies >= min_Hz)
frequencies = frequencies[mask]
fourier_transform = fourier_transform[mask]
# plot FFT
plt.close()
plt.title("FFT " + title)
plt.plot(frequencies, fourier_transform)
plt.xlabel('Frequency')
plt.ylabel('Amplitude')
plt.grid(axis='x')
plt.xlim([min_Hz, max_Hz])
xticks = np.arange(0, frequencies.max() + 1, 10)
plt.xticks(xticks)
plt.tight_layout()
plt.show()
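# usage sketch (hypothetical values): for a myogram recorded with a 0.25 ms step,
# fft(myogram, sampling_interval=0.25, title="example") assumes a 4000 Hz sampling
# frequency and plots only the 5-250 Hz band of the spectrum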
def fft_analysis(self, source, rats, muscle='E'):
if type(source) is not Metadata:
metadata = Metadata(self.pickle_folder, source)
else:
metadata = source
if rats is None or rats is all:
rats = metadata.get_rats_id()
if type(rats) is int:
rats = [rats]
for rat_id in rats:
merged_myogram = metadata.get_myograms(rat_id, muscle=muscle).flatten()
self.fft(merged_myogram, metadata.dstep_to, title=f"{metadata.shortname} rat {rat_id}")
def print_metainfo(self, source, rats):
if type(source) is not Metadata:
metadata = Metadata(self.pickle_folder, source)
else:
metadata = source
if rats is None or rats is all:
rats = metadata.get_rats_id()
if type(rats) is int:
rats = [rats]
for muscle in ['E']:#, 'F']:
print("Filename | rat id | fMEPs | number of peaks per fMEP | median peak height | latency volume")
for rat_id in rats:
c = metadata.get_peak_counts(rat_id, border=[0, 25], muscle=muscle)
h = metadata.get_peak_median_height(rat_id, border='poly_tail', muscle=muscle)
f = metadata.get_fMEP_count(rat_id, muscle=muscle)
v = metadata.get_latency_volume(rat_id, muscle=muscle)
plt.plot(metadata.get_peak_ampls(rat_id, muscle='E', flat=True))
print(f"{metadata.shortname} _ {muscle} | {rat_id} | {f} | {c} | {h} | {v}")
x = metadata.get_peak_times(rat_id, muscle=muscle, flat=True) * metadata.dstep_to
# x = metadata.get_peak_slices(rat_id, muscle=muscle, unslice=True, flat=True)
# x = metadata.get_peak_ampls(rat_id, muscle='F', flat=True) * 100
# xx = np.linspace(min(x), max(x), 100)
# dx = st.gaussian_kde(x)(xx)
importr('ks')
data = FloatVector(x)
kde_data = r['kde'](data)
kde_data = dict(zip(kde_data.names, list(kde_data)))
ydata = kde_data["estimate"]
xdata = kde_data["eval.points"]
# plt.plot(xdata, ydata, label=metadata.shortname)
# plt.xlim(right=25) # adjust the right leaving left unchanged
# plt.xlim(left=0) # adjust the right leaving left unchanged
# modes = np.array(self._find_extrema(dx, np.greater)[0]) * 25 / 100
# distr = {1: 'uni', 2: 'bi'}
# print(f"{metadata.shortname} #{rat_id} ({distr.get(len(modes), 'multi')}modal): {modes} ms")
# plt.plot(xx, dx, label=metadata.shortname)
print("- " * 10)
def plot_fMEP_boxplots(self, source, borders, rats=None, show=False, slice_ms=None):
if type(source) is not Metadata:
metadata = Metadata(self.pickle_folder, source)
else:
metadata = source
if rats is None or rats is all:
rats = metadata.get_rats_id()
if type(rats) is int:
rats = [rats]
speed = metadata.speed
dstep_to = metadata.dstep_to
shortname = metadata.shortname
if slice_ms is None:
slice_in_ms = metadata.slice_in_ms
else:
slice_in_ms = slice_ms
# plot per each rat
for rat_id in rats:
rat_myograms = metadata.get_myograms(rat_id, muscle='E')
rat_peak_times = metadata.get_peak_times(rat_id, muscle='E')
total_rat_steps = rat_myograms.shape[0]
total_slices = rat_myograms.shape[1]
total_datasteps = rat_myograms.shape[2]
plt.close()
if speed == "6":
fig, ax = plt.subplots(figsize=(20, 20))
elif speed == "13.5":
fig, ax = plt.subplots(figsize=(16, 12))
else:
fig, ax = plt.subplots(figsize=(16, 8))
colors = iter(["#275b78", "#f2aa2e", "#a6261d", "#472650"] * total_rat_steps)
xticks = np.arange(total_datasteps) * dstep_to
# plot sliced myogram data
for myogram_fMEP in rat_myograms:
color = next(colors)
for slice_index, slice_data in enumerate(myogram_fMEP):
plt.plot(xticks, np.array(slice_data) + slice_index, alpha=0.5, color=color, zorder=1)
# for each border (if it is a list of lists) find peaks inside and form boxplots
if type(borders[0]) is not list:
borders = [borders]
for border in borders:
# meta info: percentage of fMEPs with at least one peak inside the border
passed = 0
alles = total_rat_steps * total_slices
# prepare lists for boxplots forming
sliced_x = [[] for _ in range(total_slices)]
# find peaks and plot them
for myogram_fMEP, time_per_step in zip(rat_myograms, rat_peak_times):
for slice_index, (slice_data, peak_time_per_slice) in enumerate(zip(myogram_fMEP, time_per_step)):
# # raw data before filtering
peaks_time = np.array(peak_time_per_slice) * dstep_to
peaks_value = np.array(slice_data)[peak_time_per_slice] + slice_index
# get peaks only inside the borders
filter_mask = (border[0] <= peaks_time) & (peaks_time <= border[1])
# filter data
peaks_time = peaks_time[filter_mask]
peaks_value = peaks_value[filter_mask]
# # plot peaks if not void
if len(peaks_time):
passed += 1
sliced_x[slice_index] += list(peaks_time)
plt.plot(peaks_time, peaks_value, '.', c='k', zorder=3, ms=4)
# plot boxplots
for i, x in enumerate(sliced_x):
if len(x):
bx = plt.boxplot(x, positions=[i], widths=0.8, whis=[10, 90],
showfliers=False, patch_artist=True, vert=False, zorder=5)
starts = bx['whiskers'][0].get_xdata()[1]
plt.text(x=starts - 1.5, y=i + 0.2, s=f"{starts:.1f}", fontsize=25)
self._recolor(bx, color="#287a72", fill_color="#287a72", fill_alpha=0.2)
logging.info(f"{shortname}, rat {rat_id}, {passed / alles * 100:.1f}% of peaks prob. at {border}ms")
save_filename = f"{shortname}_{rat_id}_fMEP_boxplot"
plt.grid(which='both', axis='x')
self.axis_article_style(ax, axis='x')
plt.yticks(range(0, total_slices), self._form_ticklabels(total_slices), fontsize=30)
plt.xlim(0, 25)
plt.tight_layout()
plt.savefig(f"{self.plots_folder}/{save_filename}.pdf", format="pdf")
if show:
plt.show()
plt.close()
def plot_amp_boxplots(self, source, borders, rats=None, show=False, slice_ms=None):
if type(source) is not Metadata:
metadata = Metadata(self.pickle_folder, source)
else:
metadata = source
if rats is None or rats is all:
rats = metadata.get_rats_id()
if type(rats) is int:
rats = [rats]
speed = metadata.speed
dstep_to = metadata.dstep_to
shortname = metadata.shortname
if slice_ms is None:
slice_in_ms = metadata.slice_in_ms
else:
slice_in_ms = slice_ms
if speed == "6":
allr = [[] for _ in range(30)]
elif speed == "13.5":
if metadata.speed == "AIR":
allr = [[] for _ in range(5)]
elif metadata.speed == "TOE":
allr = [[] for _ in range(8)]
else:
allr = [[] for _ in range(12)]
else:
allr = [[] for _ in range(6)]
# plot per each rat
for rat_id in rats:
rat_myograms = metadata.get_myograms(rat_id, muscle='E')
rat_peak_ampls = metadata.get_peak_ampls(rat_id, muscle='E')
rat_peak_times = metadata.get_peak_times(rat_id, muscle='E')
total_rat_steps = rat_myograms.shape[0]
total_slices = rat_myograms.shape[1]
total_datasteps = rat_myograms.shape[2]
#
plt.close()
if speed == "6":
fig, ax = plt.subplots(figsize=(20, 20))
elif speed == "13.5":
fig, ax = plt.subplots(figsize=(16, 12))
else:
fig, ax = plt.subplots(figsize=(16, 8))
colors = iter(["#275b78", "#f2aa2e", "#a6261d", "#472650"] * total_rat_steps)
xticks = np.arange(total_datasteps) * dstep_to
# plot sliced myogram data
# for myogram_fMEP in rat_myograms:
# color = next(colors)
# for slice_index, slice_data in enumerate(myogram_fMEP):
# plt.plot(xticks, np.array(slice_data) + slice_index, alpha=0.5, color=color, zorder=1)
# for each border (if it is a list of lists) find peaks inside and form boxplots
if type(borders[0]) is not list:
borders = [borders]
for border in borders:
# meta info: percentage of fMEPs with at least one peak inside the border
passed = 0
alles = total_rat_steps * total_slices
# prepare lists for boxplots forming
sliced_x = [[] for _ in range(total_slices)]
# find peaks and plot them
for myogram_fMEP, time_per_step, amp_per_step in zip(rat_myograms, rat_peak_times, rat_peak_ampls):
for slice_index, (slice_data, peak_time_per_slice, peak_amp_per_step) in enumerate(zip(myogram_fMEP, time_per_step, amp_per_step)):
# # raw data before filtering
peaks_time = np.array(peak_time_per_slice) * dstep_to
peaks_amp = np.array(peak_amp_per_step) #* dstep_to
# peaks_value = np.array(slice_data)[peak_time_per_slice] + slice_index
# # get peaks only inside the borders
filter_mask = (border[0] <= peaks_time) & (peaks_time <= border[1])
# filter data
peaks_time = peaks_time[filter_mask]
peaks_amp = peaks_amp[filter_mask]
# peaks_value = peaks_value[filter_mask]
# # plot peaks if not void
if len(peaks_time):
passed += 1
sliced_x[slice_index] += list(peaks_amp)
allr[slice_index] += list(peaks_amp)
# plt.plot(peaks_time, peaks_value, '.', c='k', zorder=3, ms=4)
# plot boxplots
# np.append(allr, sliced_x, axis=1)
for i, x in enumerate(sliced_x):
if len(x):
bx = plt.boxplot(x, positions=[i], widths=0.8, whis=[10, 90],
showfliers=False, patch_artist=True, vert=False, zorder=5)
starts = bx['whiskers'][0].get_xdata()[1]
# plt.text(x=starts - 1.5, y=i + 0.2, s=f"{starts:.1f}", fontsize=25)
self._recolor(bx, color="#124460", fill_color="#124460", fill_alpha=0.2)
# logging.info(f"{shortname}, rat, {passed / alles * 100:.1f}% of peaks prob. at {border}ms")
save_filename = f"{shortname}_{rat_id}_amp_boxplot"
# plt.grid(which='both', axis='x')
self.axis_article_style(ax, axis='x')
# print(medianx)
# plt.yticks(range(0, total_slices), self._form_ticklabels(total_slices), fontsize=30)
# plt.xlim(0, 1)
plt.tight_layout()
plt.savefig(f"{self.plots_folder}/{save_filename}.pdf", format="pdf")
lefty = []
leftx = []
righty = []
rightx = []
medianx = []
mediany = []
print(len(allr))
plt.close()
for i, x in enumerate(allr):
if len(x):
bx = plt.boxplot(x, positions=[i], widths=0.2, whis=[10, 90],
showfliers=False, patch_artist=True, vert=False, zorder=5)
starts = bx['whiskers'][0].get_xdata()[1]
medianx.append(bx['medians'][0].get_xdata()[0])
mediany.append(bx['medians'][0].get_ydata()[0])
leftx.append(bx['whiskers'][0].get_xdata()[1])
lefty.append(bx['whiskers'][0].get_ydata()[1])
rightx.append(bx['whiskers'][1].get_xdata()[1])
righty.append(bx['whiskers'][1].get_ydata()[1])
# plt.text(x=starts - 1.5, y=i + 0.2, s=f"{starts:.1f}", fontsize=25)
self._recolor(bx, color="#227872", fill_color="#227872", fill_alpha=0.2)
# logging.info(f"{shortname}, rat, {passed / alles * 100:.1f}% of peaks prob. at {border}ms")
save_filename = f"{shortname}_amp_boxplot"
# plt.grid(which='both', axis='x')
self.axis_article_style(ax, axis='x')
# print(medianx)
plt.plot(medianx, mediany)
plt.plot(leftx, lefty)
plt.plot(rightx, righty)
# plt.yticks(range(0, total_slices), self._form_ticklabels(total_slices), fontsize=30)
# plt.xlim(0, 1)
plt.tight_layout()
plt.savefig(f"{self.plots_folder}/{save_filename}.pdf", format="pdf")
if show:
plt.show()
plt.close()
# ar1 = np.array([allr[0]])
# ratsall = np.hstack(([allr[0]], [allr[2]],[allr[2]]))
print(allr)
@staticmethod
def joint_plot(X, Y, ax, gs, borders, **kwargs):
"""
TODO: add docstring
Args:
X (np.ndarray):
Y (np.ndarray):
ax:
gs:
borders:
**kwargs:
"""
color = kwargs['color']
xmin, xmax, ymin, ymax = borders
if kwargs['with_boxplot']:
pos = kwargs['pos']
# create X-marginal (top)
ax_top = plt.subplot(gs[0, 1])
ax_top.spines['top'].set_visible(False)
ax_top.spines['right'].set_visible(False)
# create Y-marginal (right)
ax_right = plt.subplot(gs[1, 2])
ax_right.spines['top'].set_visible(False)
ax_right.spines['right'].set_visible(False)
ax_left = plt.subplot(gs[1, 0])
ax_left.spines['top'].set_visible(False)
ax_left.spines['right'].set_visible(False)
ax_bottom = plt.subplot(gs[2, 1])
ax_bottom.spines['top'].set_visible(False)
ax_bottom.spines['right'].set_visible(False)
flierprops = dict(marker='.', markersize=1, linestyle='none')
bxt = ax_left.boxplot(Y, positions=[pos * 10], widths=3, patch_artist=True, flierprops=flierprops)
recolor(bxt, 'k', color)
ax_left.set_ylim([ymin, ymax])
ax_left.set_xticks([])
bxt = ax_bottom.boxplot(X, positions=[pos], widths=0.4, vert=False, patch_artist=True,
flierprops=flierprops)
recolor(bxt, 'k', color)
ax_bottom.set_xlim([xmin, xmax])
ax_bottom.set_yticks([])
else:
ax_top = plt.subplot(gs[0, 0])
ax_top.spines['top'].set_visible(False)
ax_top.spines['right'].set_visible(False)
# create Y-marginal (right)
ax_right = plt.subplot(gs[1, 1])
ax_right.spines['top'].set_visible(False)
ax_right.spines['right'].set_visible(False)
# add grid
ax_top.grid(which='minor', axis='x')
ax_right.grid(which='minor', axis='y')
# gaussian_kde calculation
xx = np.linspace(xmin, xmax, 100)
yy = np.linspace(ymin, ymax, 100)
dx = st.gaussian_kde(X)(xx)
dy = st.gaussian_kde(Y)(yy)
ax_top.plot(xx, dx, color=color)
ax_right.plot(dy, yy, color=color)
# plt.plot([0], [kwargs['k']], 'D', color=color, ms=10)
ax_top.set_yticks([])
ax_right.set_xticks([])
return ax_top, ax_right
def plot_mono_Hz(self, source, rats):
""""""
if type(source) is not Metadata:
if type(source) is dict:
metadata = Metadata(sdict=source)
else:
metadata = Metadata(self.pickle_folder, source)
else:
metadata = source
if rats is None or rats is all:
rats = metadata.get_rats_id()
if type(rats) is int:
rats = [rats]
shortname = metadata.shortname
print(shortname)
#
slice_length = 50
ees = int(1 / metadata.rate * 1000)
# process each rat's data
for rat_id in rats:
print(f" rat ID {rat_id} (MONO)".center(30, "-"))
T = metadata.get_peak_times(rat_id, muscle='E', flat=True) * metadata.dstep_to
A = metadata.get_peak_ampls(rat_id, muscle='E', flat=True)
S = metadata.get_peak_slices(rat_id, muscle='E', flat=True)
D = metadata.get_myograms(rat_id, muscle='E')
# collect peak amplitudes located inside the mono answer
monos = []
# plot myogram
colors = iter(["#275b78", "#f2aa2e", "#a6261d", "#472650"] * 100)
for exp in D:
for i, data in enumerate(exp):
color = next(colors)
plt.plot(np.arange(len(data)) * metadata.dstep_to, data + i, color=color)
plt.plot(T, S, '.', c='k')
# process each mono after EES
for i in range(0, slice_length, ees):
start = i + 3
end = i + 7.5
mask_inside_mono = (start <= T) & (T <= end)
monos.append(A[mask_inside_mono])
x, y = T[mask_inside_mono], S[mask_inside_mono]
plt.axvspan(xmin=start, xmax=end, alpha=0.5)
plt.plot(x, y, '.', color='r', ms=10)
# show the ratio of median amplitudes between each mono answer and the first one
for i in range(1, len(monos)):
print(f"mono #{i} with #0: avg ampls ratio "
f"{np.median(monos[i]) / np.median(monos[0]):.3f}\t"
f"({np.median(monos[i]):.3f} / {np.median(monos[0]):.3f})")
plt.show()
@staticmethod
def is_inside(points, rc, rx, ry, angle=0):
cos_angle = np.cos(np.radians(180 - angle))
sin_angle = np.sin(np.radians(180 - angle))
xc = points[:, 0] - rc[0]
yc = points[:, 1] - rc[1]
xct = xc * cos_angle - yc * sin_angle
yct = xc * sin_angle + yc * cos_angle
rad_cc = (xct ** 2 / rx ** 2) + (yct ** 2 / ry ** 2)
return rad_cc <= 1
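# the test above translates the points to the ellipse center, rotates them by the
# ellipse angle and checks the canonical inequality (x'/rx)^2 + (y'/ry)^2 <= 1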
def ellipse_form(self, meta_ellipse):
"""
create a shapely ellipse. adapted from
https://gis.stackexchange.com/a/243462
"""
ell_c, ell_w, ell_h, ell_angle = meta_ellipse
# create ellipse
circ = Point(ell_c).buffer(1)
ell = affinity.scale(circ, ell_w, ell_h)
ellipse = affinity.rotate(ell, ell_angle)
# form polygon for drawing
verts = np.array(ellipse.exterior.coords.xy)
patch = Polygon(verts.T, alpha=0.5)
return ellipse, patch
def plot_poly_Hz(self, source, rats):
""""""
if type(source) is not Metadata:
if type(source) is dict:
metadata = Metadata(sdict=source)
else:
metadata = Metadata(self.pickle_folder, source)
else:
metadata = source
if rats is None or rats is all:
rats = metadata.get_rats_id()
if type(rats) is int:
rats = [rats]
shortname = metadata.shortname
ell_width = 25
ell_height = 6
# ellipse form
rx = ell_width / 2
ry = ell_height / 2
print(shortname)
slice_length = 50
ees = int(1 / metadata.rate * 1000)
#
for rat_id in rats:
print(f" rat ID {rat_id} (POLY)".center(30, "-"))
ellipses = []
#
plt.figure(figsize=(10, 5))
T = metadata.get_peak_times(rat_id, muscle='E', flat=True)
A = metadata.get_peak_ampls(rat_id, muscle='E', flat=True)
S = metadata.get_peak_slices(rat_id, muscle='E', flat=True)
D = metadata.get_myograms(rat_id, muscle='E')
# process ellipses after each EES
for i in range(0, slice_length, ees):
print(i)
mono_start = i + 3
mono_end = i + 7
rc = (mono_end + 1 + ell_width / 2, 2.5)
print(mono_end)
# find peaks (time, slice index, ampl) inside ellipse
points = np.vstack((T * metadata.dstep_to, S, A)).T
mask_inside = self.is_inside(points, rc=rc, rx=rx, ry=ry)
points = points[mask_inside]
ampls = A[mask_inside]
plt.axvspan(xmin=mono_start, xmax=mono_end, alpha=0.5, color='#472650')
# remove points inside mono answers
for time in range(0, slice_length, ees):
mask_outside_mono = (points[:, 0] < (time + 3)) | ((time + 7.5) < points[:, 0])
points = points[mask_outside_mono]
ampls = ampls[mask_outside_mono]
if len(points) == 0:
continue
# ell = Ellipse(xy=rc, width=rx * 2, height=ry * 2, alpha=0.3, edgecolor='k')
# plt.gca().add_artist(ell)
ellipses.append((rc, rx, ry, 0, points, ampls))
print(f"Ellipse #{int(i / ees)} {rc, rx, ry, 0} ampls avg: {np.mean(ampls):.3f}")
# plot mono area
print(mono_end)
plt.plot(points[:, 0], points[:, 1], '.', c='#a6261d', ms=10)
if len(ellipses) == 1:
ellipse, patch = self.ellipse_form(ellipses[0][:4])
plt.gca().add_patch(patch)
else:
for i in range(len(ellipses) - 1):
# first ellipse
meta_ell1 = ellipses[i]
ell_polygon1, patch1 = self.ellipse_form(meta_ell1[:4])
plt.gca().add_patch(patch1)
# second ellipse
meta_ell2 = ellipses[i + 1]
ell_polygon2, patch2 = self.ellipse_form(meta_ell2[:4])
plt.gca().add_patch(patch2)
# intersect
intersect = ell_polygon1.intersection(ell_polygon2)
if intersect:
verts3 = np.array(intersect.exterior.coords.xy)
patch3 = Polygon(verts3.T, facecolor='none', edgecolor='black')
plt.gca().add_patch(patch3)
mask_common = (meta_ell1[4][:, None] == meta_ell2[4]).all(-1).any(-1)
avg_ampls = np.mean(meta_ell1[5][mask_common])
print(f'area of intersect (#{i} and #{i + 1}): {intersect.area:.3f}, avg ampl in intersect: {avg_ampls:.3f}')
# just plot all peaks
colors = iter(["#275b78", "#f2aa2e", "#a6261d", "#472650", "#287a72"] * 100)
for exp, texp in zip(D, metadata.get_peak_times(rat_id, muscle='E')):
for islice, (data, tdata) in enumerate(zip(exp, texp)):
color = next(colors)
plt.plot(np.arange(len(data)) * metadata.dstep_to, data + islice, color=color)
if tdata:
tdata = np.array(tdata)
plt.plot(tdata * metadata.dstep_to, data[tdata] + islice, '.', c='k', ms=4, zorder=4)
plt.xlim(0, 50)
plt.ylim(-1, 6)
plt.tight_layout()
#plt.show()
save_filename = f"{shortname}_hzs.pdf"
plt.savefig(f"{self.plots_folder}/{save_filename}", dpi=250, format="pdf")
plt.close()
def plot_density_3D(self, source, rats, factor=8, show=False, only_volume=False, slice_ms=25):
"""
Args:
source:
rats (int or tuple):
factor:
show:
only_volume:
Returns:
"""
if type(source) is not Metadata:
if type(source) is dict:
metadata = Metadata(sdict=source)
else:
metadata = Metadata(self.pickle_folder, source)
else:
metadata = source
if rats is None or rats is all:
rats = metadata.get_rats_id()
if type(rats) is int:
rats = [rats]
shortname = metadata.shortname
volumes = []
#
for rat_id in rats:
X = metadata.get_peak_ampls(rat_id, muscle='E', flat=True)
Y = metadata.get_peak_slices(rat_id, muscle='E', flat=True)
times = metadata.get_peak_times(rat_id, muscle='E', flat=True) * metadata.dstep_to
mask = (times <= 3) | (8 <= times)
X = X[mask]
Y = Y[mask]
print(X)
print(Y)
zip_iterator = zip(Y, X)
a_dictionary = dict(zip_iterator)
print(a_dictionary)
save_filename = f"{shortname}_3D_rat={rat_id}"
# form a mesh grid
xmax, ymax = 1, max(Y)
xborder_l, xborder_r = 0, 1
gridsize_x, gridsize_y = factor * xmax, factor * ymax
xmesh, ymesh = np.meshgrid(np.linspace(0, xmax, gridsize_x),
np.linspace(0, ymax, gridsize_y))
xmesh = xmesh.T
ymesh = ymesh.T
# re-present grid in 1D and pair them as (x1, y1 ...)
positions = np.vstack([xmesh.ravel(), ymesh.ravel()])
values = np.vstack((X, Y))
# use a Gaussian KDE
a = st.gaussian_kde(values)(positions).T
# re-present grid back to 2D
z = np.reshape(a, xmesh.shape)
# set the mid isoline (2/3)
if any(n in shortname for n in ["AIR", "TOE", "PLT"]):
z_mid = (np.max(z) + np.min(z)) / 2
else:
z_mid = (np.max(z) + np.min(z)) / 3 * 2
conty_ymax = -np.inf
conty_ymin = np.inf
for i, cont in enumerate(plt.contour(xmesh, ymesh, z, levels=10, alpha=0).allsegs[::-1]):
if cont:
contour = max(cont, key=np.size)
for islice in range(ymax + 1):
mask = contour[:, 1].astype(int) == islice
if any(mask):
# print(f"slice {islice}, ampl = {contour[mask, 0][-1]}")
contr = np.array(contour[mask, 0][-1])
# print(contr)
print(f"{contour[mask, 0][-1]:.3f}", end='\t')
else:
print(0, end='\t')
print()
else:
print("empty contour")
print("=====")
# plt.plot(contr)
# plt.show()
mid_contours = plt.contour(xmesh, ymesh, z, levels=[z_mid], alpha=0).allsegs[0]
for contour in mid_contours:
for x, y in contour:
if xborder_l <= x <= xborder_r:
if y > conty_ymax:
conty_ymax = y
if y < conty_ymin:
conty_ymin = y
# clip data by the amplitude borders and by slices [contour ymin, contour ymax]
xslice = slice(xborder_l * factor, xborder_r * factor)
yslice = slice(int(round(conty_ymin * factor)), int(round(conty_ymax * factor)))
zclip = z[xslice, yslice]
# keep the clipped data that lies below the mid isoline
zunder = zclip[zclip <= z_mid]
# calculate the volume under the isoline
cellsize = xmesh[1][0] * ymesh[0][1]
zvol = np.sum(np.abs(z_mid - zunder)) * cellsize
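# the volume is approximated numerically: the deficit (z_mid - z) of every grid cell
# lying under the isoline is summed and multiplied by the cell area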
if only_volume:
volumes.append(zvol)
continue
surface = go.Surface(contours=dict(z={"show": True,
"start": np.min(z) - 0.00001,
"end": np.max(z) + 0.00001,
"size": (np.max(z) - np.min(z)) / 16,
'width': 1,
"color": "gray"}),
x=xmesh,
y=ymesh,
z=z,
opacity=1)
# left plane of time border [5]
plane1 = go.Surface(x=[xborder_l, xborder_l],
y=[0, ymax],
z=[[np.min(z), np.max(z)], [np.min(z), np.max(z)]],
showscale=False, surfacecolor=[0] * 4, opacity=0.7, cmax=1, cmin=0)
# right plane of time border [20]
plane2 = go.Surface(x=[xborder_r, xborder_r],
y=[0, ymax],
z=[[np.min(z), np.max(z)], [np.min(z), np.max(z)]],
showscale=False, surfacecolor=[0] * 4, opacity=0.7, cmax=1, cmin=0)
# bottom plane of slice border
plane3 = go.Surface(x=[xborder_l, xborder_r],
y=[conty_ymin, conty_ymin],
z=[[np.min(z), np.min(z)], [np.max(z), np.max(z)]],
showscale=False, surfacecolor=[0] * 4, opacity=0.7, cmax=1, cmin=0)
# top plane of slice border
plane4 = go.Surface(x=[xborder_l, xborder_r],
y=[conty_ymax, conty_ymax],
z=[[np.min(z), np.min(z)], [np.max(z), np.max(z)]],
showscale=False, surfacecolor=[0] * 4, opacity=0.7, cmax=1, cmin=0)
# form data pack to visualize all in one axes
data = [surface]#, plane1, plane2, plane3, plane4]
# plot isoline
for contour in mid_contours:
data.append(go.Scatter3d(x=contour[:, 0], y=contour[:, 1], z=[z_mid] * len(contour[:, 0]),
line=dict(color='#000000', width=6), mode='lines', showlegend=False))
# plot dots under isoline
data.append(go.Scatter3d(x=xmesh[xslice, yslice][zclip <= z_mid].ravel(), # X under isoline
y=ymesh[xslice, yslice][zclip <= z_mid].ravel(), # Y under isoline
z=zunder.ravel(), # Z under isoline
mode='markers', marker=dict(size=2, color=['rgb(0,0,0)'] * len(zunder.ravel()))))
# plot all
fig = go.Figure(data=data)
# change a camera view and etc
fig.update_layout(title=f'{shortname} | RAT {rat_id} | V: {zvol:.3f}',
autosize=False,
width=1000,
height=800,
scene_camera=dict(
up=dict(x=0, y=0, z=1),
eye=dict(x=-1.25, y=-1.25, z=1.25)
),
scene=dict(
xaxis=dict(title_text="Time, ms",
titlefont=dict(size=30),
ticktext=list(range(26))),
yaxis=dict(title_text="Slices",
titlefont=dict(size=30),
tickvals=list(range(ymax + 1)),
ticktext=list(range(1, ymax + 2))),
aspectratio={"x": 1, "y": 1, "z": 0.5}
))
py.plot(fig, validate=False,
filename=f"{self.plots_folder}/{save_filename}.html",
auto_open=show)
if only_volume:
return volumes
def plot_shadow_slices(self, source, rats=None, only_extensor=False, add_kde=False, show=False):
shadow_color = "#472650"
kde_color = "#275b78"
k_fliers_high, k_fliers_low = 5, 6
if type(source) is not Metadata:
metadata = Metadata(self.pickle_folder, source)
else:
metadata = source
if rats is None or rats is all:
rats = metadata.get_rats_id()
if type(rats) is int:
rats = [rats]
shortname = metadata.shortname
dstep_to = metadata.dstep_to
speed = metadata.speed
slice_in_ms = 1 / 40 * 1000  # fixed 25 ms slice (40 Hz rate)
if speed == "6":
figsize = (20, 20)
elif speed == "13.5":
figsize = (16, 12)
else:
figsize = (16, 8)
#
for rat_id in rats:
extensor_data = metadata.get_myograms(rat_id, muscle='E')
# check rat's flexor, in some cases there are no data
flexor_flag = rat_id in metadata.get_rats_id(muscle='F') and not only_extensor
# get number of slices per muscle
e_slices_number = len(extensor_data[0])
steps_in_slice = len(extensor_data[0][0])
# calc boxplots of the original data
e_boxplots = get_boxplots(extensor_data)
# combine data into one list
plt.close('all')
fig, ax = plt.subplots(figsize=figsize)
yticks = []
f_slices_number = 0 # init flexor number of slices
shared_x = np.arange(steps_in_slice) * dstep_to
# plot extensor
for slice_index, data in enumerate(e_boxplots):
# set ideal or median
ideal_data = extensor_data[0][slice_index] + slice_index
data += slice_index
# fliers shadow
ax.fill_between(shared_x, data[:, k_fliers_high], data[:, k_fliers_low],
color=shadow_color, alpha=0.7, zorder=3)
# ideal pattern
ax.plot(shared_x, ideal_data, color='k', linewidth=2, zorder=4)
yticks.append(ideal_data[0])
# plot.extensor_data[:, slice_index]
# for exp_data in extensor_data:
# ax.plot(shared_x, exp_data[slice_index] + slice_index, color='r', linewidth=1, zorder=4)
if flexor_flag:
flexor_data = metadata.get_myograms(rat_id, muscle='F')
f_slices_number = len(flexor_data[0])
# print(flexor_data)
#
# print(len(flexor_data[0]))
f_boxplots = get_boxplots(flexor_data)
# plot flexor
for slice_index, data in enumerate(f_boxplots):
# set ideal or median
ideal_data = flexor_data[0][slice_index] + slice_index + e_slices_number + 2
data += slice_index + e_slices_number + 2
# fliers shadow
ax.fill_between(shared_x, data[:, k_fliers_high], data[:, k_fliers_low],
color=shadow_color, alpha=0.7, zorder=3)
# ideal pattern
ax.plot(shared_x, ideal_data, color='k', linewidth=2, zorder=4)
yticks.append(ideal_data[0])
if add_kde:
x = metadata.get_peak_times(rat_id, muscle='E', flat=True) * dstep_to
y = metadata.get_peak_slices(rat_id, muscle='E', flat=True)
borders = 0, slice_in_ms, -1, e_slices_number
self._contour_plot(x=x, y=y, color=kde_color, ax=ax, z_prev=[0], borders=borders, levels_num=15, addtan=False)
# ax.plot(x, y, '.', c='k', zorder=3, ms=4)
if flexor_flag:
'''flexor'''
x = metadata.get_peak_times(rat_id, muscle='F', flat=True) * dstep_to
y = metadata.get_peak_slices(rat_id, muscle='F', flat=True) + e_slices_number + 2
borders = 0, slice_in_ms, e_slices_number, e_slices_number + f_slices_number + 2
self._contour_plot(x=x, y=y, color=kde_color, ax=ax, z_prev=[0], borders=borders, levels_num=15)
# form ticks
self.axis_article_style(ax, axis='x')
slices_number = e_slices_number
if flexor_flag:
slices_number += f_slices_number
plt.yticks(yticks, self._form_ticklabels(slices_number), fontsize=30)
plt.xlim(0, slice_in_ms)
plt.tight_layout()
save_filename = f"{shortname}_{rat_id}_sliced.pdf"
plt.savefig(f"{self.plots_folder}/{save_filename}", dpi=250, format="pdf")
if show:
plt.show()
plt.close()
logging.info(f"{shortname}, rat {rat_id} "
f"{'' if flexor_flag else 'WITHOUT FLEXOR'} "
f"saved to {self.plots_folder}/{save_filename}")
@staticmethod
def axis_article_style(ax, axis='both', auto_nbins=False, xshift=None, xmin=None, xmax=None):
"""
ToDo add info
Args:
ax (matplotlib.axes): currect figure axes
axis (str): which axes change, both -- all
auto_nbins (bool):
xshift (None or float): offset of xticklabels
xmin (None or float): set xlim for minimum
xmax (None or float): set xlim for maximum
"""
# hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# make ticks more visible
ax.tick_params(which='major', length=10, width=3, labelsize=20)
ax.tick_params(which='minor', length=4, width=2, labelsize=20)
# set an automatic locator for the chosen axis
if axis == 'x' or axis == 'both':
ax.xaxis.set_minor_locator(MultipleLocator())
if auto_nbins:
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
else:
ax.xaxis.set_major_locator(MaxNLocator(nbins=len(ax.get_xticks()), integer=True))
if xshift or xmin or xmax:
if xshift:
xticks = (ax.get_xticks() + 1).astype(int)
else:
xticks = ax.get_xticks()
xmax = np.inf if xmax is None else xmax
xmin = -np.inf if xmin is None else xmin
xticklabels = [int(label) if xmin <= label <= xmax else "" for label in xticks]
ax.set_xticklabels(xticklabels)
if axis == 'y' or axis == 'both':
ax.yaxis.set_major_locator(MaxNLocator(nbins=len(ax.get_yticks()), integer=True))
ax.yaxis.set_minor_locator(MultipleLocator(base=(ax.get_yticks()[1] - ax.get_yticks()[0]) / 2))
@staticmethod
def _shortname(name):
"""
Args:
name (str):
Returns:
"""
source, muscle, mode, speed, rate, pedal, stepsize = parse_filename(name)
return f"{source}_{muscle}_{mode}_{speed}_{pedal}ped"
@staticmethod
def _ecdf(sample):
# convert sample to a numpy array, if it isn't already
sample = np.array(sample)
# find the unique values and their corresponding counts
quantiles, counts = np.unique(sample, return_counts=True)
# take the cumulative sum of the counts and divide by the sample size to
# get the cumulative probabilities between 0 and 1
cumul_prob = np.cumsum(counts).astype(np.double) / sample.size
return quantiles, cumul_prob
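# example (illustrative): _ecdf([1, 1, 2, 3]) -> (array([1, 2, 3]), array([0.5, 0.75, 1.]))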
@staticmethod
def _find_extrema(array, condition):
"""
Advanced wrapper of numpy.argrelextrema
Args:
array (np.ndarray): data array
condition (np.ufunc): e.g. np.less (<), np.great_equal (>=) and etc.
Returns:
np.ndarray: indexes of extrema
np.ndarray: values of extrema
"""
# get indexes of extrema
indexes = argrelextrema(array, condition)[0]
# if the data line is horizontal and doesn't have any extrema -- return None
if len(indexes) == 0:
return None, None
# get values based on found indexes
values = array[indexes]
# calc the difference between nearby extrema values
diff_nearby_extrema = np.abs(np.diff(values, n=1))
# keep indexes without twin extrema (the case when the data line is horizontal and has two extrema on its borders)
indexes = np.array([index for index, diff in zip(indexes, diff_nearby_extrema) if diff > 0] + [indexes[-1]])
# get values based on filtered indexes
values = array[indexes]
return indexes, values
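# example (illustrative): _find_extrema(np.array([0, 1, 0, 2, 0]), np.greater) -> (array([1, 3]), array([1, 2]))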
@staticmethod
def _list3d(h, w):
return [[[] for _ in range(w)] for _ in range(h)]
def _get_peaks(self, sliced_datasets, dstep, borders, filter_val, tails=False, debug=False):
"""
Finds all peaks times and amplitudes at each slice
Args:
sliced_datasets (np.ndarray):
dstep (float): data step size
borders (list): time borders for searching peaks
filter_val (float): default is 0.028 but can be changed
tails (bool): move the peaks of first 3 ms to the previous slice
debug (bool): debugging flag
Returns:
list: 3D list of peak times [experiment_index][slice_index][peak times]
list: 3D list of peak ampls [experiment_index][slice_index][peak ampls]
list: 3D list of peak slices [experiment_index][slice_index][peak slices indices]
"""
if type(sliced_datasets) is not np.ndarray:
raise TypeError("Non valid type of data - use only np.ndarray")
# form parameters for filtering peaks
min_ampl = 0.3
min_dist = int(0.2 / dstep) # 0.15
max_dist = int(4 / dstep)
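# e.g. with dstep = 0.025 ms the allowed distance window is 8..160 data points (0.2..4 ms)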
# interpret the shape of the dataset
tests_count, slices_count, slice_length = sliced_datasets.shape
peak_per_slice_list = self._list3d(h=tests_count, w=slices_count)
ampl_per_slice_list = self._list3d(h=tests_count, w=slices_count)
peak_slice_num_list = self._list3d(h=tests_count, w=slices_count)
# find all peaks times and amplitudes per slice
if debug:
plt.figure(figsize=(16, 9))
for experiment_index, slices_data in enumerate(sliced_datasets):
# combine slices into one myogram
y = np.array(slices_data).ravel()
# find all extrema
e_maxima_indexes, e_maxima_values = self._find_extrema(y, np.greater)
e_minima_indexes, e_minima_values = self._find_extrema(y, np.less)
# start pairing extrema from maxima
if e_minima_indexes[0] < e_maxima_indexes[0]:
comb = list(zip(e_maxima_indexes, e_minima_indexes[1:]))
combA = list(zip(e_maxima_values, e_minima_values[1:]))
else:
comb = list(zip(e_maxima_indexes, e_minima_indexes))
combA = list(zip(e_maxima_values, e_minima_values))
# # process each extrema pair
if debug:
xticks = np.arange(len(y)) * dstep
plt.plot(xticks, y, color='k')
plt.plot(e_maxima_indexes * dstep, e_maxima_values, '.', color='r')
plt.plot(e_minima_indexes * dstep, e_minima_values, '.', color='b')
per_dT = np.percentile(np.abs(np.diff(np.array(comb))) * dstep, q=[25, 50, 75])
per_dA = np.percentile(np.abs(np.diff(np.array(combA))), q=[25, 50, 75])
for max_index, min_index in comb:
max_value = e_maxima_values[e_maxima_indexes == max_index][0]
min_value = e_minima_values[e_minima_indexes == min_index][0]
dT = abs(max_index - min_index)
dA = abs(max_value - min_value)
# check the difference between maxima and minima
if ((min_dist <= dT <= max_dist) and dA >= filter_val) or dA >= min_ampl:
slice_index = int(max_index // slice_length)
peak_time = max_index - slice_length * slice_index
# change slice index for "tails" peaks
if tails and peak_time * dstep <= 3 and slice_index > 0:
slice_index -= 1
peak_time = max_index - slice_length * slice_index
if debug:
plt.plot(max_index * dstep, y[max_index], '.', color='k')
plt.text(max_index * dstep, y[max_index], f"({peak_time * dstep:.1f}, {slice_index})")
if borders[0] <= peak_time * dstep < borders[1]:
peak_per_slice_list[experiment_index][slice_index].append(peak_time)
ampl_per_slice_list[experiment_index][slice_index].append(dA)
peak_slice_num_list[experiment_index][slice_index].append(slice_index)
if debug:
plt.plot([max_index * dstep, min_index * dstep], [max_value, max_value], ls='--', color='k')
plt.plot([min_index * dstep, min_index * dstep], [max_value, min_value], ls='--', color='k')
plt.plot(max_index * dstep, max_value, '.', color='r', ms=15)
plt.plot(min_index * dstep, min_value, '.', color='b', ms=15)
plt.text(max_index * dstep, max_value + 0.05,
f"dT {(min_index - max_index) * dstep:.1f}\ndA {dA:.2f}", fontsize=10)
if debug:
plt.show()
return peak_per_slice_list, ampl_per_slice_list, peak_slice_num_list
@staticmethod
def _example_bio_sample(folder, filename):
"""
Return y-data of the best bio sample. The file must exist
Args:
folder (str): current folder with hdf5 files and best sample
filename (str): best sample filename
Returns:
np.ndarray: y-data of best sample
"""
raise NotImplementedError
best_samplse_filename = f"{folder}/best_samples/{filename.replace('.hdf5', '')}"
print(best_samplse_filename)
if not os.path.exists(best_samplse_filename):
# raise Exception(f"Where is best sample for bio data?! I can't find it here '{folder}'")
ideal_sample = np.array([[0] * 250 for _ in range(22)])
return ideal_sample
bio_ideal_y_data = []
# collect extensor data
with open(best_samplse_filename) as file:
for d in file.readlines():
bio_ideal_y_data.append(list(map(float, d.split())))
# collect flexor_data
with open(best_samplse_filename.replace('e_', 'f_')) as file:
for d in file.readlines():
bio_ideal_y_data.append(list(map(float, d.split())))
# convert the list to an array for simpler usage
ideal_sample = np.array(bio_ideal_y_data)
return ideal_sample
@staticmethod
def _example_sample(latencies_matrix, peaks_matrix, step_size):
"""
ToDo add info
Args:
latencies_matrix (np.ndarray):
peaks_matrix (np.ndarray):
step_size (float): data step size
Returns:
int: index of sample
"""
raise NotImplementedError
ideal_example_index = 0
peaks_sum = np.sum(peaks_matrix, axis=1)
index = np.arange(len(peaks_sum))
merged = np.array(list(zip(index, peaks_sum)))
# at the top located experimental runs with the greatest number of peaks
sorted_by_sum = merged[merged[:, 1].argsort()][::-1]
for index, value in sorted_by_sum:
index = int(index)
# check difference between latencies -- how far they are from each other
diff = np.diff(latencies_matrix[index] * step_size, n=1)
# acceptable border is -3 .. 3 ms
if all(map(lambda x: -3 <= x <= 3, diff)):
ideal_example_index = index
break
return ideal_example_index
@staticmethod
def _get_KS_2samp_pvalue(y1, y2):
dvalue, _ = ks_2samp(y1, y2)
en = np.sqrt(len(y1) * len(y2) / (len(y1) + len(y2)))
den = dvalue * en
pvalue = kstwobign.sf(den)
return pvalue
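# the p-value is the asymptotic two-sample Kolmogorov-Smirnov approximation:
# p = Q_KS(D * sqrt(n1 * n2 / (n1 + n2))), where Q_KS is the survival function
# of the Kolmogorov distribution (kstwobign)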
@staticmethod
def _multi_R_KDE_test(x1, y1, x2, y2):
r_fct_string = """
KDE_test <- function(X1, Y1, X2, Y2){
library("ks")
if(length(dim(X1)) == 1){
X1 <- as.vector(X1)
X2 <- as.vector(X2)
Y1 <- as.vector(Y1)
Y2 <- as.vector(Y2)
res_time <- kde.test(x1=X1, x2=X2)$pvalue
res_ampl <- kde.test(x1=Y1, x2=Y2)$pvalue
mat1 <- matrix(c(X1, Y1), nrow=length(X1))
mat2 <- matrix(c(X2, Y2), nrow=length(X2))
res_2d <- kde.test(x1=mat1, x2=mat2)$pvalue
return(c(res_time, res_ampl, res_2d))
}
results <- matrix(, nrow = nrow(X1) * nrow(X2), ncol = 3)
index <- 1
#
for(i1 in 1:nrow(X1)) {
#
x1 <- X1[i1, ]
x1 <- x1[x1 >= 0]
y1 <- Y1[i1, ]
y1 <- y1[y1 >= 0]
#
for(i2 in 1:nrow(X2)) {
#
x2 <- X2[i2, ]
x2 <- x2[x2 >= 0]
y2 <- Y2[i2, ]
y2 <- y2[y2 >= 0]
#
mat1 <- matrix(c(x1, y1), nrow=length(x1))
mat2 <- matrix(c(x2, y2), nrow=length(x2))
#
res_time <- kde.test(x1=x1, x2=x2)$pvalue
res_ampl <- kde.test(x1=y1, x2=y2)$pvalue
res_2d <- kde.test(x1=mat1, x2=mat2)$pvalue
results[index, ] <- c(res_time, res_ampl, res_2d)
index <- index + 1
}
}
return(results)
}
"""
r_pkg = STAP(r_fct_string, "r_pkg")
rx1, ry1, rx2, ry2 = map(numpy2rpy, (x1, y1, x2, y2))
return np.asarray(r_pkg.KDE_test(rx1, ry1, rx2, ry2))
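# the R routine returns an (n1 * n2) x 3 matrix of p-values per pair of rows:
# [kde.test on times, kde.test on amplitudes, kde.test on the 2D (time, amplitude) data]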
@staticmethod
def _contour_plot(x, y, color, ax, z_prev, borders, levels_num, addtan = False):
"""
Args:
x:
y:
color:
ax:
z_prev:
borders:
Returns:
np.ndarray:
"""
xmin, xmax, ymin, ymax = borders
# form a mesh grid
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
# re-present grid in 1D and pair them as (x1, y1 ...)
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
# use a Gaussian KDE
a = st.gaussian_kde(values)(positions).T
# re-present grid back to 2D
z = np.reshape(a, xx.shape)
# find an index of the maximal element
m = np.amax(z)
# form a step and levels
step = (np.amax(a) - np.amin(a)) / levels_num
levels = np.arange(0, m, step) + step
# convert HEX to HSL
h_norm, s_norm, l_norm = Color(color).hsl
# make a gradient based on the number of levels
light_gradient = np.linspace(l_norm, 0.95, len(levels))[::-1]
# generate colors for contours level from HSL (normalized) to RGB (normalized)
colors = [Color(hsl=(h_norm, s_norm, l_level)).rgb for l_level in light_gradient]
# plot contour lines (the filled contours are drawn below with contourf)
ax.contour(xx, yy, z, levels=levels, linewidths=1, colors=color)
z_mid = (np.max(z) + np.min(z)) / 3 * 2
mid_contours = plt.contour(xx, yy, z, levels=[z_mid], alpha=0).allsegs[0]
# for contour in mid_contours:
# plt.plot(contour[:, 0], contour[:, 1], c='#f2aa2e', linewidth=4)
if addtan:
max_contour = max(mid_contours, key=np.size)
unique, index = np.unique(max_contour[:, 0], axis=0, return_index=True)
x = np.array(max_contour[:, 0])[index]
y = np.array(max_contour[:, 1])[index]
ind = np.lexsort((y,x))
sorted_x = np.array([x[i] for i in ind])
sorted_y = np.array([y[i] for i in ind])
print(sorted_x)
print(sorted_y)
mask = ((sorted_x >= 10) & (sorted_x <= 23) & (sorted_y > 2))
masked_x = sorted_x[mask]
masked_y = sorted_y[mask]
print(masked_x)
print(masked_y)
t,c,k = interpolate.splrep(masked_x, masked_y, k=3)
b = interpolate.BSpline(t, c, k)
fsec = b.derivative(nu=2)
# fsec = interpolate.splev(24.4, spl, der=2)
# print(fsec)
# print(interpolate.sproot((t, c - fsec, k)))
pointcur = interpolate.sproot((fsec.t, fsec.c, k))[0]
# print(interpolate.sproot((fsec.t, fsec.c, k)))
spl = interpolate.splrep(sorted_x, sorted_y, k=1)
small_t = np.arange(pointcur - 0.25, pointcur + 0.35, 0.05)
# print(small_t)
# t,c,k = interpolate.splrep(masked_x, masked_y, k=1)
fa = interpolate.splev(pointcur, spl, der=0) # f(a)
# print(fa)
fprime = interpolate.splev(pointcur, spl, der=1) # f'(a)
tan = fa + fprime * (small_t - pointcur) # tangent
# print(tan)
slopedegree = math.atan2((small_t[-1] - small_t[0]), (tan[-1] - tan[0])) * 180 / math.pi
print(f'SLOPE IN DEGREE - {slopedegree}')
plt.plot(small_t, tan, c='#a6261d', linewidth=5)
# plt.plot(pointcur, fa, 'om')
ax.contourf(xx, yy, z, levels=levels, colors=colors, alpha=0.7, zorder=0)
# ax.scatter(x, y, s=0.1, color=color)
return z
@staticmethod
def _get_color(filename, clrs):
if "bio" in filename:
color = next(clrs)
elif "gras" in filename:
color = '#287a72'
elif "neuron" in filename:
color = '#F2AA2E'
elif "nest" in filename:
color = '#472650'
else:
raise Exception("Can't set color for data")
return color
|
py | 7df74919107ce3b7e97a913584fa358a59747950 | # Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ortools.util.sorted_interval_list."""
import unittest
from ortools.util import sorted_interval_list
class SortedIntervalListTest(unittest.TestCase):
def testCtorAndGetter(self):
bool_domain = sorted_interval_list.Domain(0, 1)
self.assertEqual(2, bool_domain.Size())
self.assertEqual(0, bool_domain.Min())
self.assertEqual(1, bool_domain.Max())
self.assertFalse(bool_domain.IsEmpty())
self.assertEqual(str(bool_domain), '[0,1]')
def testFromValues(self):
domain = sorted_interval_list.Domain.FromValues([1, 3, -5, 5])
self.assertEqual(4, domain.Size())
self.assertEqual(-5, domain.Min())
self.assertEqual(5, domain.Max())
self.assertEqual([-5, -5, 1, 1, 3, 3, 5, 5],
domain.FlattenedIntervals())
self.assertTrue(domain.Contains(1))
self.assertFalse(domain.Contains(0))
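# FromValues builds the smallest domain containing the listed integers ({-5, 1, 3, 5} here),
# which is why Size() is 4 and the flattened intervals are the degenerate pairs asserted above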
def testFromIntervals(self):
domain = sorted_interval_list.Domain.FromIntervals([[2, 4], [-2, 0]])
self.assertEqual(6, domain.Size())
self.assertEqual(-2, domain.Min())
self.assertEqual(4, domain.Max())
self.assertEqual([-2, 0, 2, 4], domain.FlattenedIntervals())
def testFromFlatIntervals(self):
domain = sorted_interval_list.Domain.FromFlatIntervals([2, 4, -2, 0])
self.assertEqual(6, domain.Size())
self.assertEqual(-2, domain.Min())
self.assertEqual(4, domain.Max())
self.assertEqual([-2, 0, 2, 4], domain.FlattenedIntervals())
def testNegation(self):
domain = sorted_interval_list.Domain(5, 20)
self.assertEqual([-20, -5], domain.Negation().FlattenedIntervals())
if __name__ == '__main__':
unittest.main()
|
py | 7df74a0d4bb82f0d399a0f5581ea0a869aaccad6 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-25 22:30
import re
from django.db import migrations
def update_perms_and_locks(apps, schema_editor):
# update all permissions
Tag = apps.get_model("typeclasses", "Tag")
perm_map = {
"guests": "guest",
"players": "player",
"playerhelpers": "helper",
"builders": "builder",
"wizards": "admin",
"immortals": "developer",
}
for perm in Tag.objects.filter(db_tagtype="permission"):
if perm.db_key in perm_map:
perm.db_key = perm_map[perm.db_key]
perm.save(update_fields=("db_key",))
# update all locks on all entities
apps_models = [
("objects", "ObjectDB"),
("accounts", "AccountDB"),
("scripts", "ScriptDB"),
("comms", "ChannelDB"),
]
p_reg = re.compile(
r"(?<=perm\()(\w+)(?=\))|(?<=perm_above\()(\w+)(?=\))", re.IGNORECASE + re.UNICODE
)
def _sub(match):
perm = match.group(1)
return perm_map[perm.lower()].capitalize() if (perm and perm.lower() in perm_map) else perm
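# illustrative example of the rewrite performed below (not taken from a real database):
# "edit:perm(Wizards) or perm_above(Builders)" -> "edit:perm(Admin) or perm_above(Builder)"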
for app_tuple in apps_models:
TClass = apps.get_model(*app_tuple)
for obj in TClass.objects.filter(db_lock_storage__icontains="perm"):
orig_lock = obj.db_lock_storage
repl_lock = p_reg.sub(_sub, orig_lock)
if repl_lock != orig_lock:
obj.db_lock_storage = repl_lock
obj.save(update_fields=("db_lock_storage",))
class Migration(migrations.Migration):
dependencies = [("typeclasses", "0007_tag_migrations_may_be_slow")]
operations = [migrations.RunPython(update_perms_and_locks)]
|
py | 7df74a10a0cf0aac4080e4ac61b3208192a4eba6 | # from dal import autocomplete
# from django import forms
# from django.contrib.auth import forms as admin_forms
# from django.contrib.auth import get_user_model
# from django.utils.translation import gettext_lazy as _
# from angalabiri.blog.models import Post, Comment
# User = get_user_model()
# from crispy_forms.helper import FormHelper
# from crispy_forms.layout import Column, HTML, Field, Fieldset, Layout, Row, Submit
# from crispy_forms.bootstrap import InlineField, UneditableField
# from crispy_forms import layout
# class CommentForm(forms.ModelForm):
# class Meta:
# model = Comment
# fields = ["text"]
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.helper = FormHelper()
# self.helper.layout = Layout(
# Row(
# Column("text", css_class="form-group col-md-12"),
# css_class="row mb-0"
# ),
# HTML(
# "<div class='w-100'></div>",
# ),
# Submit(
# "submit",
# "Comment",
# css_class="btn btn-block text-white block rounded-lg py-3 font-weight-semibold text-uppercase mt-3 button-black"
# )
# )
|
py | 7df74ab10ff4fe8b613516ea57a33935dc3af242 | import csv
import sys
import psycopg2
if __name__ == '__main__':
# establish connection
connect_input = "host='localhost' dbname='general_journal' user='general_journal' password='general_journal'"
conn = psycopg2.connect(connect_input)
cur = conn.cursor()
#############################
## LOAD DATA FOR TRANSFERS ##
#############################
# open the csv file
with open('journal_transactions.csv') as data_file:
# create a csv reader over the file
reader = csv.reader(data_file, delimiter=',')
# skip header
next(reader)
# load data from dataset
for row in reader:
# read data into transfer table
cur.execute("INSERT INTO transfer VALUES (%s, %s, %s, %s)", (row[1], row[5], row[3], row[4]))
conn.commit()
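# close the cursor and connection explicitly once the load is finished
# (a small hygiene addition; the resources would also be released at interpreter exit)
cur.close()
conn.close()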
|
py | 7df74bf50e5631b05550d32215a3b2d49f1f2e7b | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
@patch("tempfile.mkdtemp", new = MagicMock(return_value='/some_tmp_dir'))
@patch("os.path.exists", new = MagicMock(return_value=True))
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
class TestMetricsCollector(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "AMBARI_METRICS/0.1.0/package"
STACK_VERSION = "2.0.6"
DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
def test_start_default_distributed(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metrics_collector.py",
classname = "AmsCollector",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.maxDiff=None
self.assert_hbase_configure('master', distributed=True)
self.assert_hbase_configure('regionserver', distributed=True)
self.assert_ams('collector', distributed=True)
self.assertResourceCalled('Execute', 'ambari-sudo.sh /usr/jdk64/jdk1.7.0_45/bin/keytool -importkeystore -srckeystore /etc/security/clientKeys/all.jks -destkeystore /some_tmp_dir/truststore.p12 -srcalias c6402.ambari.apache.org -deststoretype PKCS12 -srcstorepass bigdata -deststorepass bigdata',
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh openssl pkcs12 -in /some_tmp_dir/truststore.p12 -out /etc/ambari-metrics-collector/conf/ca.pem -cacerts -nokeys -passin pass:bigdata',
)
self.assertResourceCalled('Execute', ('chown', u'ams:hadoop', '/etc/ambari-metrics-collector/conf/ca.pem'),
sudo=True
)
self.assertResourceCalled('Execute', ('chmod', '644', '/etc/ambari-metrics-collector/conf/ca.pem'),
sudo=True)
self.assertResourceCalled('Execute', 'ambari-sudo.sh rm -rf /some_tmp_dir',
)
self.assertResourceCalled('Execute', '/usr/lib/ams-hbase/bin/hbase-daemon.sh --config /etc/ams-hbase/conf stop regionserver',
on_timeout = 'ls /var/run/ambari-metrics-collector//hbase-ams-regionserver.pid >/dev/null 2>&1 && ps `cat /var/run/ambari-metrics-collector//hbase-ams-regionserver.pid` >/dev/null 2>&1 && ambari-sudo.sh -H -E kill -9 `ambari-sudo.sh cat /var/run/ambari-metrics-collector//hbase-ams-regionserver.pid`',
timeout = 30,
user = 'ams'
)
self.assertResourceCalled('File', '/var/run/ambari-metrics-collector//hbase-ams-regionserver.pid',
action = ['delete']
)
self.assertResourceCalled('Execute', '/usr/lib/ams-hbase/bin/hbase-daemon.sh --config /etc/ams-hbase/conf stop master',
on_timeout = 'ls /var/run/ambari-metrics-collector//hbase-ams-master.pid >/dev/null 2>&1 && ps `cat /var/run/ambari-metrics-collector//hbase-ams-master.pid` >/dev/null 2>&1 && ambari-sudo.sh -H -E kill -9 `ambari-sudo.sh cat /var/run/ambari-metrics-collector//hbase-ams-master.pid`',
timeout = 30,
user = 'ams'
)
self.assertResourceCalled('File', '/var/run/ambari-metrics-collector//hbase-ams-master.pid',
action = ['delete']
)
self.assertResourceCalled('Execute', '/usr/sbin/ambari-metrics-collector --config /etc/ambari-metrics-collector/conf --distributed stop',
user = 'ams'
)
self.assertResourceCalled('Execute', '/usr/lib/ams-hbase/bin/hbase-daemon.sh --config /etc/ams-hbase/conf start master',
not_if = 'ls /var/run/ambari-metrics-collector//hbase-ams-master.pid >/dev/null 2>&1 && ps `cat /var/run/ambari-metrics-collector//hbase-ams-master.pid` >/dev/null 2>&1',
user = 'ams'
)
self.assertResourceCalled('Execute', '/usr/lib/ams-hbase/bin/hbase-daemon.sh --config /etc/ams-hbase/conf start regionserver',
not_if = 'ls /var/run/ambari-metrics-collector//hbase-ams-regionserver.pid >/dev/null 2>&1 && ps `cat /var/run/ambari-metrics-collector//hbase-ams-regionserver.pid` >/dev/null 2>&1',
user = 'ams'
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh rm -rf /var/lib/ambari-metrics-collector/hbase-tmp/*.tmp',
)
self.assertResourceCalled('Execute', '/usr/sbin/ambari-metrics-collector --config /etc/ambari-metrics-collector/conf --distributed start',
user = 'ams'
)
self.assertNoMoreResources()
def test_start_default_embedded(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metrics_collector.py",
classname = "AmsCollector",
command = "start",
config_file="default_ams_embedded.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.maxDiff=None
self.assert_hbase_configure('master')
self.assert_hbase_configure('regionserver')
self.assert_ams('collector')
self.assertResourceCalled('Execute', '/usr/sbin/ambari-metrics-collector --config /etc/ambari-metrics-collector/conf stop',
user = 'ams'
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh rm -rf /var/lib/ambari-metrics-collector/hbase-tmp/*.tmp',
)
self.assertResourceCalled('File', '/etc/ambari-metrics-collector/conf/core-site.xml',
owner = 'ams',
action = ['delete']
)
self.assertResourceCalled('File', '/etc/ambari-metrics-collector/conf/hdfs-site.xml',
owner = 'ams',
action = ['delete']
)
self.assertResourceCalled('Execute', '/usr/sbin/ambari-metrics-collector --config /etc/ambari-metrics-collector/conf start',
user = 'ams'
)
self.assertNoMoreResources()
def assert_ams(self, name=None, distributed=False):
self.assertResourceCalled('Directory', '/etc/ambari-metrics-collector/conf',
owner = 'ams',
group = 'hadoop',
create_parents = True,
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/var/lib/ambari-metrics-collector/checkpoint',
owner = 'ams',
group = 'hadoop',
cd_access = 'a',
create_parents = True,
recursive_ownership = True,
)
self.assertResourceCalled('XmlConfig', 'ams-site.xml',
owner = 'ams',
group = 'hadoop',
conf_dir = '/etc/ambari-metrics-collector/conf',
configurations = self.getConfig()['configurations']['ams-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['ams-hbase-site']
)
self.assertResourceCalled('XmlConfig', 'ssl-server.xml',
owner = 'ams',
group = 'hadoop',
conf_dir = '/etc/ambari-metrics-collector/conf',
configurations = self.getConfig()['configurations']['ams-ssl-server'],
configuration_attributes = self.getConfig()['configuration_attributes']['ams-ssl-server']
)
merged_ams_hbase_site = {}
merged_ams_hbase_site.update(self.getConfig()['configurations']['ams-hbase-site'])
merged_ams_hbase_site['phoenix.query.maxGlobalMemoryPercentage'] = '25'
merged_ams_hbase_site['phoenix.spool.directory'] = '/tmp'
self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
owner = 'ams',
group = 'hadoop',
conf_dir = '/etc/ambari-metrics-collector/conf',
configurations = merged_ams_hbase_site,
configuration_attributes = self.getConfig()['configuration_attributes']['ams-hbase-site']
)
self.assertResourceCalled('File', '/etc/ambari-metrics-collector/conf/log4j.properties',
owner = 'ams',
group = 'hadoop',
content = InlineTemplate(self.getConfig()['configurations']['ams-hbase-log4j']['content']),
mode=0644,
)
self.assertResourceCalled('File', '/etc/ambari-metrics-collector/conf/ams-env.sh',
owner = 'ams',
content = InlineTemplate(self.getConfig()['configurations']['ams-env']['content'])
)
self.assertResourceCalled('Directory', '/var/log/ambari-metrics-collector',
owner = 'ams',
group = 'hadoop',
cd_access = 'a',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/var/run/ambari-metrics-collector',
owner = 'ams',
cd_access = 'a',
group = 'hadoop',
create_parents = True,
mode=0755,
)
self.assertResourceCalled('File', '/usr/lib/ams-hbase/bin/hadoop',
owner = 'ams',
mode=0755
)
self.assertResourceCalled('Directory', '/etc/security/limits.d',
owner = 'root',
group = 'root',
create_parents = True
)
self.assertResourceCalled('File', '/etc/security/limits.d/ams.conf',
owner='root',
group='root',
mode=0644,
content=Template("ams.conf.j2")
)
if distributed:
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'ams',
group = 'hadoop',
mode=0644,
conf_dir = '/etc/ambari-metrics-collector/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
)
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'ams',
group = 'hadoop',
mode=0644,
conf_dir = '/etc/ams-hbase/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'ams',
group = 'hadoop',
mode=0644,
conf_dir = '/etc/ambari-metrics-collector/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'ams',
group = 'hadoop',
mode=0644,
conf_dir = '/etc/ams-hbase/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
)
def assert_hbase_configure(self, name=None, distributed=False):
self.assertResourceCalled('Directory', '/etc/ams-hbase/conf',
owner = 'ams',
group = 'hadoop',
create_parents = True,
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/var/lib/ambari-metrics-collector/hbase-tmp',
owner = 'ams',
cd_access = 'a',
create_parents = True,
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/var/lib/ambari-metrics-collector/hbase-tmp/local/jars',
owner = 'ams',
cd_access = 'a',
group = 'hadoop',
mode = 0775,
create_parents = True
)
if not distributed:
self.assertResourceCalled('File', '/etc/ams-hbase/conf/core-site.xml',
owner = 'ams',
action = ['delete']
)
self.assertResourceCalled('File', '/etc/ams-hbase/conf/hdfs-site.xml',
owner = 'ams',
action = ['delete']
)
self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
owner = 'ams',
group = 'hadoop',
conf_dir = '/etc/ams-hbase/conf',
configurations = self.getConfig()['configurations']['ams-hbase-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['ams-hbase-site']
)
self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
owner = 'ams',
group = 'hadoop',
conf_dir = '/etc/ams-hbase/conf',
configurations = self.getConfig()['configurations']['ams-hbase-policy'],
configuration_attributes = self.getConfig()['configuration_attributes']['ams-hbase-site']
)
self.assertResourceCalled('File', '/etc/ams-hbase/conf/hbase-env.sh',
owner = 'ams',
content = InlineTemplate(self.getConfig()['configurations']['ams-hbase-env']['content'])
)
self.assertResourceCalled('File', '/etc/ams-hbase/conf/hadoop-metrics2-hbase.properties',
owner = 'ams',
group = 'hadoop',
content = Template('hadoop-metrics2-hbase.properties.j2')
)
self.assertResourceCalled('TemplateConfig', '/etc/ams-hbase/conf/regionservers',
owner = 'ams',
template_tag = None,
)
self.assertResourceCalled('Directory', '/var/run/ambari-metrics-collector/',
owner = 'ams',
create_parents = True,
mode = 0755,
cd_access = "a",
)
self.assertResourceCalled('Directory', '/var/log/ambari-metrics-collector',
owner = 'ams',
create_parents = True,
mode = 0755,
cd_access = "a",
)
if name == 'master':
if distributed:
self.assertResourceCalled('HdfsResource', 'hdfs://localhost:8020/apps/hbase/data',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'ams',
mode = 0775,
hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
hdfs_site=self.getConfig()['configurations']['hdfs-site'],
principal_name=UnknownConfigurationMock(),
default_fs='hdfs://c6401.ambari.apache.org:8020',
)
self.assertResourceCalled('HdfsResource', '/amshbase/staging',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'ams',
mode = 0711,
hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
hdfs_site=self.getConfig()['configurations']['hdfs-site'],
principal_name=UnknownConfigurationMock(),
default_fs='hdfs://c6401.ambari.apache.org:8020',
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
hadoop_conf_dir = '/etc/hadoop/conf',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
hdfs_site=self.getConfig()['configurations']['hdfs-site'],
principal_name=UnknownConfigurationMock(),
default_fs='hdfs://c6401.ambari.apache.org:8020',
)
self.assertResourceCalled('File', '/var/run/ambari-metrics-collector//distributed_mode', action=["create"],
mode=0644, owner='ams')
else:
self.assertResourceCalled('Directory', '/var/lib/ambari-metrics-collector/hbase',
owner = 'ams',
cd_access="a",
create_parents = True,
recursive_ownership = True,
)
if (not distributed):
self.assertResourceCalled('File', '/var/run/ambari-metrics-collector//distributed_mode',
owner = 'ams',
action = ['delete']
)
self.assertResourceCalled('File', '/etc/ams-hbase/conf/log4j.properties',
owner = 'ams',
group = 'hadoop',
mode = 0644,
content = InlineTemplate(self.getConfig()['configurations']['ams-hbase-log4j']['content'])
)
|
py | 7df74c2f52668b154c1e08e4ecd8580f0e0e3be8 | #!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import logging
from typing import Dict, List, Union
# Libs
# Custom
from .base import BaseTask
from .endpoints import EXPERIMENT_ENDPOINTS
##################
# Configurations #
##################
##########################################
# Experiment task Class - ExperimentTask #
##########################################
class ExperimentTask(BaseTask):
""" Interfacing class governing all experiment-related interactions with the
remote Synergos grid
Attributes:
_type (str): Specifies the type of task
address (str): Address where Synergos TTP is hosted at
        endpoints (str): All endpoints governed by this task
"""
def __init__(self, address: str):
super().__init__(
_type="experiment",
address=address,
endpoints=EXPERIMENT_ENDPOINTS
)
###########
# Helpers #
###########
def _generate_bulk_url(self, collab_id: str, project_id: str) -> str:
return self._generate_url(
endpoint=self.endpoints.EXPERIMENTS,
collab_id=collab_id,
project_id=project_id
)
def _generate_single_url(
self,
collab_id: str,
project_id: str,
expt_id: str
) -> str:
return self._generate_url(
endpoint=self.endpoints.EXPERIMENT,
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id
)
##################
# Core functions #
##################
def create(
self,
collab_id: str,
project_id: str,
expt_id: str,
model: List[Dict[str, Union[str, bool, int, float]]],
**kwargs
):
""" Registers an experiment in the federated grid
Args:
collab_id (str): Identifier of collaboration
project_id (str): Identifier of project experiment is under
expt_id (str): Identifier of experiment
model (list): Layer architectures of an experiment model
**kwargs
Returns:
"""
parameters = {'expt_id': expt_id, 'model': model}
return self._execute_operation(
operation="post",
url=self._generate_bulk_url(
collab_id=collab_id,
project_id=project_id
),
payload=parameters
)
def read_all(self, collab_id: str, project_id: str):
""" Retrieves information/configurations of all experiments created in
the federated grid under a specific project
Args:
collab_id (str): Identifier of collaboration
project_id (str): Identifier of project experiment is under
Returns:
"""
return self._execute_operation(
operation="get",
url=self._generate_bulk_url(
collab_id=collab_id,
project_id=project_id
),
payload=None
)
def read(self, collab_id: str, project_id: str, expt_id: str):
""" Retrieves a single experiment's information/configurations created
in the federated grid under a specific project
Args:
collab_id (str): Identifier of collaboration
project_id (str): Identifier of project experiment is under
expt_id (str): Identifier of experiment
Returns:
"""
return self._execute_operation(
operation="get",
url=self._generate_single_url(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id
),
payload=None
)
def update(self, collab_id: str, project_id: str, expt_id: str, **updates):
""" Updates an experiment's information/configurations created in the
federated grid under a specific project
Args:
collab_id (str): Identifier of collaboration
project_id (str): Identifier of project experiment is under
expt_id (str): Identifier of experiment
**updates: Keyword pairs of parameters to be updated
Returns:
"""
return self._execute_operation(
operation="put",
url=self._generate_single_url(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id
),
payload=updates
)
def delete(self, collab_id: str, project_id: str, expt_id: str):
""" Removes an experiment's information/configurations previously
created from the federated grid
Args:
collab_id (str): Identifier of collaboration
project_id (str): Identifier of project experiment is under
expt_id (str): Identifier of experiment
Returns:
"""
return self._execute_operation(
operation="delete",
url=self._generate_single_url(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id
),
payload=None
)
if __name__ == "__main__":
host = "0.0.0.0"
port = 5001
address = f"http://{host}:{port}"
from .collaborations import CollaborationTask
from .projects import ProjectTask
# Create a reference collaboration
collaborations = CollaborationTask(address)
collab_id = "test_collab"
collaborations.create(collab_id=collab_id)
# Create reference project
projects = ProjectTask(address)
project_id = "test_project"
projects.create(
collab_id=collab_id,
project_id=project_id,
action='classify',
incentives={
'tier_1': [],
'tier_2': []
}
)
experiments = ExperimentTask(address)
expt_id_1 = "test_expt_1"
expt_id_2 = "test_expt_2"
# Test experiment creation
create_response_1 = experiments.create(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id_1,
model=[
{
"activation": "sigmoid",
"is_input": True,
"l_type": "Linear",
"structure": {
"bias": True,
"in_features": 15,
"out_features": 1
}
}
]
)
print("Experiment 1: Create response:", create_response_1)
create_response_2 = experiments.create(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id_2,
model=[
{
"activation": "sigmoid",
"is_input": True,
"l_type": "Linear",
"structure": {
"bias": True,
"in_features": 30,
"out_features": 20
}
},
{
"activation": "sigmoid",
"is_input": False,
"l_type": "Linear",
"structure": {
"bias": True,
"in_features": 20,
"out_features": 10
}
},
{
"activation": "sigmoid",
"is_input": False,
"l_type": "Linear",
"structure": {
"bias": True,
"in_features": 10,
"out_features": 1
}
}
]
)
print("Experiment 2: Create response:", create_response_2)
# Test experiment retrieval bulk
read_all_response = experiments.read_all(
collab_id=collab_id,
project_id=project_id
)
print("Read all response:", read_all_response)
# Test experiment retrieval single
read_response_1 = experiments.read(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id_1
)
print("Experiment 1: Read response:", read_response_1)
read_response_2 = experiments.read(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id_2
)
print("Experiment 2: Read response:", read_response_2)
# Test experiment update
update_response_1 = experiments.update(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id_1,
model=[
{
"activation": "sigmoid",
"is_input": True,
"l_type": "Linear",
"structure": {
"bias": True,
"in_features": 20,
"out_features": 10
}
},
{
"activation": "sigmoid",
"is_input": False,
"l_type": "Linear",
"structure": {
"bias": True,
"in_features": 10,
"out_features": 1
}
}
]
)
print("Experiment 1: Update response:", update_response_1)
update_response_2 = experiments.update(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id_2,
model=[
{
"activation": "relu",
"is_input": True,
"l_type": "Linear",
"structure": {
"bias": False,
"in_features": 15,
"out_features": 1
}
}
]
)
print("Experiment 2: Update response:", update_response_2)
# Test experiment deletion
delete_response_1 = experiments.delete(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id_1
)
print("Experiment 1: delete response:", delete_response_1)
delete_response_2 = experiments.delete(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id_2
)
print("Experiment 2: delete response:", delete_response_2)
print("Experiments left:", experiments.read_all(
collab_id=collab_id,
project_id=project_id
))
# Clean up
collaborations.delete(collab_id=collab_id) |
py | 7df74ca6aa6d1d792769b9b1ba2290b7a39ff33b | # -*- coding: utf-8 -*-
"""SSH YAML Configuration
NAME - SSH YAML Configuration
AUTHOR - Patryk Adamczyk <[email protected]>
LICENSE - MIT
"""
# Imports
from sshyc.app import main
# Underscore Variables
"""Author of the module"""
__author__ = 'Patryk Adamczyk'
"""Module License"""
__license__ = 'MIT'
"""Documentation format"""
__docformat__ = 'restructuredtext en'
# Main Script
main()
|
py | 7df74e5ed6794d3ffecf4869e031aafce1217d2f | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import asyncio
import logging
from typing import Any, Dict, Union, TYPE_CHECKING, Iterable, List
from uamqp import constants # type: ignore
from ..exceptions import ConnectError, EventHubError
from ._client_base_async import ClientBaseAsync
from ._producer_async import EventHubProducer
from .._constants import ALL_PARTITIONS
from .._common import (
EventData,
EventHubSharedKeyCredential,
EventHubSASTokenCredential,
EventDataBatch
)
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential # type: ignore
_LOGGER = logging.getLogger(__name__)
class EventHubProducerClient(ClientBaseAsync):
"""
The EventHubProducerClient class defines a high level interface for
sending events to the Azure Event Hubs service.
:param str fully_qualified_namespace: The fully qualified host name for the Event Hubs namespace.
This is likely to be similar to <yournamespace>.servicebus.windows.net
:param str eventhub_name: The path of the specific Event Hub to connect the client to.
:param credential: The credential object used for authentication which implements particular interface
of getting tokens. It accepts :class:`EventHubSharedKeyCredential<azure.eventhub.EventHubSharedKeyCredential>`,
:class:`EventHubSASTokenCredential<azure.eventhub.EventHubSASTokenCredential>`, or credential objects generated by
the azure-identity library and objects that implement `get_token(self, *scopes)` method.
:keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
:keyword float auth_timeout: The time in seconds to wait for a token to be authorized by the service.
The default value is 60 seconds. If set to 0, no timeout will be enforced from the client.
:keyword str user_agent: The user agent that needs to be appended to the built in user agent string.
:keyword int retry_total: The total number of attempts to redo the failed operation when an error happened. Default
value is 3.
:keyword transport_type: The type of transport protocol that will be used for communicating with
the Event Hubs service. Default is `TransportType.Amqp`.
:paramtype transport_type: ~azure.eventhub.TransportType
:keyword dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
keys: 'proxy_hostname' (str value) and 'proxy_port' (int value).
Additionally the following keys may also be present: 'username', 'password'.
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_code_eventhub_async.py
:start-after: [START create_eventhub_producer_client_async]
:end-before: [END create_eventhub_producer_client_async]
:language: python
:dedent: 4
:caption: Create a new instance of the EventHubProducerClient.
"""
def __init__(self,
fully_qualified_namespace: str,
eventhub_name: str,
credential: Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, 'TokenCredential'],
**kwargs) -> None:
super(EventHubProducerClient, self).__init__(
fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
credential=credential,
network_tracing=kwargs.pop("logging_enable", False),
**kwargs
)
self._producers = {ALL_PARTITIONS: self._create_producer()} # type: Dict[str, EventHubProducer]
self._lock = asyncio.Lock() # sync the creation of self._producers
self._max_message_size_on_link = 0
self._partition_ids = None
async def _get_partitions(self):
if not self._partition_ids:
self._partition_ids = await self.get_partition_ids()
for p_id in self._partition_ids:
self._producers[p_id] = None
    async def _get_max_message_size(self):
# pylint: disable=protected-access
async with self._lock:
if not self._max_message_size_on_link:
await self._producers[ALL_PARTITIONS]._open_with_retry()
self._max_message_size_on_link = \
self._producers[ALL_PARTITIONS]._handler.message_handler._link.peer_max_message_size \
or constants.MAX_MESSAGE_LENGTH_BYTES
async def _start_producer(self, partition_id, send_timeout):
async with self._lock:
await self._get_partitions()
if partition_id not in self._partition_ids and partition_id != ALL_PARTITIONS:
raise ConnectError("Invalid partition {} for the event hub {}".format(partition_id, self.eventhub_name))
if not self._producers[partition_id] or self._producers[partition_id].closed:
self._producers[partition_id] = self._create_producer(
partition_id=partition_id,
send_timeout=send_timeout
)
def _create_producer(
self, *,
partition_id: str = None,
send_timeout: float = None,
loop: asyncio.AbstractEventLoop = None
) -> EventHubProducer:
target = "amqps://{}{}".format(self._address.hostname, self._address.path)
send_timeout = self._config.send_timeout if send_timeout is None else send_timeout
handler = EventHubProducer(
self, target, partition=partition_id, send_timeout=send_timeout, loop=loop)
return handler
@classmethod
def from_connection_string(
cls, conn_str: str,
*,
eventhub_name: str = None,
logging_enable: bool = False,
http_proxy: dict = None,
auth_timeout: float = 60,
user_agent: str = None,
retry_total: int = 3,
transport_type=None,
**kwargs):
# type: (str, Any) -> EventHubProducerClient
# pylint: disable=arguments-differ
"""
Create an EventHubProducerClient from a connection string.
:param str conn_str: The connection string of an eventhub.
:keyword str eventhub_name: The path of the specific Event Hub to connect the client to.
:keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
:keyword dict[str,Any] http_proxy: HTTP proxy settings. This must be a dictionary with the following
keys - 'proxy_hostname' (str value) and 'proxy_port' (int value).
Additionally the following keys may also be present - 'username', 'password'.
:keyword float auth_timeout: The time in seconds to wait for a token to be authorized by the service.
The default value is 60 seconds. If set to 0, no timeout will be enforced from the client.
:keyword str user_agent: The user agent that needs to be appended to the built in user agent string.
:keyword int retry_total: The total number of attempts to redo the failed operation when an error happened.
Default value is 3.
:keyword transport_type: The type of transport protocol that will be used for communicating with
the Event Hubs service. Default is `TransportType.Amqp`.
:paramtype transport_type: ~azure.eventhub.TransportType
:rtype: ~azure.eventhub.aio.EventHubProducerClient
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_code_eventhub_async.py
:start-after: [START create_eventhub_producer_client_from_conn_str_async]
:end-before: [END create_eventhub_producer_client_from_conn_str_async]
:language: python
:dedent: 4
:caption: Create a new instance of the EventHubProducerClient from connection string.
"""
return super(EventHubProducerClient, cls).from_connection_string(
conn_str,
eventhub_name=eventhub_name,
logging_enable=logging_enable,
http_proxy=http_proxy,
auth_timeout=auth_timeout,
user_agent=user_agent,
retry_total=retry_total,
transport_type=transport_type,
**kwargs
)
async def send(self, event_data,
*, partition_key: Union[str, bytes] = None, partition_id: str = None, timeout: float = None) -> None:
# type: (Union[EventData, EventDataBatch, Iterable[EventData]], ...) -> None
"""Sends event data and blocks until acknowledgement is received or operation times out.
:param event_data: The event to be sent. It can be an EventData object, or iterable of EventData objects.
:type event_data: ~azure.eventhub.EventData or ~azure.eventhub.EventDataBatch or
Iterator[~azure.eventhub.EventData]
:keyword str partition_key: With the given partition_key, event data will land to
a particular partition of the Event Hub decided by the service.
:keyword str partition_id: The specific partition ID to send to. Default is None, in which case the service
will assign to all partitions using round-robin.
:keyword float timeout: The maximum wait time to send the event data.
If not specified, the default wait time specified when the producer was created will be used.
:rtype: None
:raises: :class:`AuthenticationError<azure.eventhub.AuthenticationError>`
:class:`ConnectError<azure.eventhub.ConnectError>`
:class:`ConnectionLostError<azure.eventhub.ConnectionLostError>`
:class:`EventDataError<azure.eventhub.EventDataError>`
:class:`EventDataSendError<azure.eventhub.EventDataSendError>`
:class:`EventHubError<azure.eventhub.EventHubError>`
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_code_eventhub_async.py
:start-after: [START eventhub_producer_client_send_async]
:end-before: [END eventhub_producer_client_send_async]
:language: python
:dedent: 4
:caption: Asynchronously sends event data
"""
partition_id = partition_id or ALL_PARTITIONS
try:
await self._producers[partition_id].send(event_data, partition_key=partition_key)
except (KeyError, AttributeError, EventHubError):
await self._start_producer(partition_id, timeout)
await self._producers[partition_id].send(event_data, partition_key=partition_key)
async def create_batch(self, max_size=None):
# type:(int) -> EventDataBatch
"""
Create an EventDataBatch object with max size being max_size.
The max_size should be no greater than the max allowed message size defined by the service side.
:param int max_size: The maximum size of bytes data that an EventDataBatch object can hold.
:rtype: ~azure.eventhub.EventDataBatch
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_code_eventhub_async.py
:start-after: [START eventhub_producer_client_create_batch_async]
:end-before: [END eventhub_producer_client_create_batch_async]
:language: python
:dedent: 4
:caption: Create EventDataBatch object within limited size
"""
if not self._max_message_size_on_link:
            await self._get_max_message_size()
if max_size and max_size > self._max_message_size_on_link:
raise ValueError('Max message size: {} is too large, acceptable max batch size is: {} bytes.'
.format(max_size, self._max_message_size_on_link))
return EventDataBatch(max_size=(max_size or self._max_message_size_on_link))
async def close(self):
# type: () -> None
"""
Close down the handler. If the handler has already closed,
this will be a no op.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_code_eventhub_async.py
:start-after: [START eventhub_producer_client_close_async]
:end-before: [END eventhub_producer_client_close_async]
:language: python
:dedent: 4
:caption: Close down the handler.
"""
async with self._lock:
for producer in self._producers.values():
if producer:
await producer.close()
await self._conn_manager.close_connection()
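# Hypothetical usage sketch (not part of the original SDK module): drives the producer client
# defined above from an async entry point. The connection string, event hub name and payload
# below are placeholders, not real values.
if __name__ == "__main__":
    async def _example():
        producer = EventHubProducerClient.from_connection_string(
            "<connection-string>", eventhub_name="<eventhub-name>")
        try:
            # Send a single event; with no partition_id the service assigns partitions round-robin.
            await producer.send(EventData(b"example payload"))
        finally:
            await producer.close()
    asyncio.get_event_loop().run_until_complete(_example())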
|
py | 7df7507bf48975f189c76ad2a6cb8c468a972eb3 | # !/usr/bin/env python3
#######################################################################################
# #
# Program purpose: Convert seconds to day, hour, minutes and seconds #
# Program Author : Happi Yvan <[email protected]> #
# Creation Date : August 15, 2019 #
# #
#######################################################################################
def get_hours(minutes):
hours = 0
while minutes >= 60:
hours += 1
minutes -= 60
return hours
def get_minutes(seconds):
    minutes = 0
    while seconds >= 60:
        minutes += 1
        seconds -= 60
    return minutes
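# Hypothetical alternative (not part of the original program, and not wired into the
# __main__ block below): the same day/hour/minute/second breakdown can be computed
# directly with divmod instead of the subtraction loops above.
def split_seconds(total_seconds):
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return days, hours, minutes, seconds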
if __name__ == "__main__":
total_secs = int(input("Enter max number of seconds: "))
mins = get_minutes(seconds=total_secs)
    total_secs -= mins * 60
    hours = get_hours(mins)
    mins -= hours * 60
day = 0
while hours >= 24:
day += 1
hours -= 24
print(f"Total days: {day}\nTotal hours: {hours}\nTotal minutes:"
f" {mins}\nTotal seconds: {total_secs}") |
py | 7df75092f8997e1572caa260bfe46b3c97023632 | #!/usr/bin/env python3
import argparse
import pandas as pd
from ggplot import *
from parsers import config
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', dest='data_paths', nargs='+', help='chat log data files (pickle files)',
required=True)
parser.add_argument('--plot-density', dest='density', action='store_true',
help='plots the message densities (KDE) instead of their count')
parser.add_argument('-n', '--number-senders', dest='top_n', type=int, default=10,
help='number of different senders to consider, ordered by number of messages sent')
parser.add_argument('-b', '--bin-width', dest='bin_width', type=int, default=25, help='bin width for histograms')
parser.add_argument('--filter-conversation', dest='filter_conversation', type=str, default=None,
help='only keep messages sent in a conversation with these senders, separated by comma')
parser.add_argument('--filter-sender', dest='filter_sender', type=str, default=None,
help='only keep messages sent by these senders, separated by comma')
parser.add_argument('--remove-sender', dest='remove_sender', type=str, default=None,
                        help='remove messages sent by these senders, separated by comma')
args = parser.parse_args()
return args
def load_data(data_paths, filter_conversation=None, filter_sender=None, remove_sender=None, top_n=10):
# data loading
df = pd.DataFrame()
for dataPath in data_paths:
print('Loading', dataPath, '...')
df = pd.concat([df, pd.read_pickle(dataPath)])
df.columns = config.ALL_COLUMNS
print('Loaded', len(df), 'messages')
# filtering
if filter_conversation is not None:
filter_conversation = filter_conversation.split(',')
df = df[df['conversationWithName'].isin(filter_conversation)]
if filter_sender is not None:
filter_sender = filter_sender.split(',')
df = df[df['senderName'].isin(filter_sender)]
if remove_sender is not None:
remove_sender = remove_sender.split(',')
df = df[~df['senderName'].isin(remove_sender)]
# keep only topN interlocutors
mf = df.groupby(['conversationWithName'], as_index=False) \
.agg(lambda x: len(x)) \
.sort_values('timestamp', ascending=False)['conversationWithName'] \
.head(top_n).to_frame()
print(mf)
merged = pd.merge(df, mf, on=['conversationWithName'], how='inner')
merged = merged[['datetime', 'conversationWithName', 'senderName']]
print('Number to render:', len(merged))
print(merged.head())
return merged
def render(data, bin_width, plot_density=False):
if plot_density:
# filter out conversationWithName with only one timestamp (which breaks density plot)
for name in data.conversationWithName.unique():
if len(data[data.conversationWithName == name].datetime.unique()) == 1:
data = data[data.conversationWithName != name]
plot = ggplot(data, aes(x='datetime', color='conversationWithName')) \
+ geom_density() \
+ scale_x_date(labels='%b %Y') \
+ ggtitle('Conversation Densities') \
+ ylab('Density') \
+ xlab('Date')
else:
plot = ggplot(data, aes(x='datetime', fill='conversationWithName')) \
+ geom_histogram(alpha=0.6, binwidth=bin_width) \
+ scale_x_date(labels='%b %Y', breaks='6 months') \
+ ggtitle('Message Breakdown') \
+ ylab('Number of Messages') \
+ xlab('Date')
print(plot)
def main():
args = parse_arguments()
data = load_data(
data_paths=args.data_paths,
filter_conversation=args.filter_conversation,
filter_sender=args.filter_sender,
remove_sender=args.remove_sender,
top_n=args.top_n,
)
render(data, bin_width=args.bin_width, plot_density=args.density)
if __name__ == '__main__':
main()
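# Hypothetical invocation sketch (not part of the original script); the script and pickle file
# names are placeholders for whatever the surrounding project uses:
#   python plot_messages.py --data whatsapp.pkl telegram.pkl -n 5 -b 30 --plot-density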
|
py | 7df751a09a18d2708ab3415e06da0723b1b9f802 | import pickle
fa = open("final/english-words.10")
fb = open("final/english-words.20")
fc = open("final/english-words.35")
fo = open("allwords.txt", "wb")
# Union of the three word lists, uppercased with surrounding whitespace/newlines stripped.
out = set(fa) | set(fb) | set(fc)
out = set(i.strip().upper() for i in out)
print(out)
input()
print("dumping...")
pickle.dump(out, fo)
print("DONE!") |
py | 7df751cd15b9fe975f0195840c4547f6a990a4f8 | #
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr import Ar
def _IsPackageOrPackagedLayer(layer):
return layer.GetFileFormat().IsPackage() or \
Ar.IsPackageRelativePath(layer.identifier)
class BaseRuleChecker(object):
"""This is Base class for all the rule-checkers."""
def __init__(self, verbose):
self._verbose = verbose
self._failedChecks = []
self._errors = []
def _AddFailedCheck(self, msg):
self._failedChecks.append(msg)
def _AddError(self, msg):
self._errors.append(msg)
def _Msg(self, msg):
if self._verbose:
print msg
def GetFailedChecks(self):
return self._failedChecks
def GetErrors(self):
return self._errors
# -------------------------------------------------------------------------
# Virtual methods that any derived rule-checker may want to override.
# Default implementations do nothing.
#
# A rule-checker may choose to override one or more of the virtual methods.
# The callbacks are invoked in the order they are defined here (i.e.
# CheckStage is invoked first, followed by CheckDiagnostics, followed by
# CheckUnresolvedPaths and so on until CheckPrim). Some of the callbacks may
# be invoked multiple times per-rule with different parameters, for example,
# CheckLayer, CheckPrim and CheckZipFile.
def CheckStage(self, usdStage):
""" Check the given usdStage. """
pass
def CheckDiagnostics(self, diagnostics):
""" Check the diagnostic messages that were generated when opening the
USD stage. The diagnostic messages are collected using a
UsdUtilsCoalescingDiagnosticDelegate.
"""
pass
def CheckUnresolvedPaths(self, unresolvedPaths):
""" Check or process any unresolved asset paths that were found when
analysing the dependencies.
"""
pass
def CheckDependencies(self, usdStage, layerDeps, assetDeps):
""" Check usdStage's layer and asset dependencies that were gathered
using UsdUtils.ComputeAllDependencies().
"""
pass
def CheckLayer(self, layer):
""" Check the given SdfLayer. """
pass
def CheckZipFile(self, zipFile, packagePath):
""" Check the zipFile object created by opening the package at path
packagePath.
"""
pass
def CheckPrim(self, prim):
""" Check the given prim, which may only exist is a specific combination
of variant selections on the UsdStage.
"""
pass
# -------------------------------------------------------------------------
class ByteAlignmentChecker(BaseRuleChecker):
@staticmethod
def GetDescription():
return "Files within a usdz package must be laid out properly, "\
"i.e. they should be aligned to 64 bytes."
def __init__(self, verbose):
super(ByteAlignmentChecker, self).__init__(verbose)
def CheckZipFile(self, zipFile, packagePath):
fileNames = zipFile.GetFileNames()
for fileName in fileNames:
fileExt = Ar.GetResolver().GetExtension(fileName)
fileInfo = zipFile.GetFileInfo(fileName)
offset = fileInfo.dataOffset
if offset % 64 != 0:
self._AddFailedCheck("File '%s' in package '%s' has an "
"invalid offset %s." %
(fileName, packagePath, offset))
class CompressionChecker(BaseRuleChecker):
@staticmethod
def GetDescription():
return "Files withing a usdz package should not be compressed or "\
"encrypted."
def __init__(self, verbose):
super(CompressionChecker, self).__init__(verbose)
def CheckZipFile(self, zipFile, packagePath):
fileNames = zipFile.GetFileNames()
for fileName in fileNames:
fileExt = Ar.GetResolver().GetExtension(fileName)
fileInfo = zipFile.GetFileInfo(fileName)
if fileInfo.compressionMethod != 0:
self._AddFailedCheck("File '%s' in package '%s' has "
"compression. Compression method is '%s', actual size "
"is %s. Uncompressed size is %s." % (
fileName, packagePath, fileInfo.compressionMethod,
fileInfo.size, fileInfo.uncompressedSize))
class MissingReferenceChecker(BaseRuleChecker):
@staticmethod
def GetDescription():
return "The composed USD stage should not contain any unresolvable"\
" asset dependencies (in every possible variation of the "\
"asset), when using the default asset resolver. "
def __init__(self, verbose):
super(MissingReferenceChecker, self).__init__(verbose)
def CheckDiagnostics(self, diagnostics):
for diag in diagnostics:
# "_ReportErrors" is the name of the function that issues
# warnings about unresolved references, sublayers and other
# composition arcs.
if '_ReportErrors' in diag.sourceFunction and \
'usd/stage.cpp' in diag.sourceFileName:
self._AddFailedCheck(diag.commentary)
def CheckUnresolvedPaths(self, unresolvedPaths):
for unresolvedPath in unresolvedPaths:
self._AddFailedCheck("Found unresolvable external dependency "
"'%s'." % unresolvedPath)
class TextureChecker(BaseRuleChecker):
# Allow just png and jpg for now.
_allowedImageFormats = ("jpg", "png")
# Include a list of "unsupported" image formats to provide better error
# messages whwn we find one of these.
_unsupportedImageFormats = ["bmp", "tga", "hdr", "exr", "tif", "zfile",
"tx"]
@staticmethod
def GetDescription():
return "Texture files should be .jpg or .png."
def __init__(self, verbose):
# Check if the prim has an allowed type.
super(TextureChecker, self).__init__(verbose)
def _CheckTexture(self, texAssetPath):
self._Msg("Checking texture <%s>." % texAssetPath)
texFileExt = Ar.GetResolver().GetExtension(texAssetPath)
if texFileExt in \
TextureChecker._unsupportedImageFormats:
self._AddFailedCheck("Found texture file '%s' with unsupported "
"file format." % texAssetPath)
elif texFileExt not in \
TextureChecker._allowedImageFormats:
self._AddFailedCheck("Found texture file '%s' with unknown file "
"format." % texAssetPath)
def CheckPrim(self, prim):
# Right now, we find texture referenced by looking at the asset-valued
# shader inputs. However, it is entirely legal to feed the "fileName"
# input of a UsdUVTexture shader from a UsdPrimvarReader_string.
# Hence, ideally we would also check "the right" primvars on
# geometry prims here. However, identifying the right primvars is
# non-trivial. We probably need to pre-analyze all the materials.
# Not going to try to do this yet, but it raises an interesting
# validation pattern -
# Check if the prim is a shader.
if prim.GetTypeName() != "Shader":
return
from pxr import Sdf, UsdShade
shader = UsdShade.Shader(prim)
shaderInputs = shader.GetInputs()
for ip in shaderInputs:
if ip.GetTypeName() == Sdf.ValueTypeNames.Asset:
texFilePath = str(ip.Get()).strip('@')
self._CheckTexture(texFilePath)
elif ip.GetTypeName() == Sdf.ValueTypeNames.AssetArray:
texPathArray = ip.Get()
texPathArray = [str(i).strip('@') for i in texPathArray]
for texPath in texPathArray:
                    self._CheckTexture(texPath)
class ARKitPackageEncapsulationChecker(BaseRuleChecker):
@staticmethod
def GetDescription():
return "If the root layer is a package, then the composed stage "\
"should not contain references to files outside the package. "\
"In other words, the package should be entirely self-contained."
def __init__(self, verbose):
super(ARKitPackageEncapsulationChecker, self).__init__(verbose)
def CheckDependencies(self, usdStage, layerDeps, assetDeps):
rootLayer = usdStage.GetRootLayer()
if not _IsPackageOrPackagedLayer(rootLayer):
return
packagePath = usdStage.GetRootLayer().realPath
if packagePath:
if Ar.IsPackageRelativePath(packagePath):
packagePath = Ar.SplitPackageRelativePathOuter(
packagePath)[0]
for layer in layerDeps:
# In-memory layers like session layers (which we must skip when
# doing this check) won't have a real path.
if layer.realPath:
if not layer.realPath.startswith(packagePath):
self._AddFailedCheck("Found loaded layer '%s' that "
"does not belong to the package '%s'." %
(layer.identifier, packagePath))
for asset in assetDeps:
if not asset.startswith(packagePath):
self._AddFailedCheck("Found asset reference '%s' that "
"does not belong to the package '%s'." %
(asset, packagePath))
class ARKitLayerChecker(BaseRuleChecker):
# Only core USD file formats are allowed.
_allowedLayerFormatIds = ('usd', 'usda', 'usdc', 'usdz')
@staticmethod
def GetDescription():
return "All included layers that participate in composition should"\
" have one of the core supported file formats."
def __init__(self, verbose):
# Check if the prim has an allowed type.
super(ARKitLayerChecker, self).__init__(verbose)
def CheckLayer(self, layer):
self._Msg("Checking layer <%s>." % layer.identifier)
formatId = layer.GetFileFormat().formatId
if not formatId in \
ARKitLayerChecker._allowedLayerFormatIds:
self._AddFailedCheck("Layer '%s' has unsupported formatId "
"'%s'." % (layer.identifier, formatId))
class ARKitPrimTypeChecker(BaseRuleChecker):
# All core prim types other than UsdGeomPointInstancers, Curve types, Nurbs,
# and the types in UsdLux are allowed.
_allowedPrimTypeNames = ('', 'Scope', 'Xform', 'Camera',
'Shader', 'Material',
'Mesh', 'Sphere', 'Cube', 'Cylinder', 'Cone',
'Capsule', 'GeomSubset', 'Points',
'SkelRoot', 'Skeleton', 'SkelAnimation',
'BlendShape', 'SpatialAudio')
@staticmethod
def GetDescription():
return "UsdGeomPointInstancers and custom schemas not provided by "\
"core USD are not allowed."
def __init__(self, verbose):
# Check if the prim has an allowed type.
super(ARKitPrimTypeChecker, self).__init__(verbose)
def CheckPrim(self, prim):
self._Msg("Checking prim <%s>." % prim.GetPath())
if prim.GetTypeName() not in \
ARKitPrimTypeChecker._allowedPrimTypeNames:
self._AddFailedCheck("Prim <%s> has unsupported type '%s'." %
(prim.GetPath(), prim.GetTypeName()))
class ARKitStageYupChecker(BaseRuleChecker):
@staticmethod
def GetDescription():
return "The stage and all fo the assets referenced within it "\
"should be Y-up.",
def __init__(self, verbose):
# Check if the prim has an allowed type.
super(ARKitStageYupChecker, self).__init__(verbose)
def CheckStage(self, usdStage):
from pxr import UsdGeom
upAxis = UsdGeom.GetStageUpAxis(usdStage)
if upAxis != UsdGeom.Tokens.y:
self._AddFailedCheck("Stage has upAxis '%s'. upAxis should be "
"'%s'." % (upAxis, UsdGeom.Tokens.y))
class ARKitShaderChecker(BaseRuleChecker):
@staticmethod
def GetDescription():
return "Shader nodes must have \"id\" as the implementationSource, " \
"with id values that begin with \"Usd*\". Also, shader inputs "\
"with connections must each have a single, valid connection " \
"source."
def __init__(self, verbose):
super(ARKitShaderChecker, self).__init__(verbose)
def CheckPrim(self, prim):
from pxr import UsdShade
if not prim.IsA(UsdShade.Shader):
return
shader = UsdShade.Shader(prim)
if not shader:
self._AddError("Invalid shader prim <%s>." % prim.GetPath())
return
self._Msg("Checking shader <%s>." % prim.GetPath())
implSource = shader.GetImplementationSource()
if implSource != UsdShade.Tokens.id:
self._AddFailedCheck("Shader <%s> has non-id implementation "
"source '%s'." % (prim.GetPath(), implSource))
shaderId = shader.GetShaderId()
if not shaderId or \
not (shaderId in ['UsdPreviewSurface', 'UsdUVTexture'] or
shaderId.startswith('UsdPrimvarReader')) :
self._AddFailedCheck("Shader <%s> has unsupported info:id '%s'."
% (prim.GetPath(), shaderId))
# Check shader input connections
shaderInputs = shader.GetInputs()
for shdInput in shaderInputs:
connections = shdInput.GetAttr().GetConnections()
# If an input has one or more connections, ensure that the
# connections are valid.
if len(connections) > 0:
if len(connections) > 1:
self._AddFailedCheck("Shader input <%s> has %s connection "
"sources, but only one is allowed." %
                        (shdInput.GetAttr().GetPath(), len(connections)))
connectedSource = shdInput.GetConnectedSource()
if connectedSource is None:
self._AddFailedCheck("Connection source <%s> for shader "
"input <%s> is missing." % (connections[0],
shdInput.GetAttr().GetPath()))
else:
# The source must be a valid shader or material prim.
source = connectedSource[0]
if not source.GetPrim().IsA(UsdShade.Shader) and \
not source.GetPrim().IsA(UsdShade.Material):
self._AddFailedCheck("Shader input <%s> has an invalid "
"connection source prim of type '%s'." %
(shdInput.GetAttr().GetPath(),
source.GetPrim().GetTypeName()))
class ARKitMaterialBindingChecker(BaseRuleChecker):
@staticmethod
def GetDescription():
return "All material binding relationships must have valid targets."
def __init__(self, verbose):
super(ARKitMaterialBindingChecker, self).__init__(verbose)
def CheckPrim(self, prim):
from pxr import UsdShade
relationships = prim.GetRelationships()
bindingRels = [rel for rel in relationships if
rel.GetName().startswith(UsdShade.Tokens.materialBinding)]
for bindingRel in bindingRels:
targets = bindingRel.GetTargets()
if len(targets) == 1:
directBinding = UsdShade.MaterialBindingAPI.DirectBinding(
bindingRel)
if not directBinding.GetMaterial():
self._AddFailedCheck("Direct material binding <%s> targets "
"an invalid material <%s>." % (bindingRel.GetPath(),
directBinding.GetMaterialPath()))
elif len(targets) == 2:
collBinding = UsdShade.MaterialBindingAPI.CollectionBinding(
bindingRel)
if not collBinding.GetMaterial():
self._AddFailedCheck("Collection-based material binding "
"<%s> targets an invalid material <%s>." %
(bindingRel.GetPath(), collBinding.GetMaterialPath()))
if not collBinding.GetCollection():
self._AddFailedCheck("Collection-based material binding "
"<%s> targets an invalid collection <%s>." %
(bindingRel.GetPath(), collBinding.GetCollectionPath()))
class ARKitFileExtensionChecker(BaseRuleChecker):
_allowedFileExtensions = \
ARKitLayerChecker._allowedLayerFormatIds + \
TextureChecker._allowedImageFormats
@staticmethod
def GetDescription():
return "Only layer files and textures are allowed in a package."
def __init__(self, verbose):
super(ARKitFileExtensionChecker, self).__init__(verbose)
def CheckZipFile(self, zipFile, packagePath):
fileNames = zipFile.GetFileNames()
for fileName in fileNames:
fileExt = Ar.GetResolver().GetExtension(fileName)
if fileExt not in ARKitFileExtensionChecker._allowedFileExtensions:
self._AddFailedCheck("File '%s' in package '%s' has an "
"unknown or unsupported extension '%s'." %
(fileName, packagePath, fileExt))
class ARKitRootLayerChecker(BaseRuleChecker):
@staticmethod
def GetDescription():
return "The root layer of the package must be a usdc file and " \
"must not include any external dependencies that participate in "\
"stage composition."
def __init__(self, verbose):
super(ARKitRootLayerChecker, self).__init__(verbose=verbose)
def CheckStage(self, usdStage):
usedLayers = usdStage.GetUsedLayers()
# This list excludes any session layers.
usedLayersOnDisk = [i for i in usedLayers if i.realPath]
if len(usedLayersOnDisk) > 1:
self._AddFailedCheck("The stage uses %s layers. It should "
"contain a single usdc layer to be compatible with ARKit's "
"implementation of usdz." % len(usedLayersOnDisk))
rootLayerRealPath = usdStage.GetRootLayer().realPath
if rootLayerRealPath.endswith(".usdz"):
# Check if the root layer in the package is a usdc.
from pxr import Usd
zipFile = Usd.ZipFile.Open(rootLayerRealPath)
if not zipFile:
self._AddError("Could not open package at path '%s'." %
                    rootLayerRealPath)
return
fileNames = zipFile.GetFileNames()
if not fileNames[0].endswith(".usdc"):
self._AddFailedCheck("First file (%s) in usdz package '%s' "
"does not have the .usdc extension." % (fileNames[0],
rootLayerRealPath))
elif not rootLayerRealPath.endswith(".usdc"):
self._AddFailedCheck("Root layer of the stage '%s' does not "
"have the '.usdc' extension." % (rootLayerRealPath))
class ComplianceChecker(object):
""" A utility class for checking compliance of a given USD asset or a USDZ
package.
Since usdz files are zip files, someone could use generic zip tools to
create an archive and just change the extension, producing a .usdz file that
does not honor the additional constraints that usdz files require. Even if
someone does use our official archive creation tools, though, we
intentionally allow creation of usdz files that can be very "permissive" in
their contents for internal studio uses, where portability outside the
studio is not a concern. For content meant to be delivered over the web
(eg. ARKit assets), however, we must be much more restrictive.
This class provides two levels of compliance checking:
* "structural" validation that is represented by a set of base rules.
* "ARKit" compatibility validation, which includes many more restrictions.
Calling ComplianceChecker.DumpAllRules() will print an enumeration of the
various rules in the two categories of compliance checking.
"""
@staticmethod
def GetBaseRules():
return [ByteAlignmentChecker, CompressionChecker,
MissingReferenceChecker, TextureChecker]
@staticmethod
def GetARKitRules(skipARKitRootLayerCheck=False):
arkitRules = [ARKitLayerChecker, ARKitPrimTypeChecker,
ARKitStageYupChecker, ARKitShaderChecker,
ARKitMaterialBindingChecker,
ARKitFileExtensionChecker,
ARKitPackageEncapsulationChecker]
if not skipARKitRootLayerCheck:
arkitRules.append(ARKitRootLayerChecker)
return arkitRules
@staticmethod
def GetRules(arkit=False, skipARKitRootLayerCheck=False):
allRules = ComplianceChecker.GetBaseRules()
if arkit:
arkitRules = ComplianceChecker.GetARKitRules(
skipARKitRootLayerCheck=skipARKitRootLayerCheck)
allRules += arkitRules
return allRules
@staticmethod
def DumpAllRules():
print 'Base rules:'
        for ruleNum, rule in enumerate(ComplianceChecker.GetBaseRules()):
print '[%s] %s' % (ruleNum + 1, rule.GetDescription())
print '-' * 30
print 'ARKit rules: '
        for ruleNum, rule in enumerate(ComplianceChecker.GetARKitRules()):
print '[%s] %s' % (ruleNum + 1, rule.GetDescription())
print '-' * 30
def __init__(self, arkit=False, skipARKitRootLayerCheck=False,
rootPackageOnly=False, skipVariants=False, verbose=False):
self._rootPackageOnly = rootPackageOnly
self._doVariants = not skipVariants
self._verbose = verbose
self._errors = []
# Once a package has been checked, it goes into this set.
self._checkedPackages = set()
# Instantiate an instance of every rule checker and store in a list.
self._rules = [Rule(self._verbose) for Rule in
ComplianceChecker.GetRules(arkit, skipARKitRootLayerCheck)]
def _Msg(self, msg):
if self._verbose:
print msg
def _AddError(self, errMsg):
self._errors.append(errMsg)
def GetErrors(self):
errors = self._errors
for rule in self._rules:
errs = rule.GetErrors()
for err in errs:
errors.append("Error checking rule '%s': %s" %
(type(rule).__name__, err))
return errors
def DumpRules(self):
descriptions = [rule.GetDescription() for rule in self._rules]
print 'Checking rules: '
for ruleNum, rule in enumerate(descriptions):
print '[%s] %s' % (ruleNum + 1, rule)
print '-' * 30
def GetFailedChecks(self):
failedChecks = []
for rule in self._rules:
fcs = rule.GetFailedChecks()
for fc in fcs:
failedChecks.append("%s (fails '%s')" % (fc,
type(rule).__name__))
return failedChecks
def CheckCompliance(self, inputFile):
from pxr import Sdf, Usd, UsdUtils
if not Usd.Stage.IsSupportedFile(inputFile):
_AddError("Cannot open file '%s' on a USD stage." % args.inputFile)
return
# Collect all warnings using a diagnostic delegate.
delegate = UsdUtils.CoalescingDiagnosticDelegate()
usdStage = Usd.Stage.Open(inputFile)
stageOpenDiagnostics = delegate.TakeUncoalescedDiagnostics()
for rule in self._rules:
rule.CheckStage(usdStage)
rule.CheckDiagnostics(stageOpenDiagnostics)
with Ar.ResolverContextBinder(usdStage.GetPathResolverContext()):
# This recursively computes all of inputFiles's external
# dependencies.
(allLayers, allAssets, unresolvedPaths) = \
UsdUtils.ComputeAllDependencies(Sdf.AssetPath(inputFile))
for rule in self._rules:
rule.CheckUnresolvedPaths(unresolvedPaths)
rule.CheckDependencies(usdStage, allLayers, allAssets)
if self._rootPackageOnly:
rootLayer = usdStage.GetRootLayer()
if rootLayer.GetFileFormat().IsPackage():
packagePath = Ar.SplitPackageRelativePathInner(
rootLayer.identifier)[0]
self._CheckPackage(packagePath)
else:
self._AddError("Root layer of the USD stage (%s) doesn't belong to "
"a package, but 'rootPackageOnly' is True!" %
Usd.Describe(usdStage))
else:
# Process every package just once by storing them all in a set.
packages = set()
for layer in allLayers:
if _IsPackageOrPackagedLayer(layer):
packagePath = Ar.SplitPackageRelativePathInner(
layer.identifier)[0]
packages.add(packagePath)
self._CheckLayer(layer)
for package in packages:
self._CheckPackage(package)
# Traverse the entire stage and check every prim.
from pxr import Usd
# Author all variant switches in the session layer.
usdStage.SetEditTarget(usdStage.GetSessionLayer())
allPrimsIt = iter(Usd.PrimRange.Stage(usdStage,
Usd.TraverseInstanceProxies()))
self._TraverseRange(allPrimsIt, isStageRoot=True)
def _CheckPackage(self, packagePath):
self._Msg("Checking package <%s>." % packagePath)
# XXX: Should we open the package on a stage to ensure that it is valid
# and entirely self-contained.
from pxr import Usd
pkgExt = Ar.GetResolver().GetExtension(packagePath)
if pkgExt != "usdz":
self._AddError("Package at path %s has an invalid extension."
% packagePath)
return
# Check the parent package first.
if Ar.IsPackageRelativePath(packagePath):
parentPackagePath = Ar.SplitPackageRelativePathInner(packagePath)[0]
self._CheckPackage(parentPackagePath)
# Avoid checking the same parent package multiple times.
if packagePath in self._checkedPackages:
return
self._checkedPackages.add(packagePath)
resolvedPath = Ar.GetResolver().Resolve(packagePath)
if len(resolvedPath) == 0:
self._AddError("Failed to resolve package path '%s'." % packagePath)
return
zipFile = Usd.ZipFile.Open(resolvedPath)
if not zipFile:
self._AddError("Could not open package at path '%s'." %
resolvedPath)
return
for rule in self._rules:
rule.CheckZipFile(zipFile, packagePath)
def _CheckLayer(self, layer):
for rule in self._rules:
rule.CheckLayer(layer)
def _CheckPrim(self, prim):
for rule in self._rules:
rule.CheckPrim(prim)
def _TraverseRange(self, primRangeIt, isStageRoot):
primsWithVariants = []
rootPrim = primRangeIt.GetCurrentPrim()
for prim in primRangeIt:
            # Skip the variant set check on the root prim of this range unless it is the stage root.
if not self._doVariants or (not isStageRoot and prim == rootPrim):
self._CheckPrim(prim)
continue
vSets = prim.GetVariantSets()
vSetNames = vSets.GetNames()
if len(vSetNames) == 0:
self._CheckPrim(prim)
else:
primsWithVariants.append(prim)
primRangeIt.PruneChildren()
for prim in primsWithVariants:
self._TraverseVariants(prim)
def _TraverseVariants(self, prim):
from pxr import Usd
if prim.IsInstanceProxy():
return True
vSets = prim.GetVariantSets()
vSetNames = vSets.GetNames()
allVariantNames = []
for vSetName in vSetNames:
vSet = vSets.GetVariantSet(vSetName)
vNames = vSet.GetVariantNames()
allVariantNames.append(vNames)
import itertools
allVariations = itertools.product(*allVariantNames)
for variation in allVariations:
self._Msg("Testing variation %s of prim <%s>" %
(variation, prim.GetPath()))
for (idx, sel) in enumerate(variation):
vSets.SetSelection(vSetNames[idx], sel)
primRangeIt = iter(Usd.PrimRange(prim,
Usd.TraverseInstanceProxies()))
self._TraverseRange(primRangeIt, isStageRoot=False)
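# Hypothetical usage sketch (not part of the original module): run the ARKit-level compliance
# checks against a usdz package and report the results. The input path is a placeholder.
if __name__ == "__main__":
    complianceChecker = ComplianceChecker(arkit=True, verbose=False)
    complianceChecker.CheckCompliance("/path/to/asset.usdz")
    for failedCheck in complianceChecker.GetFailedChecks():
        print(failedCheck)
    for error in complianceChecker.GetErrors():
        print(error)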
|
py | 7df7525b5758b3ef6c49326130c85567b7edf0f1 | # Generated by Django 3.0.6 on 2020-06-03 16:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('digital_books', '0008_digital_book_rating'),
('electronicbookcollections', '0035_auto_20200603_1625'),
]
operations = [
migrations.AlterField(
model_name='electronicbookcollection',
name='digital_books',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='digital_books.Digital_Book'),
),
]
|
py | 7df7531575906f384a11329a27d34e11ff34e25e | """
@author Jacob Xie
@time 3/6/2021
"""
from .connector import Connector
from .loader import Loader
|
py | 7df753bcfc3e6de3acc55ba4a9e18c2bea53b86a | import os
import django
# Django settings for conf project.
settings_dir = os.path.dirname(__file__)
PROJECT_ROOT = os.path.abspath(os.path.dirname(settings_dir))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "test.sqlite"}}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "America/New_York"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ""
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ""
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ""
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = "7@m$nx@q%-$la^fy_(-rhxtvoxk118hrprg=q86f(@k*6^^vf8"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = "conf.urls"
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "conf.wsgi.application"
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, "templates/"),
)
TEMPLATE_CONTEXT_PROCESSORS = (
# default template context processors
"django.core.context_processors.debug",
"django.core.context_processors.request",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.auth.context_processors.auth",
)
INSTALLED_APPS = (
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.admin",
)
INSTALLED_APPS += ("accounts", "abstract", "vendors")
# MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
# INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ("127.0.0.1",)
DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False, "TAG": "body"}
DEBUG_TOOLBAR_PANELS = (
"debug_toolbar.panels.version.VersionDebugPanel",
"debug_toolbar.panels.timer.TimerDebugPanel",
"debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel",
"debug_toolbar.panels.headers.HeaderDebugPanel",
"debug_toolbar.panels.request_vars.RequestVarsDebugPanel",
"debug_toolbar.panels.template.TemplateDebugPanel",
"debug_toolbar.panels.sql.SQLDebugPanel",
"debug_toolbar.panels.signals.SignalDebugPanel",
"debug_toolbar.panels.logger.LoggingPanel",
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"], "level": "ERROR", "propagate": True
}
},
}
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
py | 7df753fa5dd1029b9925b703c7ce26ff178e94ff | """
"""
from collections import OrderedDict
import numpy as np
from halotools.utils import sliding_conditional_percentile
from jax import random as jran
from scipy.stats import norm
DEFAULT_SMHM_PARAMS = OrderedDict(
smhm_logm_crit=11.35,
smhm_ratio_logm_crit=-1.65,
smhm_k_logm=1.6,
smhm_lowm_index_x0=11.5,
smhm_lowm_index_k=2,
smhm_lowm_index_ylo=2.5,
smhm_lowm_index_yhi=2.5,
smhm_highm_index_x0=13.5,
smhm_highm_index_k=2,
smhm_highm_index_ylo=0.5,
smhm_highm_index_yhi=0.5,
)
DEFAULT_SMHM_SCATTER = 0.2
def _get_cen_sat_percentile(x, y, cenmsk, nwin, ran_key):
    """Assign each galaxy a percentile: centrals receive the sliding conditional
    percentile of y at fixed x; satellites receive an independent uniform draw."""
n_gals = cenmsk.size
n_cens = cenmsk.sum()
n_sats = n_gals - n_cens
p_cens = sliding_conditional_percentile(x[cenmsk], y[cenmsk], nwin)
p_sats = jran.uniform(ran_key, shape=(n_sats,))
percentile = np.zeros(n_gals)
percentile[cenmsk] = p_cens
percentile[~cenmsk] = p_sats
return percentile
def mc_logsm(smhm_params, logmh, p, scatter):
    """Monte Carlo log stellar mass: the percentile-p point of a normal centered
    on the median SMHM prediction with dispersion `scatter` in dex."""
median_logsm = _logsm_from_logmh(smhm_params, logmh)
logsm = norm.isf(1 - p, loc=median_logsm, scale=scatter)
return logsm
def _logsm_from_logmh(smhm_params, logmh):
"""Kernel of the three-roll SMHM mapping Mhalo ==> Mstar.
Parameters
----------
smhm_params : ndarray, shape (11, )
Parameters of the three-roll SMHM used to map Mhalo ==> Mstar,
logmh : ndarray, shape (n, )
Base-10 log of halo mass
Returns
-------
logsm : ndarray, shape (n, )
Base-10 log of stellar mass
"""
logm_crit, log_sfeff_at_logm_crit, smhm_k_logm = smhm_params[0:3]
lo_indx_pars = smhm_params[3:7]
hi_indx_pars = smhm_params[7:11]
lowm_index = _sigmoid(logmh, *lo_indx_pars)
highm_index = _sigmoid(logmh, *hi_indx_pars)
logsm_at_logm_crit = logm_crit + log_sfeff_at_logm_crit
powerlaw_index = _sigmoid(logmh, logm_crit, smhm_k_logm, lowm_index, highm_index)
return logsm_at_logm_crit + powerlaw_index * (logmh - logm_crit)
def _sigmoid(x, x0, k, ymin, ymax):
height_diff = ymax - ymin
return ymin + height_diff / (1 + np.exp(-k * (x - x0)))
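

# --- Added usage sketch (not part of the original module). It wires the pieces
# above together on a toy halo catalog; the mass range, the all-centrals
# assumption, and the uniform percentiles are illustrative assumptions only.
def _demo_mc_logsm(n_halos=1000, seed=0):
    """Draw log stellar masses for a toy halo catalog using the defaults above."""
    rng = np.random.RandomState(seed)
    logmh = rng.uniform(10.5, 14.5, n_halos)  # fake halo masses [log10 Msun]
    p = rng.uniform(0, 1, n_halos)            # percentile within the scatter
    smhm_params = np.array(list(DEFAULT_SMHM_PARAMS.values()))
    return mc_logsm(smhm_params, logmh, p, DEFAULT_SMHM_SCATTER)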
|
py | 7df754ade47e4c8107c56a1c227a3c3fb5119225 | import os
import brevitas.nn as qnn
import torch
import torch.nn as nn
from pact import PACTReLU
from torchvision import datasets, models
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor
train_transform = Compose(
[
Pad(4),
RandomCrop(32, fill=128),
RandomHorizontalFlip(),
ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),])
def get_train_test_datasets(path):
if not os.path.exists(path):
os.makedirs(path)
download = True
else:
download = True if len(os.listdir(path)) < 1 else False
train_ds = datasets.CIFAR10(root=path, train=True, download=download, transform=train_transform)
test_ds = datasets.CIFAR10(root=path, train=False, download=False, transform=test_transform)
return train_ds, test_ds
def get_model(name):
    __dict__ = globals()  # lets the local QAT variants defined below be looked up by name
if name in models.__dict__:
fn = models.__dict__[name]
elif name in ["resnet18_QAT_8b", "resnet18_QAT_6b", "resnet18_QAT_5b", "resnet18_QAT_4b"]:
fn = __dict__[name]
else:
raise RuntimeError("Unknown model name {}".format(name))
return fn(num_classes=10)
# Below code is taken from https://discuss.pytorch.org/t/evaluator-returns-nan/107972/3
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1, weight_bit_width=8):
"""3x3 convolution with padding"""
return qnn.QuantConv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
weight_bit_width=weight_bit_width,
)
def conv1x1(in_planes, out_planes, stride=1, weight_bit_width=8):
"""1x1 convolution"""
return qnn.QuantConv2d(
in_planes, out_planes, kernel_size=1, stride=stride, bias=False, weight_bit_width=weight_bit_width
)
def make_PACT_relu(bit_width=8):
relu = qnn.QuantReLU(bit_width=bit_width)
    relu.act_impl = PACTReLU()  # swap the activation implementation for PACT (learned clipping threshold)
return relu
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
bit_width=8,
):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride, weight_bit_width=bit_width)
self.bn1 = norm_layer(planes)
self.relu = make_PACT_relu(bit_width=bit_width)
self.conv2 = conv3x3(planes, planes, weight_bit_width=bit_width)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
bit_width=8,
):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width, weight_bit_width=bit_width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation, weight_bit_width=bit_width)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion, weight_bit_width=bit_width)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = make_PACT_relu(bit_width=bit_width)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet_QAT_Xb(nn.Module):
def __init__(
self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
bit_width=8,
):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = qnn.QuantConv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = make_PACT_relu()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], bit_width=bit_width)
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0], bit_width=bit_width
)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1], bit_width=bit_width
)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2], bit_width=bit_width
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d): # qnn.QuantConv2d includes nn.Conv2d inside.
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False, bit_width=8):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride, weight_bit_width=bit_width),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
bit_width=bit_width,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
bit_width=bit_width,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet_QAT_Xb(block, layers, **kwargs):
model = ResNet_QAT_Xb(block, layers, **kwargs)
return model
def resnet18_QAT_8b(*args, **kwargs):
return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet18_QAT_6b(*args, **kwargs):
return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], bit_width=6, **kwargs)
def resnet18_QAT_5b(*args, **kwargs):
return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], bit_width=5, **kwargs)
def resnet18_QAT_4b(*args, **kwargs):
return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], bit_width=4, **kwargs)
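

# --- Added usage sketch (not part of the original file): builds the 4-bit
# quant-aware ResNet-18 through get_model and checks the output shape on a
# random CIFAR10-sized batch. Purely illustrative; nothing here is trained.
def _demo_qat_resnet():
    model = get_model("resnet18_QAT_4b")
    x = torch.randn(2, 3, 32, 32)       # batch of 2 fake CIFAR10 images
    logits = model(x)
    assert logits.shape == (2, 10)      # 10 CIFAR10 classes, set by get_model
    return logits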
|
py | 7df7550a249408686d4b9e7e7079f5c00cbadd7d | """Auto-generated file, do not edit by hand. LI metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_LI = PhoneMetadata(id='LI', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,3}', possible_length=(3, 4)),
emergency=PhoneNumberDesc(national_number_pattern='1(?:1[278]|44)', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:[278]|45)|4[3-57]|50|75|81[18])', example_number='1145', possible_length=(3, 4)),
short_data=True)
|
py | 7df7558cce85a9171bd30a1d8d0ead1ed13cfc59 | """
Given the root of a binary tree, return the inorder traversal of its nodes' values.
Example 1:
Input: root = [1,null,2,3]
Output: [1,3,2]
Example 2:
Input: root = []
Output: []
Example 3:
Input: root = [1]
Output: [1]
Example 4:
Input: root = [1,2]
Output: [2,1]
Example 5:
Input: root = [1,null,2]
Output: [1,2]
Constraints:
The number of nodes in the tree is in the range [0, 100].
-100 <= Node.val <= 100
Follow up:
Recursive solution is trivial, could you do it iteratively?
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
def inorder(node, ls):
if node is None:
return
inorder(node.left, ls)
ls.append(node.val)
inorder(node.right, ls)
ls = []
inorder(root, ls)
return ls |
py | 7df7572ccf4bf7fb2c3a33890534852e4533ca3e | import functools
import re
from itertools import combinations
import collections
import math
def part1(input_data):
grid = {}
for j in range(len(input_data)):
for i in range(len(input_data[j])):
if input_data[j][i] == "#":
grid[(j, i, 0)] = 1
for i in range(6):
grid = iterate_grid(grid, neighbour_generator_part1)
return sum(grid.values())
def part2(input_data):
grid = {}
for j in range(len(input_data)):
for i in range(len(input_data[j])):
if input_data[j][i] == "#":
grid[(j, i, 0, 0)] = 1
for i in range(6):
grid = iterate_grid(grid, neighbour_generator_part2)
return sum(grid.values())
def iterate_grid(grid, neighbour_generator):
    """Advance the grid one cycle: active cells stay active with 2 or 3 active
    neighbours; inactive cells (including newly seen boundary cells) activate
    with exactly 3 active neighbours."""
new_grid = dict(grid)
boundary = set()
for k, v in grid.items():
# Check only existing neighbors in the first pass and accumulate the boundary set
active_neighbors = sum(
[
grid[neighbor] if neighbor in grid else 0
for neighbor in neighbour_generator(k, grid, boundary)
]
)
new_grid[k] = 0
if v == 1:
if active_neighbors == 2 or active_neighbors == 3:
new_grid[k] = 1
else:
if active_neighbors == 3:
new_grid[k] = 1
for inactive_cell in boundary:
active_neighbors = sum(
[
grid[neighbor] if neighbor in grid else 0
for neighbor in neighbour_generator(inactive_cell, None, None)
]
)
if active_neighbors == 3:
new_grid[inactive_cell] = 1
return new_grid
def neighbour_generator_part1(cell, grid, boundary):
    """Yield the 26 neighbours of a 3-D cell; when `grid` is given, also record
    inactive or unseen neighbours in `boundary` for the second pass."""
for k in range(-1, 2):
for j in range(-1, 2):
for i in range(-1, 2):
if i == j == k == 0:
continue
neighbor = (cell[0] + i, cell[1] + j, cell[2] + k)
if grid is not None and (neighbor not in grid or grid[neighbor] == 0):
boundary.add(neighbor)
yield neighbor
def neighbour_generator_part2(cell, grid, boundary):
    """Yield the 80 neighbours of a 4-D cell; when `grid` is given, also record
    inactive or unseen neighbours in `boundary` for the second pass."""
for l in range(-1, 2):
for k in range(-1, 2):
for j in range(-1, 2):
for i in range(-1, 2):
if i == j == k == l == 0:
continue
neighbor = (cell[0] + i, cell[1] + j, cell[2] + k, cell[3] + l)
if grid is not None and (
neighbor not in grid or grid[neighbor] == 0
):
boundary.add(neighbor)
yield neighbor
if __name__ == "__main__":
with open("input", "r") as input_file:
input_data = list(map(lambda x: x.strip(), input_file.readlines()))
print(part1(input_data))
print(part2(input_data))
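

# --- Added self-check sketch (not in the original). The small grid below is the
# example from the puzzle statement; the expected counts after six cycles
# (112 active cubes in 3-D, 848 in 4-D) are quoted from memory and should be
# treated as assumptions. Call _self_check() manually to run it.
def _self_check():
    example = [".#.", "..#", "###"]
    assert part1(example) == 112
    assert part2(example) == 848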
|
py | 7df75836ee916a28f4a031535dcb56b53a8daeb4 | from typing import List
from mathy_core import ExpressionParser, MathExpression
parser = ExpressionParser()
expression: MathExpression = parser.parse("4 + 2x")
nodes: List[MathExpression] = expression.to_list()
# len([4,+,2,*,x])
assert len(nodes) == 5
|
py | 7df7591eefe9ab0dabf38b0dddbb78485a3ba9c9 | # LTD simulation models / perturbances
# Attribute name case sensitive.
# Commented and empty lines are ignored during parsing.
# Double quoted variable names in model parameters also ignored
CTRLtimeScale = 60*60 # minutes
ACEgain = 16.0
# Perturbances
mirror.sysPerturbances = [
# ramp non-gov gens
'gen 2 2 : ramp Pm 300 2700 150 rel', # 45 min ramp up
'gen 2 2 : ramp Pm 3600 2700 -150 rel', # 45 min ramp down
'gen 5 : ramp Pm 300 2700 300 rel', # 45 min ramp up
'gen 5 : ramp Pm 3600 2700 -300 rel', # 45 min ramp down
'mirror : ramp Hsys 300 2700 -20 per', # 45 min ramp down
'mirror : ramp Hsys 3600 2700 25 per', # 45 min ramp up
# ramp loads
#'load 8 : ramp P 600 2700 150 rel', # 45 min ramp up
#'load 8 : ramp P 3900 2700 -150 rel', # 45 min ramp down
#'load 9 : ramp P 600 2700 300 rel', # 45 min ramp up
#'load 9 : ramp P 3900 2700 -300 rel', # 45 min ramp down
]
mirror.NoiseAgent = ltd.perturbance.LoadNoiseAgent(mirror, 0.03, True, 5) # mirror, percent noise, walk, delay@ start
# Definite Time Controller Definitions
mirror.DTCdict = {
'bus8caps' : {
'RefAgents' : {
'ra1' : 'bus 8 : Vm',
'ra2' : 'branch 8 9 1 : Qbr', # branches defined from, to, ckID
        },# end Reference Agents
'TarAgents' : {
'tar1' : 'shunt 8 2 : St',
'tar2' : 'shunt 8 3 : St',
'tar3' : 'shunt 8 4 : St',
'tar4' : 'shunt 8 5 : St',
'tar5' : 'shunt 8 6 : St',
}, # end Target Agents
'Timers' : {
'set' :{ # set shunts
'logic' : "(ra1 < 1.0)",
'actTime' : 30, # seconds of true logic before act
'act' : "anyOFFTar = 1", # set any target off target = 1
},# end set
'reset' :{ # reset shunts
'logic' : "(ra1 > 1.04)",
'actTime' : 30, # seconds of true logic before act
'act' : "anyONTar = 0", # set any target On target = 0
},# end reset
'hold' : 60, # minimum time between actions
}, # end timers
},# end bus8caps
'bus9caps' : {
'RefAgents' : {
'ra1' : 'bus 9 : Vm',
'ra2' : 'branch 8 9 1 : Qbr', # branches defined from, to, ckID
        },# end Reference Agents
'TarAgents' : {
'tar1' : 'shunt 9 2 : St',
'tar2' : 'shunt 9 3 : St',
'tar3' : 'shunt 9 4 : St',
'tar4' : 'shunt 9 5 : St',
'tar5' : 'shunt 9 6 : St',
}, # end Target Agents
'Timers' : {
'set' :{ # set shunts
'logic' : "(ra1 < 1.0)",
'actTime' : 80, # seconds of true logic before act
'act' : "anyOFFTar = 1", # set any target off target = 1
},# end set
'reset' :{ # reset shunts
'logic' : "(ra1 > 1.04)",
'actTime' : 80, # seconds of true logic before act
'act' : "anyONTar = 0", # set any target On target = 0
},# end reset
'hold' : 120, # minimum time between actions
}, # end timers
},# end bus9caps
}# end DTCdict
# Balancing Authorities
mirror.sysBA = {
'BA1':{
'Area':1,
'B': "1.0 : perload", # MW/0.1 Hz
'AGCActionTime': 300, # seconds
'ACEgain' : ACEgain,
'AGCType':'TLB : 0', # Tie-Line Bias
'UseAreaDroop' : False,
'AreaDroop' : 0.05,
'IncludeIACE' : True,
'IACEconditional': False,
'IACEwindow' : 30, # seconds - size of window - 0 for non window
'IACEscale' : 1/15,
'IACEdeadband' : 0, # Hz
'ACEFiltering': 'PI : 0.04 0.0001',
'AGCDeadband' : None, # MW? -> not implemented
'GovDeadbandType' : 'ramp', # step, None, ramp, nldroop
'GovDeadband' : .036, # Hz
'GovAlpha' : 0.016, # Hz - for nldroop
'GovBeta' : 0.036, # Hz - for nldroop
'CtrlGens': ['gen 1 : 0.5 : rampA',
'gen 2 1 : 0.5 : rampA',
]
},
'BA2':{
'Area':2,
'B': "1.0 : perload", # MW/0.1 Hz
'AGCActionTime': 300.00, # seconds
'ACEgain' : ACEgain,
'AGCType':'TLB : 0', # Tie-Line Bias
'UseAreaDroop' : False,
'AreaDroop' : 0.05,
'IncludeIACE' : True,
'IACEconditional': False,
'IACEwindow' : 30, # seconds - size of window - 0 for non window
'IACEscale' : 1/15,
'IACEdeadband' : 0, # Hz
'ACEFiltering': 'PI : 0.04 0.0001',
'AGCDeadband' : None, # MW? -> not implemented
'GovDeadbandType' : 'ramp', # step, None, ramp, nldroop
'GovDeadband' : .036, # Hz
'GovAlpha' : 0.016, # Hz - for nldroop
'GovBeta' : 0.036, # Hz - for nldroop
'CtrlGens': ['gen 3 : 0.6 : rampA',
'gen 4 : .4 : rampA',
]
},
}
# Load and Generation Cycle Agents
"""
mirror.sysGenerationControl = {
'BPATDispatch' : {
'Area': 1,
'startTime' : 2,
'timeScale' : CTRLtimeScale,
'rampType' : 'per', # relative percent change
'CtrlGens': [
"gen 1 : 0.25",
"gen 2 1 : 0.75",
],
# Data from: 12/11/2019 PACE
'forcast' : [
            #(time, Percent change from previous value)
(0, 0.0),
(1, 5.8),
(2, 8.8),
(3, 9.9),
(4, 4.0),
],
}, #end of generation controller def
'CAISODispatch' : {
'Area': 2,
'startTime' : 2,
'timeScale' : CTRLtimeScale,
'rampType' : 'per', # relative percent change
'CtrlGens': [
"gen 4 : 1.0",
],
# Data from: 12/11/2019 PACE
'forcast' : [
            #(time, Percent change from previous value)
(0, 0.0),
(1, 0.7),
(2, 7.5),
(3, 11.2),
(4, 4.4),
],
}, #end of generation controller def
}
mirror.sysLoadControl = {
'BPATDemand' : {
'Area': 1,
'startTime' : 2,
'timeScale' : CTRLtimeScale,
'rampType' : 'per', # relative percent change
# Data from: 12/11/2019 BPAT
'demand' : [
            #(time, Percent change from previous value)
(0, 0.000),
(1, 3.2),
(2, 8.2),
(3, 9.3),
(4, 3.8),
] ,
}, # end of demand agent def
'CAISODemand' : {
'Area': 2,
'startTime' : 2,
'timeScale' : CTRLtimeScale,
'rampType' : 'per', # relative percent change
# Data from: 12/11/2019 CAISO
'demand' : [
            #(time, Percent change from previous value)
(0, 0.000),
(1, 3.0),
(2, 7.0),
(3, 10.5),
(4, 4.4),
] ,
},# end of demand load control definition
}# end of load control definitions
""" |
py | 7df7592ecce2afaa43dac557a1e9fc40186ea8a9 | from datetime import datetime
from math import ceil
from typing import Dict, Optional
from altair import Chart
import pandas as pd
import numpy as np
from .constants import DATE_FORMAT
from .parameters import Parameters
def build_admits_chart(
*, alt, admits_floor_df: pd.DataFrame, max_y_axis: Optional[int] = None
) -> Chart:
"""Build admits chart."""
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
x = dict(shorthand="date:T", title="Date", axis=alt.Axis(format=(DATE_FORMAT)))
y = dict(shorthand="value:Q", title="Daily admissions", scale=y_scale)
color = "key:N"
tooltip = ["date:T", alt.Tooltip("value:Q", format=".0f", title="Admit"), "key:N"]
# TODO fix the fold to allow any number of dispositions
points = (
alt.Chart()
.transform_fold(fold=["hospitalized", "icu", "ventilated"])
.encode(x=alt.X(**x), y=alt.Y(**y), color=color, tooltip=tooltip)
.mark_line(point=True)
.encode(
x=alt.X(**x),
y=alt.Y(**y),
color=color,
tooltip=tooltip,
)
)
bar = (
alt.Chart()
.encode(x=alt.X(**x))
.transform_filter(alt.datum.day == 0)
.mark_rule(color="black", opacity=0.35, size=2)
)
return (
alt.layer(points, bar, data=admits_floor_df)
.configure_legend(orient="bottom")
.interactive()
)
def build_census_chart(
*, alt, census_floor_df: pd.DataFrame, max_y_axis: Optional[int] = None
) -> Chart:
"""Build census chart."""
y_scale = alt.Scale()
if max_y_axis:
y_scale.domain = (0, max_y_axis)
x = dict(shorthand="date:T", title="Date", axis=alt.Axis(format=(DATE_FORMAT)))
y = dict(shorthand="value:Q", title="Census", scale=y_scale)
color = "key:N"
tooltip = ["date:T", alt.Tooltip("value:Q", format=".0f", title="Census"), "key:N"]
# TODO fix the fold to allow any number of dispositions
points = (
alt.Chart()
.transform_fold(fold=["hospitalized", "icu", "ventilated"])
.encode(x=alt.X(**x), y=alt.Y(**y), color=color, tooltip=tooltip)
.mark_line(point=True)
.encode(
x=alt.X(**x),
y=alt.Y(**y),
color=color,
tooltip=tooltip,
)
)
bar = (
alt.Chart()
.encode(x=alt.X(**x))
.transform_filter(alt.datum.day == 0)
.mark_rule(color="black", opacity=0.35, size=2)
)
return (
alt.layer(points, bar, data=census_floor_df)
.configure_legend(orient="bottom")
.interactive()
)
def build_census_chart_with_real(
*, alt, census_floor_df: pd.DataFrame, max_y_axis: Optional[int] = None
) -> Chart:
"""Build census chart."""
y_scale = alt.Scale()
if max_y_axis:
y_scale.domain = (0, max_y_axis)
x = dict(shorthand="date:T", title="Date", axis=alt.Axis(format=(DATE_FORMAT)))
y = dict(shorthand="value:Q", title="Census", scale=y_scale)
color = "key:N"
tooltip = ["date:T", alt.Tooltip("value:Q", format=".0f", title="Census"), "key:N"]
# TODO fix the fold to allow any number of dispositions
points = (
alt.Chart()
.transform_fold(fold=["hospitalized", "icu", "hosp_reel", "icu_reel"])
.encode(x=alt.X(**x), y=alt.Y(**y), color=color, tooltip=tooltip)
.mark_line(point=True)
.encode(
x=alt.X(**x),
y=alt.Y(**y),
color=color,
tooltip=tooltip,
)
)
bar = (
alt.Chart()
.encode(x=alt.X(**x))
.transform_filter(alt.datum.day == 0)
.mark_rule(color="black", opacity=0.35, size=2)
)
return (
alt.layer(points, bar, data=census_floor_df)
.configure_legend(orient="bottom")
.interactive()
)
def build_sim_sir_w_date_chart(
*, alt, sim_sir_w_date_floor_df: pd.DataFrame, max_y_axis: Optional[int] = None
) -> Chart:
"""Build sim sir w date chart."""
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
x = dict(shorthand="date:T", title="Date", axis=alt.Axis(format=(DATE_FORMAT)))
y = dict(shorthand="value:Q", title="Count", scale=y_scale)
color = "key:N"
tooltip = ["key:N", "value:Q"]
# TODO fix the fold to allow any number of dispositions
points = (
alt.Chart()
.transform_fold(fold=["susceptible", "infected", "recovered"])
.encode(x=alt.X(**x), y=alt.Y(**y), color=color, tooltip=tooltip)
.mark_line()
.encode(
x=alt.X(**x),
y=alt.Y(**y),
color=color,
tooltip=tooltip,
)
)
bar = (
alt.Chart()
.encode(x=alt.X(**x))
.transform_filter(alt.datum.day == 0)
.mark_rule(color="black", opacity=0.35, size=2)
)
return (
alt.layer(points, bar, data=sim_sir_w_date_floor_df)
.configure_legend(orient="bottom")
.interactive()
)
def build_descriptions(
*, chart: Chart, labels: Dict[str, str], suffix: str = ""
) -> str:
"""
    :param chart: The alt chart to be used in finding max points
    :param labels: Mapping from column keys to the display names used in the messages
:param suffix: The assumption is that the charts have similar column names.
The census chart adds " Census" to the column names.
Make sure to include a space or underscore as appropriate
:return: Returns a multi-line string description of the results
"""
messages = []
cols = ["hospitalized", "icu", "ventilated"]
asterisk = False
day = "date" if "date" in chart.data.columns else "day"
for col in cols:
if chart.data[col].idxmax() + 1 == len(chart.data):
asterisk = True
# todo: bring this to an optional arg / i18n
on = datetime.strftime(chart.data[day][chart.data[col].idxmax()], "%b %d")
messages.append(
"{}{} peaks at {:,} on {}{}".format(
labels[col],
suffix,
ceil(chart.data[col].max()),
on,
"*" if asterisk else "",
)
)
if asterisk:
messages.append(
"_* The max is at the upper bound of the data, and therefore may not be the actual max_"
)
return "\n\n".join(messages)
def build_table(
*, df: pd.DataFrame, labels: Dict[str, str], modulo: int = 1
) -> pd.DataFrame:
table_df = df[np.mod(df.day, modulo) == 0].copy()
table_df.date = table_df.date.dt.strftime(DATE_FORMAT)
    table_df = table_df.rename(columns=labels)
return table_df
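

# --- Added usage sketch (not part of the original module). The toy frame below
# is an assumption shaped to satisfy the folds used above ("hospitalized",
# "icu", "ventilated" plus "day"/"date"); real callers pass the projection
# DataFrames produced elsewhere in the app.
def _demo_admits_chart():
    import altair as alt
    toy = pd.DataFrame({
        "day": range(5),
        "date": pd.date_range("2020-03-01", periods=5),
        "hospitalized": [1, 2, 4, 7, 11],
        "icu": [0, 1, 1, 2, 3],
        "ventilated": [0, 0, 1, 1, 2],
    })
    return build_admits_chart(alt=alt, admits_floor_df=toy)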
|
py | 7df75a35513ae1e4a595378a40d1b4fa6f9d0bcc | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Payment(models.Model):
created = models.DateTimeField(auto_now_add=True)
amount = models.DecimalField(max_digits=16, decimal_places=2)
payer = models.ForeignKey(User, related_name='payments', null=True)
class Meta:
ordering = ('created',)
|
py | 7df75aa4524bb4f5a708857ab0d660fb8ccedfb8 | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
convolve_grayscale_padding = __import__(
'2-convolve_grayscale_padding').convolve_grayscale_padding
if __name__ == '__main__':
dataset = np.load('../../supervised_learning/data/MNIST.npz')
images = dataset['X_train']
print(images.shape)
kernel = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
images_conv = convolve_grayscale_padding(images, kernel, (2, 4))
print(images_conv.shape)
plt.imshow(images[0], cmap='gray')
plt.show()
plt.imshow(images_conv[0], cmap='gray')
plt.show()
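    # Added note (not in the original script): with explicit zero padding of
    # (ph, pw) = (2, 4) and a 3x3 kernel, a "valid" convolution should give
    #     h_out = h + 2*ph - kh + 1 = 28 + 4 - 3 + 1 = 30
    #     w_out = w + 2*pw - kw + 1 = 28 + 8 - 3 + 1 = 34
    # per image, assuming the usual 28x28 MNIST inputs, so the printed
    # images_conv.shape should end in (..., 30, 34).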
|
py | 7df75c2f822f0f4f7eabad59479b918191960baa | """Multi-layer Perceptron
"""
# Authors: Issam H. Laradji <[email protected]>
# Andreas Mueller
# Jiyuan Qian
# License: BSD 3 clause
import numpy as np
from abc import ABCMeta, abstractmethod
import warnings
import scipy.optimize
from ..base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
)
from ..base import is_classifier
from ._base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS
from ._stochastic_optimizers import SGDOptimizer, AdamOptimizer
from ..model_selection import train_test_split
from ..preprocessing import LabelBinarizer
from ..utils import gen_batches, check_random_state
from ..utils import shuffle
from ..utils import _safe_indexing
from ..utils import column_or_1d
from ..exceptions import ConvergenceWarning
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.multiclass import _check_partial_fit_first_call, unique_labels
from ..utils.multiclass import type_of_target
from ..utils.optimize import _check_optimize_result
from ..utils.metaestimators import available_if
_STOCHASTIC_SOLVERS = ["sgd", "adam"]
def _pack(coefs_, intercepts_):
"""Pack the parameters into a single vector."""
return np.hstack([l.ravel() for l in coefs_ + intercepts_])
class BaseMultilayerPerceptron(BaseEstimator, metaclass=ABCMeta):
"""Base class for MLP classification and regression.
Warning: This class should not be used directly.
Use derived classes instead.
.. versionadded:: 0.18
"""
@abstractmethod
def __init__(
self,
hidden_layer_sizes,
activation,
solver,
alpha,
batch_size,
learning_rate,
learning_rate_init,
power_t,
max_iter,
loss,
shuffle,
random_state,
tol,
verbose,
warm_start,
momentum,
nesterovs_momentum,
early_stopping,
validation_fraction,
beta_1,
beta_2,
epsilon,
n_iter_no_change,
max_fun,
):
self.activation = activation
self.solver = solver
self.alpha = alpha
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_init = learning_rate_init
self.power_t = power_t
self.max_iter = max_iter
self.loss = loss
self.hidden_layer_sizes = hidden_layer_sizes
self.shuffle = shuffle
self.random_state = random_state
self.tol = tol
self.verbose = verbose
self.warm_start = warm_start
self.momentum = momentum
self.nesterovs_momentum = nesterovs_momentum
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.n_iter_no_change = n_iter_no_change
self.max_fun = max_fun
def _unpack(self, packed_parameters):
"""Extract the coefficients and intercepts from packed_parameters."""
for i in range(self.n_layers_ - 1):
start, end, shape = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
start, end = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
def _forward_pass(self, activations):
"""Perform a forward pass on the network by computing the values
of the neurons in the hidden layers and the output layer.
Parameters
----------
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
"""
hidden_activation = ACTIVATIONS[self.activation]
# Iterate over the hidden layers
for i in range(self.n_layers_ - 1):
activations[i + 1] = safe_sparse_dot(activations[i], self.coefs_[i])
activations[i + 1] += self.intercepts_[i]
# For the hidden layers
if (i + 1) != (self.n_layers_ - 1):
hidden_activation(activations[i + 1])
# For the last layer
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activations[i + 1])
return activations
def _forward_pass_fast(self, X):
"""Predict using the trained model
This is the same as _forward_pass but does not record the activations
of all layers and only returns the last layer's activation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The decision function of the samples for each class in the model.
"""
X = self._validate_data(X, accept_sparse=["csr", "csc"], reset=False)
# Initialize first layer
activation = X
# Forward propagate
hidden_activation = ACTIVATIONS[self.activation]
for i in range(self.n_layers_ - 1):
activation = safe_sparse_dot(activation, self.coefs_[i])
activation += self.intercepts_[i]
if i != self.n_layers_ - 2:
hidden_activation(activation)
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activation)
return activation
def _compute_loss_grad(
self, layer, n_samples, activations, deltas, coef_grads, intercept_grads
):
"""Compute the gradient of loss with respect to coefs and intercept for
specified layer.
This function does backpropagation for the specified one layer.
"""
coef_grads[layer] = safe_sparse_dot(activations[layer].T, deltas[layer])
coef_grads[layer] += self.alpha * self.coefs_[layer]
coef_grads[layer] /= n_samples
intercept_grads[layer] = np.mean(deltas[layer], 0)
def _loss_grad_lbfgs(
self, packed_coef_inter, X, y, activations, deltas, coef_grads, intercept_grads
):
"""Compute the MLP loss function and its corresponding derivatives
with respect to the different parameters given in the initialization.
Returned gradients are packed in a single vector so it can be used
in lbfgs
Parameters
----------
packed_coef_inter : ndarray
A vector comprising the flattened coefficients and intercepts.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
grad : array-like, shape (number of nodes of all layers,)
"""
self._unpack(packed_coef_inter)
loss, coef_grads, intercept_grads = self._backprop(
X, y, activations, deltas, coef_grads, intercept_grads
)
grad = _pack(coef_grads, intercept_grads)
return loss, grad
def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
"""Compute the MLP loss function and its corresponding derivatives
with respect to each parameter: weights and bias vectors.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
coef_grads : list, length = n_layers - 1
intercept_grads : list, length = n_layers - 1
"""
n_samples = X.shape[0]
# Forward propagate
activations = self._forward_pass(activations)
# Get loss
loss_func_name = self.loss
if loss_func_name == "log_loss" and self.out_activation_ == "logistic":
loss_func_name = "binary_log_loss"
loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1])
# Add L2 regularization term to loss
values = 0
for s in self.coefs_:
s = s.ravel()
values += np.dot(s, s)
loss += (0.5 * self.alpha) * values / n_samples
# Backward propagate
last = self.n_layers_ - 2
# The calculation of delta[last] here works with following
# combinations of output activation and loss function:
# sigmoid and binary cross entropy, softmax and categorical cross
# entropy, and identity with squared loss
deltas[last] = activations[-1] - y
# Compute gradient for the last layer
self._compute_loss_grad(
last, n_samples, activations, deltas, coef_grads, intercept_grads
)
inplace_derivative = DERIVATIVES[self.activation]
# Iterate over the hidden layers
for i in range(self.n_layers_ - 2, 0, -1):
deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
inplace_derivative(activations[i], deltas[i - 1])
self._compute_loss_grad(
i - 1, n_samples, activations, deltas, coef_grads, intercept_grads
)
return loss, coef_grads, intercept_grads
def _initialize(self, y, layer_units, dtype):
# set all attributes, allocate weights etc for first call
# Initialize parameters
self.n_iter_ = 0
self.t_ = 0
self.n_outputs_ = y.shape[1]
# Compute the number of layers
self.n_layers_ = len(layer_units)
# Output for regression
if not is_classifier(self):
self.out_activation_ = "identity"
# Output for multi class
elif self._label_binarizer.y_type_ == "multiclass":
self.out_activation_ = "softmax"
# Output for binary class and multi-label
else:
self.out_activation_ = "logistic"
# Initialize coefficient and intercept layers
self.coefs_ = []
self.intercepts_ = []
for i in range(self.n_layers_ - 1):
coef_init, intercept_init = self._init_coef(
layer_units[i], layer_units[i + 1], dtype
)
self.coefs_.append(coef_init)
self.intercepts_.append(intercept_init)
if self.solver in _STOCHASTIC_SOLVERS:
self.loss_curve_ = []
self._no_improvement_count = 0
if self.early_stopping:
self.validation_scores_ = []
self.best_validation_score_ = -np.inf
else:
self.best_loss_ = np.inf
def _init_coef(self, fan_in, fan_out, dtype):
# Use the initialization method recommended by
# Glorot et al.
factor = 6.0
if self.activation == "logistic":
factor = 2.0
init_bound = np.sqrt(factor / (fan_in + fan_out))
# Generate weights and bias:
coef_init = self._random_state.uniform(
-init_bound, init_bound, (fan_in, fan_out)
)
intercept_init = self._random_state.uniform(-init_bound, init_bound, fan_out)
coef_init = coef_init.astype(dtype, copy=False)
intercept_init = intercept_init.astype(dtype, copy=False)
return coef_init, intercept_init
def _fit(self, X, y, incremental=False):
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
# Validate input parameters.
self._validate_hyperparameters()
if np.any(np.array(hidden_layer_sizes) <= 0):
raise ValueError(
"hidden_layer_sizes must be > 0, got %s." % hidden_layer_sizes
)
first_pass = not hasattr(self, "coefs_") or (
not self.warm_start and not incremental
)
X, y = self._validate_input(X, y, incremental, reset=first_pass)
n_samples, n_features = X.shape
# Ensure y is 2D
if y.ndim == 1:
y = y.reshape((-1, 1))
self.n_outputs_ = y.shape[1]
layer_units = [n_features] + hidden_layer_sizes + [self.n_outputs_]
# check random state
self._random_state = check_random_state(self.random_state)
if first_pass:
# First time training the model
self._initialize(y, layer_units, X.dtype)
# Initialize lists
activations = [X] + [None] * (len(layer_units) - 1)
deltas = [None] * (len(activations) - 1)
coef_grads = [
np.empty((n_fan_in_, n_fan_out_), dtype=X.dtype)
for n_fan_in_, n_fan_out_ in zip(layer_units[:-1], layer_units[1:])
]
intercept_grads = [
np.empty(n_fan_out_, dtype=X.dtype) for n_fan_out_ in layer_units[1:]
]
# Run the Stochastic optimization solver
if self.solver in _STOCHASTIC_SOLVERS:
self._fit_stochastic(
X,
y,
activations,
deltas,
coef_grads,
intercept_grads,
layer_units,
incremental,
)
# Run the LBFGS solver
elif self.solver == "lbfgs":
self._fit_lbfgs(
X, y, activations, deltas, coef_grads, intercept_grads, layer_units
)
return self
def _validate_hyperparameters(self):
if not isinstance(self.shuffle, bool):
raise ValueError(
"shuffle must be either True or False, got %s." % self.shuffle
)
if self.max_iter <= 0:
raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
if self.max_fun <= 0:
raise ValueError("max_fun must be > 0, got %s." % self.max_fun)
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0, got %s." % self.alpha)
if (
self.learning_rate in ["constant", "invscaling", "adaptive"]
and self.learning_rate_init <= 0.0
):
raise ValueError(
"learning_rate_init must be > 0, got %s." % self.learning_rate
)
if self.momentum > 1 or self.momentum < 0:
raise ValueError("momentum must be >= 0 and <= 1, got %s" % self.momentum)
if not isinstance(self.nesterovs_momentum, bool):
raise ValueError(
"nesterovs_momentum must be either True or False, got %s."
% self.nesterovs_momentum
)
if not isinstance(self.early_stopping, bool):
raise ValueError(
"early_stopping must be either True or False, got %s."
% self.early_stopping
)
if self.validation_fraction < 0 or self.validation_fraction >= 1:
raise ValueError(
"validation_fraction must be >= 0 and < 1, got %s"
% self.validation_fraction
)
if self.beta_1 < 0 or self.beta_1 >= 1:
raise ValueError("beta_1 must be >= 0 and < 1, got %s" % self.beta_1)
if self.beta_2 < 0 or self.beta_2 >= 1:
raise ValueError("beta_2 must be >= 0 and < 1, got %s" % self.beta_2)
if self.epsilon <= 0.0:
raise ValueError("epsilon must be > 0, got %s." % self.epsilon)
if self.n_iter_no_change <= 0:
raise ValueError(
"n_iter_no_change must be > 0, got %s." % self.n_iter_no_change
)
# raise ValueError if not registered
if self.activation not in ACTIVATIONS:
raise ValueError(
"The activation '%s' is not supported. Supported activations are %s."
% (self.activation, list(sorted(ACTIVATIONS)))
)
if self.learning_rate not in ["constant", "invscaling", "adaptive"]:
raise ValueError("learning rate %s is not supported. " % self.learning_rate)
supported_solvers = _STOCHASTIC_SOLVERS + ["lbfgs"]
if self.solver not in supported_solvers:
raise ValueError(
"The solver %s is not supported. Expected one of: %s"
% (self.solver, ", ".join(supported_solvers))
)
def _fit_lbfgs(
self, X, y, activations, deltas, coef_grads, intercept_grads, layer_units
):
# Store meta information for the parameters
self._coef_indptr = []
self._intercept_indptr = []
start = 0
# Save sizes and indices of coefficients for faster unpacking
for i in range(self.n_layers_ - 1):
n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1]
end = start + (n_fan_in * n_fan_out)
self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
start = end
# Save sizes and indices of intercepts for faster unpacking
for i in range(self.n_layers_ - 1):
end = start + layer_units[i + 1]
self._intercept_indptr.append((start, end))
start = end
# Run LBFGS
packed_coef_inter = _pack(self.coefs_, self.intercepts_)
if self.verbose is True or self.verbose >= 1:
iprint = 1
else:
iprint = -1
opt_res = scipy.optimize.minimize(
self._loss_grad_lbfgs,
packed_coef_inter,
method="L-BFGS-B",
jac=True,
options={
"maxfun": self.max_fun,
"maxiter": self.max_iter,
"iprint": iprint,
"gtol": self.tol,
},
args=(X, y, activations, deltas, coef_grads, intercept_grads),
)
self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
self.loss_ = opt_res.fun
self._unpack(opt_res.x)
def _fit_stochastic(
self,
X,
y,
activations,
deltas,
coef_grads,
intercept_grads,
layer_units,
incremental,
):
params = self.coefs_ + self.intercepts_
if not incremental or not hasattr(self, "_optimizer"):
if self.solver == "sgd":
self._optimizer = SGDOptimizer(
params,
self.learning_rate_init,
self.learning_rate,
self.momentum,
self.nesterovs_momentum,
self.power_t,
)
elif self.solver == "adam":
self._optimizer = AdamOptimizer(
params,
self.learning_rate_init,
self.beta_1,
self.beta_2,
self.epsilon,
)
# early_stopping in partial_fit doesn't make sense
early_stopping = self.early_stopping and not incremental
if early_stopping:
# don't stratify in multilabel classification
should_stratify = is_classifier(self) and self.n_outputs_ == 1
stratify = y if should_stratify else None
X, X_val, y, y_val = train_test_split(
X,
y,
random_state=self._random_state,
test_size=self.validation_fraction,
stratify=stratify,
)
if is_classifier(self):
y_val = self._label_binarizer.inverse_transform(y_val)
else:
X_val = None
y_val = None
n_samples = X.shape[0]
sample_idx = np.arange(n_samples, dtype=int)
if self.batch_size == "auto":
batch_size = min(200, n_samples)
else:
if self.batch_size < 1 or self.batch_size > n_samples:
warnings.warn(
"Got `batch_size` less than 1 or larger than "
"sample size. It is going to be clipped"
)
batch_size = np.clip(self.batch_size, 1, n_samples)
try:
for it in range(self.max_iter):
if self.shuffle:
# Only shuffle the sample indices instead of X and y to
# reduce the memory footprint. These indices will be used
# to slice the X and y.
sample_idx = shuffle(sample_idx, random_state=self._random_state)
accumulated_loss = 0.0
for batch_slice in gen_batches(n_samples, batch_size):
if self.shuffle:
X_batch = _safe_indexing(X, sample_idx[batch_slice])
y_batch = y[sample_idx[batch_slice]]
else:
X_batch = X[batch_slice]
y_batch = y[batch_slice]
activations[0] = X_batch
batch_loss, coef_grads, intercept_grads = self._backprop(
X_batch,
y_batch,
activations,
deltas,
coef_grads,
intercept_grads,
)
accumulated_loss += batch_loss * (
batch_slice.stop - batch_slice.start
)
# update weights
grads = coef_grads + intercept_grads
self._optimizer.update_params(params, grads)
self.n_iter_ += 1
self.loss_ = accumulated_loss / X.shape[0]
self.t_ += n_samples
self.loss_curve_.append(self.loss_)
if self.verbose:
print("Iteration %d, loss = %.8f" % (self.n_iter_, self.loss_))
# update no_improvement_count based on training loss or
# validation score according to early_stopping
self._update_no_improvement_count(early_stopping, X_val, y_val)
# for learning rate that needs to be updated at iteration end
self._optimizer.iteration_ends(self.t_)
if self._no_improvement_count > self.n_iter_no_change:
# not better than last `n_iter_no_change` iterations by tol
# stop or decrease learning rate
if early_stopping:
msg = (
"Validation score did not improve more than "
"tol=%f for %d consecutive epochs."
% (self.tol, self.n_iter_no_change)
)
else:
msg = (
"Training loss did not improve more than tol=%f"
" for %d consecutive epochs."
% (self.tol, self.n_iter_no_change)
)
is_stopping = self._optimizer.trigger_stopping(msg, self.verbose)
if is_stopping:
break
else:
self._no_improvement_count = 0
if incremental:
break
if self.n_iter_ == self.max_iter:
warnings.warn(
"Stochastic Optimizer: Maximum iterations (%d) "
"reached and the optimization hasn't converged yet."
% self.max_iter,
ConvergenceWarning,
)
except KeyboardInterrupt:
warnings.warn("Training interrupted by user.")
if early_stopping:
# restore best weights
self.coefs_ = self._best_coefs
self.intercepts_ = self._best_intercepts
def _update_no_improvement_count(self, early_stopping, X_val, y_val):
if early_stopping:
# compute validation score, use that for stopping
self.validation_scores_.append(self.score(X_val, y_val))
if self.verbose:
print("Validation score: %f" % self.validation_scores_[-1])
# update best parameters
# use validation_scores_, not loss_curve_
# let's hope no-one overloads .score with mse
last_valid_score = self.validation_scores_[-1]
if last_valid_score < (self.best_validation_score_ + self.tol):
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if last_valid_score > self.best_validation_score_:
self.best_validation_score_ = last_valid_score
self._best_coefs = [c.copy() for c in self.coefs_]
self._best_intercepts = [i.copy() for i in self.intercepts_]
else:
if self.loss_curve_[-1] > self.best_loss_ - self.tol:
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if self.loss_curve_[-1] < self.best_loss_:
self.best_loss_ = self.loss_curve_[-1]
def fit(self, X, y):
"""Fit the model to data matrix X and target(s) y.
Parameters
----------
X : ndarray or sparse matrix of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns a trained MLP model.
"""
return self._fit(X, y, incremental=False)
def _check_solver(self):
if self.solver not in _STOCHASTIC_SOLVERS:
raise AttributeError(
"partial_fit is only available for stochastic"
" optimizers. %s is not stochastic."
% self.solver
)
return True
@available_if(_check_solver)
def partial_fit(self, X, y):
"""Update the model with a single iteration over the given data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
Returns
-------
self : object
Trained MLP model.
"""
return self._fit(X, y, incremental=True)
class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
"""Multi-layer Perceptron classifier.
This model optimizes the log-loss function using LBFGS or stochastic
gradient descent.
.. versionadded:: 0.18
Parameters
----------
hidden_layer_sizes : tuple, length = n_layers - 2, default=(100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
Activation function for the hidden layer.
- 'identity', no-op activation, useful to implement linear bottleneck,
returns f(x) = x
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
The solver for weight optimization.
- 'lbfgs' is an optimizer in the family of quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimizer proposed
by Kingma, Diederik, and Jimmy Ba
Note: The default solver 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'lbfgs' can converge faster and perform
better.
alpha : float, default=0.0001
L2 penalty (regularization term) parameter.
batch_size : int, default='auto'
Size of minibatches for stochastic optimizers.
If the solver is 'lbfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`.
learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
Learning rate schedule for weight updates.
- 'constant' is a constant learning rate given by
'learning_rate_init'.
- 'invscaling' gradually decreases the learning rate at each
time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
- 'adaptive' keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when ``solver='sgd'``.
learning_rate_init : double, default=0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when solver='sgd' or 'adam'.
power_t : double, default=0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when solver='sgd'.
max_iter : int, default=200
Maximum number of iterations. The solver iterates until convergence
(determined by 'tol') or this number of iterations. For stochastic
solvers ('sgd', 'adam'), note that this determines the number of epochs
(how many times each data point will be used), not the number of
gradient steps.
shuffle : bool, default=True
Whether to shuffle samples in each iteration. Only used when
solver='sgd' or 'adam'.
random_state : int, RandomState instance, default=None
Determines random number generation for weights and bias
initialization, train-test split if early stopping is used, and batch
sampling when solver='sgd' or 'adam'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
tol : float, default=1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least ``tol`` for ``n_iter_no_change`` consecutive iterations,
unless ``learning_rate`` is set to 'adaptive', convergence is
considered to be reached and training stops.
verbose : bool, default=False
Whether to print progress messages to stdout.
warm_start : bool, default=False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution. See :term:`the Glossary <warm_start>`.
momentum : float, default=0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when solver='sgd'.
nesterovs_momentum : bool, default=True
Whether to use Nesterov's momentum. Only used when solver='sgd' and
momentum > 0.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to true, it will automatically set
aside 10% of training data as validation and terminate training when
validation score is not improving by at least tol for
``n_iter_no_change`` consecutive epochs. The split is stratified,
except in a multilabel setting.
If early stopping is False, then the training stops when the training
loss does not improve by more than tol for n_iter_no_change consecutive
passes over the training set.
Only effective when solver='sgd' or 'adam'.
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
beta_1 : float, default=0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when solver='adam'.
beta_2 : float, default=0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when solver='adam'.
epsilon : float, default=1e-8
Value for numerical stability in adam. Only used when solver='adam'.
n_iter_no_change : int, default=10
Maximum number of epochs to not meet ``tol`` improvement.
Only effective when solver='sgd' or 'adam'.
.. versionadded:: 0.20
max_fun : int, default=15000
Only used when solver='lbfgs'. Maximum number of loss function calls.
The solver iterates until convergence (determined by 'tol'), number
of iterations reaches max_iter, or this number of loss function calls.
Note that number of loss function calls will be greater than or equal
to the number of iterations for the `MLPClassifier`.
.. versionadded:: 0.22
Attributes
----------
classes_ : ndarray or list of ndarray of shape (n_classes,)
Class labels for each output.
loss_ : float
The current loss computed with the loss function.
best_loss_ : float
The minimum loss reached by the solver throughout fitting.
loss_curve_ : list of shape (`n_iter_`,)
The ith element in the list represents the loss at the ith iteration.
t_ : int
The number of training samples seen by the solver during fitting.
coefs_ : list of shape (n_layers - 1,)
The ith element in the list represents the weight matrix corresponding
to layer i.
intercepts_ : list of shape (n_layers - 1,)
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_iter_ : int
The number of iterations the solver has run.
n_layers_ : int
Number of layers.
n_outputs_ : int
Number of outputs.
out_activation_ : str
Name of the output activation function.
See Also
--------
MLPRegressor : Multi-layer Perceptron regressor.
BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM).
Notes
-----
MLPClassifier trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values.
References
----------
Hinton, Geoffrey E.
"Connectionist learning procedures." Artificial intelligence 40.1
(1989): 185-234.
Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
training deep feedforward neural networks." International Conference
on Artificial Intelligence and Statistics. 2010.
He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification." arXiv preprint
arXiv:1502.01852 (2015).
Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
optimization." arXiv preprint arXiv:1412.6980 (2014).
Examples
--------
>>> from sklearn.neural_network import MLPClassifier
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> X, y = make_classification(n_samples=100, random_state=1)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
... random_state=1)
>>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
>>> clf.predict_proba(X_test[:1])
array([[0.038..., 0.961...]])
>>> clf.predict(X_test[:5, :])
array([1, 0, 1, 0, 1])
>>> clf.score(X_test, y_test)
0.8...
"""
def __init__(
self,
hidden_layer_sizes=(100,),
activation="relu",
*,
solver="adam",
alpha=0.0001,
batch_size="auto",
learning_rate="constant",
learning_rate_init=0.001,
power_t=0.5,
max_iter=200,
shuffle=True,
random_state=None,
tol=1e-4,
verbose=False,
warm_start=False,
momentum=0.9,
nesterovs_momentum=True,
early_stopping=False,
validation_fraction=0.1,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-8,
n_iter_no_change=10,
max_fun=15000,
):
super().__init__(
hidden_layer_sizes=hidden_layer_sizes,
activation=activation,
solver=solver,
alpha=alpha,
batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_init=learning_rate_init,
power_t=power_t,
max_iter=max_iter,
loss="log_loss",
shuffle=shuffle,
random_state=random_state,
tol=tol,
verbose=verbose,
warm_start=warm_start,
momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
n_iter_no_change=n_iter_no_change,
max_fun=max_fun,
)
def _validate_input(self, X, y, incremental, reset):
X, y = self._validate_data(
X,
y,
accept_sparse=["csr", "csc"],
multi_output=True,
dtype=(np.float64, np.float32),
reset=reset,
)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
# Matrix of actions to be taken under the possible combinations:
# The case that incremental == True and classes_ not defined is
# already checked by _check_partial_fit_first_call that is called
# in _partial_fit below.
# The cases are already grouped into the respective if blocks below.
#
# incremental warm_start classes_ def action
# 0 0 0 define classes_
# 0 1 0 define classes_
# 0 0 1 redefine classes_
#
# 0 1 1 check compat warm_start
# 1 1 1 check compat warm_start
#
# 1 0 1 check compat last fit
#
# Note the reliance on short-circuiting here, so that the second
# or part implies that classes_ is defined.
if (not hasattr(self, "classes_")) or (not self.warm_start and not incremental):
self._label_binarizer = LabelBinarizer()
self._label_binarizer.fit(y)
self.classes_ = self._label_binarizer.classes_
else:
classes = unique_labels(y)
if self.warm_start:
if set(classes) != set(self.classes_):
raise ValueError(
"warm_start can only be used where `y` has the same "
"classes as in the previous call to fit. Previously "
f"got {self.classes_}, `y` has {classes}"
)
elif len(np.setdiff1d(classes, self.classes_, assume_unique=True)):
raise ValueError(
"`y` has classes not in `self.classes_`. "
f"`self.classes_` has {self.classes_}. 'y' has {classes}."
)
# This downcast to bool is to prevent upcasting when working with
# float32 data
y = self._label_binarizer.transform(y).astype(bool)
return X, y
def predict(self, X):
"""Predict using the multi-layer perceptron classifier.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y : ndarray, shape (n_samples,) or (n_samples, n_classes)
The predicted classes.
"""
check_is_fitted(self)
y_pred = self._forward_pass_fast(X)
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
return self._label_binarizer.inverse_transform(y_pred)
@available_if(lambda est: est._check_solver())
def partial_fit(self, X, y, classes=None):
"""Update the model with a single iteration over the given data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : array-like of shape (n_samples,)
The target values.
classes : array of shape (n_classes,), default=None
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : object
Trained MLP model.
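        Examples
        --------
        A minimal usage sketch (not part of the original docstring); the toy
        data and the two-class label set below are illustrative only.
        >>> import numpy as np
        >>> from sklearn.neural_network import MLPClassifier
        >>> clf = MLPClassifier(hidden_layer_sizes=(10,), random_state=0)
        >>> X1 = np.array([[0., 0.], [1., 1.], [0., 1.], [1., 0.]])
        >>> y1 = np.array([0, 1, 0, 1])
        >>> clf = clf.partial_fit(X1, y1, classes=[0, 1])
        >>> X2 = np.array([[0.5, 0.5], [0.9, 0.1]])
        >>> y2 = np.array([1, 0])
        >>> clf = clf.partial_fit(X2, y2)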
"""
if _check_partial_fit_first_call(self, classes):
self._label_binarizer = LabelBinarizer()
if type_of_target(y).startswith("multilabel"):
self._label_binarizer.fit(y)
else:
self._label_binarizer.fit(classes)
super().partial_fit(X, y)
return self
def predict_log_proba(self, X):
"""Return the log of probability estimates.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The input data.
Returns
-------
log_y_prob : ndarray of shape (n_samples, n_classes)
The predicted log-probability of the sample for each class
in the model, where classes are ordered as they are in
`self.classes_`. Equivalent to `log(predict_proba(X))`.
"""
y_prob = self.predict_proba(X)
return np.log(y_prob, out=y_prob)
def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : ndarray of shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self)
y_pred = self._forward_pass_fast(X)
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
if y_pred.ndim == 1:
return np.vstack([1 - y_pred, y_pred]).T
else:
return y_pred
def _more_tags(self):
return {"multilabel": True}
class MLPRegressor(RegressorMixin, BaseMultilayerPerceptron):
"""Multi-layer Perceptron regressor.
This model optimizes the squared error using LBFGS or stochastic gradient
descent.
.. versionadded:: 0.18
Parameters
----------
hidden_layer_sizes : tuple, length = n_layers - 2, default=(100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
Activation function for the hidden layer.
- 'identity', no-op activation, useful to implement linear bottleneck,
returns f(x) = x
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
The solver for weight optimization.
- 'lbfgs' is an optimizer in the family of quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimizer proposed by
Kingma, Diederik, and Jimmy Ba
Note: The default solver 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'lbfgs' can converge faster and perform
better.
alpha : float, default=0.0001
L2 penalty (regularization term) parameter.
batch_size : int, default='auto'
Size of minibatches for stochastic optimizers.
If the solver is 'lbfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`.
learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
Learning rate schedule for weight updates.
- 'constant' is a constant learning rate given by
'learning_rate_init'.
- 'invscaling' gradually decreases the learning rate ``learning_rate_``
at each time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
- 'adaptive' keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when solver='sgd'.
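        For example (illustrative numbers): with 'invscaling',
        learning_rate_init=0.01 and power_t=0.5, the effective learning rate
        at time step t=100 is 0.01 / 100**0.5 = 0.001.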
learning_rate_init : double, default=0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when solver='sgd' or 'adam'.
power_t : double, default=0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when solver='sgd'.
max_iter : int, default=200
Maximum number of iterations. The solver iterates until convergence
(determined by 'tol') or this number of iterations. For stochastic
solvers ('sgd', 'adam'), note that this determines the number of epochs
(how many times each data point will be used), not the number of
gradient steps.
shuffle : bool, default=True
Whether to shuffle samples in each iteration. Only used when
solver='sgd' or 'adam'.
random_state : int, RandomState instance, default=None
Determines random number generation for weights and bias
initialization, train-test split if early stopping is used, and batch
sampling when solver='sgd' or 'adam'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
tol : float, default=1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least ``tol`` for ``n_iter_no_change`` consecutive iterations,
unless ``learning_rate`` is set to 'adaptive', convergence is
considered to be reached and training stops.
verbose : bool, default=False
Whether to print progress messages to stdout.
warm_start : bool, default=False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution. See :term:`the Glossary <warm_start>`.
momentum : float, default=0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when solver='sgd'.
nesterovs_momentum : bool, default=True
Whether to use Nesterov's momentum. Only used when solver='sgd' and
momentum > 0.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to true, it will automatically set
aside 10% of training data as validation and terminate training when
validation score is not improving by at least ``tol`` for
``n_iter_no_change`` consecutive epochs.
Only effective when solver='sgd' or 'adam'.
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
beta_1 : float, default=0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when solver='adam'.
beta_2 : float, default=0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when solver='adam'.
epsilon : float, default=1e-8
Value for numerical stability in adam. Only used when solver='adam'.
n_iter_no_change : int, default=10
Maximum number of epochs to not meet ``tol`` improvement.
Only effective when solver='sgd' or 'adam'.
.. versionadded:: 0.20
max_fun : int, default=15000
Only used when solver='lbfgs'. Maximum number of function calls.
The solver iterates until convergence (determined by 'tol'), number
of iterations reaches max_iter, or this number of function calls.
Note that number of function calls will be greater than or equal to
the number of iterations for the MLPRegressor.
.. versionadded:: 0.22
Attributes
----------
loss_ : float
The current loss computed with the loss function.
best_loss_ : float
The minimum loss reached by the solver throughout fitting.
loss_curve_ : list of shape (`n_iter_`,)
Loss value evaluated at the end of each training step.
The ith element in the list represents the loss at the ith iteration.
t_ : int
The number of training samples seen by the solver during fitting.
        Mathematically equals `n_iters * X.shape[0]`; it represents the
        `time_step` used by the optimizer's learning rate scheduler.
coefs_ : list of shape (n_layers - 1,)
The ith element in the list represents the weight matrix corresponding
to layer i.
intercepts_ : list of shape (n_layers - 1,)
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_iter_ : int
The number of iterations the solver has run.
n_layers_ : int
Number of layers.
n_outputs_ : int
Number of outputs.
out_activation_ : str
Name of the output activation function.
See Also
--------
BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM).
MLPClassifier : Multi-layer Perceptron classifier.
sklearn.linear_model.SGDRegressor : Linear model fitted by minimizing
a regularized empirical loss with SGD.
Notes
-----
MLPRegressor trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense and sparse numpy
arrays of floating point values.
References
----------
Hinton, Geoffrey E.
"Connectionist learning procedures." Artificial intelligence 40.1
(1989): 185-234.
Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
training deep feedforward neural networks." International Conference
on Artificial Intelligence and Statistics. 2010.
He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification." arXiv preprint
arXiv:1502.01852 (2015).
Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
optimization." arXiv preprint arXiv:1412.6980 (2014).
Examples
--------
>>> from sklearn.neural_network import MLPRegressor
>>> from sklearn.datasets import make_regression
>>> from sklearn.model_selection import train_test_split
>>> X, y = make_regression(n_samples=200, random_state=1)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=1)
>>> regr = MLPRegressor(random_state=1, max_iter=500).fit(X_train, y_train)
>>> regr.predict(X_test[:2])
array([-0.9..., -7.1...])
>>> regr.score(X_test, y_test)
0.4...
"""
def __init__(
self,
hidden_layer_sizes=(100,),
activation="relu",
*,
solver="adam",
alpha=0.0001,
batch_size="auto",
learning_rate="constant",
learning_rate_init=0.001,
power_t=0.5,
max_iter=200,
shuffle=True,
random_state=None,
tol=1e-4,
verbose=False,
warm_start=False,
momentum=0.9,
nesterovs_momentum=True,
early_stopping=False,
validation_fraction=0.1,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-8,
n_iter_no_change=10,
max_fun=15000,
):
super().__init__(
hidden_layer_sizes=hidden_layer_sizes,
activation=activation,
solver=solver,
alpha=alpha,
batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_init=learning_rate_init,
power_t=power_t,
max_iter=max_iter,
loss="squared_error",
shuffle=shuffle,
random_state=random_state,
tol=tol,
verbose=verbose,
warm_start=warm_start,
momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
n_iter_no_change=n_iter_no_change,
max_fun=max_fun,
)
def predict(self, X):
"""Predict using the multi-layer perceptron model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y : ndarray of shape (n_samples, n_outputs)
The predicted values.
"""
check_is_fitted(self)
y_pred = self._forward_pass_fast(X)
if y_pred.shape[1] == 1:
return y_pred.ravel()
return y_pred
def _validate_input(self, X, y, incremental, reset):
X, y = self._validate_data(
X,
y,
accept_sparse=["csr", "csc"],
multi_output=True,
y_numeric=True,
dtype=(np.float64, np.float32),
reset=reset,
)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
return X, y
|
py | 7df75cf064dd5424e810595fadc3f5e9f8cb6c30 | # Train with Python 3
import argparse
import os
import time
import pickle
import sys
print(sys.path)
sys.path.append('/home/wgk/code/PointNetGPD/PointNetGPD/model')
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
# from tensorboardX import SummaryWriter
# TensorBoard is now integrated into PyTorch, so tensorboardX is no longer needed
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import StepLR
################## /home/wgk/code/PointNetGPD/PointNetGPD/model package ###########################
# requires PCL to be installed
from model.dataset import *
# the concrete PointNet network architecture
from model.pointnet import PointNetCls, DualPointNetCls
# Create an ArgumentParser object; `description` simply documents what this parser is for
parser = argparse.ArgumentParser(description='pointnetGPD')
# A free-form tag, mainly used to distinguish the folders where training results are stored
parser.add_argument('--tag', type=str, default='default')
# Number of training epochs, 200 by default
parser.add_argument('--epoch', type=int, default=200)
# Required mode argument: either 'train' or 'test'
parser.add_argument('--mode', choices=['train', 'test'], required=True)
# Batch size, 1 by default
parser.add_argument('--batch-size', type=int, default=1)
# When --cuda appears on the command line, args.cuda is set to True
parser.add_argument('--cuda', action='store_true')
# Which GPU to use, GPU 0 by default
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--lr', type=float, default=0.005)
parser.add_argument('--load-model', type=str, default='')
parser.add_argument('--load-epoch', type=int, default=-1)
parser.add_argument('--model-path', type=str, default='./assets/learned_models',
help='pre-trained model path')
parser.add_argument('--data-path', type=str, default='./data', help='data path')
# Print the loss and the training progress every this many iterations (batches)
parser.add_argument('--log-interval', type=int, default=10)
# Save interval: save the model every this many epochs (e.g. 1 saves after every epoch)
parser.add_argument('--save-interval', type=int, default=1)
# Parse the command-line arguments into a namespace, e.g.:
#   python main_1v.py --epoch 200 --mode train --batch-size 3
args = parser.parse_args()
# Check whether CUDA is actually available
args.cuda = args.cuda if torch.cuda.is_available() else False
# Fix the random seed for the current GPU so that runs are reproducible:
# the randomly initialized weights are the same every time training is
# restarted from scratch. The seed value itself can be arbitrary.
"""
https://blog.csdn.net/weixin_43002433/article/details/104706950?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-1.add_param_isCf&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-1.add_param_isCf
"""
if args.cuda:
    torch.cuda.manual_seed(1)
# Absolute path of the directory containing this file
path = os.path.dirname(os.path.abspath(__file__))
# Make sure the current working directory is that directory
os.chdir(path)
print(os.getcwd())
# Current time formatted as "year-month-dayThour:minute"
current_time = time.strftime("%Y-%m-%dT%H:%M", time.localtime())
logger = SummaryWriter(os.path.join('./assets/log/', current_time))
# Seed numpy with the system time; note that this changes on every run,
# so the numpy random numbers differ between runs
np.random.seed(int(time.time()))
"""
Loading the dataset in a single process can be slow and stall computation,
so multiple worker processes are used to load data. When num_workers > 1,
several workers import data in parallel. worker_init_fn defines what each
worker does when it is initialized; pid is the worker's id.
"""
def worker_init_fn(pid):
    """
    Give each background worker a unique but deterministic random seed.
    Only numpy needs to be seeded here; PyTorch and Python manage the RNGs
    of their own workers. torch.initial_seed() is reduced modulo 2**31-1
    because numpy seeds must lie in [0, 2**31-1].
    """
    np.random.seed(torch.initial_seed() % (2**31-1))
"""
将样本采样器返回的list中对应的样构建成一个minibatch的tensor,自定义的tensor的话
对于map型的数据集,且有多个读取进程的情况,采样器会先将样本集分成一个个batch,返回每个batch的样本的索引的 list,
再根据my_collate函数,说明如何把这些索引指定的样本整合成一个data Tensor和lable Tensor
"""
def my_collate(batch):
batch = list(filter(lambda x:x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
#
grasp_points_num=750
thresh_good=0.6
thresh_bad=0.6
#设置点是只有xyz ?
point_channel=3
"""
数据加载器,主要实现数据加载到网络中的相关作用,核心类是torch.utils.data.DataLoader
DataLoader(dataset, batch_size=1, shuffle=False, sampler=None,
batch_sampler=None, num_workers=0, collate_fn=None,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None)
dataset:加载的数据集,继承自torch.utils.data.Dataset类的自定义类的对象,或者就是torch.utils.data.Dataset类的对象
batch_size:batch size,就是batchs ize
shuffle::是否将数据打乱
sampler: 样本抽样,后续会详细介绍
batch_sampler,从注释可以看出,其和batch_size、shuffle等参数是互斥的,一般采用默认
num_workers:使用多进程加载的进程数,0代表不使用多进程
collate_fn: 如何将多个样本数据拼接成一个batch,一般使用默认的拼接方式即可
pin_memory:是否将数据保存在pin memory区,pin memory中的数据转到GPU会快一些
drop_last:dataset中的数据个数可能不是batch_size的整数倍,drop_last为True会将多出来不足一个batch的数据丢弃
"""
train_loader = torch.utils.data.DataLoader(
#设置dataset,传入的是继承了torch.utils.data.Dataset类的子类的实例对象 class PointGraspOneViewDataset(torch.utils.data.Dataset):
PointGraspOneViewDataset(
#设置夹爪内部的点数量最少要有多少个
grasp_points_num=grasp_points_num,
path=args.data_path,
prefix='panda',
tag='train',
grasp_amount_per_file=16800, #每个物体已经生成的抓取点云个数,140×120 (单物体的生成140个不同抓取姿态×单物体共有120个不同视角点云)
thresh_good=thresh_good,
thresh_bad=thresh_bad,
),
#设置batch_size
batch_size=args.batch_size,
#设置使用多少个子线程来导入数据,如果设置为0,那就是直接把数据导入到主线程中
num_workers=32,
#如果设置为True,就使得Tensor数据最开始存放于内存的锁页内存中,这样将内存Tensor转移到GPU显存就会快一些
# 当计算机内存充足的时候,选择True,如果不充足,可能使用到虚拟内存的时候,就写为False
pin_memory=True,
#如果设置为True,会默认构建一个乱序采样器(为False时,构建顺序采样器);每次epoch之后都会把数据集打乱
shuffle=True,
#
worker_init_fn=worker_init_fn,
#collate_fn函数,将sampler返回的数据list合并成为一个整体的tensor,作为一个mini-batch
collate_fn=my_collate,
#设置,如何处理训练到最后,数据集长度不足一个batch_size时的数据,True就抛弃,否则保留
drop_last=True,
)
"""
测试时候用的数据集加载器;这里的test数据集是用来测试训练好的网络的准确率
"""
test_loader = torch.utils.data.DataLoader(
PointGraspOneViewDataset(
grasp_points_num=grasp_points_num,
path=args.data_path,
prefix='panda',
#设置标签为test
tag='test',
grasp_amount_per_file=500, #
thresh_good=thresh_good,
thresh_bad=thresh_bad,
with_obj=True,
),
batch_size=args.batch_size,
num_workers=32,
pin_memory=True,
shuffle=True,
worker_init_fn=worker_init_fn,
collate_fn=my_collate,
)
is_resume = 0
if args.load_model and args.load_epoch != -1:
is_resume = 1
# resuming training or running in test mode: load a saved model
if is_resume or args.mode == 'test':
    # load the network structure and parameters onto the GPU given on the command line
    model = torch.load(args.load_model, map_location='cuda:{}'.format(args.gpu))
    model.device_ids = [args.gpu]
    print('load model {}'.format(args.load_model))
# otherwise (training from scratch): build a fresh model
else:
    model = PointNetCls(num_points=grasp_points_num, input_chann=point_channel, k=2)
# if --cuda was passed on the command line (args.cuda is True)
if args.cuda:
    if args.gpu != -1:
        # train on the GPU with the given id
        torch.cuda.set_device(args.gpu)
        # move all model parameters to that GPU
        model = model.cuda()
    else:
        # args.gpu == -1: use data parallelism over the GPUs listed in device_id
        device_id = [0]
        torch.cuda.set_device(device_id[0])
        # rebuild the model as a DataParallel wrapper so it can run on several GPUs at once
        model = nn.DataParallel(model, device_id).cuda()
# use the Adam optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr)
"""
from torch.optim.lr_scheduler import StepLR
StepLR adjusts the learning rate as training progresses: every `step_size`
calls to scheduler.step() the learning rate is multiplied by `gamma`
(here it is halved every 30 epochs).
"""
scheduler = StepLR(optimizer, step_size=30, gamma=0.5)
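# With step_size=30 and gamma=0.5 the learning rate is halved every 30 calls
# to scheduler.step(): args.lr, then args.lr * 0.5, then args.lr * 0.25, ...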
# Train for a single epoch (which consists of many batches)
def train(model, loader, epoch):
    # advance the learning-rate schedule once per epoch
    scheduler.step()
    # put the PointNet model into training mode
    model.train()
torch.set_grad_enabled(True)
correct = 0
dataset_size = 0
"""
注意,如果使用了多线程导入数据集的情况下,在当调用enumerate时,将会在此时在后台创建多线程导入数据
loader是一个可迭代对象,索引是batch_idx,对象是(data, target)
这里实现的效果是:一个一个batch的训练网络,直到一个epoch训练完毕
"""
for batch_idx, (data, target) in enumerate(loader):
#print(len(data),"data len is")
"""
在实际的实验过程中发现,当某一个batch(比如剩下来的某一个batch)中间只含有一个sample
那么很有可能会报错,这里判断一下本次的batch中是不是只有一个样本,如果只有一个样本
那就跳过这个batch
"""
if len(data) <=1:
continue
dataset_size += data.shape[0]
data, target = data.float(), target.long().squeeze()
        # move the batch to the GPU if CUDA is used
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        # forward pass; output holds the per-class log-probabilities
        output, _ = model(data)
        # compute the loss
        loss = F.nll_loss(output, target)
        # backpropagate to compute the gradients
        loss.backward()
        # update the model parameters
        optimizer.step()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        # print the loss every args.log_interval batches (10 by default)
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                # current epoch, samples processed in this epoch, total dataset size
                epoch, batch_idx * args.batch_size, len(loader.dataset),
                # percentage of the epoch processed so far
                100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            """Draw a curve in TensorBoard:
            def add_scalar(self, tag, scalar_value, global_step=None, walltime=None):
            Arguments:
            tag (string): name displayed in TensorBoard
            scalar_value (float or string/blobname): the value to log (and save)
            global_step (int): value used for the x axis
            walltime (float): Optional override default walltime (time.time())
                with seconds after epoch of event
            """
            logger.add_scalar('train_loss',
                              loss.cpu().item(),  # move the loss to the CPU before logging it
                              batch_idx + epoch * len(loader))  # x axis: total number of iterations so far
    """Difference between len(loader) and len(loader.dataset):
    len(loader.dataset) is the number of samples in the dataset;
    len(loader) is ceil(len(loader.dataset) / batch_size), i.e. the number of
    batches (iterations) in one epoch.
    """
return float(correct)/float(dataset_size)
# Evaluate the trained network on the test set
def test(model, loader):
model.eval()
torch.set_grad_enabled(False)
test_loss = 0
correct = 0
dataset_size = 0
da = {}
db = {}
res = []
for data, target, obj_name in loader:
dataset_size += data.shape[0]
data, target = data.float(), target.long().squeeze()
if args.cuda:
data, target = data.cuda(), target.cuda()
output, _ = model(data) # N*C
        test_loss += F.nll_loss(output, target, reduction='sum').cpu().item()  # sum over the batch; size_average is deprecated
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).long().cpu().sum()
for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
res.append((i, j[0], k))
test_loss /= len(loader.dataset)
acc = float(correct)/float(dataset_size)
return acc, test_loss
def main():
    if args.mode == 'train':
        for epoch_i in range(is_resume * args.load_epoch, args.epoch):
            # train for one epoch, passing in the model, the data loader and the epoch index
            acc_train = train(model, train_loader, epoch_i)
            # training accuracy for this epoch
            print('Train done, acc={}'.format(acc_train))
            # evaluate on the test set
            acc, loss = test(model, test_loader)
            # print the test accuracy and loss
            print('Test done, acc={}, loss={}'.format(acc, loss))
            # store the training and test results in the logger so they can be plotted as curves
            logger.add_scalar('train_acc', acc_train, epoch_i)
            logger.add_scalar('test_acc', acc, epoch_i)
            logger.add_scalar('test_loss', loss, epoch_i)
            if epoch_i % args.save_interval == 0:
                path = os.path.join(args.model_path, current_time + '_{}.model'.format(epoch_i))
                # save the model; note that _use_new_zipfile_serialization=False must be passed here
                torch.save(model, path, _use_new_zipfile_serialization=False)
                print('Save model @ {}'.format(path))
    else:
        print('testing...')
        acc, loss = test(model, test_loader)
        print('Test done, acc={}, loss={}'.format(acc, loss))
if __name__ == "__main__":
    main()
|
py | 7df75d72ef04b4e2bb9e58838711f1e889af2414 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import math
import operator
from functools import reduce
import numpy as np
import cv2
from src.model_utils.config import config
from src.ETSNET.pse import pse
def sort_to_clockwise(points):
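    # Order the points by their angle around their centroid so that the four
    # corners of a box are returned in a consistent clockwise order.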
center = tuple(map(operator.truediv, reduce(lambda x, y: map(operator.add, x, y), points), [len(points)] * 2))
clockwise_points = sorted(points, key=lambda coord: (-135 - math.degrees(
math.atan2(*tuple(map(operator.sub, coord, center))[::-1]))) % 360, reverse=True)
return clockwise_points
def write_result_as_txt(image_name, img_bboxes, path):
if not os.path.isdir(path):
os.makedirs(path)
filename = os.path.join(path, 'res_{}.txt'.format(os.path.splitext(image_name)[0]))
lines = []
for _, img_bbox in enumerate(img_bboxes):
img_bbox = img_bbox.reshape(-1, 2)
img_bbox = np.array(list(sort_to_clockwise(img_bbox)))[[3, 0, 1, 2]].copy().reshape(-1)
values = [int(v) for v in img_bbox]
line = "%d,%d,%d,%d,%d,%d,%d,%d\n" % tuple(values)
lines.append(line)
with open(filename, 'w') as f:
for line in lines:
f.write(line)
def get_img(image_path):
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
if __name__ == "__main__":
if not os.path.isdir('./res/submit_ic15/'):
os.makedirs('./res/submit_ic15/')
if not os.path.isdir('./res/vis_ic15/'):
os.makedirs('./res/vis_ic15/')
file_list = os.listdir(config.img_path)
for k in file_list:
if os.path.splitext(k)[-1].lower() in ['.jpg', '.jpeg', '.png']:
img_path = os.path.join(config.img_path, k)
img = get_img(img_path).reshape(1, 720, 1280, 3)
img = img[0].astype(np.uint8).copy()
img_name = os.path.split(img_path)[-1]
score = np.fromfile(os.path.join(config.result_path, k.split('.')[0] + '_0.bin'), np.float32)
score = score.reshape(1, 1, config.INFER_LONG_SIZE, config.INFER_LONG_SIZE)
kernels = np.fromfile(os.path.join(config.result_path, k.split('.')[0] + '_1.bin'), bool)
kernels = kernels.reshape(1, config.KERNEL_NUM, config.INFER_LONG_SIZE, config.INFER_LONG_SIZE)
score = np.squeeze(score)
kernels = np.squeeze(kernels)
# post-process
pred = pse(kernels, 5.0)
scale = max(img.shape[:2]) * 1.0 / config.INFER_LONG_SIZE
label = pred
label_num = np.max(label) + 1
bboxes = []
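            # keep only connected components that are large enough (at least
            # 600 pixels) and confident enough (mean score >= 0.93), then fit
            # a rotated rectangle around each remaining component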
for i in range(1, label_num):
pot = np.array(np.where(label == i)).transpose((1, 0))[:, ::-1]
if pot.shape[0] < 600:
continue
score_i = np.mean(score[label == i])
if score_i < 0.93:
continue
rect = cv2.minAreaRect(pot)
bbox = cv2.boxPoints(rect) * scale
bbox = bbox.astype('int32')
cv2.drawContours(img, [bbox], 0, (0, 255, 0), 3)
bboxes.append(bbox)
# save res
cv2.imwrite('./res/vis_ic15/{}'.format(img_name), img[:, :, [2, 1, 0]].copy())
write_result_as_txt(img_name, bboxes, './res/submit_ic15/')
|
py | 7df760607d27c529b82ef578f182d73278250535 | def flatten_me(lst):
    # Flatten one level of nesting (assumed intent given the function name);
    # non-list items are kept as-is, list items are expanded in place.
    return [x for item in lst for x in (item if isinstance(item, list) else [item])]
|
py | 7df760632b2c65adda9e45fdd4bd660656f5b584 | import tvm
import os
import sys
import time
import tvm._ffi
import numpy as np
from tvm import tg
from pebble import concurrent
from tvm.tensor_graph.testing.models import mobilenet_v1
from tvm.tensor_graph.core import evaluate_function_for, start_evaluate, stop_evaluate
from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, \
GraphTensor, GraphOp, PyTIRGraph, make_fwd_graph, \
make_tir_graph
from tvm.tensor_graph.core.transform import apply_layout_change
from tvm.tensor_graph.core.utils import to_tuple
import argparse
batch = 1
def random_initialize_weights(weight_tensors, ctx):
init = []
for w in weight_tensors:
ary = np.random.uniform(-1, 1, to_tuple(w.shape)).astype(w.dtype)
init.append(tvm.nd.array(ary, ctx))
return init
def clear_log_files(filenames):
for filename in filenames:
if os.path.exists(filename) and os.path.isfile(filename):
os.remove(filename)
@concurrent.process
def consumer_process(target, dev_id, number=10, repeat=10, reference="", profile_level=0, change_layout=False):
os.environ["TG_PRINT_LEVEL"] = "4"
execution_log_file="tmp.txt"
lst = [execution_log_file]
clear_log_files(lst)
# create a session
log_option = tg.create_session_option(
report_profile=True,
report_iteration=True,
report_iteration_period=1,
autoschedule_topk=20,
autoschedule_new_trial=10,
autoschedule_policy="random",
autoschedule_parallel=1,
autoschedule_timeout=200.0,
profile_parallel=1,
profile_timeout=4.0,
build_parallel=1,
build_timeout=1.0,
execution_explore_probability=0.5,
execution_parallel=1,
execution_timeout=100.0,
execution_log_file=execution_log_file
)
sess = tg.create_session(target, dev_id, log_option)
# get the model
model = mobilenet_v1.MobileNetv1()
model.eval()
img_shape = [batch, 3, 224, 224]
dtype = "float32"
img_tensor = GraphTensor(img_shape, dtype, name="data", requires_grad=False)
# get forward graph and tir graph
fwd_graph = make_fwd_graph(model, [img_tensor])
if change_layout:
fwd_graph = apply_layout_change(fwd_graph)
tir_graph = make_tir_graph(fwd_graph, inference=True)
ctx = tg.get_context_from_session(sess)
inputs_data = np.random.uniform(-1, 1, img_shape).astype(dtype)
inputs_bindings = {
tir_graph.inputs[0]: tvm.nd.array(inputs_data, ctx),
}
weight_bindings = random_initialize_weights(tir_graph.weights, ctx)
# initialize weights
tg.initialize_weights(sess, tir_graph, weight_bindings)
# add task
task_id = tg.add_task(sess, tir_graph)
# replay
print("test schedules from", reference)
tg.test_schedule_reference(sess, task_id, reference=reference)
# execute graph by 'number' iterations
number = number
repeats = repeat
for i in range(repeats):
beg = time.time()
tg.run_task(sess, task_id, [inputs_bindings] * number, profile_level=profile_level, save_to="")
end = time.time()
print("Average time cost for one iteration:", (end - beg) * 1e3 / number, "ms")
# remember to delete the session before exit
tg.delete_session(sess)
return 0
@concurrent.process
def producer_process(
target, dev_id, delete_log=False, exe_iter=100, max_tune_iter=10000, tune_minutes=4 * 60,
reference="", save_to="", first_stage_number=100000, second_stage_topk_ratio=0.1, no_run=False, change_layout=False):
os.environ["TG_PRINT_LEVEL"] = "4"
autoschedule_log_file="autoschedule_log.txt"
autoschedule_profile_file="autoschedule_log_profile.txt"
build_log_file="build_log.txt"
evaluate_log_file="evaluate_log.txt"
execution_log_file="execution_log.txt"
lst = [autoschedule_log_file, autoschedule_profile_file, build_log_file, evaluate_log_file, execution_log_file]
if delete_log:
clear_log_files(lst)
# create a session
log_option = tg.create_session_option(
report_profile=True,
report_iteration=True,
report_iteration_period=1,
autoschedule_topk=20,
autoschedule_new_trial=4,
autoschedule_policy="random",
autoschedule_parallel=1,
autoschedule_timeout=200.0,
autoschedule_log_file=autoschedule_log_file,
profile_parallel=1,
profile_timeout=4.0,
build_parallel=1,
build_timeout=1.0,
build_log_file=build_log_file,
execution_explore_probability=0.5,
execution_parallel=4,
execution_timeout=100.0,
execution_log_file=execution_log_file
)
sess = tg.create_session(target, dev_id, log_option)
# get the model
model = mobilenet_v1.MobileNetv1()
model.eval()
img_shape = [batch, 3, 224, 224]
dtype = "float32"
img_tensor = GraphTensor(img_shape, dtype, name="data", requires_grad=False)
# get forward graph and tir graph
fwd_graph = make_fwd_graph(model, [img_tensor])
if change_layout:
fwd_graph = apply_layout_change(fwd_graph)
tir_graph = make_tir_graph(fwd_graph, inference=True)
ctx = tg.get_context_from_session(sess)
inputs_data = np.random.uniform(-1, 1, img_shape).astype(dtype)
inputs_bindings = {
tir_graph.inputs[0]: tvm.nd.array(inputs_data, ctx),
}
weight_bindings = random_initialize_weights(tir_graph.weights, ctx)
# initialize weights
tg.initialize_weights(sess, tir_graph, weight_bindings)
# add task
task_id = tg.add_task(sess, tir_graph)
# tune
tg.begin_tuning(sess, task_id, max_tune_iter,
reference=reference, first_stage_number=first_stage_number, second_stage_topk_ratio=second_stage_topk_ratio)
# execute graph by 'number' iterations
number = exe_iter
start_time = time.time()
while True:
beg = time.time()
tg.run_task(sess, task_id, [inputs_bindings] * number, save_to=save_to, no_actual_run=no_run)
end = time.time()
if no_run:
time.sleep(10 * 60)
else:
print("Average time cost for one iteration:", (end - beg) * 1e3 / number, "ms", flush=True)
print("Passing %f min" % ((time.time() - start_time) / 60), flush=True)
if (time.time() - start_time) / 60 > tune_minutes:
print("Tuning last for over %f minutes, stop tuning" % tune_minutes)
tg.end_tuning(sess, task_id)
break
# remember to delete the session before exit
tg.delete_session(sess)
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tune", help="tuning", action="store_true")
parser.add_argument("--test", help="testing", action="store_true")
parser.add_argument("--delete", help="delete log", action="store_true")
parser.add_argument("--minutes", help="tuning minutes", type=float, default=4.0 * 60)
parser.add_argument("--timeout", help="timeout in seconds", type=float, default=10)
parser.add_argument("--tune_iter", help="max tune iter", type=int, default=10000)
parser.add_argument("--exe_iter", help="max execution iter", type=int, default=100)
parser.add_argument("--reference", help="tuning reference", type=str, default="")
parser.add_argument("--save", help="save to", type=str, default="saved_schedules.txt")
parser.add_argument("--target", help="target device", type=str, default="llvm")
parser.add_argument("--device", help="device id", type=int, default=0)
parser.add_argument("--eval_device", help="evaluate device id", type=int, default=0)
parser.add_argument("--eval_repeat", help="evaluate repeat", type=int, default=10)
parser.add_argument("--eval_number", help="evaluate number for each repeat", type=int, default=10)
parser.add_argument("--profile", help="profile level", type=int, default=0)
parser.add_argument("--first_stage", help="first stage number", type=int, default=100000)
parser.add_argument("--second_stage_ratio", help="second stage topk ratio", type=float, default=0.1)
parser.add_argument("--no_run", help="do not run", action="store_true")
parser.add_argument("--layout", help="optimize layout", action="store_true")
parser.add_argument("--batch", type=int, default=1)
args = parser.parse_args()
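    # Example invocation (hypothetical script name and values; all flags exist above):
    #   python this_script.py --tune --target cuda --device 0 --minutes 240 --save saved_schedules.txt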
start_evaluate()
target = args.target
dev_id = args.device
batch = args.batch
evalute_exit_code = evaluate_function_for(target, args.eval_device, args.timeout)
if args.tune:
producer_exit_code = producer_process(
target, dev_id, args.delete, args.exe_iter, args.tune_iter, args.minutes, args.reference, args.save,
args.first_stage, args.second_stage_ratio, args.no_run, args.layout)
try:
ret = producer_exit_code.result()
except Exception as e:
print(str(e))
if args.test:
exit_code = consumer_process(target, dev_id, args.eval_number, args.eval_repeat, args.reference, args.profile, args.layout)
try:
ret = exit_code.result()
except Exception as e:
print(str(e))
stop_evaluate()
ret = evalute_exit_code.result()
print("Success!") |
py | 7df760875ddce1654bff792b6c99e551b3b58437 | '''
@copyright: 2022 - Symas Corporation
''' |
py | 7df760d66f8ce1f75ddec22f47dd9491a3be10dd | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import collections
import copy
from dace.sdfg.sdfg import SDFG
import itertools
import os
import pdb
import re
import numpy as np
import dace
from dace import data as dt, registry, dtypes, subsets
from dace.config import Config
from dace.frontend import operations
from dace.sdfg import nodes, utils
from dace.sdfg import find_input_arraynode, find_output_arraynode
from dace.codegen import exceptions as cgx
from dace.codegen.codeobject import CodeObject
from dace.codegen.dispatcher import DefinedType
from dace.codegen.prettycode import CodeIOStream
from dace.codegen.targets.target import make_absolute
from dace.codegen.targets import cpp, fpga
from typing import List, Union
REDUCTION_TYPE_TO_HLSLIB = {
dace.dtypes.ReductionType.Min: "hlslib::op::Min",
dace.dtypes.ReductionType.Max: "hlslib::op::Max",
dace.dtypes.ReductionType.Sum: "hlslib::op::Sum",
dace.dtypes.ReductionType.Product: "hlslib::op::Product",
dace.dtypes.ReductionType.Logical_And: "hlslib::op::And",
}
@registry.autoregister_params(name='xilinx')
class XilinxCodeGen(fpga.FPGACodeGen):
""" Xilinx FPGA code generator. """
target_name = 'xilinx'
title = 'Xilinx'
language = 'hls'
def __init__(self, *args, **kwargs):
fpga_vendor = Config.get("compiler", "fpga_vendor")
if fpga_vendor.lower() != "xilinx":
# Don't register this code generator
return
super().__init__(*args, **kwargs)
# Used to pass memory bank assignments from kernel generation code to
# where they are written to file
self._bank_assignments = {}
@staticmethod
def cmake_options():
host_flags = Config.get("compiler", "xilinx", "host_flags")
synthesis_flags = Config.get("compiler", "xilinx", "synthesis_flags")
build_flags = Config.get("compiler", "xilinx", "build_flags")
mode = Config.get("compiler", "xilinx", "mode")
target_platform = Config.get("compiler", "xilinx", "platform")
enable_debugging = ("ON" if Config.get_bool(
"compiler", "xilinx", "enable_debugging") else "OFF")
autobuild = ("ON" if Config.get_bool("compiler", "autobuild_bitstreams")
else "OFF")
frequency = Config.get("compiler", "xilinx", "frequency").strip()
options = [
"-DDACE_XILINX_HOST_FLAGS=\"{}\"".format(host_flags),
"-DDACE_XILINX_SYNTHESIS_FLAGS=\"{}\"".format(synthesis_flags),
"-DDACE_XILINX_BUILD_FLAGS=\"{}\"".format(build_flags),
"-DDACE_XILINX_MODE={}".format(mode),
"-DDACE_XILINX_TARGET_PLATFORM=\"{}\"".format(target_platform),
"-DDACE_XILINX_ENABLE_DEBUGGING={}".format(enable_debugging),
"-DDACE_FPGA_AUTOBUILD_BITSTREAM={}".format(autobuild),
f"-DDACE_XILINX_TARGET_CLOCK={frequency}"
]
# Override Vitis/SDx/SDAccel installation directory
if Config.get("compiler", "xilinx", "path"):
options.append("-DVITIS_ROOT_DIR=\"{}\"".format(
Config.get("compiler", "xilinx", "path").replace("\\", "/")))
return options
def get_generated_codeobjects(self):
execution_mode = Config.get("compiler", "xilinx", "mode")
kernel_file_name = "DACE_BINARY_DIR \"/{}".format(self._program_name)
if execution_mode == "software_emulation":
kernel_file_name += "_sw_emu.xclbin\""
xcl_emulation_mode = "\"sw_emu\""
xilinx_sdx = "DACE_VITIS_DIR"
elif execution_mode == "hardware_emulation":
kernel_file_name += "_hw_emu.xclbin\""
xcl_emulation_mode = "\"hw_emu\""
xilinx_sdx = "DACE_VITIS_DIR"
elif execution_mode == "hardware" or execution_mode == "simulation":
kernel_file_name += "_hw.xclbin\""
xcl_emulation_mode = None
xilinx_sdx = None
else:
raise cgx.CodegenError(
"Unknown Xilinx execution mode: {}".format(execution_mode))
set_env_vars = ""
set_str = "dace::set_environment_variable(\"{}\", {});\n"
unset_str = "dace::unset_environment_variable(\"{}\");\n"
set_env_vars += (set_str.format("XCL_EMULATION_MODE",
xcl_emulation_mode)
if xcl_emulation_mode is not None else
unset_str.format("XCL_EMULATION_MODE"))
set_env_vars += (set_str.format("XILINX_SDX", xilinx_sdx) if xilinx_sdx
is not None else unset_str.format("XILINX_SDX"))
set_env_vars += set_str.format(
"EMCONFIG_PATH", "DACE_BINARY_DIR"
) if execution_mode == 'hardware_emulation' else unset_str.format(
"EMCONFIG_PATH")
host_code = CodeIOStream()
host_code.write("""\
#include "dace/xilinx/host.h"
#include "dace/dace.h"
""")
if len(self._dispatcher.instrumentation) > 1:
host_code.write("""\
#include "dace/perf/reporting.h"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <limits>
""")
host_code.write("\n\n")
self._frame.generate_fileheader(self._global_sdfg, host_code,
'xilinx_host')
params_comma = self._global_sdfg.signature(with_arrays=False)
if params_comma:
params_comma = ', ' + params_comma
host_code.write("""
DACE_EXPORTED int __dace_init_xilinx({sdfg.name}_t *__state{signature}) {{
{environment_variables}
__state->fpga_context = new dace::fpga::Context();
__state->fpga_context->Get().MakeProgram({kernel_file_name});
return 0;
}}
DACE_EXPORTED void __dace_exit_xilinx({sdfg.name}_t *__state) {{
delete __state->fpga_context;
}}
{host_code}""".format(signature=params_comma,
sdfg=self._global_sdfg,
environment_variables=set_env_vars,
kernel_file_name=kernel_file_name,
host_code="".join([
"{separator}\n// Kernel: {kernel_name}"
"\n{separator}\n\n{code}\n\n".format(separator="/" *
79,
kernel_name=name,
code=code)
for (name, code) in self._host_codes
])))
host_code_obj = CodeObject(self._program_name,
host_code.getvalue(),
"cpp",
XilinxCodeGen,
"Xilinx",
target_type="host")
kernel_code_objs = [
CodeObject(kernel_name,
code,
"cpp",
XilinxCodeGen,
"Xilinx",
target_type="device")
for (kernel_name, code) in self._kernel_codes
]
# Memory bank and streaming interfaces connectivity configuration file
link_cfg = CodeIOStream()
self._other_codes["link.cfg"] = link_cfg
link_cfg.write("[connectivity]")
are_assigned = [v is not None for v in self._bank_assignments.values()]
if any(are_assigned):
if not all(are_assigned):
raise RuntimeError("Some, but not all global memory arrays "
"were assigned to memory banks: {}".format(
self._bank_assignments))
# Emit mapping from kernel memory interfaces to DRAM banks
for (kernel_name, interface_name), (
memory_type, memory_bank) in self._bank_assignments.items():
link_cfg.write(
f"sp={kernel_name}_1.m_axi_{interface_name}:{memory_type}[{memory_bank}]"
)
# Emit mapping between inter-kernel streaming interfaces
for _, (src, dst) in self._stream_connections.items():
link_cfg.write(f"stream_connect={src}:{dst}")
other_objs = []
for name, code in self._other_codes.items():
name = name.split(".")
other_objs.append(
CodeObject(name[0],
code.getvalue(),
".".join(name[1:]),
XilinxCodeGen,
"Xilinx",
target_type="device"))
return [host_code_obj] + kernel_code_objs + other_objs
@staticmethod
def define_stream(dtype, buffer_size, var_name, array_size, function_stream,
kernel_stream):
"""
Defines a stream
        :return: a tuple containing the type of the created variable, and a boolean indicating
whether this is a global variable or not
"""
ctype = "dace::FIFO<{}, {}, {}>".format(dtype.base_type.ctype,
dtype.veclen, buffer_size)
if cpp.sym2cpp(array_size) == "1":
kernel_stream.write("{} {}(\"{}\");".format(ctype, var_name,
var_name))
else:
kernel_stream.write("{} {}[{}];\n".format(ctype, var_name,
cpp.sym2cpp(array_size)))
kernel_stream.write("dace::SetNames({}, \"{}\", {});".format(
var_name, var_name, cpp.sym2cpp(array_size)))
# In Xilinx, streams are defined as local variables
# Return value is used for adding to defined_vars in fpga.py
return ctype, False
def define_local_array(self, var_name, desc, array_size, function_stream,
kernel_stream, sdfg, state_id, node):
dtype = desc.dtype
kernel_stream.write("{} {}[{}];\n".format(dtype.ctype, var_name,
cpp.sym2cpp(array_size)))
if desc.storage == dace.dtypes.StorageType.FPGA_Registers:
kernel_stream.write("#pragma HLS ARRAY_PARTITION variable={} "
"complete\n".format(var_name))
elif desc.storage == dace.dtypes.StorageType.FPGA_Local:
if len(desc.shape) > 1:
kernel_stream.write("#pragma HLS ARRAY_PARTITION variable={} "
"block factor={}\n".format(
var_name, desc.shape[-2]))
else:
raise ValueError("Unsupported storage type: {}".format(
desc.storage.name))
self._dispatcher.defined_vars.add(var_name, DefinedType.Pointer,
'%s *' % dtype.ctype)
def define_shift_register(*args, **kwargs):
raise NotImplementedError("Xilinx shift registers NYI")
@staticmethod
def make_vector_type(dtype, is_const):
return "{}{}".format("const " if is_const else "", dtype.ctype)
@staticmethod
def make_kernel_argument(data: dt.Data,
var_name: str,
subset_info: Union[int, subsets.Subset],
sdfg: SDFG,
is_output: bool,
with_vectorization: bool,
interface_id: Union[int, List[int]] = None):
if isinstance(data, dt.Array):
var_name = fpga.fpga_ptr(var_name, data, sdfg, subset_info,
is_output, None, None, True, interface_id)
if with_vectorization:
dtype = data.dtype
else:
dtype = data.dtype.base_type
return "{} *{}".format(dtype.ctype, var_name)
if isinstance(data, dt.Stream):
ctype = "dace::FIFO<{}, {}, {}>".format(data.dtype.base_type.ctype,
data.dtype.veclen,
data.buffer_size)
return "{} &{}".format(ctype, var_name)
else:
return data.as_arg(with_types=True, name=var_name)
def generate_unroll_loop_pre(self, kernel_stream, factor, sdfg, state_id,
node):
pass
@staticmethod
def generate_unroll_loop_post(kernel_stream, factor, sdfg, state_id, node):
if factor is None:
kernel_stream.write("#pragma HLS UNROLL", sdfg, state_id, node)
else:
kernel_stream.write("#pragma HLS UNROLL factor={}".format(factor),
sdfg, state_id, node)
@staticmethod
def generate_pipeline_loop_pre(kernel_stream, sdfg, state_id, node):
pass
@staticmethod
def generate_pipeline_loop_post(kernel_stream, sdfg, state_id, node):
kernel_stream.write("#pragma HLS PIPELINE II=1", sdfg, state_id, node)
@staticmethod
def generate_flatten_loop_pre(kernel_stream, sdfg, state_id, node):
pass
@staticmethod
def generate_flatten_loop_post(kernel_stream, sdfg, state_id, node):
kernel_stream.write("#pragma HLS LOOP_FLATTEN")
def generate_nsdfg_header(self, sdfg, state, state_id, node,
memlet_references, sdfg_label):
# TODO: Use a single method for GPU kernels, FPGA modules, and NSDFGs
arguments = [
f'{atype} {aname}' for atype, aname, _ in memlet_references
]
arguments += [
f'{node.sdfg.symbols[aname].as_arg(aname)}'
for aname in sorted(node.symbol_mapping.keys())
if aname not in sdfg.constants
]
arguments = ', '.join(arguments)
return f'void {sdfg_label}({arguments}) {{\n#pragma HLS INLINE'
def write_and_resolve_expr(self,
sdfg,
memlet,
nc,
outname,
inname,
indices=None,
dtype=None):
"""
Emits a conflict resolution call from a memlet.
"""
redtype = operations.detect_reduction_type(memlet.wcr, openmp=True)
defined_type, _ = self._dispatcher.defined_vars.get(memlet.data)
if isinstance(indices, str):
ptr = '%s + %s' % (cpp.cpp_ptr_expr(
sdfg, memlet, defined_type, is_write=True), indices)
else:
ptr = cpp.cpp_ptr_expr(sdfg,
memlet,
defined_type,
indices=indices,
is_write=True)
if isinstance(dtype, dtypes.pointer):
dtype = dtype.base_type
# Special call for detected reduction types
if redtype != dtypes.ReductionType.Custom:
if redtype == dace.dtypes.ReductionType.Sub:
# write this as an addition
credtype = "dace::ReductionType::Sum"
is_sub = True
else:
credtype = "dace::ReductionType::" + str(
redtype)[str(redtype).find(".") + 1:]
is_sub = False
if isinstance(dtype, dtypes.vector):
return (f'dace::xilinx_wcr_fixed_vec<{credtype}, '
f'{dtype.vtype.ctype}, {dtype.veclen}>::reduce('
f'{ptr}, {"-" if is_sub else ""}{inname})')
return (
f'dace::xilinx_wcr_fixed<{credtype}, {dtype.ctype}>::reduce('
f'{ptr}, {"-" if is_sub else ""}{inname})')
# General reduction
raise NotImplementedError('General reductions not yet implemented')
@staticmethod
def make_read(defined_type, dtype, var_name, expr, index, is_pack,
packing_factor):
if defined_type in [DefinedType.Stream, DefinedType.StreamArray]:
if " " in expr:
expr = "(" + expr + ")"
read_expr = "{}.pop()".format(expr)
elif defined_type == DefinedType.Scalar:
read_expr = var_name
else:
if index is not None and index != "0":
read_expr = "{} + {}".format(expr, index)
else:
read_expr = expr
if is_pack:
return "dace::Pack<{}, {}>({})".format(dtype.base_type.ctype,
packing_factor, read_expr)
else:
return "dace::Read<{}, {}>({})".format(dtype.base_type.ctype,
dtype.veclen, read_expr)
def generate_converter(*args, **kwargs):
pass # Handled in C++
@staticmethod
def make_write(defined_type, dtype, var_name, write_expr, index, read_expr,
wcr, is_unpack, packing_factor):
if defined_type in [DefinedType.Stream, DefinedType.StreamArray]:
if defined_type == DefinedType.StreamArray:
write_expr = "{}[{}]".format(write_expr,
"0" if not index else index)
if is_unpack:
return "\n".join(
"{}.push({}[{}]);".format(write_expr, read_expr, i)
for i in range(packing_factor))
else:
return "{}.push({});".format(write_expr, read_expr)
else:
if defined_type == DefinedType.Scalar:
write_expr = var_name
elif index and index != "0":
write_expr = "{} + {}".format(write_expr, index)
if is_unpack:
return "dace::Unpack<{}, {}>({}, {});".format(
dtype.base_type.ctype, packing_factor, read_expr,
write_expr)
else:
# TODO: Temporary hack because we don't have the output
# vector length.
veclen = max(dtype.veclen, packing_factor)
return "dace::Write<{}, {}>({}, {});".format(
dtype.base_type.ctype, veclen, write_expr, read_expr)
def make_shift_register_write(self, defined_type, dtype, var_name,
write_expr, index, read_expr, wcr, is_unpack,
packing_factor, sdfg):
raise NotImplementedError("Xilinx shift registers NYI")
@staticmethod
def generate_no_dependence_pre(kernel_stream,
sdfg,
state_id,
node,
var_name=None):
pass
def generate_no_dependence_post(
self,
kernel_stream,
sdfg: SDFG,
state_id: int,
node: nodes.Node,
var_name: str,
accessed_subset: Union[int, subsets.Subset] = None):
'''
        Adds a post-loop pragma for ignoring loop-carried dependencies on a given variable
'''
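        # Example of the emitted pragma for a variable that ends up named "A"
        # after pointer/interface name resolution (hypothetical name):
        #   #pragma HLS DEPENDENCE variable=A false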
defined_type, _ = self._dispatcher.defined_vars.get(var_name)
if var_name in sdfg.arrays:
array = sdfg.arrays[var_name]
else:
array = None
var_name = fpga.fpga_ptr(
var_name,
array,
sdfg,
accessed_subset,
True,
self._dispatcher,
is_array_interface=(defined_type == DefinedType.ArrayInterface))
kernel_stream.write(
"#pragma HLS DEPENDENCE variable={} false".format(var_name), sdfg,
state_id, node)
def generate_kernel_boilerplate_pre(self, sdfg, state_id, kernel_name,
parameters, bank_assignments,
module_stream, kernel_stream,
external_streams):
# Write header
module_stream.write(
"""#include <dace/xilinx/device.h>
#include <dace/math.h>
#include <dace/complex.h>""", sdfg)
self._frame.generate_fileheader(sdfg, module_stream, 'xilinx_device')
module_stream.write("\n", sdfg)
argname_to_bank_assignment = {}
# Build kernel signature
kernel_args = []
array_args = []
for is_output, data_name, data, interface in parameters:
is_assigned = data_name in bank_assignments and bank_assignments[
data_name] is not None
if is_assigned and isinstance(data, dt.Array):
memory_bank = bank_assignments[data_name]
if memory_bank[0] == "HBM":
lowest_bank_index, _ = fpga.get_multibank_ranges_from_subset(
memory_bank[1], sdfg)
else:
lowest_bank_index = int(memory_bank[1])
for bank in fpga.iterate_hbm_multibank_arrays(
data_name, data, sdfg):
kernel_arg = self.make_kernel_argument(
data, data_name, bank, sdfg, is_output, True, interface)
if kernel_arg:
kernel_args.append(kernel_arg)
array_args.append((kernel_arg, data_name))
argname_to_bank_assignment[kernel_arg] = (
memory_bank[0], lowest_bank_index + bank)
else:
kernel_arg = self.make_kernel_argument(data, data_name, None,
None, is_output, True,
interface)
if kernel_arg:
kernel_args.append(kernel_arg)
if isinstance(data, dt.Array):
array_args.append((kernel_arg, data_name))
argname_to_bank_assignment[kernel_arg] = None
stream_args = []
for is_output, data_name, data, interface in external_streams:
kernel_arg = self.make_kernel_argument(data, data_name, None, None,
is_output, True, interface)
if kernel_arg:
stream_args.append(kernel_arg)
# Write kernel signature
kernel_stream.write(
"DACE_EXPORTED void {}({}) {{\n".format(
kernel_name, ', '.join(kernel_args + stream_args)), sdfg,
state_id)
# Insert interface pragmas
num_mapped_args = 0
for arg, data_name in array_args:
var_name = re.findall(r"\w+", arg)[-1]
if "*" in arg:
interface_name = "gmem{}".format(num_mapped_args)
kernel_stream.write(
"#pragma HLS INTERFACE m_axi port={} "
"offset=slave bundle={}".format(var_name, interface_name),
sdfg, state_id)
# Map this interface to the corresponding location
# specification to be passed to the Xilinx compiler
memory_bank = argname_to_bank_assignment[arg]
self._bank_assignments[(kernel_name,
interface_name)] = memory_bank
num_mapped_args += 1
for arg in kernel_args + ["return"]:
var_name = re.findall(r"\w+", arg)[-1]
kernel_stream.write(
"#pragma HLS INTERFACE s_axilite port={} bundle=control".format(
var_name))
for _, var_name, _, _ in external_streams:
kernel_stream.write(
"#pragma HLS INTERFACE axis port={}".format(var_name))
# TODO: add special case if there's only one module for niceness
kernel_stream.write("\n#pragma HLS DATAFLOW")
kernel_stream.write("\nHLSLIB_DATAFLOW_INIT();")
@staticmethod
def generate_kernel_boilerplate_post(kernel_stream, sdfg, state_id):
kernel_stream.write("HLSLIB_DATAFLOW_FINALIZE();\n}\n", sdfg, state_id)
def generate_host_function_body(self, sdfg: dace.SDFG,
state: dace.SDFGState, kernel_name: str,
predecessors: list, parameters: list,
rtl_tasklet_names: list,
kernel_stream: CodeIOStream):
'''
Generate the host-specific code for spawning and synchronizing the given kernel.
        :param sdfg: the SDFG the kernel belongs to.
        :param state: the state containing the kernel.
        :param predecessors: list containing the names of all kernels that must finish before this one starts.
        :param parameters: list containing the kernel parameters (of all kernels in this state).
        :param rtl_tasklet_names: names of the RTL tasklet kernels to wait on.
        :param kernel_stream: device-specific code stream.
'''
kernel_args = []
for _, name, p, _ in parameters:
if isinstance(p, dt.Array):
for bank in fpga.iterate_hbm_multibank_arrays(
name, p, sdfg):
kernel_args.append(
p.as_arg(False,
name=fpga.fpga_ptr(name, p, sdfg, bank)))
else:
kernel_args.append(p.as_arg(False, name=name))
kernel_function_name = kernel_name
kernel_file_name = "{}.xclbin".format(kernel_name)
# Check if this kernel depends from other kernels
needs_synch = len(predecessors) > 0
if needs_synch:
# Build a vector containing all the events associated with the kernels from which this one depends
kernel_deps_name = f"deps_{kernel_name}"
kernel_stream.write(f"std::vector<cl::Event> {kernel_deps_name};")
for pred in predecessors:
# concatenate events from predecessor kernel
kernel_stream.write(
f"{kernel_deps_name}.push_back({pred}_event);")
# Launch HLS kernel, passing synchronization events (if any)
kernel_stream.write(
f"""\
auto {kernel_name}_kernel = program.MakeKernel({kernel_function_name}, "{kernel_function_name}", {", ".join(kernel_args)});
cl::Event {kernel_name}_event = {kernel_name}_kernel.ExecuteTaskFork({f'{kernel_deps_name}.begin(), {kernel_deps_name}.end()' if needs_synch else ''});
all_events.push_back({kernel_name}_event);""", sdfg, sdfg.node_id(state))
# Join RTL tasklets
for name in rtl_tasklet_names:
kernel_stream.write(f"kernel_{name}.wait();\n", sdfg,
sdfg.node_id(state))
def generate_module(self, sdfg, state, kernel_name, name, subgraph,
parameters, module_stream, entry_stream, host_stream):
"""Generates a module that will run as a dataflow function in the FPGA
kernel."""
state_id = sdfg.node_id(state)
dfg = sdfg.nodes()[state_id]
kernel_args_call = []
kernel_args_module = []
for is_output, pname, p, interface_id in parameters:
if isinstance(p, dt.Array):
for bank in fpga.iterate_hbm_multibank_arrays(
pname, p, sdfg):
arr_name = fpga.fpga_ptr(pname,
p,
sdfg,
bank,
is_output,
is_array_interface=True)
# Add interface ID to called module, but not to the module
# arguments
argname = fpga.fpga_ptr(pname,
p,
sdfg,
bank,
is_output,
is_array_interface=True,
interface_id=interface_id)
kernel_args_call.append(argname)
dtype = p.dtype
kernel_args_module.append("{} {}*{}".format(
dtype.ctype, "const " if not is_output else "",
arr_name))
else:
if isinstance(p, dt.Stream):
kernel_args_call.append(
p.as_arg(with_types=False, name=pname))
if p.is_stream_array():
kernel_args_module.append(
"dace::FIFO<{}, {}, {}> {}[{}]".format(
p.dtype.base_type.ctype, p.veclen,
p.buffer_size, pname, p.size_string()))
else:
kernel_args_module.append(
"dace::FIFO<{}, {}, {}> &{}".format(
p.dtype.base_type.ctype, p.veclen,
p.buffer_size, pname))
else:
kernel_args_call.append(
p.as_arg(with_types=False, name=pname))
kernel_args_module.append(
p.as_arg(with_types=True, name=pname))
# Check if we are generating an RTL module, in which case only the
# accesses to the streams should be handled
rtl_tasklet = None
for n in subgraph.nodes():
if (isinstance(n, dace.nodes.Tasklet)
and n.language == dace.dtypes.Language.SystemVerilog):
rtl_tasklet = n
break
if rtl_tasklet:
entry_stream.write(
f'// [RTL] HLSLIB_DATAFLOW_FUNCTION({name}, {", ".join(kernel_args_call)});'
)
module_stream.write(
f'// [RTL] void {name}({", ".join(kernel_args_module)});\n\n')
            # The _1 suffix in the names below is due to Vitis instance naming
for node in subgraph.source_nodes():
if isinstance(sdfg.arrays[node.data], dt.Stream):
if node.data not in self._stream_connections:
self._stream_connections[node.data] = [None, None]
for edge in state.out_edges(node):
rtl_name = "{}_{}_{}_{}".format(edge.dst, sdfg.sdfg_id,
sdfg.node_id(state),
state.node_id(edge.dst))
self._stream_connections[
node.data][1] = '{}_top_1.s_axis_{}'.format(
rtl_name, edge.dst_conn)
for node in subgraph.sink_nodes():
if isinstance(sdfg.arrays[node.data], dt.Stream):
if node.data not in self._stream_connections:
self._stream_connections[node.data] = [None, None]
for edge in state.in_edges(node):
rtl_name = "{}_{}_{}_{}".format(edge.src, sdfg.sdfg_id,
sdfg.node_id(state),
state.node_id(edge.src))
self._stream_connections[
node.data][0] = '{}_top_1.m_axis_{}'.format(
rtl_name, edge.src_conn)
# Make the dispatcher trigger generation of the RTL module, but
# ignore the generated code, as the RTL codegen will generate the
# appropriate files.
ignore_stream = CodeIOStream()
self._dispatcher.dispatch_subgraph(sdfg,
subgraph,
state_id,
ignore_stream,
ignore_stream,
skip_entry_node=False)
# Launch the kernel from the host code
rtl_name = self.rtl_tasklet_name(rtl_tasklet, state, sdfg)
host_stream.write(
f" auto kernel_{rtl_name} = program.MakeKernel(\"{rtl_name}_top\"{', '.join([''] + [name for _, name, p, _ in parameters if not isinstance(p, dt.Stream)])}).ExecuteTaskFork();",
sdfg, state_id, rtl_tasklet)
return
# create a unique module name to prevent name clashes
module_function_name = f"module_{name}_{sdfg.sdfg_id}"
        # Unrolling processing elements: if the first scope of the subgraph
        # is an unrolled map, generate a processing element for each iteration
scope_children = subgraph.scope_children()
top_scopes = [
n for n in scope_children[None]
if isinstance(n, dace.sdfg.nodes.EntryNode)
]
unrolled_loops = 0
if len(top_scopes) == 1:
scope = top_scopes[0]
if scope.unroll:
self._unrolled_pes.add(scope.map)
kernel_args_call += ", ".join(scope.map.params)
kernel_args_module += ["int " + p for p in scope.params]
for p, r in zip(scope.map.params, scope.map.range):
if len(r) > 3:
raise cgx.CodegenError("Strided unroll not supported")
entry_stream.write(
"for (size_t {param} = {begin}; {param} < {end}; "
"{param} += {increment}) {{\n#pragma HLS UNROLL".format(
param=p, begin=r[0], end=r[1] + 1, increment=r[2]))
unrolled_loops += 1
# Generate caller code in top-level function
entry_stream.write(
"HLSLIB_DATAFLOW_FUNCTION({}, {});".format(
module_function_name, ", ".join(kernel_args_call)), sdfg,
state_id)
for _ in range(unrolled_loops):
entry_stream.write("}")
# ----------------------------------------------------------------------
# Generate kernel code
# ----------------------------------------------------------------------
self._dispatcher.defined_vars.enter_scope(subgraph)
module_body_stream = CodeIOStream()
module_body_stream.write(
"void {}({}) {{".format(module_function_name,
", ".join(kernel_args_module)), sdfg,
state_id)
# Register the array interface as a naked pointer for use inside the
# FPGA kernel
interfaces_added = set()
for is_output, argname, arg, _ in parameters:
for bank in fpga.iterate_hbm_multibank_arrays(
argname, arg, sdfg):
if (not (isinstance(arg, dt.Array) and arg.storage
== dace.dtypes.StorageType.FPGA_Global)):
continue
ctype = dtypes.pointer(arg.dtype).ctype
ptr_name = fpga.fpga_ptr(argname,
arg,
sdfg,
bank,
is_output,
None,
is_array_interface=True)
if not is_output:
ctype = f"const {ctype}"
self._dispatcher.defined_vars.add(ptr_name, DefinedType.Pointer,
ctype)
if argname in interfaces_added:
continue
interfaces_added.add(argname)
self._dispatcher.defined_vars.add(argname,
DefinedType.ArrayInterface,
ctype,
allow_shadowing=True)
module_body_stream.write("\n")
# Allocate local transients
data_to_allocate = (set(subgraph.top_level_transients()) -
set(sdfg.shared_transients()) -
set([p[1] for p in parameters]))
allocated = set()
for node in subgraph.nodes():
if not isinstance(node, dace.sdfg.nodes.AccessNode):
continue
if node.data not in data_to_allocate or node.data in allocated:
continue
allocated.add(node.data)
self._dispatcher.dispatch_allocate(sdfg, state, state_id, node,
node.desc(sdfg), module_stream,
module_body_stream)
self._dispatcher.dispatch_subgraph(sdfg,
subgraph,
state_id,
module_stream,
module_body_stream,
skip_entry_node=False)
module_stream.write(module_body_stream.getvalue(), sdfg, state_id)
module_stream.write("}\n\n")
self._dispatcher.defined_vars.exit_scope(subgraph)
def rtl_tasklet_name(self, node: nodes.RTLTasklet, state, sdfg):
return "{}_{}_{}_{}".format(node.name, sdfg.sdfg_id,
sdfg.node_id(state), state.node_id(node))
def generate_kernel_internal(
self, sdfg: dace.SDFG, state: dace.SDFGState, kernel_name: str,
predecessors: list, subgraphs: list, kernel_stream: CodeIOStream,
state_host_header_stream: CodeIOStream,
state_host_body_stream: CodeIOStream, function_stream: CodeIOStream,
callsite_stream: CodeIOStream, state_parameters: list):
'''
Generates Kernel code, both device and host side.
:param sdfg:
:param state:
:param kernel_name:
        :param predecessors: list containing all the names of the kernels on which this one depends
:param subgraphs:
:param kernel_stream: Device code stream, contains the kernel code
:param state_host_header_stream: Device-specific code stream: contains the host code
for the state global declarations.
:param state_host_body_stream: Device-specific code stream: contains all the code related to
this state, for creating transient buffers, spawning kernels, and synchronizing them.
:param function_stream: CPU code stream.
:param callsite_stream: CPU code stream.
:param state_parameters: list of state parameters. The kernel-specific parameters will be appended to it.
'''
(global_data_parameters, top_level_local_data, subgraph_parameters,
nested_global_transients, bank_assignments,
external_streams) = self.make_parameters(sdfg, state, subgraphs)
state_parameters.extend(global_data_parameters)
# Detect RTL tasklets, which will be launched as individual kernels
rtl_tasklet_names = [
self.rtl_tasklet_name(nd, state, sdfg) for nd in state.nodes()
if isinstance(nd, nodes.RTLTasklet)
]
# Generate host code
self.generate_host_header(sdfg, kernel_name, global_data_parameters,
state_host_header_stream)
self.generate_host_function_boilerplate(sdfg, state,
nested_global_transients,
state_host_body_stream)
# Now we write the device code
module_stream = CodeIOStream()
entry_stream = CodeIOStream()
state_id = sdfg.node_id(state)
self.generate_kernel_boilerplate_pre(sdfg, state_id, kernel_name,
global_data_parameters,
bank_assignments, module_stream,
entry_stream, external_streams)
# Emit allocations
for node in top_level_local_data:
self._dispatcher.dispatch_allocate(sdfg, state, state_id, node,
node.desc(sdfg), module_stream,
entry_stream)
for is_output, name, node, _ in external_streams:
self._dispatcher.defined_vars.add_global(name, DefinedType.Stream,
node.ctype)
if name not in self._stream_connections:
self._stream_connections[name] = [None, None]
key = 0 if is_output else 1
val = '{}_1.{}'.format(kernel_name, name)
self._stream_connections[name][key] = val
self.generate_modules(sdfg, state, kernel_name, subgraphs,
subgraph_parameters, module_stream, entry_stream,
state_host_body_stream)
self.generate_host_function_body(sdfg, state, kernel_name, predecessors,
global_data_parameters,
rtl_tasklet_names,
state_host_body_stream)
# Store code to be passed to compilation phase
# self._host_codes.append((kernel_name, host_code_stream.getvalue()))
kernel_stream.write(module_stream.getvalue())
kernel_stream.write(entry_stream.getvalue())
self.generate_kernel_boilerplate_post(kernel_stream, sdfg, state_id)
def generate_host_header(self, sdfg, kernel_function_name, parameters,
host_code_stream):
kernel_args = []
for is_output, name, arg, interface_id in parameters:
if isinstance(arg, dt.Array):
for bank in fpga.iterate_hbm_multibank_arrays(
name, arg, sdfg):
argname = fpga.fpga_ptr(name, arg, sdfg, bank, is_output,
None, None, True, interface_id)
kernel_args.append(arg.as_arg(with_types=True,
name=argname))
else:
kernel_args.append(arg.as_arg(with_types=True, name=name))
host_code_stream.write(
"""\
// Signature of kernel function (with raw pointers) for argument matching
DACE_EXPORTED void {kernel_function_name}({kernel_args});\n\n""".format(
kernel_function_name=kernel_function_name,
kernel_args=", ".join(kernel_args)), sdfg)
def generate_memlet_definition(self, sdfg, dfg, state_id, src_node,
dst_node, edge, callsite_stream):
memlet = edge.data
if (self._dispatcher.defined_vars.get(
memlet.data)[0] == DefinedType.FPGA_ShiftRegister):
raise NotImplementedError("Shift register for Xilinx NYI")
else:
self._cpu_codegen.copy_memory(sdfg, dfg, state_id, src_node,
dst_node, edge, None, callsite_stream)
def allocate_view(self, sdfg: dace.SDFG, dfg: dace.SDFGState, state_id: int,
node: dace.nodes.AccessNode, global_stream: CodeIOStream,
declaration_stream: CodeIOStream,
allocation_stream: CodeIOStream):
return self._cpu_codegen.allocate_view(sdfg, dfg, state_id, node,
global_stream,
declaration_stream,
allocation_stream)
def generate_nsdfg_arguments(self, sdfg, dfg, state, node):
# Connectors that are both input and output share the same name, unless
# they are pointers to global memory in device code, in which case they
# are split into explicit input and output interfaces
inout = set(node.in_connectors.keys() & node.out_connectors.keys())
memlet_references = []
for _, _, _, vconn, in_memlet in sorted(state.in_edges(node),
key=lambda e: e.dst_conn or ""):
if in_memlet.data is None:
continue
is_memory_interface = (self._dispatcher.defined_vars.get(
in_memlet.data, 1)[0] == DefinedType.ArrayInterface)
if is_memory_interface:
for bank in fpga.iterate_hbm_multibank_arrays(
in_memlet.data, sdfg.arrays[in_memlet.data], sdfg):
interface_name = fpga.fpga_ptr(vconn,
sdfg.arrays[in_memlet.data],
sdfg,
bank,
False,
is_array_interface=True)
passed_memlet = copy.deepcopy(in_memlet)
passed_memlet.subset = fpga.modify_distributed_subset(
passed_memlet.subset, bank)
interface_ref = cpp.emit_memlet_reference(
self._dispatcher,
sdfg,
passed_memlet,
interface_name,
conntype=node.in_connectors[vconn],
is_write=False)
memlet_references.append(interface_ref)
if vconn in inout:
continue
if fpga.is_hbm_array(sdfg.arrays[in_memlet.data]):
passed_memlet = copy.deepcopy(in_memlet)
passed_memlet.subset = fpga.modify_distributed_subset(
passed_memlet.subset, 0) # dummy so it works for HBM
else:
passed_memlet = in_memlet
ref = cpp.emit_memlet_reference(self._dispatcher,
sdfg,
passed_memlet,
vconn,
conntype=node.in_connectors[vconn],
is_write=False)
if not is_memory_interface:
memlet_references.append(ref)
for _, uconn, _, _, out_memlet in sorted(
state.out_edges(node), key=lambda e: e.src_conn or ""):
if out_memlet.data is None:
continue
if fpga.is_hbm_array(sdfg.arrays[out_memlet.data]):
passed_memlet = copy.deepcopy(out_memlet)
passed_memlet.subset = fpga.modify_distributed_subset(
passed_memlet.subset, 0) # dummy so it works for HBM
else:
passed_memlet = out_memlet
ref = cpp.emit_memlet_reference(self._dispatcher,
sdfg,
passed_memlet,
uconn,
conntype=node.out_connectors[uconn],
is_write=True)
is_memory_interface = (self._dispatcher.defined_vars.get(
out_memlet.data, 1)[0] == DefinedType.ArrayInterface)
if is_memory_interface:
for bank in fpga.iterate_hbm_multibank_arrays(
out_memlet.data, sdfg.arrays[out_memlet.data], sdfg):
interface_name = fpga.fpga_ptr(
uconn,
sdfg.arrays[out_memlet.data],
sdfg,
bank,
True,
is_array_interface=True)
passed_memlet = copy.deepcopy(out_memlet)
passed_memlet.subset = fpga.modify_distributed_subset(
passed_memlet.subset, bank)
memlet_references.append(
cpp.emit_memlet_reference(
self._dispatcher,
sdfg,
passed_memlet,
interface_name,
conntype=node.out_connectors[uconn],
is_write=True))
else:
memlet_references.append(ref)
return memlet_references
def unparse_tasklet(self, *args, **kwargs):
# Pass this object for callbacks into the Xilinx codegen
cpp.unparse_tasklet(*args, codegen=self, **kwargs)
def make_ptr_assignment(self, src_expr, src_dtype, dst_expr, dst_dtype):
"""
Write source to destination, where the source is a scalar, and the
destination is a pointer.
:return: String of C++ performing the write.
"""
return self.make_write(DefinedType.Pointer, dst_dtype, None,
"&" + dst_expr, None, src_expr, None,
dst_dtype.veclen < src_dtype.veclen,
src_dtype.veclen)
|
py | 7df7625755317c45165c4f137e12da4937005c79 | from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.ul import ULError
from examples.console import util
from examples.props.ai import AnalogInputProps
use_device_detection = True
def run_example():
board_num = 0
if use_device_detection:
ul.ignore_instacal()
if not util.config_first_detected_device(board_num):
print("Could not find device.")
return
channel = 0
ai_props = AnalogInputProps(board_num)
if ai_props.num_ai_chans < 1:
util.print_unsupported_example(board_num)
return
ai_range = ai_props.available_ranges[0]
try:
# Get a value from the device
if ai_props.resolution <= 16:
# Use the v_in method for devices with a resolution <= 16
# (optional parameter omitted)
value = ul.v_in(board_num, channel, ai_range)
else:
# Use the v_in_32 method for devices with a resolution > 16
# (optional parameter omitted)
value = ul.v_in_32(board_num, channel, ai_range)
# Display the value
print("Value: " + str(value))
except ULError as e:
util.print_ul_error(e)
finally:
if use_device_detection:
ul.release_daq_device(board_num)
if __name__ == '__main__':
run_example()
|
py | 7df762a452986f79f90e6f88770ff5c7055887fa | from .tool.func import *
def login_pw_change_2(conn):
curs = conn.cursor()
if ban_check() == 1:
return re_error('/ban')
ip = ip_check()
if ip_or_user(ip) != 0:
return redirect('/login')
if flask.request.method == 'POST':
if flask.request.form.get('pw4', None) and flask.request.form.get('pw2', None):
if flask.request.form.get('pw2', None) != flask.request.form.get('pw3', None):
return re_error('/error/20')
curs.execute(db_change("select pw, encode from user where id = ?"), [flask.session['id']])
user = curs.fetchall()
if not user:
return re_error('/error/2')
pw_check_d = pw_check(
flask.request.form.get('pw4', ''),
user[0][0],
user[0][1],
ip
)
if pw_check_d != 1:
return re_error('/error/10')
hashed = pw_encode(flask.request.form.get('pw2', None))
curs.execute(db_change("update user set pw = ? where id = ?"), [hashed, ip])
return redirect('/user')
else:
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('password_change'), wiki_set(), custom(), other2([0, 0])],
data = '''
<form method="post">
<input placeholder="''' + load_lang('now_password') + '''" name="pw4" type="password">
<hr class=\"main_hr\">
<input placeholder="''' + load_lang('new_password') + '''" name="pw2" type="password">
<hr class=\"main_hr\">
<input placeholder="''' + load_lang('password_confirm') + '''" name="pw3" type="password">
<hr class=\"main_hr\">
<button type="submit">''' + load_lang('save') + '''</button>
</form>
''',
menu = [['change', load_lang('return')]]
)) |
py | 7df7647985cd8d04f9f4493e6e1eab3605ba1a5c | #!/usr/bin/env python
#
# draw_tiles.py reads the output of sample_shapes.py and uses it to
# render map tiles for the sampled points.
#
# Input data is assumed to be sorted such that all records for a
# single key (tile) are presented as an uninterrupted sequence. Thus,
# tiles are rendered one at a time as data streams through the
# program.
#
import base64
import csv
import io
import mapnik
from itertools import groupby
from os.path import dirname
from shapely.geometry import box, Point
from sys import path, stdin, stderr
from tilebrute.core import print_status, inc_counter, emit, which, merc_srs
try:
from gdal2tiles import GlobalMercator
except ImportError:
# look for gdal2tiles in the system path
p = which('gdal2tiles.py')
if p:
path.append(dirname(p))
from gdal2tiles import GlobalMercator
else:
print_status('Unable to locate gdal2tiles.py in PYTHONPATH or PATH. Aborting.')
raise
# quadkey[zoom -z]
# quadkey[levelOfDetail -i]
# geo helpers
def tile_to_meters_Box2d(tile):
"""
    From a 'tile string' of the form 'tx,ty,zoom', create a mapnik.Box2d
    corresponding to that tile's extent in meters.
"""
merc = GlobalMercator()
tx,ty,z = [int(x) for x in tile.split(',')]
tx,ty = merc.GoogleTile(tx,ty,z)
return mapnik.Box2d(*merc.TileBounds(tx, ty, z))
class Peekable:
"""
Extend an Interable with single-item look-ahead.
it = Peekable(...)
for x in it:
if it.has_next():
# more coming
x_plus_one = it.peek()
else:
# last one!
"""
def __init__(self, it):
self._it = iter(it)
self._cache = None
def __iter__(self):
return self
def next(self):
if not self._cache:
return self._it.next()
else:
ret = self._cache
self._cache = None
return ret
def peek(self):
if not self._cache:
self._cache = self.next()
return self._cache
def has_next(self):
if not self._cache:
try: self._cache = self.next()
except StopIteration: return False
else: return True
else:
return True
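# A minimal usage sketch for Peekable: the helper below is hypothetical and
# only illustrates peek()/has_next(); it is not used elsewhere in this script.
def _peekable_pairs_demo(seq):
    it = Peekable(seq)
    pairs = []
    for x in it:
        if it.has_next():
            # look at the next item without consuming it
            pairs.append((x, it.peek()))
    return pairs
# e.g. _peekable_pairs_demo([1, 2, 3]) -> [(1, 2), (2, 3)]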
class TuplesDatasource(mapnik.PythonDatasource):
"""
Generates mx,my Point data in Google Merc. Reads data points from Iterable
specified with TuplesDatasource.set_source(), assumed to produce
tile,mx,my.
Using a static variable as a dirty hack to get around type erasure of C++
instances. mapnik.Python(...) converts all constructor args to Strings.
That means we can't pass an Iterable via the constructor. Worse, the
object returned is a C++ Datasource instance, oblivious of its Pythonic
outer shell. With no way to set instance variables on construction and no
way to update them afterwards, use a static variable to hold the generator
an instance will consume. As a result, TuplesDatasource is not
    thread-safe. Generous application of asserts serves to protect this
terrible API from losing data.
"""
_source = None
@staticmethod
def get_source():
assert TuplesDatasource._source, "TuplesDatasource._source not yet initialized!"
return TuplesDatasource._source
@staticmethod
def set_source(source):
assert not TuplesDatasource._source or not TuplesDatasource._source.has_next(), "TuplesDatasource._source is not yet empty!"
TuplesDatasource._source = Peekable(source)
@staticmethod
def get_tile():
tile,_,_ = TuplesDatasource.get_source().peek()
return tile
@staticmethod
def _points(bbox):
for tile, mx, my in TuplesDatasource.get_source():
p = Point(mx, my)
if not bbox.contains(p):
inc_counter("TuplesDatasource._points", "query_out_of_range")
continue
yield (p.wkb, {})
def __init__(self):
"""
Create a Datasource over the specified file handle. Be advised, C++
interop turns everything passed into Strings.
Beware, instantiation is not initialization. Before instantiating, you
must seed the souce with TuplesDatasource.set_source(Iterable)
"""
super(TuplesDatasource, self).__init__()
# fill in required interface
self.envelope = tile_to_meters_Box2d(TuplesDatasource.get_tile())
self.data_type = mapnik.DataType.Vector
def features(self, query):
bbox = box(query.bbox.minx, query.bbox.miny, query.bbox.maxx, query.bbox.maxy)
return mapnik.PythonDatasource.wkb_features(
keys = (),
features = TuplesDatasource._points(bbox)
)
# job code
def encode_image(im):
s = base64.encodestring(im.tostring('png'))
return s.replace('\n','')
def get_zoom(tile):
(tx,ty,z) = tile.split(",")
return int(z)
def opacity(zoom):
return zoom * 0.05
def pointWeight(zoom):
if zoom == 4:
return 0.05333
elif zoom == 5:
return 0.08
elif zoom == 6:
return 0.12
elif zoom == 7:
return 0.18
elif zoom == 8:
return 0.27
elif zoom == 9:
return 0.405
elif zoom == 10:
return 0.6075
elif zoom == 11:
return 0.91125
elif zoom == 12:
return 1.366875
elif zoom == 13:
return 2.0503125
elif zoom == 14:
return 3.07546875
elif zoom == 15:
return 4.61320312
elif zoom == 16:
return 6.9198046
elif zoom == 17:
return 10.37970
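# The lookup table above is (to rounding) a geometric progression with ratio
# 1.5 starting at 0.05333 for zoom 4, so an equivalent closed form would be
# the sketch below (not used by this script; zooms outside 4..17 would still
# need their own handling, since pointWeight() returns None for them):
#
#   def point_weight_closed_form(zoom):
#       return 0.05333 * 1.5 ** (zoom - 4)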
def read_points(file):
reader = csv.reader(file, delimiter="\t", strict=True)
for rec in reader:
if len(rec) != 3:
inc_counter("read_points", "invalid_input")
continue
yield (rec[0],float(rec[1]),float(rec[2]))
def init_map(zoom, seq):
m = mapnik.Map(256, 256, merc_srs)
m.background_color = mapnik.Color('white')
s = mapnik.Style()
r = mapnik.Rule()
sym = mapnik.MarkersSymbolizer()
sym.fill = mapnik.Color('black')
sym.spacing = 0.0
sym.opacity = opacity(zoom)
sym.height = mapnik.Expression(str(pointWeight(zoom)/2.0))
sym.width = mapnik.Expression(str(pointWeight(zoom)/2.0))
# Ignore placement instructs Mapnik to avoid building the quadtree
# collision cache and helps performance if you know you want to
# allow for overlaps between features.
# - Dane
sym.allow_overlap = True
sym.ignore_placement = True
r.symbols.append(sym)
s.rules.append(r)
m.append_style('point_style', s)
TuplesDatasource.set_source(seq)
ds = mapnik.Python(factory='TuplesDatasource')
layer = mapnik.Layer('file', merc_srs)
layer.datasource = ds
layer.styles.append('point_style')
m.layers.append(layer)
return m
def main():
for tile,points in groupby(read_points(stdin), lambda x: x[0]):
try:
zoom = get_zoom(tile)
map = init_map(zoom, points)
map.zoom_all()
im = mapnik.Image(256,256)
mapnik.render(map,im)
emit(tile, encode_image(im))
except Exception as e:
print_status("Error while rendering tile %s: %s" % (tile,e))
raise e
if __name__=='__main__':
main()
|
py | 7df764c343f50db8e5dcf1858c6b5dd62c4f0d73 | import numpy as np
import matplotlib.pyplot as plt
from scipy.fft import fft, fftfreq
from scipy.optimize import linprog
## Data load
data_path = "./data/synthetic_data.txt"
data = np.loadtxt(data_path)[:, 3]
data /= data.max()
## Data process
n = data.size
fq = 1./1.
t = np.linspace(0.0, n*fq, n, endpoint=False)
n_f = n//2
data_f = fft(data)
t_f = fftfreq(n, fq)[:n_f]
data_f_norm = 2.0/n * np.abs(data_f[0:n_f])
## CS: compressed-sensing recovery of the spectrum from m = n_f / cr random projections
cr = 2
m = n_f // cr
a_mat = np.random.normal(0, 1/cr, size=(m, n_f))
y_f = a_mat.dot(data_f_norm)
c_arr = np.ones(n_f)
res = linprog(c_arr, A_eq=a_mat, b_eq=y_f)['x']
data_rec = np.array(res)
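# Note on the recovery step above: with c_arr = 1 and linprog's default
# bounds (x >= 0), the call solves  min 1^T x  subject to  A x = y_f, x >= 0,
# which works here because the magnitude spectrum is non-negative. A sketch
# of the general signed formulation (not used above) would split x into
# positive and negative parts:
#
#   c2 = np.ones(2 * n_f)
#   a2 = np.hstack([a_mat, -a_mat])
#   uv = linprog(c2, A_eq=a2, b_eq=y_f)['x']
#   x_signed = uv[:n_f] - uv[n_f:]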
plt.plot(data_f_norm)
plt.plot(data_rec)
plt.show()
np.save("./data/compressed.npy", data_f_norm)
|
py | 7df76630ab91e6735773848ba69a79921bd0cd3a | from __future__ import print_function
from __future__ import division
from . import _C
import pandas as pd
import numpy as np
NAN_VALUE = _C.NAN_VALUE
MODE = 'value' # value mean median
###################################################################################################################################################
def clean_df_nans(df,
mode=MODE,
nan_value=NAN_VALUE,
df_values=None,
drop_null_columns=False,
):
new_df = df.replace([np.inf, -np.inf], np.nan) # replace infinites to nan
null_cols = list(new_df.columns[new_df.isnull().all()])
if drop_null_columns:
new_df = new_df.drop(null_cols, axis='columns')
if mode=='value':
new_df = new_df.fillna(nan_value)
elif mode=='mean':
df_values = new_df.mean(axis='index', skipna=True) if df_values is None else df_values
new_df = new_df.fillna(df_values)
elif mode=='median':
df_values = new_df.median(axis='index', skipna=True) if df_values is None else df_values
new_df = new_df.fillna(df_values)
else:
raise Exception(f'{mode}')
return new_df, df_values, null_cols
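# Minimal usage sketch for clean_df_nans (hypothetical frames, not part of
# this module): fit the fill values once and reuse them on a second frame.
#
#   train_clean, fill_values, dropped = clean_df_nans(train_df, mode='median',
#                                                     drop_null_columns=True)
#   test_clean, _, _ = clean_df_nans(test_df, mode='median',
#                                    df_values=fill_values)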
###################################################################################################################################################
class DFBuilder():
def __init__(self):
self.reset()
def reset(self):
self.counter = 0
self.indexs = []
self.ds = []
def append(self, index, d):
assert isinstance(d, dict)
index = self.counter if index is None else index
self.indexs += [index]
self.ds += [d]
self.counter += 1
def __getitem__(self, idx):
k = self.indexs.index(idx)
return self.ds[k]
def __len__(self):
return len(self.indexs)
def __repr__(self):
df = self.get_df()
return str(df)
def get_df(self):
assert len(self)==len(list(set(self.indexs))), 'indexs must be unique'
new_d = {index:{} for index in self.indexs}
for index,d in zip(self.indexs,self.ds):
for k in d.keys():
new_d[index][k] = d[k]
df = pd.DataFrame.from_dict(new_d, orient='index').reindex(self.indexs)
return df
def __call__(self):
return self.get_df() |
py | 7df7684d31c6491adc3ee919f8c45d0fd3221ef3 | # Copyright (c) 2021 Bounkong Khamphousone
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
|
py | 7df768f1f058b199e669078aacc9fc74a7fcf32d | # Used by pyinstaller to expose hidden imports
import entrypoints
hiddenimports = [ep.module_name for ep in entrypoints.get_group_all('keyring.backends')]
|
py | 7df7691352c178f9e36617897316d47f9260a70c | import numpy as np
import numpy.random as npr
import pandas as pd
from rta.array_operations.functional import act
from rta.stats.random import runif
pi = np.pi
def array2df(x, stack=True):
x = pd.DataFrame(x)
x.index = ["p{}".format(i) for i in range(x.shape[0])]
if stack:
x = x.stack()
x.index.names = ['peptide', 'run']
x = x.reset_index(level=1)
x.columns = ['run', 'rt']
return x
def draw_rt(size=10000,
min_rt=16,
max_rt=180,
sort=True):
"""Draw what is considered to be the real retention times.
Args:
size (int): the number of peptides
min_rt (float): the minimal retention time.
max_rt (float): the maximal retention time.
"""
rt = npr.random(size) * (max_rt - min_rt) + min_rt
if sort:
rt.sort()
return rt
def draw_runs(rt,
runs_no,
precision=.05):
"""Draw the less precise retention times within technical runs.
Args:
rt (np.array): the true retention times.
runs_no (int): the number of technical replicates.
precision (float): the standard deviation of a gaussian blur of original retention times.
"""
return npr.normal(loc=rt, scale=precision, size=(runs_no, len(rt))).T
if __name__ == "__main__":
rt = draw_rt()
rts = draw_runs(rt, 3)
shifts = (lambda x: 10 + x * (1 + .01 * np.sin(x/20)),
lambda x: 7 + x * (1 + .05 * np.cos(x/15)),
lambda x: 15 + x * (1.01 + .25 * np.sin(x/18)))
rtss = act(shifts, rts)
import matplotlib.pyplot as plt
plt.scatter(rtss[:,0], rtss[:,1])
e = rtss.min(), rtss.max()
plt.plot(e, e, color='black')
plt.show()
# automate shift maker
def random_diag_sin_shifts(n,
min_c=0,
max_c=10,
min_a=.01,
max_a=.05,
min_f=15,
max_f=20):
"""Generate systematics shift sine functions.
Each one follows a formula:
f(x) = x + ampl * sin(freq * x)
"""
C = runif(n, min_c, max_c)
A = runif(n, min_a, max_a)
F = runif(n, min_f, max_f)
    # Bind c, a and f as default arguments so each lambda keeps its own
    # parameters; a plain closure would make every shift use the last values.
    return tuple([lambda x, c=c, a=a, f=f: c + x + a * np.sin(2 * pi * x / f)
                  for c, a, f in zip(C, A, F)])
if __name__ == "__main__":
rt = draw_rt()
rts = draw_runs(rt, 10)
shifts = random_diag_sin_shifts(10)
rtss = act(shifts, rts)
# add big jumps here.
npr.binomial(10000, .01) |
py | 7df7697f08da80ae0592395b7cdda22d8a6a8ee0 | """Define the Problem class and a FakeComm class for non-MPI users."""
import sys
import pprint
import os
import logging
from collections import defaultdict, namedtuple
from fnmatch import fnmatchcase
from itertools import product
from io import StringIO
import numpy as np
import scipy.sparse as sparse
from openmdao.core.component import Component
from openmdao.core.driver import Driver, record_iteration
from openmdao.core.explicitcomponent import ExplicitComponent
from openmdao.core.group import Group, System
from openmdao.core.indepvarcomp import IndepVarComp
from openmdao.core.total_jac import _TotalJacInfo
from openmdao.approximation_schemes.complex_step import ComplexStep
from openmdao.approximation_schemes.finite_difference import FiniteDifference
from openmdao.solvers.solver import SolverInfo
from openmdao.error_checking.check_config import _default_checks, _all_checks
from openmdao.recorders.recording_iteration_stack import _RecIteration
from openmdao.recorders.recording_manager import RecordingManager, record_viewer_data
from openmdao.utils.record_util import create_local_meta
from openmdao.utils.general_utils import ContainsAll, pad_name, simple_warning
from openmdao.utils.mpi import FakeComm
from openmdao.utils.mpi import MPI
from openmdao.utils.name_maps import prom_name2abs_name
from openmdao.utils.options_dictionary import OptionsDictionary
from openmdao.utils.units import get_conversion
from openmdao.utils import coloring as coloring_mod
from openmdao.utils.name_maps import abs_key2rel_key
from openmdao.vectors.vector import INT_DTYPE
from openmdao.vectors.default_vector import DefaultVector
from openmdao.utils.logger_utils import get_logger, TestLogger
import openmdao.utils.coloring as coloring_mod
from openmdao.utils.hooks import _setup_hooks
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
from openmdao.utils.name_maps import rel_key2abs_key, rel_name2abs_name
# Use this as a special value to be able to tell if the caller set a value for the optional
# out_stream argument. We run into problems running testflo if we use a default of sys.stdout.
_DEFAULT_OUT_STREAM = object()
ErrorTuple = namedtuple('ErrorTuple', ['forward', 'reverse', 'forward_reverse'])
MagnitudeTuple = namedtuple('MagnitudeTuple', ['forward', 'reverse', 'fd'])
_contains_all = ContainsAll()
_undefined = object()
CITATION = """@article{openmdao_2019,
Author={Justin S. Gray and John T. Hwang and Joaquim R. R. A.
Martins and Kenneth T. Moore and Bret A. Naylor},
Title="{OpenMDAO: An Open-Source Framework for Multidisciplinary
Design, Analysis, and Optimization}",
Journal="{Structural and Multidisciplinary Optimization}",
Year={2019},
Publisher={Springer},
pdf={http://openmdao.org/pubs/openmdao_overview_2019.pdf},
note= {In Press}
}"""
class Problem(object):
"""
Top-level container for the systems and drivers.
Attributes
----------
model : <System>
Pointer to the top-level <System> object (root node in the tree).
comm : MPI.Comm or <FakeComm>
The global communicator.
driver : <Driver>
Slot for the driver. The default driver is `Driver`, which just runs
the model once.
_mode : 'fwd' or 'rev'
Derivatives calculation mode, 'fwd' for forward, and 'rev' for
reverse (adjoint).
_orig_mode : 'fwd', 'rev', or 'auto'
Derivatives calculation mode assigned by the user. If set to 'auto', _mode will be
automatically assigned to 'fwd' or 'rev' based on relative sizes of design variables vs.
responses.
_solver_print_cache : list
Allows solver iprints to be set to requested values after setup calls.
_initial_condition_cache : dict
Any initial conditions that are set at the problem level via setitem are cached here
until they can be processed.
_setup_status : int
Current status of the setup in _model.
0 -- Newly initialized problem or newly added model.
1 -- The `setup` method has been called, but vectors not initialized.
2 -- The `final_setup` has been run, everything ready to run.
cite : str
Listing of relevant citations that should be referenced when
publishing work that uses this class.
options : <OptionsDictionary>
Dictionary with general options for the problem.
recording_options : <OptionsDictionary>
Dictionary with problem recording options.
_rec_mgr : <RecordingManager>
Object that manages all recorders added to this problem.
_check : bool
If True, call check_config at the end of final_setup.
_recording_iter : _RecIteration
Manages recording of iterations.
_filtered_vars_to_record : dict
Dictionary of lists of design vars, constraints, etc. to record.
_logger : object or None
Object for logging config checks if _check is True.
_force_alloc_complex : bool
Force allocation of imaginary part in nonlinear vectors. OpenMDAO can generally
detect when you need to do this, but in some cases (e.g., complex step is used
after a reconfiguration) you may need to set this to True.
_name : str
Problem name.
"""
def __init__(self, model=None, driver=None, comm=None, name=None, **options):
"""
Initialize attributes.
Parameters
----------
model : <System> or None
The top-level <System>. If not specified, an empty <Group> will be created.
driver : <Driver> or None
The driver for the problem. If not specified, a simple "Run Once" driver will be used.
comm : MPI.Comm or <FakeComm> or None
The global communicator.
name : str
Problem name. Can be used to specify a Problem instance when multiple Problems
exist.
**options : named args
All remaining named args are converted to options.
"""
self.cite = CITATION
self._name = name
if comm is None:
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
except ImportError:
comm = FakeComm()
if model is None:
self.model = Group()
elif isinstance(model, System):
self.model = model
else:
raise TypeError(self.msginfo +
": The value provided for 'model' is not a valid System.")
if driver is None:
self.driver = Driver()
elif isinstance(driver, Driver):
self.driver = driver
else:
raise TypeError(self.msginfo +
": The value provided for 'driver' is not a valid Driver.")
self.comm = comm
self._solver_print_cache = []
self._mode = None # mode is assigned in setup()
self._initial_condition_cache = {}
# Status of the setup of _model.
# 0 -- Newly initialized problem or newly added model.
# 1 -- The `setup` method has been called, but vectors not initialized.
# 2 -- The `final_setup` has been run, everything ready to run.
self._setup_status = 0
self._rec_mgr = RecordingManager()
# General options
self.options = OptionsDictionary(parent_name=type(self).__name__)
self.options.declare('coloring_dir', types=str,
default=os.path.join(os.getcwd(), 'coloring_files'),
desc='Directory containing coloring files (if any) for this Problem.')
self.options.update(options)
# Case recording options
self.recording_options = OptionsDictionary(parent_name=type(self).__name__)
self.recording_options.declare('record_desvars', types=bool, default=True,
desc='Set to True to record design variables at the '
'problem level')
self.recording_options.declare('record_objectives', types=bool, default=True,
desc='Set to True to record objectives at the problem level')
self.recording_options.declare('record_constraints', types=bool, default=True,
desc='Set to True to record constraints at the '
'problem level')
self.recording_options.declare('record_responses', types=bool, default=False,
desc='Set True to record constraints and objectives at the '
'problem level.')
self.recording_options.declare('includes', types=list, default=['*'],
desc='Patterns for variables to include in recording. \
Uses fnmatch wildcards')
self.recording_options.declare('excludes', types=list, default=[],
desc='Patterns for vars to exclude in recording '
'(processed post-includes). Uses fnmatch wildcards')
_setup_hooks(self)
def _get_var_abs_name(self, name):
if name in self.model._var_allprocs_abs2meta:
return name
elif name in self.model._var_allprocs_prom2abs_list['output']:
return self.model._var_allprocs_prom2abs_list['output'][name][0]
elif name in self.model._var_allprocs_prom2abs_list['input']:
abs_names = self.model._var_allprocs_prom2abs_list['input'][name]
if len(abs_names) == 1:
return abs_names[0]
else:
raise KeyError("{}: Using promoted name `{}' is ambiguous and matches unconnected "
"inputs %s. Use absolute name to disambiguate.".format(self.msginfo,
name,
abs_names))
raise KeyError('{}: Variable "{}" not found.'.format(self.msginfo, name))
@property
def msginfo(self):
"""
Return info to prepend to messages.
Returns
-------
str
Info to prepend to messages.
"""
if self._name is None:
return type(self).__name__
return '{} {}'.format(type(self).__name__, self._name)
def _get_inst_id(self):
return self._name
def is_local(self, name):
"""
Return True if the named variable or system is local to the current process.
Parameters
----------
name : str
Name of a variable or system.
Returns
-------
bool
True if the named system or variable is local to this process.
"""
if self._setup_status < 1:
raise RuntimeError("{}: is_local('{}') was called before setup() "
"completed.".format(self.msginfo, name))
try:
abs_name = self._get_var_abs_name(name)
except KeyError:
sub = self.model._get_subsystem(name)
if sub is None: # either the sub is remote or there is no sub by that name
# TODO: raise exception if sub does not exist
return False
else:
# if system has been set up, _var_sizes will be initialized
return sub._var_sizes is not None
# variable exists, but may be remote
return abs_name in self.model._var_abs2meta
def _get_cached_val(self, name):
# We have set and cached already
if name in self._initial_condition_cache:
return self._initial_condition_cache[name]
# Vector not setup, so we need to pull values from saved metadata request.
else:
proms = self.model._var_allprocs_prom2abs_list
meta = self.model._var_abs2meta
if name in meta:
if isinstance(self.model, Group) and name in self.model._conn_abs_in2out:
src_name = self.model._conn_abs_in2out[name]
val = meta[src_name]['value']
else:
val = meta[name]['value']
elif name in proms['output']:
abs_name = prom_name2abs_name(self.model, name, 'output')
if abs_name in meta:
val = meta[abs_name]['value']
elif name in proms['input']:
abs_name = proms['input'][name][0]
conn = self.model._conn_abs_in2out
if abs_name in meta:
if isinstance(self.model, Group) and abs_name in conn:
src_name = self.model._conn_abs_in2out[abs_name]
# So, if the inputs and outputs are promoted to the same name, then we
# allow getitem, but if they aren't, then we raise an error due to non
# uniqueness.
if name not in proms['output']:
# This triggers a check for unconnected non-unique inputs, and
# raises the same error as vector access.
abs_name = prom_name2abs_name(self.model, name, 'input')
val = meta[src_name]['value']
else:
# This triggers a check for unconnected non-unique inputs, and
# raises the same error as vector access.
abs_name = prom_name2abs_name(self.model, name, 'input')
val = meta[abs_name]['value']
else:
raise KeyError('{}: Variable name "{}" not found.'.format(self.msginfo, name))
if val is not _undefined:
# Need to cache the "get" in case the user calls in-place numpy operations.
self._initial_condition_cache[name] = val
return val
def __getitem__(self, name):
"""
Get an output/input variable.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
Returns
-------
float or ndarray or any python object
the requested output/input variable.
"""
return self.get_val(name)
def get_val(self, name, units=None, indices=None, get_remote=False):
"""
Get an output/input variable.
Function is used if you want to specify display units.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
units : str, optional
Units to convert to before return.
indices : int or list of ints or tuple of ints or int ndarray or Iterable or None, optional
Indices or slice to return.
get_remote : bool
If True, retrieve the value even if it is on a remote process. Note that if the
variable is remote on ANY process, this function must be called on EVERY process
in the Problem's MPI communicator.
Returns
-------
object
The value of the requested output/input variable.
"""
if self._setup_status == 1:
val = self._get_cached_val(name)
if indices is not None:
val = val[indices]
if units is not None:
val = self.model.convert2units(name, val, units)
return val
val = self.model._get_val(name, units=units, indices=indices, get_remote=get_remote)
if val is System._undefined:
if get_remote:
raise KeyError('{}: Variable name "{}" not found.'.format(self.msginfo, name))
else:
raise RuntimeError(
"{}: Variable '{}' is not local to rank {}. You can retrieve values from "
"other processes using "
"`problem.get_val(<name>, get_remote=True)`.".format(self.msginfo, name,
self.comm.rank))
return val
def __setitem__(self, name, value):
"""
Set an output/input variable.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
value : float or ndarray or any python object
value to set this variable to.
"""
# Caching only needed if vectors aren't allocated yet.
if self._setup_status == 1:
self._initial_condition_cache[name] = value
else:
all_proms = self.model._var_allprocs_prom2abs_list
if name in all_proms['output']:
abs_name = all_proms['output'][name][0]
elif name in all_proms['input']:
abs_name = prom_name2abs_name(self.model, name, 'input')
else:
abs_name = name
if abs_name in self.model._outputs._views:
self.model._outputs[abs_name] = value
elif abs_name in self.model._inputs._views:
self.model._inputs[abs_name] = value
elif abs_name in self.model._discrete_outputs:
self.model._discrete_outputs[abs_name] = value
elif abs_name in self.model._discrete_inputs:
self.model._discrete_inputs[abs_name] = value
else:
# might be a remote var. If so, just do nothing on this proc
if abs_name in self.model._var_allprocs_abs2meta:
print("Variable '{}' is remote on rank {}. "
"Local assignment ignored.".format(name, self.comm.rank))
else:
raise KeyError('{}: Variable "{}" not found.'.format(self.model.msginfo, name))
def set_val(self, name, value, units=None, indices=None):
"""
Set an output/input variable.
Function is used if you want to set a value using a different unit.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
value : float or ndarray or list
Value to set this variable to.
units : str, optional
Units that value is defined in.
indices : int or list of ints or tuple of ints or int ndarray or Iterable or None, optional
Indices or slice to set to specified value.
"""
if units is not None:
base_units = self.model._get_var_meta(name)['units']
if base_units is None:
msg = "{}: Can't set variable '{}' with units 'None' to value with units '{}'."
raise TypeError(msg.format(self.msginfo, name, units))
try:
scale, offset = get_conversion(units, base_units)
except TypeError:
msg = "{}: Can't set variable '{}' with units '{}' to value with units '{}'."
raise TypeError(msg.format(self.msginfo, name, base_units, units))
value = (value + offset) * scale
if indices is not None:
self[name][indices] = value
else:
self[name] = value
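    # Unit-conversion sketch for set_val/get_val (hypothetical variable
    # 'length' declared with units of 'cm' somewhere in the model):
    #
    #   prob.set_val('length', 2.0, units='m')    # stored internally as 200.0
    #   prob.get_val('length', units='mm')        # returns 2000.0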
def _set_initial_conditions(self):
"""
Set all initial conditions that have been saved in cache after setup.
"""
for name, value in self._initial_condition_cache.items():
self[name] = value
# Clean up cache
self._initial_condition_cache = {}
def run_model(self, case_prefix=None, reset_iter_counts=True):
"""
Run the model by calling the root system's solve_nonlinear.
Parameters
----------
case_prefix : str or None
Prefix to prepend to coordinates when recording.
reset_iter_counts : bool
If True and model has been run previously, reset all iteration counters.
"""
if self._mode is None:
raise RuntimeError(self.msginfo +
": The `setup` method must be called before `run_model`.")
if case_prefix:
if not isinstance(case_prefix, str):
raise TypeError(self.msginfo + ": The 'case_prefix' argument should be a string.")
self._recording_iter.prefix = case_prefix
else:
self._recording_iter.prefix = None
if self.model.iter_count > 0 and reset_iter_counts:
self.driver.iter_count = 0
self.model._reset_iter_counts()
self.final_setup()
self.model._clear_iprint()
self.model.run_solve_nonlinear()
def run_driver(self, case_prefix=None, reset_iter_counts=True):
"""
Run the driver on the model.
Parameters
----------
case_prefix : str or None
Prefix to prepend to coordinates when recording.
reset_iter_counts : bool
If True and model has been run previously, reset all iteration counters.
Returns
-------
boolean
Failure flag; True if failed to converge, False is successful.
"""
if self._mode is None:
raise RuntimeError(self.msginfo +
": The `setup` method must be called before `run_driver`.")
if case_prefix:
if not isinstance(case_prefix, str):
raise TypeError(self.msginfo + ": The 'case_prefix' argument should be a string.")
self._recording_iter.prefix = case_prefix
else:
self._recording_iter.prefix = None
if self.model.iter_count > 0 and reset_iter_counts:
self.driver.iter_count = 0
self.model._reset_iter_counts()
self.final_setup()
self.model._clear_iprint()
return self.driver.run()
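    # Typical top-level workflow (sketch only; 'MyComp', 'x' and 'y' are
    # hypothetical names, not part of this module):
    #
    #   prob = Problem()
    #   prob.model.add_subsystem('comp', MyComp(), promotes=['*'])
    #   prob.setup()
    #   prob['x'] = 3.0          # cached until final_setup (see __setitem__)
    #   prob.run_model()         # or prob.run_driver() when a driver is set
    #   print(prob['y'])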
def compute_jacvec_product(self, of, wrt, mode, seed):
"""
Given a seed and 'of' and 'wrt' variables, compute the total jacobian vector product.
Parameters
----------
of : list of str
Variables whose derivatives will be computed.
wrt : list of str
Derivatives will be computed with respect to these variables.
mode : str
Derivative direction ('fwd' or 'rev').
seed : dict or list
Either a dict keyed by 'wrt' varnames (fwd) or 'of' varnames (rev), containing
dresidual (fwd) or doutput (rev) values, OR a list of dresidual or doutput
values that matches the corresponding 'wrt' (fwd) or 'of' (rev) varname list.
Returns
-------
dict
The total jacobian vector product, keyed by variable name.
"""
if mode == 'fwd':
if len(wrt) != len(seed):
raise RuntimeError(self.msginfo +
": seed and 'wrt' list must be the same length in fwd mode.")
lnames, rnames = of, wrt
lkind, rkind = 'output', 'residual'
else: # rev
if len(of) != len(seed):
raise RuntimeError(self.msginfo +
": seed and 'of' list must be the same length in rev mode.")
lnames, rnames = wrt, of
lkind, rkind = 'residual', 'output'
rvec = self.model._vectors[rkind]['linear']
lvec = self.model._vectors[lkind]['linear']
rvec._data[:] = 0.
# set seed values into dresids (fwd) or doutputs (rev)
try:
seed[rnames[0]]
except (IndexError, TypeError):
for i, name in enumerate(rnames):
rvec[name] = seed[i]
else:
for name in rnames:
rvec[name] = seed[name]
# We apply a -1 here because the derivative of the output is minus the derivative of
# the residual in openmdao.
rvec._data *= -1.
self.model.run_solve_linear(['linear'], mode)
return {n: lvec[n].copy() for n in lnames}
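    # Calling sketch for compute_jacvec_product (hypothetical names, assuming
    # a design variable 'x' of size 3 and a response 'f'):
    #
    #   jvp = prob.compute_jacvec_product(of=['f'], wrt=['x'], mode='fwd',
    #                                     seed={'x': np.ones(3)})
    #   # jvp['f'] holds the product of the total Jacobian df/dx with the seed.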
def _setup_recording(self):
"""
Set up case recording.
"""
self._filtered_vars_to_record = self.driver._get_vars_to_record(self.recording_options)
self._rec_mgr.startup(self)
def add_recorder(self, recorder):
"""
Add a recorder to the problem.
Parameters
----------
recorder : CaseRecorder
A recorder instance.
"""
self._rec_mgr.append(recorder)
def cleanup(self):
"""
Clean up resources prior to exit.
"""
# shut down all recorders
self._rec_mgr.shutdown()
# clean up driver and model resources
self.driver.cleanup()
for system in self.model.system_iter(include_self=True, recurse=True):
system.cleanup()
def record_iteration(self, case_name):
"""
Record the variables at the Problem level.
Parameters
----------
case_name : str
Name used to identify this Problem case.
"""
record_iteration(self, self, case_name)
def _get_recorder_metadata(self, case_name):
"""
Return metadata from the latest iteration for use in the recorder.
Parameters
----------
case_name : str
Name of current case.
Returns
-------
dict
Metadata dictionary for the recorder.
"""
return create_local_meta(case_name)
def setup(self, check=False, logger=None, mode='auto', force_alloc_complex=False,
distributed_vector_class=PETScVector, local_vector_class=DefaultVector,
derivatives=True):
"""
Set up the model hierarchy.
When `setup` is called, the model hierarchy is assembled, the processors are allocated
(for MPI), and variables and connections are all assigned. This method traverses down
the model hierarchy to call `setup` on each subsystem, and then traverses up the model
hierarchy to call `configure` on each subsystem.
Parameters
----------
check : boolean
whether to run config check after setup is complete.
logger : object
Object for logging config checks if check is True.
mode : string
Derivatives calculation mode, 'fwd' for forward, and 'rev' for
reverse (adjoint). Default is 'auto', which will pick 'fwd' or 'rev' based on
the direction resulting in the smallest number of linear solves required to
compute derivatives.
force_alloc_complex : bool
Force allocation of imaginary part in nonlinear vectors. OpenMDAO can generally
detect when you need to do this, but in some cases (e.g., complex step is used
after a reconfiguration) you may need to set this to True.
distributed_vector_class : type
Reference to the <Vector> class or factory function used to instantiate vectors
and associated transfers involved in interprocess communication.
local_vector_class : type
Reference to the <Vector> class or factory function used to instantiate vectors
and associated transfers involved in intraprocess communication.
derivatives : bool
If True, perform any memory allocations necessary for derivative computation.
Returns
-------
self : <Problem>
this enables the user to instantiate and setup in one line.
"""
model = self.model
model.force_alloc_complex = force_alloc_complex
comm = self.comm
# PETScVector is required for MPI
if comm.size > 1:
if PETScVector is None:
raise ValueError(self.msginfo +
": Attempting to run in parallel under MPI but PETScVector "
"could not be imported.")
elif distributed_vector_class is not PETScVector:
raise ValueError("%s: The `distributed_vector_class` argument must be "
"`PETScVector` when running in parallel under MPI but '%s' was "
"specified." % (self.msginfo, distributed_vector_class.__name__))
if mode not in ['fwd', 'rev', 'auto']:
msg = "%s: Unsupported mode: '%s'. Use either 'fwd' or 'rev'." % (self.msginfo, mode)
raise ValueError(msg)
self._mode = self._orig_mode = mode
# this will be shared by all Solvers in the model
model._solver_info = SolverInfo()
self._recording_iter = _RecIteration()
model._recording_iter = self._recording_iter
model_comm = self.driver._setup_comm(comm)
model._setup(model_comm, 'full', mode, distributed_vector_class, local_vector_class,
derivatives, self.options)
# Cache all args for final setup.
self._check = check
self._logger = logger
self._force_alloc_complex = force_alloc_complex
self._setup_status = 1
return self
def final_setup(self):
"""
Perform final setup phase on problem in preparation for run.
This is the second phase of setup, and is done automatically at the start of `run_driver`
and `run_model`. At the beginning of final_setup, we have a model hierarchy with defined
variables, solvers, case_recorders, and derivative settings. During this phase, the vectors
are created and populated, the drivers and solvers are initialized, and the recorders are
started, and the rest of the framework is prepared for execution.
"""
driver = self.driver
response_size, desvar_size = driver._update_voi_meta(self.model)
# update mode if it's been set to 'auto'
if self._orig_mode == 'auto':
mode = 'rev' if response_size < desvar_size else 'fwd'
self._mode = mode
else:
mode = self._orig_mode
if self._setup_status < 2:
self.model._final_setup(self.comm, 'full',
force_alloc_complex=self._force_alloc_complex)
driver._setup_driver(self)
info = driver._coloring_info
coloring = info['coloring']
if coloring is None and info['static'] is not None:
coloring = driver._get_static_coloring()
if coloring and coloring_mod._use_total_sparsity:
# if we're using simultaneous total derivatives then our effective size is less
# than the full size
if coloring._fwd and coloring._rev:
pass # we're doing both!
elif mode == 'fwd' and coloring._fwd:
desvar_size = coloring.total_solves()
elif mode == 'rev' and coloring._rev:
response_size = coloring.total_solves()
if ((mode == 'fwd' and desvar_size > response_size) or
(mode == 'rev' and response_size > desvar_size)):
simple_warning("Inefficient choice of derivative mode. You chose '%s' for a "
"problem with %d design variables and %d response variables "
"(objectives and nonlinear constraints)." %
(mode, desvar_size, response_size), RuntimeWarning)
# we only want to set up recording once, after problem setup
if self._setup_status == 1:
driver._setup_recording()
self._setup_recording()
record_viewer_data(self)
# Now that setup has been called, we can set the iprints.
for items in self._solver_print_cache:
self.set_solver_print(level=items[0], depth=items[1], type_=items[2])
self._solver_print_cache = []
if self._setup_status < 2:
self._setup_status = 2
self._set_initial_conditions()
if self._check:
if self._check is True:
checks = _default_checks
else:
checks = self._check
if self.comm.rank == 0:
logger = self._logger
else:
logger = TestLogger()
self.check_config(logger, checks=checks)
def check_partials(self, out_stream=_DEFAULT_OUT_STREAM, includes=None, excludes=None,
compact_print=False, abs_err_tol=1e-6, rel_err_tol=1e-6,
method='fd', step=None, form='forward', step_calc='abs',
force_dense=True, show_only_incorrect=False):
"""
Check partial derivatives comprehensively for all components in your model.
Parameters
----------
out_stream : file-like object
Where to send human readable output. By default it goes to stdout.
Set to None to suppress.
includes : None or list_like
List of glob patterns for pathnames to include in the check. Default is None, which
includes all components in the model.
excludes : None or list_like
List of glob patterns for pathnames to exclude from the check. Default is None, which
excludes nothing.
compact_print : bool
Set to True to just print the essentials, one line per unknown-param pair.
abs_err_tol : float
            Threshold value for absolute error. Errors above this value will have a '*' displayed
next to them in output, making them easy to search for. Default is 1.0E-6.
rel_err_tol : float
            Threshold value for relative error. Errors above this value will have a '*' displayed
next to them in output, making them easy to search for. Note at times there may be a
significant relative error due to a minor absolute error. Default is 1.0E-6.
method : str
Method, 'fd' for finite difference or 'cs' for complex step. Default is 'fd'.
step : float
Step size for approximation. Default is None, which means 1e-6 for 'fd' and 1e-40 for
'cs'.
form : string
Form for finite difference, can be 'forward', 'backward', or 'central'. Default
'forward'.
step_calc : string
            Step type for finite difference, can be 'abs' for absolute, or 'rel' for relative.
Default is 'abs'.
force_dense : bool
If True, analytic derivatives will be coerced into arrays. Default is True.
show_only_incorrect : bool, optional
Set to True if output should print only the subjacs found to be incorrect.
Returns
-------
dict of dicts of dicts
First key:
is the component name;
Second key:
is the (output, input) tuple of strings;
Third key:
is one of ['rel error', 'abs error', 'magnitude', 'J_fd', 'J_fwd', 'J_rev'];
For 'rel error', 'abs error', 'magnitude' the value is: A tuple containing norms for
forward - fd, adjoint - fd, forward - adjoint.
For 'J_fd', 'J_fwd', 'J_rev' the value is: A numpy array representing the computed
Jacobian for the three different methods of computation.
"""
if self._setup_status < 2:
self.final_setup()
model = self.model
if not model._use_derivatives:
raise RuntimeError(self.msginfo +
": Can't check partials. Derivative support has been turned off.")
# TODO: Once we're tracking iteration counts, run the model if it has not been run before.
includes = [includes] if isinstance(includes, str) else includes
excludes = [excludes] if isinstance(excludes, str) else excludes
comps = []
for comp in model.system_iter(typ=Component, include_self=True):
if isinstance(comp, IndepVarComp):
continue
name = comp.pathname
# Process includes
if includes is not None:
for pattern in includes:
if fnmatchcase(name, pattern):
break
else:
continue
# Process excludes
if excludes is not None:
match = False
for pattern in excludes:
if fnmatchcase(name, pattern):
match = True
break
if match:
continue
comps.append(comp)
self.set_solver_print(level=0)
# This is a defaultdict of (defaultdict of dicts).
partials_data = defaultdict(lambda: defaultdict(dict))
# Caching current point to restore after setups.
input_cache = model._inputs._clone()
output_cache = model._outputs._clone()
# Keep track of derivative keys that are declared dependent so that we don't print them
# unless they are in error.
indep_key = {}
# Analytic Jacobians
for mode in ('fwd', 'rev'):
model._inputs.set_vec(input_cache)
model._outputs.set_vec(output_cache)
# Make sure we're in a valid state
model.run_apply_nonlinear()
jac_key = 'J_' + mode
for comp in comps:
# Only really need to linearize once.
if mode == 'fwd':
comp.run_linearize()
explicit = isinstance(comp, ExplicitComponent)
matrix_free = comp.matrix_free
c_name = comp.pathname
indep_key[c_name] = set()
with comp._unscaled_context():
of_list, wrt_list = \
comp._get_potential_partials_lists(include_wrt_outputs=not explicit)
# Matrix-free components need to calculate their Jacobian by matrix-vector
# product.
if matrix_free:
dstate = comp._vectors['output']['linear']
if mode == 'fwd':
dinputs = comp._vectors['input']['linear']
doutputs = comp._vectors['residual']['linear']
in_list = wrt_list
out_list = of_list
else:
dinputs = comp._vectors['residual']['linear']
doutputs = comp._vectors['input']['linear']
in_list = of_list
out_list = wrt_list
for inp in in_list:
inp_abs = rel_name2abs_name(comp, inp)
try:
flat_view = dinputs._views_flat[inp_abs]
except KeyError:
# Implicit state
flat_view = dstate._views_flat[inp_abs]
n_in = len(flat_view)
for idx in range(n_in):
dinputs.set_const(0.0)
dstate.set_const(0.0)
# Dictionary access returns a scalar for 1d input, and we
# need a vector for clean code, so use _views_flat.
flat_view[idx] = 1.0
# Matrix Vector Product
comp._apply_linear(None, ['linear'], _contains_all, mode)
for out in out_list:
out_abs = rel_name2abs_name(comp, out)
try:
derivs = doutputs._views_flat[out_abs]
except KeyError:
# Implicit state
derivs = dstate._views_flat[out_abs]
if mode == 'fwd':
key = out, inp
deriv = partials_data[c_name][key]
# Allocate first time
if jac_key not in deriv:
shape = (len(derivs), n_in)
deriv[jac_key] = np.zeros(shape)
deriv[jac_key][:, idx] = derivs
else:
key = inp, out
deriv = partials_data[c_name][key]
# Allocate first time
if jac_key not in deriv:
shape = (n_in, len(derivs))
deriv[jac_key] = np.zeros(shape)
deriv[jac_key][idx, :] = derivs
# These components already have a Jacobian with calculated derivatives.
else:
subjacs = comp._jacobian._subjacs_info
for rel_key in product(of_list, wrt_list):
abs_key = rel_key2abs_key(comp, rel_key)
of, wrt = abs_key
# No need to calculate partials; they are already stored
try:
deriv_value = subjacs[abs_key]['value']
rows = subjacs[abs_key]['rows']
except KeyError:
deriv_value = rows = None
# Testing for pairs that are not dependent so that we suppress printing
# them unless the fd is non zero. Note: subjacs_info is empty for
# undeclared partials, which is the default behavior now.
try:
if not subjacs[abs_key]['dependent']:
indep_key[c_name].add(rel_key)
except KeyError:
indep_key[c_name].add(rel_key)
if deriv_value is None:
# Missing derivatives are assumed 0.
in_size = comp._var_abs2meta[wrt]['size']
out_size = comp._var_abs2meta[of]['size']
deriv_value = np.zeros((out_size, in_size))
if force_dense:
if rows is not None:
try:
in_size = comp._var_abs2meta[wrt]['size']
except KeyError:
in_size = comp._var_abs2meta[wrt]['size']
out_size = comp._var_abs2meta[of]['size']
tmp_value = np.zeros((out_size, in_size))
# if a scalar value is provided (in declare_partials),
# expand to the correct size array value for zipping
if deriv_value.size == 1:
deriv_value *= np.ones(rows.size)
for i, j, val in zip(rows, subjacs[abs_key]['cols'],
deriv_value):
tmp_value[i, j] += val
deriv_value = tmp_value
elif sparse.issparse(deriv_value):
deriv_value = deriv_value.todense()
partials_data[c_name][rel_key][jac_key] = deriv_value.copy()
model._inputs.set_vec(input_cache)
model._outputs.set_vec(output_cache)
model.run_apply_nonlinear()
# Finite Difference to calculate Jacobian
jac_key = 'J_fd'
alloc_complex = model._outputs._alloc_complex
all_fd_options = {}
comps_could_not_cs = set()
requested_method = method
for comp in comps:
c_name = comp.pathname
all_fd_options[c_name] = {}
explicit = isinstance(comp, ExplicitComponent)
approximations = {'fd': FiniteDifference(),
'cs': ComplexStep()}
of, wrt = comp._get_potential_partials_lists(include_wrt_outputs=not explicit)
# Load up approximation objects with the requested settings.
local_opts = comp._get_check_partial_options()
for rel_key in product(of, wrt):
abs_key = rel_key2abs_key(comp, rel_key)
local_wrt = rel_key[1]
# Determine if fd or cs.
method = requested_method
if local_wrt in local_opts:
local_method = local_opts[local_wrt]['method']
if local_method:
method = local_method
# We can't use CS if we haven't allocated a complex vector, so we fall back on fd.
if method == 'cs' and not alloc_complex:
comps_could_not_cs.add(c_name)
method = 'fd'
fd_options = {'order': None,
'method': method}
if method == 'cs':
defaults = ComplexStep.DEFAULT_OPTIONS
fd_options['form'] = None
fd_options['step_calc'] = None
elif method == 'fd':
defaults = FiniteDifference.DEFAULT_OPTIONS
fd_options['form'] = form
fd_options['step_calc'] = step_calc
if step and requested_method == method:
fd_options['step'] = step
else:
fd_options['step'] = defaults['step']
# Precedence: component options > global options > defaults
if local_wrt in local_opts:
for name in ['form', 'step', 'step_calc', 'directional']:
value = local_opts[local_wrt][name]
if value is not None:
fd_options[name] = value
all_fd_options[c_name][local_wrt] = fd_options
approximations[fd_options['method']].add_approximation(abs_key, self.model,
fd_options)
approx_jac = {}
for approximation in approximations.values():
# Perform the FD here.
approximation.compute_approximations(comp, jac=approx_jac)
for abs_key, partial in approx_jac.items():
rel_key = abs_key2rel_key(comp, abs_key)
partials_data[c_name][rel_key][jac_key] = partial
# If this is a directional derivative, convert the analytic to a directional one.
wrt = rel_key[1]
if wrt in local_opts and local_opts[wrt]['directional']:
deriv = partials_data[c_name][rel_key]
for key in ['J_fwd', 'J_rev']:
deriv[key] = np.atleast_2d(np.sum(deriv[key], axis=1)).T
# Conversion of defaultdict to dicts
partials_data = {comp_name: dict(outer) for comp_name, outer in partials_data.items()}
if out_stream == _DEFAULT_OUT_STREAM:
out_stream = sys.stdout
if len(comps_could_not_cs) > 0:
msg = "The following components requested complex step, but force_alloc_complex " + \
"has not been set to True, so finite difference was used: "
msg += str(list(comps_could_not_cs))
msg += "\nTo enable complex step, specify 'force_alloc_complex=True' when calling " + \
"setup on the problem, e.g. 'problem.setup(force_alloc_complex=True)'"
simple_warning(msg)
_assemble_derivative_data(partials_data, rel_err_tol, abs_err_tol, out_stream,
compact_print, comps, all_fd_options, indep_key=indep_key,
all_comps_provide_jacs=not self.model.matrix_free,
show_only_incorrect=show_only_incorrect)
return partials_data
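    # Illustrative sketch (not part of the original source): one way the check_partials
    # API documented above could be exercised once a problem has been set up and run.
    # The component name 'comp' and variables 'y'/'x' are placeholders, not names from
    # this codebase.
    #
    #     prob.setup(force_alloc_complex=True)
    #     prob.run_model()
    #     data = prob.check_partials(compact_print=True, method='cs')
    #     # data['comp'][('y', 'x')]['abs error'] then holds the (fwd-fd, rev-fd, fwd-rev) norms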
def check_totals(self, of=None, wrt=None, out_stream=_DEFAULT_OUT_STREAM, compact_print=False,
driver_scaling=False, abs_err_tol=1e-6, rel_err_tol=1e-6,
method='fd', step=None, form=None, step_calc='abs'):
"""
Check total derivatives for the model vs. finite difference.
Parameters
----------
of : list of variable name strings or None
Variables whose derivatives will be computed. Default is None, which
uses the driver's objectives and constraints.
wrt : list of variable name strings or None
Variables with respect to which the derivatives will be computed.
Default is None, which uses the driver's desvars.
out_stream : file-like object
Where to send human readable output. By default it goes to stdout.
Set to None to suppress.
compact_print : bool
Set to True to just print the essentials, one line per unknown-param pair.
driver_scaling : bool
When True, return derivatives that are scaled according to either the adder and scaler
or the ref and ref0 values that were specified when add_design_var, add_objective, and
add_constraint were called on the model. Default is False, which is unscaled.
abs_err_tol : float
            Threshold value for absolute error. Errors above this value will have a '*' displayed
next to them in output, making them easy to search for. Default is 1.0E-6.
rel_err_tol : float
            Threshold value for relative error. Errors above this value will have a '*' displayed
next to them in output, making them easy to search for. Note at times there may be a
significant relative error due to a minor absolute error. Default is 1.0E-6.
method : str
            Method, 'fd' for finite difference or 'cs' for complex step. Default is 'fd'.
step : float
Step size for approximation. Default is None, which means 1e-6 for 'fd' and 1e-40 for
'cs'.
form : string
Form for finite difference, can be 'forward', 'backward', or 'central'. Default
None, which defaults to 'forward' for FD.
step_calc : string
            Step type for finite difference, can be 'abs' for absolute or 'rel' for relative.
Default is 'abs'.
Returns
-------
Dict of Dicts of Tuples of Floats
First key:
is the (output, input) tuple of strings;
Second key:
is one of ['rel error', 'abs error', 'magnitude', 'fdstep'];
For 'rel error', 'abs error', 'magnitude' the value is: A tuple containing norms for
forward - fd, adjoint - fd, forward - adjoint.
"""
if self._setup_status < 2:
raise RuntimeError(self.msginfo + ": run_model must be called before total "
"derivatives can be checked.")
model = self.model
if method == 'cs' and not model._outputs._alloc_complex:
msg = "\n" + self.msginfo + ": To enable complex step, specify "\
"'force_alloc_complex=True' when calling " + \
"setup on the problem, e.g. 'problem.setup(force_alloc_complex=True)'"
raise RuntimeError(msg)
# TODO: Once we're tracking iteration counts, run the model if it has not been run before.
# Calculate Total Derivatives
total_info = _TotalJacInfo(self, of, wrt, False, return_format='flat_dict',
driver_scaling=driver_scaling)
Jcalc = total_info.compute_totals()
if step is None:
if method == 'cs':
step = ComplexStep.DEFAULT_OPTIONS['step']
else:
step = FiniteDifference.DEFAULT_OPTIONS['step']
# Approximate FD
fd_args = {
'step': step,
'form': form,
'step_calc': step_calc,
}
approx = model._owns_approx_jac
old_jac = model._jacobian
old_subjacs = model._subjacs_info.copy()
model.approx_totals(method=method, step=step, form=form,
step_calc=step_calc if method == 'fd' else None)
total_info = _TotalJacInfo(self, of, wrt, False, return_format='flat_dict', approx=True,
driver_scaling=driver_scaling)
Jfd = total_info.compute_totals_approx(initialize=True)
# reset the _owns_approx_jac flag after approximation is complete.
if not approx:
model._jacobian = old_jac
model._owns_approx_jac = False
model._subjacs_info = old_subjacs
# Assemble and Return all metrics.
data = {}
data[''] = {}
for key, val in Jcalc.items():
data[''][key] = {}
data[''][key]['J_fwd'] = val
data[''][key]['J_fd'] = Jfd[key]
fd_args['method'] = 'fd'
if out_stream == _DEFAULT_OUT_STREAM:
out_stream = sys.stdout
_assemble_derivative_data(data, rel_err_tol, abs_err_tol, out_stream, compact_print,
[model], {'': fd_args}, totals=True)
return data['']
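    # Illustrative sketch (not part of the original source): calling the check_totals API
    # documented above after the model has been run. The variable names 'obj' and 'x' are
    # assumptions for demonstration only.
    #
    #     prob.run_model()
    #     totals = prob.check_totals(of=['obj'], wrt=['x'], compact_print=True)
    #     # totals[('obj', 'x')]['J_fwd'] and totals[('obj', 'x')]['J_fd'] hold the two Jacobians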
def compute_totals(self, of=None, wrt=None, return_format='flat_dict', debug_print=False,
driver_scaling=False):
"""
Compute derivatives of desired quantities with respect to desired inputs.
Parameters
----------
of : list of variable name strings or None
Variables whose derivatives will be computed. Default is None, which
uses the driver's objectives and constraints.
wrt : list of variable name strings or None
Variables with respect to which the derivatives will be computed.
Default is None, which uses the driver's desvars.
return_format : string
Format to return the derivatives. Can be 'dict', 'flat_dict', or 'array'.
Default is a 'flat_dict', which returns them in a dictionary whose keys are
tuples of form (of, wrt).
debug_print : bool
Set to True to print out some debug information during linear solve.
driver_scaling : bool
When True, return derivatives that are scaled according to either the adder and scaler
or the ref and ref0 values that were specified when add_design_var, add_objective, and
add_constraint were called on the model. Default is False, which is unscaled.
Returns
-------
derivs : object
Derivatives in form requested by 'return_format'.
"""
if self._setup_status < 2:
self.final_setup()
if self.model._owns_approx_jac:
total_info = _TotalJacInfo(self, of, wrt, False, return_format,
approx=True, driver_scaling=driver_scaling)
return total_info.compute_totals_approx(initialize=True)
else:
total_info = _TotalJacInfo(self, of, wrt, False, return_format,
debug_print=debug_print, driver_scaling=driver_scaling)
return total_info.compute_totals()
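    # Illustrative sketch (not part of the original source): computing total derivatives
    # with the API above. The variable names are placeholders.
    #
    #     derivs = prob.compute_totals(of=['obj'], wrt=['x'], return_format='flat_dict')
    #     # derivs[('obj', 'x')] is the Jacobian block of 'obj' with respect to 'x'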
def set_solver_print(self, level=2, depth=1e99, type_='all'):
"""
Control printing for solvers and subsolvers in the model.
Parameters
----------
level : int
iprint level. Set to 2 to print residuals each iteration; set to 1
to print just the iteration totals; set to 0 to disable all printing
except for failures, and set to -1 to disable all printing including failures.
depth : int
How deep to recurse. For example, you can set this to 0 if you only want
to print the top level linear and nonlinear solver messages. Default
prints everything.
type_ : str
Type of solver to set: 'LN' for linear, 'NL' for nonlinear, or 'all' for all.
"""
if (level, depth, type_) not in self._solver_print_cache:
self._solver_print_cache.append((level, depth, type_))
self.model._set_solver_print(level=level, depth=depth, type_=type_)
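    # Illustrative sketch (not part of the original source): typical uses of the
    # set_solver_print levels described in the docstring above.
    #
    #     prob.set_solver_print(level=0)                      # silence everything but failures
    #     prob.set_solver_print(level=2, depth=0, type_='NL')  # verbose output for top-level nonlinear solvers only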
def list_problem_vars(self,
show_promoted_name=True,
print_arrays=False,
desvar_opts=[],
cons_opts=[],
objs_opts=[],
):
"""
Print all design variables and responses (objectives and constraints).
Parameters
----------
show_promoted_name : bool
If True, then show the promoted names of the variables.
print_arrays : bool, optional
When False, in the columnar display, just display norm of any ndarrays with size > 1.
The norm is surrounded by vertical bars to indicate that it is a norm.
When True, also display full values of the ndarray below the row. Format is affected
by the values set with numpy.set_printoptions
Default is False.
desvar_opts : list of str
List of optional columns to be displayed in the desvars table.
Allowed values are:
['lower', 'upper', 'ref', 'ref0', 'indices', 'adder', 'scaler', 'parallel_deriv_color',
'vectorize_derivs', 'cache_linear_solution']
cons_opts : list of str
List of optional columns to be displayed in the cons table.
Allowed values are:
['lower', 'upper', 'equals', 'ref', 'ref0', 'indices', 'index', 'adder', 'scaler',
'linear', 'parallel_deriv_color', 'vectorize_derivs',
'cache_linear_solution']
objs_opts : list of str
List of optional columns to be displayed in the objs table.
Allowed values are:
['ref', 'ref0', 'indices', 'adder', 'scaler',
'parallel_deriv_color', 'vectorize_derivs', 'cache_linear_solution']
"""
default_col_names = ['name', 'value', 'size']
# Design vars
desvars = self.model.get_design_vars()
header = "Design Variables"
col_names = default_col_names + desvar_opts
self._write_var_info_table(header, col_names, desvars,
show_promoted_name=show_promoted_name,
print_arrays=print_arrays,
col_spacing=2)
# Constraints
cons = self.model.get_constraints()
header = "Constraints"
col_names = default_col_names + cons_opts
self._write_var_info_table(header, col_names, cons, show_promoted_name=show_promoted_name,
print_arrays=print_arrays,
col_spacing=2)
objs = self.model.get_objectives()
header = "Objectives"
col_names = default_col_names + objs_opts
self._write_var_info_table(header, col_names, objs, show_promoted_name=show_promoted_name,
print_arrays=print_arrays,
col_spacing=2)
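    # Illustrative sketch (not part of the original source): printing the problem's
    # optimization variables with a few of the optional columns listed in the docstring above.
    #
    #     prob.list_problem_vars(desvar_opts=['lower', 'upper'],
    #                            cons_opts=['equals'],
    #                            print_arrays=True)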
def _write_var_info_table(self, header, col_names, vars, print_arrays=False,
show_promoted_name=True, col_spacing=1):
"""
Write a table of information for the data in vars.
Parameters
----------
header : str
The header line for the table.
col_names : list of str
List of column labels.
vars : OrderedDict
Keys are variable names and values are metadata for the variables.
print_arrays : bool, optional
When False, in the columnar display, just display norm of any ndarrays with size > 1.
The norm is surrounded by vertical bars to indicate that it is a norm.
When True, also display full values of the ndarray below the row. Format is affected
by the values set with numpy.set_printoptions
Default is False.
show_promoted_name : bool
If True, then show the promoted names of the variables.
col_spacing : int
Number of spaces between columns in the table.
"""
abs2prom = self.model._var_abs2prom
# Get the values for all the elements in the tables
rows = []
for name, meta in vars.items():
row = {}
for col_name in col_names:
if col_name == 'name':
if show_promoted_name:
row[col_name] = name
else:
if name in abs2prom['input']:
row[col_name] = abs2prom['input'][name]
else:
row[col_name] = abs2prom['output'][name]
elif col_name == 'value':
row[col_name] = self[name]
else:
row[col_name] = meta[col_name]
rows.append(row)
col_space = ' ' * col_spacing
print("-" * len(header))
print(header)
print("-" * len(header))
# loop through the rows finding the max widths
max_width = {}
for col_name in col_names:
max_width[col_name] = len(col_name)
for row in rows:
for col_name in col_names:
cell = row[col_name]
if isinstance(cell, np.ndarray) and cell.size > 1:
out = '|{}|'.format(str(np.linalg.norm(cell)))
else:
out = str(cell)
max_width[col_name] = max(len(out), max_width[col_name])
# print col headers
header_div = ''
header_col_names = ''
for col_name in col_names:
header_div += '-' * max_width[col_name] + col_space
header_col_names += pad_name(col_name, max_width[col_name], quotes=False) + col_space
print(header_col_names)
print(header_div[:-1])
# print rows with var info
for row in rows:
have_array_values = [] # keep track of which values are arrays
row_string = ''
for col_name in col_names:
cell = row[col_name]
if isinstance(cell, np.ndarray) and cell.size > 1:
out = '|{}|'.format(str(np.linalg.norm(cell)))
have_array_values.append(col_name)
else:
out = str(cell)
row_string += pad_name(out, max_width[col_name], quotes=False) + col_space
print(row_string)
if print_arrays:
left_column_width = max_width['name']
for col_name in have_array_values:
print("{}{}:".format((left_column_width + col_spacing) * ' ', col_name))
cell = row[col_name]
out_str = pprint.pformat(cell)
indented_lines = [(left_column_width + col_spacing) * ' ' +
s for s in out_str.splitlines()]
print('\n'.join(indented_lines) + '\n')
print()
def load_case(self, case):
"""
Pull all input and output variables from a case into the model.
Parameters
----------
case : Case object
A Case from a CaseRecorder file.
"""
inputs = case.inputs if case.inputs is not None else None
if inputs:
for name in inputs.absolute_names():
if name not in self.model._var_abs_names['input']:
raise KeyError("{}: Input variable, '{}', recorded in the case is not "
"found in the model".format(self.msginfo, name))
self[name] = inputs[name]
outputs = case.outputs if case.outputs is not None else None
if outputs:
for name in outputs.absolute_names():
if name not in self.model._var_abs_names['output']:
raise KeyError("{}: Output variable, '{}', recorded in the case is not "
"found in the model".format(self.msginfo, name))
self[name] = outputs[name]
return
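    # Illustrative sketch (not part of the original source): pulling a recorded case back
    # into the model. The CaseReader class and the recording filename 'cases.sql' are
    # assumptions about the surrounding library, shown here only for context.
    #
    #     cr = om.CaseReader('cases.sql')
    #     case = cr.get_case(cr.list_cases()[-1])
    #     prob.load_case(case)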
def check_config(self, logger=None, checks=None, out_file='openmdao_checks.out'):
"""
Perform optional error checks on a Problem.
Parameters
----------
logger : object
Logging object.
checks : list of str or None
List of specific checks to be performed.
out_file : str or None
If not None, output will be written to this file in addition to stdout.
"""
if logger is None:
logger = get_logger('check_config', out_file=out_file, use_format=True)
if checks is None:
checks = sorted(_default_checks)
elif checks == 'all':
checks = sorted(_all_checks)
for c in checks:
if c not in _all_checks:
print("WARNING: '%s' is not a recognized check. Available checks are: %s" %
(c, sorted(_all_checks)))
continue
logger.info('checking %s' % c)
_all_checks[c](self, logger)
def _assemble_derivative_data(derivative_data, rel_error_tol, abs_error_tol, out_stream,
compact_print, system_list, global_options, totals=False,
indep_key=None, all_comps_provide_jacs=False,
show_only_incorrect=False):
"""
Compute the relative and absolute errors in the given derivatives and print to the out_stream.
Parameters
----------
derivative_data : dict
Dictionary containing derivative information keyed by system name.
rel_error_tol : float
Relative error tolerance.
abs_error_tol : float
Absolute error tolerance.
out_stream : file-like object
Where to send human readable output.
Set to None to suppress.
compact_print : bool
If results should be printed verbosely or in a table.
system_list : Iterable
        The systems (in the proper order) that were checked.
global_options : dict
Dictionary containing the options for the approximation.
totals : bool
Set to True if we are doing check_totals to skip a bunch of stuff.
indep_key : dict of sets, optional
Keyed by component name, contains the of/wrt keys that are declared not dependent.
all_comps_provide_jacs : bool, optional
Set to True if all components provide a Jacobian (are not matrix-free).
show_only_incorrect : bool, optional
Set to True if output should print only the subjacs found to be incorrect.
"""
nan = float('nan')
suppress_output = out_stream is None
if compact_print:
if totals:
deriv_line = "{0} wrt {1} | {2:.4e} | {3:.4e} | {4:.4e} | {5:.4e}"
else:
if not all_comps_provide_jacs:
deriv_line = "{0} wrt {1} | {2:.4e} | {3} | {4:.4e} | {5:.4e} | {6} | {7}" \
" | {8:.4e} | {9} | {10}"
else:
deriv_line = "{0} wrt {1} | {2:.4e} | {3:.4e} | {4:.4e} | {5:.4e}"
# Keep track of the worst subjac in terms of relative error for fwd and rev
if not suppress_output and compact_print and not totals:
worst_subjac_rel_err = 0.0
worst_subjac = None
if not suppress_output and not totals and show_only_incorrect:
out_stream.write('\n** Only writing information about components with '
'incorrect Jacobians **\n\n')
for system in system_list:
sys_name = system.pathname
sys_class_name = type(system).__name__
# Match header to appropriate type.
if isinstance(system, Component):
sys_type = 'Component'
elif isinstance(system, Group):
sys_type = 'Group'
else:
sys_type = type(system).__name__
if sys_name not in derivative_data:
msg = "No derivative data found for %s '%s'." % (sys_type, sys_name)
simple_warning(msg)
continue
derivatives = derivative_data[sys_name]
if totals:
sys_name = 'Full Model'
# Sorted keys ensures deterministic ordering
sorted_keys = sorted(derivatives.keys())
if not suppress_output:
# Need to capture the output of a component's derivative
# info so that it can be used if that component is the
# worst subjac. That info is printed at the bottom of all the output
out_buffer = StringIO()
num_bad_jacs = 0 # Keep track of number of bad derivative values for each component
if out_stream:
header_str = '-' * (len(sys_name) + len(sys_type) + len(sys_class_name) + 5) + '\n'
out_buffer.write(header_str)
out_buffer.write("{}: {} '{}'".format(sys_type, sys_class_name, sys_name) + '\n')
out_buffer.write(header_str)
if compact_print:
# Error Header
if totals:
header = "{0} wrt {1} | {2} | {3} | {4} | {5}"\
.format(
pad_name('<output>', 30, quotes=True),
pad_name('<variable>', 30, quotes=True),
pad_name('calc mag.'),
pad_name('check mag.'),
pad_name('a(cal-chk)'),
pad_name('r(cal-chk)'),
)
else:
max_width_of = len("'<output>'")
max_width_wrt = len("'<variable>'")
for of, wrt in sorted_keys:
max_width_of = max(max_width_of, len(of) + 2) # 2 to include quotes
max_width_wrt = max(max_width_wrt, len(wrt) + 2)
if not all_comps_provide_jacs:
header = \
"{0} wrt {1} | {2} | {3} | {4} | {5} | {6} | {7} | {8} | {9} | {10}" \
.format(
pad_name('<output>', max_width_of, quotes=True),
pad_name('<variable>', max_width_wrt, quotes=True),
pad_name('fwd mag.'),
pad_name('rev mag.'),
pad_name('check mag.'),
pad_name('a(fwd-chk)'),
pad_name('a(rev-chk)'),
pad_name('a(fwd-rev)'),
pad_name('r(fwd-chk)'),
pad_name('r(rev-chk)'),
pad_name('r(fwd-rev)')
)
else:
header = "{0} wrt {1} | {2} | {3} | {4} | {5}"\
.format(
pad_name('<output>', max_width_of, quotes=True),
pad_name('<variable>', max_width_wrt, quotes=True),
pad_name('fwd mag.'),
pad_name('check mag.'),
pad_name('a(fwd-chk)'),
pad_name('r(fwd-chk)'),
)
if out_stream:
out_buffer.write(header + '\n')
out_buffer.write('-' * len(header) + '\n' + '\n')
for of, wrt in sorted_keys:
if totals:
fd_opts = global_options['']
else:
fd_opts = global_options[sys_name][wrt]
derivative_info = derivatives[of, wrt]
forward = derivative_info['J_fwd']
if not totals:
reverse = derivative_info.get('J_rev')
fd = derivative_info['J_fd']
fwd_error = np.linalg.norm(forward - fd)
if totals:
rev_error = fwd_rev_error = None
else:
rev_error = np.linalg.norm(reverse - fd)
fwd_rev_error = np.linalg.norm(forward - reverse)
fwd_norm = np.linalg.norm(forward)
if totals:
rev_norm = None
else:
rev_norm = np.linalg.norm(reverse)
fd_norm = np.linalg.norm(fd)
derivative_info['abs error'] = abs_err = ErrorTuple(fwd_error, rev_error, fwd_rev_error)
derivative_info['magnitude'] = magnitude = MagnitudeTuple(fwd_norm, rev_norm, fd_norm)
if fd_norm == 0.:
if fwd_norm == 0.:
derivative_info['rel error'] = rel_err = ErrorTuple(nan, nan, nan)
else:
# If fd_norm is zero, let's use fwd_norm as the divisor for relative
# check. That way we don't accidentally squelch a legitimate problem.
if totals:
derivative_info['rel error'] = rel_err = ErrorTuple(fwd_error / fwd_norm,
nan,
nan)
else:
rel_err = ErrorTuple(fwd_error / fwd_norm,
rev_error / fwd_norm,
fwd_rev_error / fwd_norm)
derivative_info['rel error'] = rel_err
else:
if totals:
derivative_info['rel error'] = rel_err = ErrorTuple(fwd_error / fd_norm,
nan,
nan)
else:
derivative_info['rel error'] = rel_err = ErrorTuple(fwd_error / fd_norm,
rev_error / fd_norm,
fwd_rev_error / fd_norm)
# Skip printing the dependent keys if the derivatives are fine.
if not compact_print and indep_key is not None:
rel_key = (of, wrt)
if rel_key in indep_key[sys_name] and fd_norm < abs_error_tol:
del derivative_data[sys_name][rel_key]
continue
if not suppress_output:
directional = fd_opts.get('directional')
if compact_print:
if totals:
if out_stream:
out_stream.write(deriv_line.format(
pad_name(of, 30, quotes=True),
pad_name(wrt, 30, quotes=True),
magnitude.forward,
magnitude.fd,
abs_err.forward,
rel_err.forward,
) + '\n')
else:
error_string = ''
for error in abs_err:
if not np.isnan(error) and error >= abs_error_tol:
error_string += ' >ABS_TOL'
break
# See if this component has the greater
# error in the derivative computation
# compared to the other components so far
is_worst_subjac = False
for i, error in enumerate(rel_err):
if not np.isnan(error):
# only 1st and 2d errs
if i < 2 and error > worst_subjac_rel_err:
worst_subjac_rel_err = error
worst_subjac = (sys_type, sys_class_name, sys_name)
is_worst_subjac = True
if not np.isnan(error) and error >= rel_error_tol:
error_string += ' >REL_TOL'
break
if error_string: # Any error string indicates that at least one of the
# derivative calcs is greater than the rel tolerance
num_bad_jacs += 1
if out_stream:
if directional:
wrt = "(d)'%s'" % wrt
wrt_padded = pad_name(wrt, max_width_wrt, quotes=False)
else:
wrt_padded = pad_name(wrt, max_width_wrt, quotes=True)
if not all_comps_provide_jacs:
deriv_info_line = \
deriv_line.format(
pad_name(of, max_width_of, quotes=True),
wrt_padded,
magnitude.forward,
_format_if_not_matrix_free(
system.matrix_free, magnitude.reverse),
magnitude.fd,
abs_err.forward,
_format_if_not_matrix_free(system.matrix_free,
abs_err.reverse),
_format_if_not_matrix_free(
system.matrix_free, abs_err.forward_reverse),
rel_err.forward,
_format_if_not_matrix_free(system.matrix_free,
rel_err.reverse),
_format_if_not_matrix_free(
system.matrix_free, rel_err.forward_reverse),
)
else:
deriv_info_line = \
deriv_line.format(
pad_name(of, max_width_of, quotes=True),
wrt_padded,
magnitude.forward,
magnitude.fd,
abs_err.forward,
rel_err.forward,
)
if not show_only_incorrect or error_string:
out_buffer.write(deriv_info_line + error_string + '\n')
if is_worst_subjac:
worst_subjac_line = deriv_info_line
else: # not compact print
fd_desc = "{}:{}".format(fd_opts['method'], fd_opts['form'])
# Magnitudes
if out_stream:
if directional:
out_buffer.write(" {}: '{}' wrt (d)'{}'\n".format(sys_name, of, wrt))
else:
out_buffer.write(" {}: '{}' wrt '{}'\n".format(sys_name, of, wrt))
out_buffer.write(' Forward Magnitude : {:.6e}\n'.format(
magnitude.forward))
if not totals and system.matrix_free:
txt = ' Reverse Magnitude : {:.6e}'
if out_stream:
out_buffer.write(txt.format(magnitude.reverse) + '\n')
if out_stream:
out_buffer.write(' Fd Magnitude : {:.6e} ({})\n'.format(
magnitude.fd, fd_desc))
# Absolute Errors
if totals or not system.matrix_free:
error_descs = ('(Jfor - Jfd) ', )
else:
error_descs = ('(Jfor - Jfd) ', '(Jrev - Jfd) ', '(Jfor - Jrev)')
for error, desc in zip(abs_err, error_descs):
error_str = _format_error(error, abs_error_tol)
if error_str.endswith('*'):
num_bad_jacs += 1
if out_stream:
out_buffer.write(' Absolute Error {}: {}\n'.format(desc, error_str))
if out_stream:
out_buffer.write('\n')
# Relative Errors
for error, desc in zip(rel_err, error_descs):
error_str = _format_error(error, rel_error_tol)
if error_str.endswith('*'):
num_bad_jacs += 1
if out_stream:
out_buffer.write(' Relative Error {}: {}\n'.format(desc, error_str))
if out_stream:
if MPI and MPI.COMM_WORLD.size > 1:
out_buffer.write(' MPI Rank {}\n'.format(MPI.COMM_WORLD.rank))
out_buffer.write('\n')
# Raw Derivatives
if out_stream:
if directional:
out_buffer.write(' Directional Forward Derivative (Jfor)\n')
else:
out_buffer.write(' Raw Forward Derivative (Jfor)\n')
out_buffer.write(str(forward) + '\n')
out_buffer.write('\n')
if not totals and system.matrix_free:
if out_stream:
if directional:
out_buffer.write(' Directional Reverse Derivative (Jrev)\n')
else:
out_buffer.write(' Raw Reverse Derivative (Jrev)\n')
out_buffer.write(str(reverse) + '\n')
out_buffer.write('\n')
if out_stream:
if directional:
out_buffer.write(' Directional FD Derivative (Jfd)\n')
else:
out_buffer.write(' Raw FD Derivative (Jfd)\n')
out_buffer.write(str(fd) + '\n')
out_buffer.write('\n')
if out_stream:
out_buffer.write(' -' * 30 + '\n')
# End of if compact print if/else
# End of if not suppress_output
# End of for of, wrt in sorted_keys
if not show_only_incorrect or num_bad_jacs:
if out_stream and not suppress_output:
out_stream.write(out_buffer.getvalue())
# End of for system in system_list
if not suppress_output and compact_print and not totals:
if worst_subjac:
worst_subjac_header = \
"Sub Jacobian with Largest Relative Error: {1} '{2}'".format(*worst_subjac)
out_stream.write('\n' + '#' * len(worst_subjac_header) + '\n')
out_stream.write("{}\n".format(worst_subjac_header))
out_stream.write('#' * len(worst_subjac_header) + '\n')
out_stream.write(header + '\n')
out_stream.write('-' * len(header) + '\n')
out_stream.write(worst_subjac_line + '\n')
def _format_if_not_matrix_free(matrix_free, val):
"""
Return string to represent deriv check value in compact display.
Parameters
----------
matrix_free : bool
If True, then the associated Component is matrix-free.
val : float
The deriv check value.
Returns
-------
str
String which is the actual value if matrix-free, otherwise 'n/a'
"""
if matrix_free:
return '{0:.4e}'.format(val)
else:
return pad_name('n/a')
def _format_error(error, tol):
"""
Format the error, flagging if necessary.
Parameters
----------
error : float
The absolute or relative error.
tol : float
Tolerance above which errors are flagged
Returns
-------
str
Formatted and possibly flagged error.
"""
if np.isnan(error) or error < tol:
return '{:.6e}'.format(error)
return '{:.6e} *'.format(error)
class Slicer(object):
"""
Helper class that can be used with the indices argument for Problem set_val and get_val.
"""
def __getitem__(self, val):
"""
Pass through indices or slice.
Parameters
----------
val : int or slice object or tuples of slice objects
Indices or slice to return.
Returns
-------
indices : int or slice object or tuples of slice objects
Indices or slice to return.
"""
return val
# instance of the Slicer class to be used by users for the set_val and get_val methods of Problem
slicer = Slicer()
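# Illustrative sketch (not part of the original source): how the slicer helper above might
# be combined with the `indices` argument of Problem.set_val/get_val, which the Slicer
# docstring targets. The variable name 'x' and the shapes are assumptions.
#
#     prob.set_val('x', np.zeros(3), indices=slicer[2:5])
#     sub = prob.get_val('x', indices=slicer[::2])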
|
py | 7df76a2157af8f8108700b6f165a54e9d44ab048 | # Copyright (c) 2018, Zebula Sampedro, CU Research Computing
import unittest
from traitlets import Unicode
from optionsspawner.forms import TextInputField
class TextInputFieldTestCase(unittest.TestCase):
"""Tests for optionsspawner.forms.characterfield.TextInputField."""
def test_render_optional_free_text(self):
expected = ("""<label for="test_attr">Test Attribute</label>\n"""
"""<input id="id_test_attr" class="form-control" name="test_attr" type="text">\n""")
field = TextInputField('test_attr',
label='Test Attribute'
)
rendered = field.render()
self.assertEqual(rendered, expected)
def test_render_required_free_text_default_value(self):
expected = ("""<label for="test_attr">Test Attribute</label>\n"""
"""<input id="id_test_attr" class="form-control" name="test_attr" required type="text" value="DEFAULT">\n""")
field = TextInputField('test_attr',
label='Test Attribute',
attr_required=True,
attr_value='DEFAULT'
)
rendered = field.render()
self.assertEqual(rendered, expected)
def test_render_required_email(self):
expected = ("""<label for="test_attr">Test Attribute</label>\n"""
"""<input id="id_test_attr" class="form-control" name="test_attr" required type="email">\n""")
field = TextInputField('test_attr',
label='Test Attribute',
attr_type='email',
attr_required=True
)
rendered = field.render()
self.assertEqual(rendered, expected)
def test_returns_unicode_trait(self):
expected = Unicode().tag(config=True)
field = TextInputField('test_attr',
label='Test Attribute',
)
traitlet = field.get_trait()
self.assertIsInstance(traitlet, Unicode)
self.assertEqual(traitlet.metadata, expected.metadata)
self.assertEqual(traitlet.default_value, expected.default_value)
def test_returns_unicode_trait_with_default(self):
expected = Unicode(default_value='default').tag(config=True)
field = TextInputField('test_attr',
label='Test Attribute',
attr_value='default'
)
traitlet = field.get_trait()
self.assertIsInstance(traitlet, Unicode)
self.assertEqual(traitlet.metadata, expected.metadata)
self.assertEqual(traitlet.default_value, expected.default_value)
def test_normalize_non_empty_string_with_default(self):
expected = 'a test string'
field = TextInputField('test_attr',
label='Test Attribute',
attr_value='default'
)
normalized = field.normalize_user_option(['a test string'])
self.assertEqual(normalized, expected)
def test_normalize_non_string(self):
expected = '1234'
field = TextInputField('test_attr',
label='Test Attribute'
)
normalized = field.normalize_user_option([1234])
self.assertEqual(normalized, expected)
def test_normalize_empty_string_with_default(self):
expected = 'default'
field = TextInputField('test_attr',
label='Test Attribute',
attr_value='default'
)
normalized = field.normalize_user_option([''])
self.assertEqual(normalized, expected)
def test_normalize_empty_string_no_default(self):
expected = ''
field = TextInputField('test_attr',
label='Test Attribute'
)
normalized = field.normalize_user_option([''])
self.assertEqual(normalized, expected)
def test_normalize_empty_string_no_default_required(self):
field = TextInputField('test_attr',
label='Test Attribute',
attr_required=True
)
self.assertRaises(ValueError, field.normalize_user_option, [''])
if __name__ == '__main__':
unittest.main()
|
py | 7df76a480653088cd1f44724cacfb9ef420dd3c0 | """
NASA-Log extractor
@auth: Yu-Hsiang Fu
@date: 2018/03/26
"""
# --------------------------------------------------------------------------------
# 1.Main function
# --------------------------------------------------------------------------------
def main_function():
folder_input = "input/"
num_log = 50000
print("Extract number of logs from NASA log file.")
with open("{0}{1}".format(folder_input, "nasa.txt"), "r", encoding="utf-8") as f_in:
with open("{0}nasa_{1}.txt".format(folder_input, num_log), "w", encoding="utf-8") as f_out:
for i in range(0, num_log):
f_out.write("{0}\n".format(f_in.readline().strip()))
if __name__ == '__main__':
main_function() |
py | 7df76aa35dfbe0787cff334a2f37807eccf12d19 | # -*- coding: utf-8 -*-
from .base_command import BaseCommand
from .command import Command
__all__ = ["BaseCommand", "Command"]
|
py | 7df76ba0966f55ca5a5f8e0fb665aed97cbc148c | import json
from flask import request, abort
from . import app, mysql
from utils import requires_auth, get_current_tick
# @app.route("/general_ping")
@app.route("/game/ping")
def general_ping():
return "lareneg"
# General
#
@app.route("/game/ison")
def game_is_on():
"""The ``/game/on`` endpoint does not require auth
It looks for an entry in game table, which means the game has started
It can be reached at
``/game/on``.
:return: JSON containing game_id.
"""
cursor = mysql.cursor()
to_return = {}
cursor.execute("SELECT id FROM game LIMIT 1")
game_cursor = cursor.fetchone()
if game_cursor is None:
to_return["num"] = "621"
to_return["msg"] = "No game is currently running..."
return json.dumps(to_return)
to_return["game_id"] = game_cursor["id"]
return json.dumps(to_return)
@app.route("/state")
@app.route("/game/state")
@requires_auth
def game_get_state():
"""The ``/game/state`` endpoint requires authentication and expects no
other arguments.
It can be reached at ``/game/state?secret=<API_SECRET>``.
It is used to retrieve the current state of the game.
The JSON response looks like::
{
"state_id": int,
"game_id": int,
"services": [List of {"service_id": int,
"service_name": string,
"port": int}],
"scripts": [List of {"script_id": int,
"upload_id": int,
"type": ("exploit", "benign", "getflag",
"setflag"),
"script_name": string,
"service_id": int}]
"run_scripts": [{"team_id": int (team_id to run scripts against),
"run_list": [Ordered list of int script_ids]}],
"state_expire": int (approximate remaining seconds in this tick),
}
:return: a JSON dictionary providing information on the current state.
"""
cursor = mysql.cursor()
# Get basic information about the game, like tick info and services
to_return = {}
current_tick, tick_start, seconds_to_next_tick, _ = get_current_tick(cursor)
to_return["state_id"] = current_tick
to_return["state_expire"] = seconds_to_next_tick
cursor.execute("SELECT id FROM game LIMIT 1")
game_cursor = cursor.fetchone()
if game_cursor is None:
to_return["num"] = "621"
to_return["msg"] = "No game is currently running..."
return json.dumps(to_return)
to_return["game_id"] = game_cursor["id"]
cursor.execute("""SELECT services.id AS service_id,
services.name as service_name,
services.port as port,
current_state as state
FROM services""")
to_return["services"] = cursor.fetchall()
# Determine which scripts exists and which should be run
cursor.execute("""SELECT id AS script_id, upload_id, filename AS script_name,
type, service_id,
current_state as state
FROM scripts""")
to_return["scripts"] = cursor.fetchall()
cursor.execute("""SELECT team_id, json_list_of_scripts_to_run AS json_list
FROM team_scripts_run_status
WHERE team_scripts_run_status.tick_id = %s""",
(current_tick,))
run_scripts = []
for team_scripts_to_run in cursor.fetchall():
team_id = team_scripts_to_run["team_id"]
run_list = json.loads(team_scripts_to_run["json_list"])
run_scripts.append({"team_id": team_id,
"run_list": run_list})
to_return["run_scripts"] = run_scripts
return json.dumps(to_return)
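# Illustrative client sketch (not part of the original module): querying the /game/state
# endpoint documented above. The base URL, the `requests` package and the secret value are
# assumptions for demonstration only.
#
#     import requests
#     state = requests.get("http://localhost:5000/game/state",
#                          params={"secret": "<API_SECRET>"}).json()
#     print(state["state_id"], state["state_expire"])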
#@app.route("/getgameinfo")
@app.route("/game/info")
@requires_auth
def game_get_info():
"""The ``/game/info`` endpoint requires authentication and expects no
other arguments.
It can be reached at ``/game/info?secret=<API_SECRET>``.
It is used to retrieve the information about the game, like team and service
information.
The JSON response looks like::
{
"services": [List of {"service_id": int,
"service_name": string,
"port": int,
"flag_id_description": string,
"description": string,
"state": ("enabled", "disabled")}],
"teams": [List of {"team_id": int,
"team_name": string,
"url": string,
"country": 2-digit country code according
to ISO-3166-1, ZZ for unknown}]
}
:return: a JSON dictionary with a list of all teams and a list of all
services, including auxiliary information.
"""
cursor = mysql.cursor()
cursor.execute("""SELECT id as team_id, name as team_name,
url, country FROM teams""")
teams = cursor.fetchall()
_, tick_start, _, _ = get_current_tick(cursor)
cursor.execute("""SELECT id as service_id, name as service_name,
port, flag_id_description, description,
current_state as state
FROM services""")
services = cursor.fetchall()
return json.dumps({"teams": teams,
"services": services})
#@app.route("/currenttick")
@app.route("/game/tick/")
@requires_auth
def current_tick_num():
"""The ``/game/tick/`` endpoint requires authentication and expects no
other arguments.
    It can be reached at ``/game/tick/?secret=<API_SECRET>``.
It is used to retrieve the information about the current tick.
The JSON response looks like::
{"created_on": "2015-11-30 17:01:42",
"approximate_seconds_left": 0,
"tick_id": 47}
:return: a JSON dictionary with information about the current tick.
"""
cursor = mysql.cursor()
tick_id, created_on, seconds_left, ends_on = get_current_tick(cursor)
return json.dumps({"tick_id": tick_id,
"created_on": str(created_on),
"approximate_seconds_left": seconds_left,
"ends_on": str(ends_on)})
@app.route("/game/tick/config")
@requires_auth
def get_tick_config():
"""The ``/game/tick/config`` endpoint requires authentication and expects no
other arguments.
It can be reached at ``/game/tick/config?secret=<API_SECRET>``.
It is used to retrieve the information about the tick configuration.
The JSON response looks like::
{
"NO_BEN": <Max_Number_of_benign_scripts_per_tick>,
"NO_EXP": <Max_Number_of_exploit_scripts_per_tick>,
"NO_GET_FLAGS": <Max_Number_of_get_flag_scripts_per_tick>,
"TICK_TIME": <Tick_time_in_seconds>
}
    :return: a JSON dictionary with corresponding configuration values.
"""
cursor = mysql.cursor()
cursor.execute("""SELECT name, value FROM ticks_configuration""")
tick_config = {"NO_BEN": 0, "NO_EXP": 0, "TICK_TIME": 180, "NO_GET_FLAGS": 1}
for curr_row in cursor.fetchall():
if curr_row["name"] == "NUMBER_OF_BENIGN_SCRIPTS":
tick_config["NO_BEN"] = curr_row["value"]
if curr_row["name"] == "NUMBER_OF_EXPLOIT_SCRIPTS":
tick_config["NO_EXP"] = curr_row["value"]
if curr_row["name"] == "TICK_TIME_IN_SECONDS":
tick_config["TICK_TIME"] = curr_row["value"]
if curr_row["name"] == "NUMBER_OF_GETFLAGS":
tick_config["NO_GET_FLAGS"] = curr_row["value"]
return json.dumps(tick_config)
@app.route("/game/delete")
@requires_auth
def delete_game():
"""The ``/game/delete`` endpoint requires authentication.
    This deletes all rows from the game table.
It can be reached at
``/game/delete?secret=<API_SECRET>``.
:return: JSON containing the deleted id.
"""
cursor = mysql.cursor()
cursor.execute("""DELETE FROM game""")
mysql.database.commit()
response = dict()
response["game_id"] = 1
return json.dumps(response)
@app.route("/game/insert")
@requires_auth
def insert_game():
"""The ``/game/insert`` endpoint requires authentication.
This creates a row in the game table
It can be reached at
``/game/insert?secret=<API_SECRET>``.
:return: JSON containing inserted game_id.
"""
cursor = mysql.cursor()
cursor.execute("""INSERT INTO game VALUES (1) """)
mysql.database.commit()
response = dict()
response["game_id"] = cursor.lastrowid
return json.dumps(response)
# @app.route("/update/tick", methods=["POST"])
@app.route("/game/tick/update", methods=["POST"])
@requires_auth
def update_tick_info():
"""The ``/game/tick/update`` endpoint requires authentication.
This updates the ticks table with the provided tick info.
Note that this endpoint requires a POST request.
It can be reached at
``/game/tick/update?secret=<API_SECRET>``.
It requires the following POST inputs:
- time_to_change: ISO formatted datetime string
- created on: ISO formatted datetime string
:return: JSON containing latest tick id, corresponding to
the insertion.
"""
time_to_change = request.form.get("time_to_change")
created_on = request.form.get("created_on")
cursor = mysql.cursor()
cursor.execute("""INSERT INTO ticks (time_to_change, created_on)
VALUES (%s, %s)""", (time_to_change, created_on,))
mysql.database.commit()
response = dict()
response["tick_id"] = cursor.lastrowid
return json.dumps(response)
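# Illustrative client sketch (not part of the original module): posting a new tick via the
# /game/tick/update endpoint described above. The base URL and secret are placeholders and
# `requests` is an assumed dependency.
#
#     import requests, datetime
#     now = datetime.datetime.now()
#     requests.post("http://localhost:5000/game/tick/update",
#                   params={"secret": "<API_SECRET>"},
#                   data={"time_to_change": (now + datetime.timedelta(seconds=180)).isoformat(),
#                         "created_on": now.isoformat()})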
|
py | 7df76bc28d540f37bf54a51204794e5a00a1d5f4 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 27 11:16:27 2019
@author: S.P. van der Linden and T.J. Dijkema
@description: Rendering code for righthand bar
"""
import pygame
from renderbase import RenderBase
import yaml
WHITE = (255, 255, 255)
BLACK = (0, 0, 0, 255)
FADE_TIME = 500 # Sound fade-in/out time in ms
class Bar:
def __init__(self, image, sound, soundloop=False):
if not image:
self.image = './resources/bar_empty.png'
else:
self.image = "./resources/" + image
if image.endswith(".gif"):
self.animated_gif = True
else:
self.preloaded_image = preload_image(self.image)
if not sound:
self.preloaded_sound = None
else:
self.sound = './resources/sound/' + sound
self.preloaded_sound = pygame.mixer.Sound(self.sound)
self.soundloop = -1 if soundloop else 0
def preload_image(image_path):
return pygame.image.load(image_path).convert()
class RenderBar(RenderBase):
def __init__(self):
self.bar_mode = None
self.bars = {}
self.bars["De Melkweg"] = Bar('bar_milkyway.png', '')
self.bars[None] = Bar('bar_empty.png', '')
        with open("bodies.yml", "r") as bodies_file:
            bodies_yaml = yaml.safe_load(bodies_file)
for body in bodies_yaml:
self.bars[body["title"]] = Bar(body.get("bar_image", "bar_empty.png"), body.get("sound", ""), soundloop=body.get("soundloop", False))
self.full_init = True
self.changed_mode = True
self.current_sound = None
def render(self, screen: pygame.Surface):
rects_to_update = []
# Clear the bar
if self.full_init:
rects_to_update = screen.fill(BLACK, pygame.Rect(1100, 0, 820, 1080))
screen.blit(self.bars[self.bar_mode].preloaded_image, (1082, 0))
if self.changed_mode:
rects_to_update = screen.fill(BLACK, pygame.Rect(1100, 200, 820, 1080))
screen.blit(self.bars[self.bar_mode].preloaded_image, (1082, 0))
# Stop playing the current sound (if any)
if self.current_sound is not None:
self.current_sound.fadeout(FADE_TIME)
self.current_sound = None
# Update sounds
if self.bars[self.bar_mode].preloaded_sound is not None:
self.current_sound = self.bars[self.bar_mode].preloaded_sound
self.current_sound.play(fade_ms=FADE_TIME, loops=self.bars[self.bar_mode].soundloop)
self.changed_mode = False
return rects_to_update
def set_body_of_interest(self, body):
if body != self.bar_mode:
self.bar_mode = body
self.changed_mode = True
|
py | 7df76c80131ac14a13bda4a7382f8ffbed34775e | # imports
import argparse
import os
import cv2
# main function
def main():
# settings
argument_parser = argparse.ArgumentParser()
# create arguments
argument_parser.add_argument('-i', '--input',
type=str,
default='video.avi',
help='path to video file')
argument_parser.add_argument('-o', '--output',
type=str,
default='frames',
help='path to output frames directory')
argument_parser.add_argument('-p', '--prefix',
type=str,
default='',
help='frame prefix')
argument_parser.add_argument('-f', '--format',
type=str,
default='jpg',
help='frame as image format')
# parse arguments
arguments = argument_parser.parse_args()
# convert video to frames
video_to_frames(input=arguments.input, output=arguments.output, prefix=arguments.prefix, format=arguments.format)
# convert video to frames function
def video_to_frames(input, output, prefix, format, start=0, end=None):
# info
print('input: "{input}"'.format(input=input))
print('output: "{output}"'.format(output=output))
print('prefix: "{prefix}"'.format(prefix=prefix))
print('format: "{format}"'.format(format=format))
print('start: "{start}"'.format(start=start))
if end is not None:
print('end: "{end}"'.format(end=end))
# check output exists
if not os.path.exists(output):
print('"{output}" directory does not exist.'.format(output=output))
os.makedirs(output)
print('"{output}" directory is created.'.format(output=output))
# capture video
print('Converting is started.')
video_capture = cv2.VideoCapture(input)
# read frame
print('"{input}" is reading.'.format(input=input))
success, frame = video_capture.read()
# loop for frames
frame_count = 0
while success:
if ((end is not None) and (start <= frame_count <= end)) or ((end is None) and (start <= frame_count)):
frame_file_name = '{prefix}{frame_count:05d}.{format}'.format(prefix=prefix,
frame_count=frame_count,
format=format)
# save frame as image
save_image(image_data=frame, image_file_name=frame_file_name, image_directory_path=output)
# read next frame
success, frame = video_capture.read()
frame_count += 1
print('Converting is finished.')
# save image data as image file function
def save_image(image_data, image_file_name, image_directory_path, info=True):
image_file_path = os.path.join(image_directory_path, image_file_name)
cv2.imwrite(image_file_path, image_data)
if info:
print('"{image_file}" is saved into "{image_directory}".'.format(image_file=image_file_name,
image_directory=image_directory_path))
# main
if __name__ == '__main__':
main()
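# Illustrative usage (not part of the original script): assuming this file is saved as
# video_to_frames.py, extracting every frame of video.avi into ./frames as JPEGs with a
# "frame_" prefix could look like:
#
#     python video_to_frames.py -i video.avi -o frames -p frame_ -f jpg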
|
py | 7df76d3c68311f0721ed85fb3c085aeeaa6d5e60 | __all__ = ['tcmd']
class tcmd(object):
def __init__(self, name='tcmd'):
self.name = name
def inform(self, *args):
print('%s.inform: %s' % (self.name, ''.join(*args)))
def warn(self, *args):
print('%s.warn: %s' % (self.name, ''.join(*args)))
def fail(self, *args):
print('%s.fail: %s' % (self.name, ''.join(*args)))
|
py | 7df76da0e1034a884538f1254e41f9a357c081f8 | from numba import njit, int64, float64
from numba.typed import List as L
from numba.types import Tuple, ListType as LT
import numpy as np
@njit(Tuple((int64[:,::1],LT(LT(int64)),LT(LT(int64))))(int64[:,::1], int64, int64), cache=True)
def compute_surface_mesh_adjs(edges, num_vertices, edges_per_face):
num_faces = edges.shape[0]//edges_per_face
adjs = np.zeros((num_faces, edges_per_face), dtype=np.int64)-1
vtx2vtx = L()
vtx2face = L()
for k in range(num_vertices):
tmp1 = L()
tmp2 = L()
tmp1.append(-1)
tmp2.append(-1)
vtx2vtx.append(tmp1)
vtx2face.append(tmp2)
tmp = np.arange(num_faces)
faces_idx = np.repeat(tmp, edges_per_face)
map_ = dict()
support_set = set()
map_[(-1,-1)] = -1
support_set.add((-1,-1))
for i in range(edges.shape[0]):
e = (edges[i][0], edges[i][1])
f = faces_idx[i]
if vtx2vtx[e[0]][0] == -1:
vtx2vtx[e[0]][0] = e[1]
else:
vtx2vtx[e[0]].append(e[1])
if vtx2face[e[0]][0] == -1:
vtx2face[e[0]][0] = f
else:
vtx2face[e[0]].append(f)
if e not in support_set:
map_[(edges[i][1], edges[i][0])] = f
support_set.add((edges[i][1], edges[i][0]))
else:
idx_to_append1 = np.where(adjs[f] == -1)[0][0]
idx_to_append2 = np.where(adjs[map_[e]] == -1)[0][0]
adjs[f][idx_to_append1] = map_[e]
adjs[map_[e]][idx_to_append2] = f
return adjs, vtx2vtx, vtx2face
@njit(Tuple((int64[:,::1],LT(LT(int64)),LT(LT(int64)),LT(LT(int64))))(int64[:,::1], int64), cache=True)
def compute_tet_mesh_adjs(faces, num_vertices):
num_poly = faces.shape[0]//4
adjs = np.zeros((num_poly, 4), dtype=np.int64)-1
vtx2vtx = L()
vtx2poly = L()
vtx2face = L()
for k in range(num_vertices):
tmp1 = L()
tmp2 = L()
tmp3 = L()
tmp1.append(-1)
tmp2.append(-1)
tmp3.append(-1)
vtx2vtx.append(tmp1)
vtx2poly.append(tmp2)
vtx2face.append(tmp3)
tmp = np.arange(num_poly)
poly_idx = np.repeat(tmp, 4)
map_ = dict()
support_set = set()
map_[(-1,-1,-1)] = -1
support_set.add((-1,-1,-1))
for i in range(faces.shape[0]):
f1 = (faces[i][0], faces[i][1], faces[i][2])
f2 = (faces[i][2], faces[i][1], faces[i][0])
f3 = (faces[i][0], faces[i][2], faces[i][1])
f4 = (faces[i][1], faces[i][0], faces[i][2])
t = poly_idx[i]
if vtx2vtx[faces[i][0]][0] == -1:
vtx2vtx[faces[i][0]][0] = faces[i][1]
else:
if faces[i][1] not in vtx2vtx[faces[i][0]]:
vtx2vtx[faces[i][0]].append(faces[i][1])
if vtx2vtx[faces[i][1]][0] == -1:
vtx2vtx[faces[i][1]][0] = faces[i][2]
else:
if faces[i][2] not in vtx2vtx[faces[i][1]]:
vtx2vtx[faces[i][1]].append(faces[i][2])
if vtx2vtx[faces[i][2]][0] == -1:
vtx2vtx[faces[i][2]][0] = faces[i][0]
else:
if faces[i][0] not in vtx2vtx[faces[i][2]]:
vtx2vtx[faces[i][2]].append(faces[i][0])
for j in range(3):
if vtx2face[faces[i][j]][0] == -1:
vtx2face[faces[i][j]][0] = i
else:
if faces[i][j] not in vtx2face[faces[i][j]]:
vtx2face[faces[i][j]].append(i)
if vtx2poly[faces[i][0]][0] == -1:
vtx2poly[faces[i][0]][0] = t
else:
if t not in vtx2poly[faces[i][0]]:
vtx2poly[faces[i][0]].append(t)
if vtx2poly[faces[i][1]][0] == -1:
vtx2poly[faces[i][1]][0] = t
else:
if t not in vtx2poly[faces[i][1]]:
vtx2poly[faces[i][1]].append(t)
if vtx2poly[faces[i][2]][0] == -1:
vtx2poly[faces[i][2]][0] = t
else:
if t not in vtx2poly[faces[i][2]]:
vtx2poly[faces[i][2]].append(t)
if f1 not in support_set:
map_[f2] = t
map_[f3] = t
map_[f4] = t
support_set.add(f2)
support_set.add(f3)
support_set.add(f4)
else:
idx_to_append1 = np.where(adjs[t] == -1)[0][0]
idx_to_append2 = np.where(adjs[map_[f1]] == -1)[0][0]
adjs[t][idx_to_append1] = map_[f1]
adjs[map_[f1]][idx_to_append2] = t
return adjs, vtx2vtx, vtx2poly, vtx2face
@njit(Tuple((int64[:,::1],LT(LT(int64)),LT(LT(int64)),LT(LT(int64))))(int64[:,::1], int64), cache=True)
def compute_hex_mesh_adjs(faces, num_vertices):
num_poly = faces.shape[0]//6
adjs = np.zeros((num_poly, 6), dtype=np.int64)-1
vtx2vtx = L()
vtx2poly = L()
vtx2face = L()
for k in range(num_vertices):
tmp1 = L()
tmp2 = L()
tmp3 = L()
tmp1.append(-1)
tmp2.append(-1)
tmp3.append(-1)
vtx2vtx.append(tmp1)
vtx2poly.append(tmp2)
vtx2face.append(tmp3)
tmp = np.arange(num_poly)
poly_idx = np.repeat(tmp, 6)
map_ = dict()
support_set = set()
map_[(-1,-1,-1,-1)] = -1
support_set.add((-1,-1,-1,-1))
for i in range(faces.shape[0]):
f1 = (faces[i][0], faces[i][1], faces[i][2], faces[i][3])
f2 = (faces[i][3], faces[i][2], faces[i][1], faces[i][0])
f3 = (faces[i][2], faces[i][1], faces[i][0], faces[i][3])
f4 = (faces[i][1], faces[i][0], faces[i][3], faces[i][2])
f5 = (faces[i][0], faces[i][3], faces[i][2], faces[i][1])
t = poly_idx[i]
if vtx2vtx[faces[i][0]][0] == -1:
vtx2vtx[faces[i][0]][0] = faces[i][1]
else:
if faces[i][1] not in vtx2vtx[faces[i][0]]:
vtx2vtx[faces[i][0]].append(faces[i][1])
if vtx2vtx[faces[i][1]][0] == -1:
vtx2vtx[faces[i][1]][0] = faces[i][2]
else:
if faces[i][2] not in vtx2vtx[faces[i][1]]:
vtx2vtx[faces[i][1]].append(faces[i][2])
if vtx2vtx[faces[i][2]][0] == -1:
vtx2vtx[faces[i][2]][0] = faces[i][3]
else:
if faces[i][3] not in vtx2vtx[faces[i][2]]:
vtx2vtx[faces[i][2]].append(faces[i][3])
if vtx2vtx[faces[i][3]][0] == -1:
vtx2vtx[faces[i][3]][0] = faces[i][0]
else:
if faces[i][0] not in vtx2vtx[faces[i][3]]:
vtx2vtx[faces[i][3]].append(faces[i][0])
for j in range(4):
if vtx2face[faces[i][j]][0] == -1:
vtx2face[faces[i][j]][0] = i
else:
if faces[i][j] not in vtx2face[faces[i][j]]:
vtx2face[faces[i][j]].append(i)
if vtx2poly[faces[i][0]][0] == -1:
vtx2poly[faces[i][0]][0] = t
else:
if t not in vtx2poly[faces[i][0]]:
vtx2poly[faces[i][0]].append(t)
if vtx2poly[faces[i][1]][0] == -1:
vtx2poly[faces[i][1]][0] = t
else:
if t not in vtx2poly[faces[i][1]]:
vtx2poly[faces[i][1]].append(t)
if vtx2poly[faces[i][2]][0] == -1:
vtx2poly[faces[i][2]][0] = t
else:
if t not in vtx2poly[faces[i][2]]:
vtx2poly[faces[i][2]].append(t)
if vtx2poly[faces[i][3]][0] == -1:
vtx2poly[faces[i][3]][0] = t
else:
if t not in vtx2poly[faces[i][3]]:
vtx2poly[faces[i][3]].append(t)
if f1 not in support_set:
map_[f2] = t
map_[f3] = t
map_[f4] = t
map_[f5] = t
support_set.add(f2)
support_set.add(f3)
support_set.add(f4)
support_set.add(f5)
else:
idx_to_append1 = np.where(adjs[t] == -1)[0][0]
idx_to_append2 = np.where(adjs[map_[f1]] == -1)[0][0]
adjs[t][idx_to_append1] = map_[f1]
adjs[map_[f1]][idx_to_append2] = t
return adjs, vtx2vtx, vtx2poly, vtx2face
@njit(int64[:,::1](int64[:,::1]),cache=True)
def compute_adj_f2f_volume(faces):
adjs = np.zeros((faces.shape[0], 1), dtype=np.int64)-1
map_ = dict()
map_[(-1,-1,-1,-1)] = -1
for idx in range(faces.shape[0]):
f = np.copy(faces[idx])
f.sort()
support = (f[0],f[1],f[2],-1) if faces.shape[1] == 3 else (f[0],f[1],f[2],f[3])
if(support in map_):
idx_to_append1 = np.where(adjs[map_[support]] == -1)[0][0]
idx_to_append2 = np.where(adjs[idx] == -1)[0][0]
adjs[map_[support]][idx_to_append1] = idx
adjs[idx][idx_to_append2] = map_[support]
else:
map_[support] = idx
return adjs
def compute_face_normals(vertices, faces, quad=False):
e1_v = vertices[faces][:,1] - vertices[faces][:,0]
e2_v = vertices[faces][:,2] - vertices[faces][:,1]
if quad:
e2_v = vertices[faces][:,2] - vertices[faces][:,0]
face_normals = np.cross(e1_v, e2_v)
norm = np.linalg.norm(face_normals, axis=1)
norm.shape = (-1,1)
return face_normals / norm
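# Illustrative sketch (not part of the original source): compute_face_normals applied to a
# single counter-clockwise triangle in the xy-plane yields the +z unit normal.
#
#     verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
#     tris = np.array([[0, 1, 2]])
#     compute_face_normals(verts, tris)   # -> array([[0., 0., 1.]])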
@njit(float64[:,::1](float64[:,::1], LT(LT(int64))), cache=True)
def compute_vertex_normals(face_normals, vtx2face):
mean = np.zeros((1, 3), dtype=np.float64)
vtx_normals = np.zeros((len(vtx2face),3), dtype=np.float64)
idx = 0
    for v2f in vtx2face:
        # average the normals of all faces incident to this vertex
        for v in v2f:
            mean = mean + face_normals[v]
        mean /= len(v2f)
        vtx_normals[idx] = mean
        mean -= mean  # reset the accumulator in place (numba-friendly)
        idx += 1
norm = np.sqrt(np.sum(vtx_normals**2, axis=1))
norm=np.reshape(norm, (-1,1))
return vtx_normals/norm
def _compute_three_vertex_normals(tri_soup):
tmp = tri_soup[0::3]
a = tri_soup[1::3] - tmp
b = tri_soup[2::3] - tmp
cross = np.cross(a,b)
face_normals = cross / np.linalg.norm(cross, axis=1, keepdims=True)
vtx_normals = np.repeat(face_normals, 3, axis=0)
return vtx_normals
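# Hypothetical usage sketch (array shapes and names are assumptions, not part of this module):
#   verts : float64 array of shape (N, 3), tris : int64 C-contiguous array of shape (M, 3)
#   face_n = compute_face_normals(verts, tris)          # (M, 3) unit face normals
#   vtx_n  = compute_vertex_normals(face_n, vtx2face)   # per-vertex averaged normals, with
#                                                       # vtx2face from the adjacency builders above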
|
py | 7df76ea29a5e597c5a8aab4f024b17fda7291f3e | from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import sys
sys.path.append("/data")
from scripts.study_case.ID_14.v3.model import LeNet5
import tensorflow as tf
from sklearn.utils import shuffle
BATCH_SIZE = 64
class Train:
def __init__(self):
self.CKPT_DIR = './ckpt'
# Generate data set
mnist = input_data.read_data_sets("../data/", reshape=False, one_hot=True)
self.X_train, self.Y_train = mnist.train.images, mnist.train.labels
self.X_validation, self.Y_validation = mnist.validation.images, mnist.validation.labels
self.X_test, self.Y_test = mnist.test.images, mnist.test.labels
print("X_train.shape: ", self.X_train.shape)
print("X_validation.shape: ", self.X_validation.shape)
print("X_test.shape: ", self.X_test.shape)
self.X_train = np.pad(self.X_train, ((0, 0), (2, 2), (2, 2), (0, 0)), "constant", constant_values=0)
self.X_validation = np.pad(self.X_validation, ((0, 0), (2, 2), (2, 2), (0, 0)), "constant", constant_values=0)
self.X_test = np.pad(self.X_test, ((0, 0), (2, 2), (2, 2), (0, 0)), "constant", constant_values=0)
print("X_train.shape: ", self.X_train.shape)
print("X_validation.shape: ", self.X_validation.shape)
print("X_test.shape: ", self.X_test.shape)
self.net = LeNet5(learning_rate=0.001)
self.sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True))
self.sess.run(tf.global_variables_initializer())
def train(self):
epochs = 100 # change from 50 to 100
num_examples = len(self.X_train)
saver = tf.train.Saver(max_to_keep=5)
save_interval = 10
"""insert code"""
from scripts.utils.tf_utils import GradientSearcher
gradient_search = GradientSearcher(name="MNIST_grist")
from scripts.study_case.ID_14.v3.model import global_obj_var
# obj_function = tf.reduce_min(tf.abs(global_obj_var))
# obj_grads = tf.gradients(obj_function, self.net.x)[0]
x_train, y_train = shuffle(self.X_train, self.Y_train)
x, y = x_train[0:BATCH_SIZE], y_train[0:BATCH_SIZE]
max_val, min_val = np.max(x), np.min(x)
gradient_search.build(batch_size=BATCH_SIZE, min_val=min_val, max_val=max_val)
"""insert code"""
end = 0
while True:
end = end + BATCH_SIZE
"""inserted code"""
monitor_vars = {'loss': self.net.loss, 'obj_function': self.net.obj_function, 'obj_grad': self.net.obj_grads}
feed_dict = {self.net.x: x, self.net.label: y}
x, scores_rank = gradient_search.update_batch_data(session=self.sess, monitor_var=monitor_vars,
feed_dict=feed_dict, input_data=x,
)
"""inserted code"""
_, loss_val = self.sess.run([self.net.train, self.net.loss], feed_dict=feed_dict)
"""inserted code"""
new_batch_xs, new_batch_ys = x_train[end - BATCH_SIZE:end], y_train[end - BATCH_SIZE:end]
new_data_dict = {'x': new_batch_xs, 'y': new_batch_ys}
old_data_dict = {'x': x, 'y': y}
x, y = gradient_search.switch_new_data(new_data_dict=new_data_dict,
old_data_dict=old_data_dict,
scores_rank=scores_rank)
gradient_search.check_time()
"""inserted code"""
def evaluate(self, x_data, y_data):
error_rate, loss = self.sess.run([self.net.error_rate, self.net.loss], feed_dict={
self.net.x: x_data,
self.net.label: y_data,
})
return error_rate, loss
if __name__ == '__main__':
app = Train()
app.train()
|
py | 7df770824f91a3a799afcac91825a5017ff56958 | # TODO:
# 1. Change import to "import bokeh.plotting" and "import collectons.Counter"
# 2. No members needed
# 3. Create a class PlotData, which has two fields: xAxis and yAxis
# 4. Refactor data creation to a method, which returns a PlotData object
import bokeh.plotting as bk
import json
import collections
class PlotData(object):
def __init__(self):
self.xAxis = []
self.yAxis = []
class MonthFigure(object):
ALL_MONTHS = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October",
"November", "December", "Unknown"]
def __init__(self):
pass
def countTheMonths(self):
with open("info.json", "r") as f:
birthdaysDict = json.load(f)
months = []
for key, value in birthdaysDict.items():
splitValue = value.split(" ")
month = splitValue[len(splitValue) - 1]
months.append(month)
return collections.Counter(months)
def showFigure(self):
data = self.getData()
self.drawPlot(data)
def drawPlot(self, data):
bk.output_file("plot.html")
p = bk.figure(x_range=MonthFigure.ALL_MONTHS)
p.vbar(x=data.xAxis, top=data.yAxis, width=0.5)
bk.show(p)
def getData(self):
monthsCounter = self.countTheMonths()
data = PlotData()
for key, value in monthsCounter.items():
data.xAxis.append(key)
data.yAxis.append(value)
return data
def main():
m = MonthFigure()
m.showFigure()
if __name__ == "__main__":
main()
|
py | 7df77124276616ddc607ffeaef75e85cb9f488be | """
**Case/Control Matching functions**
Contains all functions that implement the :ref:`maximizeControls` data preparation tool.
"""
import pandas as pd
from hopcroftkarp import HopcroftKarp
import numpy as np
CATEGORICAL_DATA = '675161f1c87ff2648c61ff1c57c780f2'
def generate_row_query(keys, deltas, tr):
q = []
for i, dt in enumerate(deltas):
key = keys[i]
is_categorical = dt == CATEGORICAL_DATA
if is_categorical:
part = '=='.join([key, tr[key].__repr__()])
else:
structure = ['abs(', key, '-', tr[key], ')', '<=', dt]
part = ''.join([str(x) for x in structure])
q.append(part)
return '&'.join(q)
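# Example of the string built above: keys=['sex','age'], deltas=[CATEGORICAL_DATA, 2.0]
# and a target row with sex='M', age=40 produce "sex=='M'&abs(age-40)<=2.0",
# which is then evaluated with DataFrame.query() in get_options().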
def get_options(targets, controls, keys, deltas):
tt = targets[keys]
c = controls[keys]
matching = {}
if len(c) > len(tt):
for i in tt.index:
tr = tt.loc[i]
control_query = generate_row_query(keys, deltas, tr)
matches = c.query(control_query).index
# matching[i] = matches.drop_duplicates().tolist()
matching[i] = set(matches)
else:
for i in c.index:
tr = c.loc[i]
target_query = generate_row_query(keys, deltas, tr)
matches = tt.query(target_query).index
# matching[i] = matches.drop_duplicates().tolist()
matching[i] = set(matches)
return matching
def output_matches(path, outputfile, data, all_used, success, goal, matched):
new_data = data[data.index.isin(all_used)]
print('---')
if not success:
print("Could not match data 1-%d, using the maximum number of matches found by the approximation algorithm" % goal)
print("Matched data 1-%0.3f" % matched)
else:
print("Matched data 1-%s" % (matched))
new_data.to_csv(path / outputfile, index=False)
print("New group file in %s" % (path / outputfile))
return
def control_match(path, input, output, keys, deltas, condition='genotype', goal=1):
"""
Estimate an optimal case-control mapping.
Match cases/controls (defined by value of ``condition``\ ) with a ``goal``\ :1 ratio
based on similarity of ``keys``\ . Specify tolerance interval for ``keys`` via ``deltas``\ .
For an exact match, specify a delta of 0. The order of
``deltas`` values must match the order of the ``keys``\ .
Optimal matches are estimated via the HopcroftKarp algorithm. A new group file
with all matched subjects is saved to ``output``\ . The explicit matching of
cases to controls is saved to ``output``\ __matched_pairs.csv
:param path: path to input and output
:param input: name of file containing group data
:param output: name of output matched group file
:param keys: comma-separated list of matching criteria (columns in input file)
:param deltas: comma-separated list of tolerance intervals for the matching criteria
:param condition: Field denoting group assignments [default: genotype]
:param goal: n, target matching ratio (control:case => n:1) [default: 1]
:type path: pathlib Path
:type input: str
:type output: str
:type keys: str
:type deltas: str
:type condition: str
:type goal: int
:returns: None
.. note:: If the matching algorithm cannot achieve the specified matching ratio, it will issue a warning and report the achieved ratio.
"""
# Reformat arguments
keys = keys.replace(" ", "").split(',')
deltas = deltas.replace(" ", "").split(',')
deltas = [CATEGORICAL_DATA if x == '' else float(x) for x in deltas]
# save original goal value
orig_goal = goal
# Assign new value for outputfile
    # str.strip() removes a set of characters, not a suffix, so slice off '.csv' instead
    if output is None:
        inname = input[:-4] if input.endswith('.csv') else input
        output = inname + '__matched.csv'
        match_file = path / (inname + '__matched_pairs.csv')
    else:
        outname = output[:-4] if output.endswith('.csv') else output
        match_file = path / (outname + '__matched_pairs.csv')
# Read data from the provided input file
data = pd.read_csv(path / input)
# Assert that all of the provided matching keys are present in the data
for key in keys:
assert key in data.columns, '%s is not a column in the input file (%s)' % (key, input)
# Assert that condition column is present in the data
assert condition in data.columns, 'Specified condition (%s) is not a column in the input file (%s)' % (condition, input)
# Assert that condition column contains only '1' and '0'
condition_vals = np.unique(data[condition])
assert len(condition_vals) == 2, 'There are %d values (should only be 2) in the specified condition column (%s) in the input file (%s)' % (len(condition_vals), condition, input)
for val in [0, 1]:
assert val in condition_vals, 'The value %d is missing from the condition column (%s) in the input file (%s)' % (val, condition, input)
# Separate patients and controls
match_by_group0 = len(data[data[condition] == 1]) > len(data[data[condition] == 0])
if match_by_group0:
print('There are more cases (%s=1) than controls (%s=0) -- matching by controls' %(condition, condition))
targets = data[data[condition] == 0].copy()
controls = data[data[condition] == 1].copy()
else:
print('There are more controls (%s=0) than cases (%s=1) -- matching by cases' %(condition, condition))
targets = data[data[condition] == 1].copy()
controls = data[data[condition] == 0].copy()
    # save original number of targets (used to check if any are dropped)
orig_num_target = targets.shape[0]
# create dictionary to store matching pairs
targets['matching_ix'] = [[] for _ in range(targets.shape[0])]
pairing = targets[['matching_ix']].to_dict(orient='index')
cid = set()
tid = set()
set_num = 0
while goal > 0:
set_num += 1
print('Getting match set %d' % set_num)
matching = get_options(targets, controls, keys, deltas) # get all possible matches for each target
matched = HopcroftKarp(matching).maximum_matching() # find optimal pairings
# store matches
for i in targets.index:
if i in matched:
cid.add(matched[i])
tid.add(i)
pairing[i]['matching_ix'].append(matched[i])
# remove matched IDs from control pool
rem_ids = set(controls.index).difference(cid)
controls = controls.loc[rem_ids,:]
goal = goal - 1
final_ratio = float(len(cid)) / float(len(tid))
all_used = cid.union(tid)
print('Formatting matches')
# convert pairing to dataframe & get matching ids from ix
pairing_df = pd.DataFrame.from_dict(pairing, orient='index')
# copy matching ix and remove extra columns
targets.loc[pairing_df.index, 'matching_ix'] = pairing_df['matching_ix']
# get match IDs from index
get_ids = lambda i: list(data.loc[i['matching_ix'], 'id'].values)
targets['matching_ids'] = targets.apply(get_ids, axis=1)
# separate list of subject IDs & get matching subject's info
cols = keys[:]
for i in range(0,orig_goal):
match_col = 'match'+str(i+1)
cols.append(match_col)
expand = lambda x: pd.Series(x['matching_ids'])
try:
targets[match_col] = targets.apply(expand, axis=1)[i]
for key in keys:
match_info = pd.merge(targets[[match_col]], data[['id', key]], left_on=match_col, right_on='id')
match_info.rename(columns={key: match_col + '_' + key}, inplace=True)
targets = pd.merge(targets, match_info.drop(columns='id'), on=match_col,how='left')
cols.append(match_col + '_' + key)
except: # no matches found for set i
targets[match_col] = np.nan
cols.insert(0, 'id')
cols.insert(1, 'genotype')
# export matching pairs
print('Saving case/control mapping to %s' %match_file)
targets.to_csv(match_file,index=False, columns=cols)
if len(tid) != orig_num_target:
g = 'controls' if match_by_group0 else 'cases'
print('WARNING: Some %s were dropped during matching (%d out of %d %s remain)' %(g,len(tid),orig_num_target,g))
if final_ratio == orig_goal:
matching_success = 1
else:
matching_success = 0
output_matches(path, output, data, all_used, matching_success, orig_goal, final_ratio)
return
|
py | 7df771d045c373fda7221d0f02e3aa52766b994c | import base64
import time
from collections import defaultdict
import copy
import datetime
from gzip import GzipFile
from sys import platform
import docker
import docker.errors
import hashlib
import io
import logging
import os
import json
import re
import zipfile
import uuid
import tarfile
import calendar
import threading
import weakref
import requests.exceptions
from moto.awslambda.policy import Policy
from moto.core import BaseBackend, CloudFormationModel
from moto.core.exceptions import RESTError
from moto.iam.models import iam_backend
from moto.iam.exceptions import IAMNotFoundException
from moto.core.utils import unix_time_millis, BackendDict
from moto.s3.models import s3_backend
from moto.logs.models import logs_backends
from moto.s3.exceptions import MissingBucket, MissingKey
from moto import settings
from .exceptions import (
CrossAccountNotAllowed,
InvalidRoleFormat,
InvalidParameterValueException,
)
from .utils import (
make_function_arn,
make_function_ver_arn,
make_layer_arn,
make_layer_ver_arn,
split_layer_arn,
)
from moto.sqs import sqs_backends
from moto.dynamodb import dynamodb_backends
from moto.dynamodbstreams import dynamodbstreams_backends
from moto.core import ACCOUNT_ID
from moto.utilities.docker_utilities import DockerModel, parse_image_ref
logger = logging.getLogger(__name__)
try:
from tempfile import TemporaryDirectory
except ImportError:
from backports.tempfile import TemporaryDirectory
docker_3 = docker.__version__[0] >= "3"
def zip2tar(zip_bytes):
with TemporaryDirectory() as td:
tarname = os.path.join(td, "data.tar")
timeshift = int(
(datetime.datetime.now() - datetime.datetime.utcnow()).total_seconds()
)
with zipfile.ZipFile(io.BytesIO(zip_bytes), "r") as zipf, tarfile.TarFile(
tarname, "w"
) as tarf:
for zipinfo in zipf.infolist():
if zipinfo.filename[-1] == "/": # is_dir() is py3.6+
continue
tarinfo = tarfile.TarInfo(name=zipinfo.filename)
tarinfo.size = zipinfo.file_size
tarinfo.mtime = calendar.timegm(zipinfo.date_time) - timeshift
infile = zipf.open(zipinfo.filename)
tarf.addfile(tarinfo, infile)
with open(tarname, "rb") as f:
tar_data = f.read()
return tar_data
class _VolumeRefCount:
__slots__ = "refcount", "volume"
def __init__(self, refcount, volume):
self.refcount = refcount
self.volume = volume
class _DockerDataVolumeContext:
_data_vol_map = defaultdict(
lambda: _VolumeRefCount(0, None)
) # {sha256: _VolumeRefCount}
_lock = threading.Lock()
def __init__(self, lambda_func):
self._lambda_func = lambda_func
self._vol_ref = None
@property
def name(self):
return self._vol_ref.volume.name
def __enter__(self):
# See if volume is already known
with self.__class__._lock:
self._vol_ref = self.__class__._data_vol_map[self._lambda_func.code_sha_256]
self._vol_ref.refcount += 1
if self._vol_ref.refcount > 1:
return self
# See if the volume already exists
for vol in self._lambda_func.docker_client.volumes.list():
if vol.name == self._lambda_func.code_sha_256:
self._vol_ref.volume = vol
return self
# It doesn't exist so we need to create it
self._vol_ref.volume = self._lambda_func.docker_client.volumes.create(
self._lambda_func.code_sha_256
)
if docker_3:
volumes = {self.name: {"bind": "/tmp/data", "mode": "rw"}}
else:
volumes = {self.name: "/tmp/data"}
self._lambda_func.docker_client.images.pull(
":".join(parse_image_ref("alpine"))
)
container = self._lambda_func.docker_client.containers.run(
"alpine", "sleep 100", volumes=volumes, detach=True
)
try:
tar_bytes = zip2tar(self._lambda_func.code_bytes)
container.put_archive("/tmp/data", tar_bytes)
finally:
container.remove(force=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with self.__class__._lock:
self._vol_ref.refcount -= 1
if self._vol_ref.refcount == 0:
try:
self._vol_ref.volume.remove()
except docker.errors.APIError as e:
if e.status_code != 409:
raise
raise # multiple processes trying to use same volume?
def _zipfile_content(zipfile):
# more hackery to handle unicode/bytes/str in python3 and python2 -
# argh!
try:
to_unzip_code = base64.b64decode(bytes(zipfile, "utf-8"))
except Exception:
to_unzip_code = base64.b64decode(zipfile)
return to_unzip_code, len(to_unzip_code), hashlib.sha256(to_unzip_code).hexdigest()
def _validate_s3_bucket_and_key(data):
key = None
try:
# FIXME: does not validate bucket region
key = s3_backend.get_object(data["S3Bucket"], data["S3Key"])
except MissingBucket:
if do_validate_s3():
raise InvalidParameterValueException(
"Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist"
)
except MissingKey:
if do_validate_s3():
raise ValueError(
"InvalidParameterValueException",
"Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.",
)
return key
class Permission(CloudFormationModel):
def __init__(self, region):
self.region = region
@staticmethod
def cloudformation_name_type():
return "Permission"
@staticmethod
def cloudformation_type():
return "AWS::Lambda::Permission"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
backend = lambda_backends[region_name]
fn = backend.get_function(properties["FunctionName"])
fn.policy.add_statement(raw=json.dumps(properties))
return Permission(region=region_name)
class LayerVersion(CloudFormationModel):
def __init__(self, spec, region):
# required
self.region = region
self.name = spec["LayerName"]
self.content = spec["Content"]
# optional
self.description = spec.get("Description", "")
self.compatible_runtimes = spec.get("CompatibleRuntimes", [])
self.license_info = spec.get("LicenseInfo", "")
# auto-generated
self.created_date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
self.version = None
self._attached = False
self._layer = None
if "ZipFile" in self.content:
self.code_bytes, self.code_size, self.code_sha_256 = _zipfile_content(
self.content["ZipFile"]
)
else:
key = _validate_s3_bucket_and_key(self.content)
if key:
self.code_bytes = key.value
self.code_size = key.size
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
@property
def arn(self):
if self.version:
return make_layer_ver_arn(self.region, ACCOUNT_ID, self.name, self.version)
raise ValueError("Layer version is not set")
def attach(self, layer, version):
self._attached = True
self._layer = layer
self.version = version
def get_layer_version(self):
return {
"Version": self.version,
"LayerVersionArn": self.arn,
"CreatedDate": self.created_date,
"CompatibleRuntimes": self.compatible_runtimes,
"Description": self.description,
"LicenseInfo": self.license_info,
}
@staticmethod
def cloudformation_name_type():
return "LayerVersion"
@staticmethod
def cloudformation_type():
return "AWS::Lambda::LayerVersion"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
optional_properties = ("Description", "CompatibleRuntimes", "LicenseInfo")
# required
spec = {
"Content": properties["Content"],
"LayerName": resource_name,
}
for prop in optional_properties:
if prop in properties:
spec[prop] = properties[prop]
backend = lambda_backends[region_name]
layer_version = backend.publish_layer_version(spec)
return layer_version
class Layer(object):
def __init__(self, name, region):
self.region = region
self.name = name
self.layer_arn = make_layer_arn(region, ACCOUNT_ID, self.name)
self._latest_version = 0
self.layer_versions = {}
def attach_version(self, layer_version):
self._latest_version += 1
layer_version.attach(self, self._latest_version)
self.layer_versions[str(self._latest_version)] = layer_version
def to_dict(self):
return {
"LayerName": self.name,
"LayerArn": self.layer_arn,
"LatestMatchingVersion": self.layer_versions[
str(self._latest_version)
].get_layer_version(),
}
class LambdaFunction(CloudFormationModel, DockerModel):
def __init__(self, spec, region, validate_s3=True, version=1):
DockerModel.__init__(self)
# required
self.region = region
self.code = spec["Code"]
self.function_name = spec["FunctionName"]
self.handler = spec["Handler"]
self.role = spec["Role"]
self.run_time = spec["Runtime"]
self.logs_backend = logs_backends[self.region]
self.environment_vars = spec.get("Environment", {}).get("Variables", {})
self.policy = None
self.state = "Active"
self.reserved_concurrency = spec.get("ReservedConcurrentExecutions", None)
# optional
self.description = spec.get("Description", "")
self.memory_size = spec.get("MemorySize", 128)
self.package_type = spec.get("PackageType", None)
self.publish = spec.get("Publish", False) # this is ignored currently
self.timeout = spec.get("Timeout", 3)
self.layers = self._get_layers_data(spec.get("Layers", []))
self.signing_profile_version_arn = spec.get("SigningProfileVersionArn")
self.signing_job_arn = spec.get("SigningJobArn")
self.code_signing_config_arn = spec.get("CodeSigningConfigArn")
self.logs_group_name = "/aws/lambda/{}".format(self.function_name)
# this isn't finished yet. it needs to find out the VpcId value
self._vpc_config = spec.get(
"VpcConfig", {"SubnetIds": [], "SecurityGroupIds": []}
)
# auto-generated
self.version = version
self.last_modified = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
if "ZipFile" in self.code:
self.code_bytes, self.code_size, self.code_sha_256 = _zipfile_content(
self.code["ZipFile"]
)
# TODO: we should be putting this in a lambda bucket
self.code["UUID"] = str(uuid.uuid4())
self.code["S3Key"] = "{}-{}".format(self.function_name, self.code["UUID"])
else:
key = _validate_s3_bucket_and_key(self.code)
if key:
self.code_bytes = key.value
self.code_size = key.size
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
else:
self.code_bytes = ""
self.code_size = 0
self.code_sha_256 = ""
self.function_arn = make_function_arn(
self.region, ACCOUNT_ID, self.function_name
)
if spec.get("Tags"):
self.tags = spec.get("Tags")
else:
self.tags = dict()
def set_version(self, version):
self.function_arn = make_function_ver_arn(
self.region, ACCOUNT_ID, self.function_name, version
)
self.version = version
self.last_modified = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
@property
def vpc_config(self):
config = self._vpc_config.copy()
if config["SecurityGroupIds"]:
config.update({"VpcId": "vpc-123abc"})
return config
@property
def physical_resource_id(self):
return self.function_name
def __repr__(self):
return json.dumps(self.get_configuration())
def _get_layers_data(self, layers_versions_arns):
backend = lambda_backends[self.region]
layer_versions = [
backend.layers_versions_by_arn(layer_version)
for layer_version in layers_versions_arns
]
if not all(layer_versions):
raise ValueError(
"InvalidParameterValueException",
"One or more LayerVersion does not exist {0}".format(
layers_versions_arns
),
)
return [{"Arn": lv.arn, "CodeSize": lv.code_size} for lv in layer_versions]
def get_code_signing_config(self):
return {
"CodeSigningConfigArn": self.code_signing_config_arn,
"FunctionName": self.function_name,
}
def get_configuration(self):
config = {
"CodeSha256": self.code_sha_256,
"CodeSize": self.code_size,
"Description": self.description,
"FunctionArn": self.function_arn,
"FunctionName": self.function_name,
"Handler": self.handler,
"LastModified": self.last_modified,
"MemorySize": self.memory_size,
"Role": self.role,
"Runtime": self.run_time,
"State": self.state,
"PackageType": self.package_type,
"Timeout": self.timeout,
"Version": str(self.version),
"VpcConfig": self.vpc_config,
"Layers": self.layers,
"SigningProfileVersionArn": self.signing_profile_version_arn,
"SigningJobArn": self.signing_job_arn,
}
if self.environment_vars:
config["Environment"] = {"Variables": self.environment_vars}
return config
def get_code(self):
code = {
"Code": {
"Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/{1}".format(
self.region, self.code["S3Key"]
),
"RepositoryType": "S3",
},
"Configuration": self.get_configuration(),
}
if self.reserved_concurrency:
code.update(
{
"Concurrency": {
"ReservedConcurrentExecutions": self.reserved_concurrency
}
}
)
return code
def update_configuration(self, config_updates):
for key, value in config_updates.items():
if key == "Description":
self.description = value
elif key == "Handler":
self.handler = value
elif key == "MemorySize":
self.memory_size = value
elif key == "Role":
self.role = value
elif key == "Runtime":
self.run_time = value
elif key == "Timeout":
self.timeout = value
elif key == "VpcConfig":
self._vpc_config = value
elif key == "Environment":
self.environment_vars = value["Variables"]
elif key == "Layers":
self.layers = self._get_layers_data(value)
return self.get_configuration()
def update_function_code(self, updated_spec):
if "DryRun" in updated_spec and updated_spec["DryRun"]:
return self.get_configuration()
if "ZipFile" in updated_spec:
self.code["ZipFile"] = updated_spec["ZipFile"]
# using the "hackery" from __init__ because it seems to work
# TODOs and FIXMEs included, because they'll need to be fixed
# in both places now
try:
to_unzip_code = base64.b64decode(
bytes(updated_spec["ZipFile"], "utf-8")
)
except Exception:
to_unzip_code = base64.b64decode(updated_spec["ZipFile"])
self.code_bytes = to_unzip_code
self.code_size = len(to_unzip_code)
self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest()
# TODO: we should be putting this in a lambda bucket
self.code["UUID"] = str(uuid.uuid4())
self.code["S3Key"] = "{}-{}".format(self.function_name, self.code["UUID"])
elif "S3Bucket" in updated_spec and "S3Key" in updated_spec:
key = None
try:
# FIXME: does not validate bucket region
key = s3_backend.get_object(
updated_spec["S3Bucket"], updated_spec["S3Key"]
)
except MissingBucket:
if do_validate_s3():
raise ValueError(
"InvalidParameterValueException",
"Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist",
)
except MissingKey:
if do_validate_s3():
raise ValueError(
"InvalidParameterValueException",
"Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.",
)
if key:
self.code_bytes = key.value
self.code_size = key.size
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
self.code["S3Bucket"] = updated_spec["S3Bucket"]
self.code["S3Key"] = updated_spec["S3Key"]
return self.get_configuration()
@staticmethod
def convert(s):
try:
return str(s, encoding="utf-8")
except Exception:
return s
def _invoke_lambda(self, code, event=None, context=None):
# Create the LogGroup if necessary, to write the result to
self.logs_backend.ensure_log_group(self.logs_group_name, [])
# TODO: context not yet implemented
if event is None:
event = dict()
if context is None:
context = {}
output = None
try:
# TODO: I believe we can keep the container running and feed events as needed
# also need to hook it up to the other services so it can make kws/s3 etc calls
# Should get invoke_id /RequestId from invocation
env_vars = {
"_HANDLER": self.handler,
"AWS_EXECUTION_ENV": "AWS_Lambda_{}".format(self.run_time),
"AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout,
"AWS_LAMBDA_FUNCTION_NAME": self.function_name,
"AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size,
"AWS_LAMBDA_FUNCTION_VERSION": self.version,
"AWS_REGION": self.region,
"AWS_ACCESS_KEY_ID": "role-account-id",
"AWS_SECRET_ACCESS_KEY": "role-secret-key",
"AWS_SESSION_TOKEN": "session-token",
}
env_vars.update(self.environment_vars)
env_vars["MOTO_HOST"] = settings.moto_server_host()
env_vars["MOTO_PORT"] = settings.moto_server_port()
env_vars[
"MOTO_HTTP_ENDPOINT"
] = f'{env_vars["MOTO_HOST"]}:{env_vars["MOTO_PORT"]}'
container = exit_code = None
log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON)
with _DockerDataVolumeContext(self) as data_vol:
try:
run_kwargs = dict()
network_name = settings.moto_network_name()
network_mode = settings.moto_network_mode()
if network_name:
run_kwargs["network"] = network_name
elif network_mode:
run_kwargs["network_mode"] = network_mode
elif settings.TEST_SERVER_MODE:
# AWSLambda can make HTTP requests to a Docker container called 'motoserver'
# Only works if our Docker-container is named 'motoserver'
# TODO: should remove this and rely on 'network_mode' instead, as this is too tightly coupled with our own test setup
run_kwargs["links"] = {"motoserver": "motoserver"}
# add host.docker.internal host on linux to emulate Mac + Windows behavior
# for communication with other mock AWS services running on localhost
if platform == "linux" or platform == "linux2":
run_kwargs["extra_hosts"] = {
"host.docker.internal": "host-gateway"
}
image_repo = settings.moto_lambda_image()
image_ref = f"{image_repo}:{self.run_time}"
self.docker_client.images.pull(":".join(parse_image_ref(image_ref)))
container = self.docker_client.containers.run(
image_ref,
[self.handler, json.dumps(event)],
remove=False,
mem_limit="{}m".format(self.memory_size),
volumes=["{}:/var/task".format(data_vol.name)],
environment=env_vars,
detach=True,
log_config=log_config,
**run_kwargs,
)
finally:
if container:
try:
exit_code = container.wait(timeout=300)
except requests.exceptions.ReadTimeout:
exit_code = -1
container.stop()
container.kill()
else:
if docker_3:
exit_code = exit_code["StatusCode"]
output = container.logs(stdout=False, stderr=True)
output += container.logs(stdout=True, stderr=False)
container.remove()
output = output.decode("utf-8")
self.save_logs(output)
# We only care about the response from the lambda
# Which is the last line of the output, according to https://github.com/lambci/docker-lambda/issues/25
resp = output.splitlines()[-1]
logs = os.linesep.join(
[line for line in self.convert(output).splitlines()[:-1]]
)
invocation_error = exit_code != 0
return resp, invocation_error, logs
except docker.errors.DockerException as e:
# Docker itself is probably not running - there will be no Lambda-logs to handle
msg = "error running docker: {}".format(e)
self.save_logs(msg)
return msg, True, ""
def save_logs(self, output):
# Send output to "logs" backend
invoke_id = uuid.uuid4().hex
log_stream_name = (
"{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
date=datetime.datetime.utcnow(),
version=self.version,
invoke_id=invoke_id,
)
)
self.logs_backend.create_log_stream(self.logs_group_name, log_stream_name)
log_events = [
{"timestamp": unix_time_millis(), "message": line}
for line in output.splitlines()
]
self.logs_backend.put_log_events(
self.logs_group_name, log_stream_name, log_events, None
)
def invoke(self, body, request_headers, response_headers):
if body:
body = json.loads(body)
else:
body = "{}"
# Get the invocation type:
res, errored, logs = self._invoke_lambda(code=self.code, event=body)
inv_type = request_headers.get("x-amz-invocation-type", "RequestResponse")
if inv_type == "RequestResponse":
encoded = base64.b64encode(logs.encode("utf-8"))
response_headers["x-amz-log-result"] = encoded.decode("utf-8")
result = res.encode("utf-8")
else:
result = res
if errored:
response_headers["x-amz-function-error"] = "Handled"
return result
@staticmethod
def cloudformation_name_type():
return "FunctionName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-function.html
return "AWS::Lambda::Function"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
optional_properties = (
"Description",
"MemorySize",
"Publish",
"Timeout",
"VpcConfig",
"Environment",
"ReservedConcurrentExecutions",
)
# required
spec = {
"Code": properties["Code"],
"FunctionName": resource_name,
"Handler": properties["Handler"],
"Role": properties["Role"],
"Runtime": properties["Runtime"],
}
# NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the
# default logic
for prop in optional_properties:
if prop in properties:
spec[prop] = properties[prop]
# when ZipFile is present in CloudFormation, per the official docs,
        # the code is a plaintext code snippet up to 4096 bytes.
# this snippet converts this plaintext code to a proper base64-encoded ZIP file.
if "ZipFile" in properties["Code"]:
spec["Code"]["ZipFile"] = base64.b64encode(
cls._create_zipfile_from_plaintext_code(spec["Code"]["ZipFile"])
)
backend = lambda_backends[region_name]
fn = backend.create_function(spec)
return fn
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["Arn"]
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return make_function_arn(self.region, ACCOUNT_ID, self.function_name)
raise UnformattedGetAttTemplateException()
@classmethod
def update_from_cloudformation_json(
cls, new_resource_name, cloudformation_json, original_resource, region_name
):
updated_props = cloudformation_json["Properties"]
original_resource.update_configuration(updated_props)
original_resource.update_function_code(updated_props["Code"])
return original_resource
@staticmethod
def _create_zipfile_from_plaintext_code(code):
zip_output = io.BytesIO()
zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED)
zip_file.writestr("index.py", code)
# This should really be part of the 'lambci' docker image
from moto.packages.cfnresponse import cfnresponse
with open(cfnresponse.__file__) as cfn:
zip_file.writestr("cfnresponse.py", cfn.read())
zip_file.close()
zip_output.seek(0)
return zip_output.read()
def delete(self, region):
lambda_backends[region].delete_function(self.function_name)
class EventSourceMapping(CloudFormationModel):
def __init__(self, spec):
# required
self.function_name = spec["FunctionName"]
self.event_source_arn = spec["EventSourceArn"]
# optional
self.batch_size = spec.get("BatchSize")
self.starting_position = spec.get("StartingPosition", "TRIM_HORIZON")
self.enabled = spec.get("Enabled", True)
self.starting_position_timestamp = spec.get("StartingPositionTimestamp", None)
self.function_arn = spec["FunctionArn"]
self.uuid = str(uuid.uuid4())
self.last_modified = time.mktime(datetime.datetime.utcnow().timetuple())
def _get_service_source_from_arn(self, event_source_arn):
return event_source_arn.split(":")[2].lower()
def _validate_event_source(self, event_source_arn):
valid_services = ("dynamodb", "kinesis", "sqs")
service = self._get_service_source_from_arn(event_source_arn)
return True if service in valid_services else False
@property
def event_source_arn(self):
return self._event_source_arn
@event_source_arn.setter
def event_source_arn(self, event_source_arn):
if not self._validate_event_source(event_source_arn):
raise ValueError(
"InvalidParameterValueException", "Unsupported event source type"
)
self._event_source_arn = event_source_arn
@property
def batch_size(self):
return self._batch_size
@batch_size.setter
def batch_size(self, batch_size):
batch_size_service_map = {
"kinesis": (100, 10000),
"dynamodb": (100, 1000),
"sqs": (10, 10),
}
source_type = self._get_service_source_from_arn(self.event_source_arn)
batch_size_for_source = batch_size_service_map[source_type]
if batch_size is None:
self._batch_size = batch_size_for_source[0]
elif batch_size > batch_size_for_source[1]:
error_message = "BatchSize {} exceeds the max of {}".format(
batch_size, batch_size_for_source[1]
)
raise ValueError("InvalidParameterValueException", error_message)
else:
self._batch_size = int(batch_size)
def get_configuration(self):
return {
"UUID": self.uuid,
"BatchSize": self.batch_size,
"EventSourceArn": self.event_source_arn,
"FunctionArn": self.function_arn,
"LastModified": self.last_modified,
"LastProcessingResult": "",
"State": "Enabled" if self.enabled else "Disabled",
"StateTransitionReason": "User initiated",
}
def delete(self, region_name):
lambda_backend = lambda_backends[region_name]
lambda_backend.delete_event_source_mapping(self.uuid)
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
return "AWS::Lambda::EventSourceMapping"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
lambda_backend = lambda_backends[region_name]
return lambda_backend.create_event_source_mapping(properties)
@classmethod
def update_from_cloudformation_json(
cls, new_resource_name, cloudformation_json, original_resource, region_name
):
properties = cloudformation_json["Properties"]
event_source_uuid = original_resource.uuid
lambda_backend = lambda_backends[region_name]
return lambda_backend.update_event_source_mapping(event_source_uuid, properties)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
lambda_backend = lambda_backends[region_name]
esms = lambda_backend.list_event_source_mappings(
event_source_arn=properties["EventSourceArn"],
function_name=properties["FunctionName"],
)
for esm in esms:
if esm.uuid == resource_name:
esm.delete(region_name)
@property
def physical_resource_id(self):
return self.uuid
class LambdaVersion(CloudFormationModel):
def __init__(self, spec):
self.version = spec["Version"]
def __repr__(self):
return str(self.logical_resource_id)
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-version.html
return "AWS::Lambda::Version"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
function_name = properties["FunctionName"]
func = lambda_backends[region_name].publish_function(function_name)
spec = {"Version": func.version}
return LambdaVersion(spec)
class LambdaStorage(object):
def __init__(self):
# Format 'func_name' {'alias': {}, 'versions': []}
self._functions = {}
self._arns = weakref.WeakValueDictionary()
def _get_latest(self, name):
return self._functions[name]["latest"]
def _get_version(self, name, version):
index = version - 1
try:
return self._functions[name]["versions"][index]
except IndexError:
return None
def _get_alias(self, name, alias):
return self._functions[name]["alias"].get(alias, None)
def get_function_by_name(self, name, qualifier=None):
if name not in self._functions:
return None
if qualifier is None:
return self._get_latest(name)
try:
return self._get_version(name, int(qualifier))
except ValueError:
return self._functions[name]["latest"]
def list_versions_by_function(self, name):
if name not in self._functions:
return None
latest = copy.copy(self._functions[name]["latest"])
latest.function_arn += ":$LATEST"
return [latest] + self._functions[name]["versions"]
def get_arn(self, arn):
return self._arns.get(arn, None)
def get_function_by_name_or_arn(self, name_or_arn, qualifier=None):
return self.get_function_by_name(name_or_arn, qualifier) or self.get_arn(
name_or_arn
)
def put_function(self, fn):
"""
:param fn: Function
:type fn: LambdaFunction
"""
valid_role = re.match(InvalidRoleFormat.pattern, fn.role)
if valid_role:
account = valid_role.group(2)
if account != ACCOUNT_ID:
raise CrossAccountNotAllowed()
try:
iam_backend.get_role_by_arn(fn.role)
except IAMNotFoundException:
raise InvalidParameterValueException(
"The role defined for the function cannot be assumed by Lambda."
)
else:
raise InvalidRoleFormat(fn.role)
if fn.function_name in self._functions:
self._functions[fn.function_name]["latest"] = fn
else:
self._functions[fn.function_name] = {
"latest": fn,
"versions": [],
"alias": weakref.WeakValueDictionary(),
}
# instantiate a new policy for this version of the lambda
fn.policy = Policy(fn)
self._arns[fn.function_arn] = fn
def publish_function(self, name_or_arn, description=""):
function = self.get_function_by_name_or_arn(name_or_arn)
name = function.function_name
if name not in self._functions:
return None
if not self._functions[name]["latest"]:
return None
new_version = len(self._functions[name]["versions"]) + 1
fn = copy.copy(self._functions[name]["latest"])
fn.set_version(new_version)
if description:
fn.description = description
self._functions[name]["versions"].append(fn)
self._arns[fn.function_arn] = fn
return fn
def del_function(self, name_or_arn, qualifier=None):
function = self.get_function_by_name_or_arn(name_or_arn)
if function:
name = function.function_name
if not qualifier:
# Something is still reffing this so delete all arns
latest = self._functions[name]["latest"].function_arn
del self._arns[latest]
for fn in self._functions[name]["versions"]:
del self._arns[fn.function_arn]
del self._functions[name]
return True
elif qualifier == "$LATEST":
self._functions[name]["latest"] = None
                # If there are no functions left
if (
not self._functions[name]["versions"]
and not self._functions[name]["latest"]
):
del self._functions[name]
return True
else:
fn = self.get_function_by_name(name, qualifier)
if fn:
self._functions[name]["versions"].remove(fn)
                    # If there are no functions left
if (
not self._functions[name]["versions"]
and not self._functions[name]["latest"]
):
del self._functions[name]
return True
return False
def all(self):
result = []
for function_group in self._functions.values():
latest = copy.deepcopy(function_group["latest"])
latest.function_arn = "{}:$LATEST".format(latest.function_arn)
result.append(latest)
result.extend(function_group["versions"])
return result
def latest(self):
"""
Return the list of functions with version @LATEST
:return:
"""
result = []
for function_group in self._functions.values():
if function_group["latest"] is not None:
result.append(function_group["latest"])
return result
class LayerStorage(object):
def __init__(self):
self._layers = {}
self._arns = weakref.WeakValueDictionary()
def put_layer_version(self, layer_version):
"""
:param layer_version: LayerVersion
"""
if layer_version.name not in self._layers:
self._layers[layer_version.name] = Layer(
layer_version.name, layer_version.region
)
self._layers[layer_version.name].attach_version(layer_version)
def list_layers(self):
return [layer.to_dict() for layer in self._layers.values()]
def get_layer_versions(self, layer_name):
if layer_name in self._layers:
return list(iter(self._layers[layer_name].layer_versions.values()))
return []
def get_layer_version_by_arn(self, layer_version_arn):
split_arn = split_layer_arn(layer_version_arn)
if split_arn.layer_name in self._layers:
return self._layers[split_arn.layer_name].layer_versions.get(
split_arn.version, None
)
return None
class LambdaBackend(BaseBackend):
"""
Implementation of the AWS Lambda endpoint.
Invoking functions is supported - they will run inside a Docker container, emulating the real AWS behaviour as closely as possible.
It is possible to connect from AWS Lambdas to other services, as long as you are running Moto in ServerMode.
The Lambda has access to environment variables `MOTO_HOST` and `MOTO_PORT`, which can be used to build the url that MotoServer runs on:
.. sourcecode:: python
def lambda_handler(event, context):
host = os.environ.get("MOTO_HOST")
port = os.environ.get("MOTO_PORT")
url = host + ":" + port
ec2 = boto3.client('ec2', region_name='us-west-2', endpoint_url=url)
# Or even simpler:
full_url = os.environ.get("MOTO_HTTP_ENDPOINT")
ec2 = boto3.client("ec2", region_name="eu-west-1", endpoint_url=full_url)
ec2.do_whatever_inside_the_existing_moto_server()
Moto will run on port 5000 by default. This can be overwritten by setting an environment variable when starting Moto:
.. sourcecode:: bash
# This env var will be propagated to the Docker container running the Lambda functions
MOTO_PORT=5000 moto_server
The Docker container uses the default network mode, `bridge`.
The following environment variables are available for fine-grained control over the Docker connection options:
.. sourcecode:: bash
# Provide the name of a custom network to connect to
MOTO_DOCKER_NETWORK_NAME=mycustomnetwork moto_server
# Override the network mode
# For example, network_mode=host would use the network of the host machine
# Note that this option will be ignored if MOTO_DOCKER_NETWORK_NAME is also set
MOTO_DOCKER_NETWORK_MODE=host moto_server
The Docker images used by Moto are taken from the `lambci/lambda`-repo by default. Use the following environment variable to configure a different repo:
.. sourcecode:: bash
MOTO_DOCKER_LAMBDA_IMAGE=mLupin/docker-lambda
.. note:: When using the decorators, a Docker container cannot reach Moto, as it does not run as a server. Any boto3-invocations used within your Lambda will try to connect to AWS.
"""
def __init__(self, region_name):
self._lambdas = LambdaStorage()
self._event_source_mappings = {}
self._layers = LayerStorage()
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
@staticmethod
def default_vpc_endpoint_service(service_region, zones):
"""Default VPC endpoint service."""
return BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "lambda"
)
def create_function(self, spec):
function_name = spec.get("FunctionName", None)
if function_name is None:
raise RESTError("InvalidParameterValueException", "Missing FunctionName")
fn = LambdaFunction(spec, self.region_name, version="$LATEST")
self._lambdas.put_function(fn)
if spec.get("Publish"):
ver = self.publish_function(function_name)
fn = copy.deepcopy(
fn
) # We don't want to change the actual version - just the return value
fn.version = ver.version
return fn
def create_event_source_mapping(self, spec):
required = ["EventSourceArn", "FunctionName"]
for param in required:
if not spec.get(param):
raise RESTError(
"InvalidParameterValueException", "Missing {}".format(param)
)
# Validate function name
func = self._lambdas.get_function_by_name_or_arn(spec.get("FunctionName", ""))
if not func:
raise RESTError("ResourceNotFoundException", "Invalid FunctionName")
# Validate queue
for queue in sqs_backends[self.region_name].queues.values():
if queue.queue_arn == spec["EventSourceArn"]:
if queue.lambda_event_source_mappings.get("func.function_arn"):
# TODO: Correct exception?
raise RESTError(
"ResourceConflictException", "The resource already exists."
)
if queue.fifo_queue:
raise RESTError(
"InvalidParameterValueException",
"{} is FIFO".format(queue.queue_arn),
)
else:
spec.update({"FunctionArn": func.function_arn})
esm = EventSourceMapping(spec)
self._event_source_mappings[esm.uuid] = esm
# Set backend function on queue
queue.lambda_event_source_mappings[esm.function_arn] = esm
return esm
for stream in json.loads(
dynamodbstreams_backends[self.region_name].list_streams()
)["Streams"]:
if stream["StreamArn"] == spec["EventSourceArn"]:
spec.update({"FunctionArn": func.function_arn})
esm = EventSourceMapping(spec)
self._event_source_mappings[esm.uuid] = esm
table_name = stream["TableName"]
table = dynamodb_backends[self.region_name].get_table(table_name)
table.lambda_event_source_mappings[esm.function_arn] = esm
return esm
raise RESTError("ResourceNotFoundException", "Invalid EventSourceArn")
def publish_layer_version(self, spec):
required = ["LayerName", "Content"]
for param in required:
if not spec.get(param):
raise RESTError(
"InvalidParameterValueException", "Missing {}".format(param)
)
layer_version = LayerVersion(spec, self.region_name)
self._layers.put_layer_version(layer_version)
return layer_version
def list_layers(self):
return self._layers.list_layers()
def get_layer_versions(self, layer_name):
return self._layers.get_layer_versions(layer_name)
def layers_versions_by_arn(self, layer_version_arn):
return self._layers.get_layer_version_by_arn(layer_version_arn)
def publish_function(self, function_name, description=""):
return self._lambdas.publish_function(function_name, description)
def get_function(self, function_name_or_arn, qualifier=None):
return self._lambdas.get_function_by_name_or_arn(
function_name_or_arn, qualifier
)
def list_versions_by_function(self, function_name):
return self._lambdas.list_versions_by_function(function_name)
def get_event_source_mapping(self, uuid):
return self._event_source_mappings.get(uuid)
def delete_event_source_mapping(self, uuid):
return self._event_source_mappings.pop(uuid)
def update_event_source_mapping(self, uuid, spec):
esm = self.get_event_source_mapping(uuid)
if not esm:
return False
for key in spec.keys():
if key == "FunctionName":
func = self._lambdas.get_function_by_name_or_arn(spec[key])
esm.function_arn = func.function_arn
elif key == "BatchSize":
esm.batch_size = spec[key]
elif key == "Enabled":
esm.enabled = spec[key]
esm.last_modified = time.mktime(datetime.datetime.utcnow().timetuple())
return esm
def list_event_source_mappings(self, event_source_arn, function_name):
esms = list(self._event_source_mappings.values())
if event_source_arn:
esms = list(filter(lambda x: x.event_source_arn == event_source_arn, esms))
if function_name:
esms = list(filter(lambda x: x.function_name == function_name, esms))
return esms
def get_function_by_arn(self, function_arn):
return self._lambdas.get_arn(function_arn)
def delete_function(self, function_name, qualifier=None):
return self._lambdas.del_function(function_name, qualifier)
def list_functions(self, func_version=None):
if func_version == "ALL":
return self._lambdas.all()
return self._lambdas.latest()
def send_sqs_batch(self, function_arn, messages, queue_arn):
success = True
for message in messages:
func = self.get_function_by_arn(function_arn)
result = self._send_sqs_message(func, message, queue_arn)
if not result:
success = False
return success
def _send_sqs_message(self, func, message, queue_arn):
event = {
"Records": [
{
"messageId": message.id,
"receiptHandle": message.receipt_handle,
"body": message.body,
"attributes": {
"ApproximateReceiveCount": "1",
"SentTimestamp": "1545082649183",
"SenderId": "AIDAIENQZJOLO23YVJ4VO",
"ApproximateFirstReceiveTimestamp": "1545082649185",
},
"messageAttributes": {},
"md5OfBody": "098f6bcd4621d373cade4e832627b4f6",
"eventSource": "aws:sqs",
"eventSourceARN": queue_arn,
"awsRegion": self.region_name,
}
]
}
request_headers = {}
response_headers = {}
func.invoke(json.dumps(event), request_headers, response_headers)
return "x-amz-function-error" not in response_headers
def send_sns_message(self, function_name, message, subject=None, qualifier=None):
event = {
"Records": [
{
"EventVersion": "1.0",
"EventSubscriptionArn": "arn:aws:sns:EXAMPLE",
"EventSource": "aws:sns",
"Sns": {
"SignatureVersion": "1",
"Timestamp": "1970-01-01T00:00:00.000Z",
"Signature": "EXAMPLE",
"SigningCertUrl": "EXAMPLE",
"MessageId": "95df01b4-ee98-5cb9-9903-4c221d41eb5e",
"Message": message,
"MessageAttributes": {
"Test": {"Type": "String", "Value": "TestString"},
"TestBinary": {"Type": "Binary", "Value": "TestBinary"},
},
"Type": "Notification",
"UnsubscribeUrl": "EXAMPLE",
"TopicArn": "arn:aws:sns:EXAMPLE",
"Subject": subject or "TestInvoke",
},
}
]
}
func = self._lambdas.get_function_by_name_or_arn(function_name, qualifier)
func.invoke(json.dumps(event), {}, {})
def send_dynamodb_items(self, function_arn, items, source):
event = {
"Records": [
{
"eventID": item.to_json()["eventID"],
"eventName": "INSERT",
"eventVersion": item.to_json()["eventVersion"],
"eventSource": item.to_json()["eventSource"],
"awsRegion": self.region_name,
"dynamodb": item.to_json()["dynamodb"],
"eventSourceARN": source,
}
for item in items
]
}
func = self._lambdas.get_arn(function_arn)
return func.invoke(json.dumps(event), {}, {})
def send_log_event(
self, function_arn, filter_name, log_group_name, log_stream_name, log_events
):
data = {
"messageType": "DATA_MESSAGE",
"owner": ACCOUNT_ID,
"logGroup": log_group_name,
"logStream": log_stream_name,
"subscriptionFilters": [filter_name],
"logEvents": log_events,
}
output = io.BytesIO()
with GzipFile(fileobj=output, mode="w") as f:
f.write(json.dumps(data, separators=(",", ":")).encode("utf-8"))
payload_gz_encoded = base64.b64encode(output.getvalue()).decode("utf-8")
event = {"awslogs": {"data": payload_gz_encoded}}
func = self._lambdas.get_arn(function_arn)
return func.invoke(json.dumps(event), {}, {})
def list_tags(self, resource):
return self.get_function_by_arn(resource).tags
def tag_resource(self, resource, tags):
fn = self.get_function_by_arn(resource)
if not fn:
return False
fn.tags.update(tags)
return True
def untag_resource(self, resource, tagKeys):
fn = self.get_function_by_arn(resource)
if fn:
for key in tagKeys:
try:
del fn.tags[key]
except KeyError:
pass
# Don't care
return True
return False
def add_permission(self, function_name, raw):
fn = self.get_function(function_name)
fn.policy.add_statement(raw)
def remove_permission(self, function_name, sid, revision=""):
fn = self.get_function(function_name)
fn.policy.del_statement(sid, revision)
def get_code_signing_config(self, function_name):
fn = self.get_function(function_name)
return fn.get_code_signing_config()
def get_policy(self, function_name):
fn = self.get_function(function_name)
return fn.policy.get_policy()
def get_policy_wire_format(self, function_name):
fn = self.get_function(function_name)
return fn.policy.wire_format()
def update_function_code(self, function_name, qualifier, body):
fn = self.get_function(function_name, qualifier)
if fn:
if body.get("Publish", False):
fn = self.publish_function(function_name)
config = fn.update_function_code(body)
return config
else:
return None
def update_function_configuration(self, function_name, qualifier, body):
fn = self.get_function(function_name, qualifier)
return fn.update_configuration(body) if fn else None
def invoke(self, function_name, qualifier, body, headers, response_headers):
fn = self.get_function(function_name, qualifier)
if fn:
payload = fn.invoke(body, headers, response_headers)
response_headers["Content-Length"] = str(len(payload))
return payload
else:
return None
def put_function_concurrency(self, function_name, reserved_concurrency):
fn = self.get_function(function_name)
fn.reserved_concurrency = reserved_concurrency
return fn.reserved_concurrency
def delete_function_concurrency(self, function_name):
fn = self.get_function(function_name)
fn.reserved_concurrency = None
return fn.reserved_concurrency
def get_function_concurrency(self, function_name):
fn = self.get_function(function_name)
return fn.reserved_concurrency
def do_validate_s3():
return os.environ.get("VALIDATE_LAMBDA_S3", "") in ["", "1", "true"]
lambda_backends = BackendDict(LambdaBackend, "lambda")
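# Hypothetical usage sketch (spec values, role_arn and zipped_code are assumptions, not part of this module):
#   backend = lambda_backends["us-east-1"]
#   fn = backend.create_function({
#       "FunctionName": "demo", "Runtime": "python3.8", "Role": role_arn,
#       "Handler": "index.lambda_handler", "Code": {"ZipFile": zipped_code},
#   })
#   backend.invoke("demo", None, '{"key": "value"}', {}, {})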
|
py | 7df77313e5f090a973596de5f002a7779d1519d6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import sys
import os
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist bdist_wheel upload")
sys.exit()
with open('README.md') as readme_file:
readme = readme_file.read()
requirements = [
# TODO: put package requirements here
]
setup(
name='exptk',
version='0.2.1',
description="A toolkit for building a machine learning project",
long_description=readme,
author="Chia-Jung, Yang",
author_email='[email protected]',
url='https://github.com/jeroyang/exptk',
packages=[
'exptk',
],
package_dir={'exptk':
'exptk'},
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='exptk',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests'
)
|
py | 7df773e2cfc6d554a6ab5578ca148c154083b792 | import typing
from discord.ext import commands
from app import checks
from app.classes.context import MyContext
from app.i18n import t_
if typing.TYPE_CHECKING:
from app.classes.bot import Bot
class Premium(commands.Cog):
def __init__(self, bot: "Bot"):
self.bot = bot
@commands.command(
name="refreshroles", help=t_("Refresh your donor/patron roles.", True)
)
@checks.support_server()
@commands.cooldown(1, 5, type=commands.BucketType.user)
async def refresh_roles(self, ctx: "MyContext"):
self.bot.dispatch("update_prem_roles", ctx.author.id)
await ctx.send(t_("Your roles should update momentarily."))
def setup(bot: "Bot"):
bot.add_cog(Premium(bot))
|
py | 7df7742686a86033d7bc2377ad0998c56447f1fd | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
from dulwich import porcelain
from dulwich.diff_tree import _NULL_ENTRY, TreeEntry, _is_tree, _tree_entries
from git import diff3, git_reset
from git.gitutils import (
GitError,
_get_repo,
count_commits_between,
find_revision_sha,
get_remote_tracking_branch,
merge_base
)
def _merge_entries(path, trees):
"""Merge the entries of two trees.
:param path: A path to prepend to all tree entry names.
:param tree1: The first Tree object to iterate, or None.
:param tree2: The second Tree object to iterate, or None.
:return: A list of pairs of TreeEntry objects for each pair of entries in
the trees. If an entry exists in one tree but not the other, the other
entry will have all attributes set to None. If neither entry's path is
None, they are guaranteed to match.
"""
entries = []
for tree in trees:
entries.append(_tree_entries(path, tree))
inds = []
lens = []
for e in entries:
inds.append(0)
lens.append(len(e))
result = []
while any([ind < l for ind, l in zip(inds, lens)]):
next_entry = [e[ind] if ind < l else _NULL_ENTRY for e, ind, l in zip(entries, inds, lens)]
paths = [e.path for e in next_entry if e.path]
minpath = min(paths)
merged = [e if e.path == minpath else _NULL_ENTRY for e in next_entry]
result.append(merged)
inds = [ind + 1 if e.path == minpath else ind for e, ind in zip(next_entry, inds)]
return result
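# Minimal, self-contained sketch of what _merge_entries produces; the file names and
# blob contents below are made-up examples, not data used by this tool. Two in-memory
# trees that share 'a.txt' but differ on 'b.txt'/'c.txt' are lined up row by row, with
# _NULL_ENTRY filling the slot of any tree that lacks a given path.
def _demo_merge_entries():
    from dulwich.objects import Blob, Tree
    blob = Blob.from_string(b'hello\n')
    tree1, tree2 = Tree(), Tree()
    tree1.add(b'a.txt', 0o100644, blob.id)
    tree1.add(b'b.txt', 0o100644, blob.id)
    tree2.add(b'a.txt', 0o100644, blob.id)
    tree2.add(b'c.txt', 0o100644, blob.id)
    for row in _merge_entries(b'', [tree1, tree2]):
        print([entry.path for entry in row])
    # expected rows: [b'a.txt', b'a.txt'], [b'b.txt', None], [None, b'c.txt']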
def all_eq(entries):
    # True when every entry in the row compares equal (pairwise).
    return all(i == j for i in entries for j in entries)
def first_nonempty(entries):
result = None
for entry in entries:
result = result or entry
return result
def walk_trees(store, tree_ids, prune_identical=False):
"""Recursively walk all the entries of N trees.
Iteration is depth-first pre-order, as in e.g. os.walk.
:param store: An ObjectStore for looking up objects.
:param trees: iterable of SHAs for N trees
:param prune_identical: If True, identical subtrees will not be walked.
:return: Iterator over tuple contsining N TreeEntry objects for each of entries
in the trees and their subtrees recursively. If an entry exists in one
tree but not the other, the other entry will have all attributes set
to None. If neither entry's path is None, they are guaranteed to
match.
"""
# This could be fairly easily generalized to >2 trees if we find a use
# case.
modes = [tree_id and stat.S_IFDIR or None for tree_id in tree_ids]
todo = [[TreeEntry(b'', mode, tree_id) for mode, tree_id in zip(modes, tree_ids)]]
while todo:
entries = todo.pop()
is_trees = [_is_tree(entry) for entry in entries]
if prune_identical and all(is_trees) and all_eq(entries):
continue
trees = [is_tree and store[entry.sha] or None for is_tree, entry in zip(is_trees, entries)]
path = first_nonempty([entry.path for entry in entries])
todo.extend(reversed(_merge_entries(path, trees)))
yield tuple(entries)
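# Usage sketch (the store and tree SHAs are assumed to come from an already-open repo,
# e.g. repo.repo.object_store and commit.tree): walks two trees in lock-step and prints
# the paths of blob entries that differ; prune_identical skips identical subtrees.
def _demo_walk_trees(store, tree_a, tree_b):
    for entries in walk_trees(store, [tree_a, tree_b], prune_identical=True):
        if any(_is_tree(e) for e in entries):
            continue  # only report blobs; subtrees are expanded by the walk itself
        if not all_eq(entries):
            print([e.path for e in entries])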
def merge_trees(store, base, mine, theirs):
    ''' takes tree ids for base, mine, and theirs. merges the trees into the current working tree'''
num_conflicts = 0
added = []
removed = []
w = walk_trees(store, [base, mine, theirs], True)
count = 0
for b, m, t in w:
if _is_tree(b) or _is_tree(m) or _is_tree(t):
#todo... handle mkdir, rmdir
continue
# if mine == theirs match, use either
elif m == t:
if not b.path:
print(' ', m.path, 'was added, but matches already')
            continue  # leave working tree alone
# if base==theirs, but not mine, already deleted (do nothing)
elif b == t and not m.path:
print(' ', b.path, ' already deleted in head')
continue
# if base==mine, but not theirs, delete
elif b == m and not t.path:
print(' -', m.path, ' was deleted in theirs.')
os.remove(m.path)
removed.append(m.path)
elif not b.path and m.path and not t.path: #add in mine
print(' ', m.path, 'added in mine')
continue
elif not b.path and t.path and not m.path: # add theirs to mine
# add theirs
print(' +', t.path, ': adding to head')
with open(t.path, 'w') as f:
f.write(store[t.sha].data)
added.append(t.path)
elif not m == t: # conflict
print(' ?', m.path, ': merging conflicts')
result = diff3.merge(
store[m.sha].data.splitlines(True),
store[b.sha].data.splitlines(True) if b.sha else [''],
store[t.sha].data.splitlines(True)
)
mergedfile = result['body']
had_conflict = result['conflict']
with open(m.path, 'w') as f:
for line in mergedfile:
f.write(line)
if had_conflict:
num_conflicts += 1
print(
' !!! {} had a conflict that could not be resolved.\n conflict markers added to file in working tree.\n you need to resolve manually '
.format(m.path)
)
added.append(m.path)
return num_conflicts, added, removed
def mergecommits(store, base, mine, theirs):
merge_trees(store, store[base].tree, store[mine].tree, store[theirs].tree)
def merge(args):
    helptext = '''git merge [--msg <msg>] [<commit>]
    git merge --abort\n
    merges <commit> into HEAD, or merges the remote tracking branch if no commit is specified.
    <commit> can be a local or remote ref, or an existing commit sha.
    merge will handle unambiguous conflicts between HEAD and the other
    merge head, and will insert conflict markers if conflicts cannot be resolved.
    note that the strategy used will prefer changes in the local HEAD.
    for instance, if HEAD deleted a section while MERGE_HEAD modified the same
    section, the section will be deleted from the final result without indicating a conflict.
    be sure to commit any local changes before running merge, as files in the working tree (i.e. on disk) are changed and checked in, which will probably overwrite any local uncommitted changes.
    note that merge will not actually commit anything. run git commit to commit a successful merge.
    --abort will remove the MERGE_HEAD and MERGE_MSG files and will reset the staging area, but won't affect files on disk. use git reset --hard or git checkout if this is desired.
    '''
repo = _get_repo()
print('_' * 30)
parser = argparse.ArgumentParser(prog='merge', usage=helptext)
parser.add_argument(
'commit',
action='store',
nargs='?',
help='commit sha, local branch, or remote branch name to merge from'
)
parser.add_argument('--msg', nargs=1, action='store', help='commit message to store')
parser.add_argument('--abort', action='store_true', help='abort in progress merge attempt')
result = parser.parse_args(args)
if result.abort:
print(
            'attempting to undo merge. beware, files in the working tree are not touched. \nuse git reset --hard to revert particular files'
)
git_reset([])
os.remove(os.path.join(repo.repo.controldir(), 'MERGE_HEAD'))
os.remove(os.path.join(repo.repo.controldir(), 'MERGE_MSG'))
#todo: check for uncommitted changes and confirm
# first, determine merge head
merge_head = find_revision_sha(repo, result.commit or get_remote_tracking_branch(repo, repo.active_branch))
if not merge_head:
raise GitError(
'must specify a commit sha, branch, remote tracking branch to merge from. or, need to set-upstream branch using git branch --set-upstream <remote>[/<branch>]'
)
head = find_revision_sha(repo, repo.active_branch)
base_sha = merge_base(repo, head, merge_head)[0] #fixme, what if multiple bases
if base_sha == head:
print('Fast forwarding {} to {}'.format(repo.active_branch, merge_head))
repo.refs['HEAD'] = merge_head
return
if base_sha == merge_head:
print('head is already up to date')
return
print(
'merging <{}> into <{}>\n{} commits ahead of merge base <{}> respectively'.format(
merge_head[0:7],
head[0:7],
count_commits_between(repo,
merge_head,
head),
base_sha[0:7]
)
)
base_tree = repo[base_sha].tree
merge_head_tree = repo[merge_head].tree
head_tree = repo[head].tree
num_conflicts, added, removed = merge_trees(repo.repo.object_store, base_tree, head_tree, merge_head_tree)
# update index
if added:
porcelain.add(repo.path, added)
if removed:
porcelain.rm(repo.path, removed)
repo.repo._put_named_file('MERGE_HEAD', merge_head)
repo.repo._put_named_file('MERGE_MSG', 'Merged from {}({})'.format(merge_head, result.commit))
print('Merge complete with {} conflicted files'.format(num_conflicts))
print(
        '''Merged files were added to the staging area, but have not yet been committed.
    Review changes (e.g. git diff or git diff>changes.txt; edit changes.txt), and
    resolve any conflict markers before committing.
Use git add on any files updated after resolving conflicts.
Run git commit to complete the merge process.
'''
)
if __name__ == '__main__':
import sys
merge(sys.argv[1:])
|
py | 7df774e0dcb237d5416766c59e5a28f2105a4f9e | qType = {
'SPARQL': 'sparql',
'TPF': 'tpf',
'JSON': 'json'
}
|
py | 7df7758f8642f66514e817f8ec43fa5ed991c213 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : Data copy
Case Name   : COPY TO/FROM with FORMATTER but without FIXED mode
Description :
    1. Create the test table and insert data
    2. Create the data file
    3. COPY TO with FORMATTER but without FIXED
    4. COPY FROM with FORMATTER but without FIXED
    5. Clean up the environment
Expect      :
    1. Test table created and data inserted successfully
    2. Data file created successfully
    3. COPY fails
    4. COPY fails
    5. Environment cleaned up successfully
History     :
"""
import unittest
import os
from yat.test import Node
from yat.test import macro
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
logger = Logger()
class CopyFile(unittest.TestCase):
def setUp(self):
self.log = Logger()
        self.log.info('Opengauss_Function_Dml_Copy_Case0067 started')
self.commonsh = CommonSH('PrimaryDbUser')
self.userNode = Node(node='PrimaryDbUser')
self.Constant = Constant()
self.tb_name = 't_copy_67'
self.file_name = 'testcopy67.dat'
self.copy_dir_path = os.path.join(macro.DB_INSTANCE_PATH,
'pg_copydir')
def test_copy_file(self):
        text = 'step1: create the test table and insert data into it ' \
               'Expect: table created and data inserted successfully'
self.log.info(text)
sql_cmd = self.commonsh.execut_db_sql(
f"drop table if exists {self.tb_name};"
f"create table {self.tb_name} (sk integer,id varchar(16),"
f"name varchar(20),sq_ft integer);"
f"insert into {self.tb_name} values (001,'sk1','tt1',3331);"
f"insert into {self.tb_name} values (002,'sk2','tt2',3332);"
f"insert into {self.tb_name} values (003,'sk3','tt3',3333);")
self.log.info(sql_cmd)
self.assertIn(self.Constant.CREATE_TABLE_SUCCESS, sql_cmd,
                      'Execution failed: ' + text)
self.assertIn(self.Constant.INSERT_SUCCESS_MSG, sql_cmd,
                      'Execution failed: ' + text)
        text = 'step2: create the data file  Expect: data file created successfully'
self.log.info(text)
excute_cmd = f'''mkdir {self.copy_dir_path};
touch {os.path.join(self.copy_dir_path, self.file_name)};'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], msg)
        text = 'step3: copy to with formatter but without fixed  Expect: copy fails'
self.log.info(text)
sql_cmd = self.commonsh.execut_db_sql(
f"copy {self.tb_name} to '"
f"{os.path.join(self.copy_dir_path, self.file_name)}' formatter"
f"(sk(0,1),id(2,3),name(5,3),sq_ft(8,4));")
self.log.info(sql_cmd)
self.assertIn('FORMATTER only can be specified in FIXED mode',
                      sql_cmd, 'Execution failed: ' + text)
        text = 'step4: copy from with formatter but without fixed  Expect: copy fails'
self.log.info(text)
sql_cmd = self.commonsh.execut_db_sql(
f"copy {self.tb_name} from '"
f"{os.path.join(self.copy_dir_path, self.file_name)}'"
f" formatter(name(2,4));")
self.log.info(sql_cmd)
self.assertIn('FORMATTER only can be specified in FIXED mode',
                      sql_cmd, 'Execution failed: ' + text)
def tearDown(self):
        text = 'step5: clean up the environment  Expect: environment cleaned up successfully'
self.log.info(text)
sql_cmd = self.commonsh.execut_db_sql(
f"drop table if exists {self.tb_name};")
self.log.info(sql_cmd)
excute_cmd = f'''rm -rf {self.copy_dir_path}'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd)
        self.log.info('Opengauss_Function_Dml_Copy_Case0067 finished')
|
py | 7df775b7a6e75d2618c3de99c2d8d14c8ef83f5f | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 65000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
|
py | 7df775cfe708bf833606cef5ec709e424d67c99c | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 1 13:21:35 2019
@author: khale
"""
# Standard library imports.
import sys
import time
# Third party imports.
import numpy as np
import scipy as sc
import pandas as pd
import numba
#from scipy.sparse.linalg import spsolve
# Local imports.
try:
from ..math_funcs._cython_definitions.matrix_funcs import matrix_assembler
except ModuleNotFoundError:
print("Cythonized modules not found!")
print("Falling back to -less efficient- numba mode!")
from ..math_funcs.numba_funcs import matrix_assembler
###############################################################################
###############################################################################
@numba.njit(cache=True, nogil=True)
def solve(A, b):
x = np.linalg.solve(A, b)
return x
def progress_bar(steps, i, t0, t_sim):
sys.stdout.write('\r')
length = (100*(1+i)//(4*steps))
percentage = 100*(1+i)//steps
t = time.perf_counter() - t0
format_ = ('='*length,percentage,i+1, steps, t_sim, t)
sys.stdout.write("[%-25s] %d%%, (%s/%s) steps. Sim Time = %.5s | CPU Time = %.5s (s)" % format_)
sys.stdout.flush()
###############################################################################
###############################################################################
class abstract_solver(object):
def __init__(self, model):
self.model = model
self._construct_system_arrays()
self._initialize_model()
self._create_indicies()
self._construct_containers()
def _construct_system_arrays(self):
n = self.model.n
nc = self.model.nc
self._q = np.zeros((n, 1), dtype=np.float64)
self._qd = np.zeros((n, 1), dtype=np.float64)
self._qdd = np.zeros((n, 1), dtype=np.float64)
self._lgr = np.zeros((nc, 1), dtype=np.float64)
#self._pos_m = np.zeros((nc, 1), dtype=np.float64)
#self._vel_m = np.zeros((nc, 1), dtype=np.float64)
#self._acc_m = np.zeros((nc, 1), dtype=np.float64)
self._jac_ = np.zeros((nc, n), dtype=np.float64)
self._mass = np.zeros((n, n), dtype=np.float64)
self._mass_matrix_rows = np.arange(self.model.ncols, dtype=np.intc)
self._coeff_matrix = np.zeros((n + nc, n + nc), dtype=np.float64)
def set_initial_states(self, q, qd):
assert q.shape == self._q.shape
assert qd.shape == self._q.shape
self._set_gen_coordinates(q)
self._set_gen_velocities(qd)
self._pos_history[0] = q.copy()
self._vel_history[0] = qd.copy()
def set_time_array(self, duration, spacing):
if duration > spacing:
time_array = np.arange(0, duration, spacing)
step_size = spacing
elif duration < spacing:
time_array, step_size = np.linspace(0, duration, spacing, retstep=True)
else:
raise ValueError('Time array is not properly sampled.')
self.time_array = time_array
self.step_size = step_size
self._construct_containers(time_array.size)
self.set_initial_states(self.model._q, self.model._qd)
def eval_reactions(self):
self._reactions = {}
time_array = self.time_array
bar_length = len(time_array)
print("\nEvaluating System Constraints' Forces.")
t0 = time.perf_counter()
dt = self.step_size
for i, t in enumerate(time_array):
# Updating the progress bar
progress_bar(bar_length, i, t0, t+dt)
self._set_time(t)
self._set_gen_coordinates(self._pos_history[i])
self._set_lagrange_multipliers(self._lgr_history[i])
reactions = self._eval_reactions_eq()
self._reactions[i] = reactions
values = {i:np.concatenate(list(v.values())) for i,v in self._reactions.items()}
self.reactions_dataframe = pd.DataFrame(
data = np.concatenate(list(values.values()),1).T,
columns = self._reactions_indicies)
self.reactions_dataframe['time'] = time_array
def _initialize_model(self):
model = self.model
model.initialize(self._q, self._qd, self._qdd, self._lgr)
def _create_indicies(self):
model = self.model
sorted_coordinates = {v:k for k,v in model.indicies_map.items()}
self._coordinates_indicies = []
for name in sorted_coordinates.values():
self._coordinates_indicies += ['%s.%s'%(name, i)
for i in ['x', 'y', 'z', 'e0', 'e1', 'e2', 'e3']]
self._reactions_indicies = []
for name in model.reactions_indicies:
self._reactions_indicies += ['%s.%s'%(name, i)
for i in ['x','y','z']]
def _construct_containers(self, size=None):
self._pos_history = {}#np.empty((size,), dtype=np.ndarray)
self._vel_history = {}#np.empty((size,), dtype=np.ndarray)
self._acc_history = {}#np.empty((size,), dtype=np.ndarray)
self._lgr_history = {}#np.empty((size,), dtype=np.ndarray)
def _creat_results_dataframes(self):
columns = self._coordinates_indicies
constraints = self._reactions_indicies
pos_data = list(self._pos_history.values())
vel_data = list(self._vel_history.values())
acc_data = list(self._acc_history.values())
lgr_data = list(self._lgr_history.values())
self.pos_dataframe = pd.DataFrame(
data = np.concatenate(pos_data,1).T,
columns = columns)
self.vel_dataframe = pd.DataFrame(
data = np.concatenate(vel_data,1).T,
columns = columns)
self.acc_dataframe = pd.DataFrame(
data = np.concatenate(acc_data,1).T,
columns = columns)
self.lgr_dataframe = pd.DataFrame(
data = np.concatenate(lgr_data,1).T,
columns = range(self.model.nc))
time_array = self.time_array
self.pos_dataframe['time'] = time_array
self.vel_dataframe['time'] = time_array
self.acc_dataframe['time'] = time_array
self.lgr_dataframe['time'] = time_array
def _assemble_equations(self, data):
mat = np.concatenate(data)
return mat
def _set_time(self, t):
self.model.t = t
def _set_gen_coordinates(self, q):
self._q[:] = q
def _set_gen_velocities(self, qd):
self._qd[:] = qd
def _set_gen_accelerations(self, qdd):
self._qdd[:] = qdd
def _set_lagrange_multipliers(self, lgr):
self._lgr[:] = lgr
def _eval_pos_eq(self):
self.model.eval_pos_eq()
data = self.model.pos_eq_blocks
mat = self._assemble_equations(data)
return mat
def _eval_vel_eq(self):
self.model.eval_vel_eq()
data = self.model.vel_eq_blocks
mat = self._assemble_equations(data)
return mat
def _eval_acc_eq(self):
self.model.eval_acc_eq()
data = self.model.acc_eq_blocks
mat = self._assemble_equations(data)
return mat
def _eval_jac_eq(self):
self.model.eval_jac_eq()
rows = self.model.jac_rows
cols = self.model.jac_cols
data = self.model.jac_eq_blocks
shape = (self.model.nc, self.model.n)
matrix_assembler(self._jac_, data, rows, cols, shape)
return self._jac_
def _eval_mass_eq(self):
self.model.eval_mass_eq()
data = self.model.mass_eq_blocks
n = self.model.n
rows = cols = self._mass_matrix_rows
matrix_assembler(self._mass, data, rows, cols, (n, n))
return self._mass
def _eval_frc_eq(self):
self.model.eval_frc_eq()
data = self.model.frc_eq_blocks
mat = self._assemble_equations(data)
return mat
def _eval_reactions_eq(self):
self.model.eval_reactions_eq()
return self.model.reactions
# @profile
def _solve_constraints(self, guess):
self._set_gen_coordinates(guess)
A = self._eval_jac_eq()
b = self._eval_pos_eq()
lu, p = self._factorize_jacobian(A)
delta_q = sc.linalg.lu_solve((lu, p), -b)
#delta_q = solve(A, -b)
itr=0
while np.linalg.norm(delta_q)>1e-4:
# print(np.linalg.norm(delta_q))
guess += delta_q
self._set_gen_coordinates(guess)
b = self._eval_pos_eq()
delta_q = sc.linalg.lu_solve((lu, p), -b)
if (itr % 10) == 0 and itr != 0:
#print('Updating Jacobian\n')
A = self._eval_jac_eq()
lu, p = self._factorize_jacobian(A)
delta_q = sc.linalg.lu_solve((lu, p), -b)
if itr > 50:
print("Iterations exceded \n")
#raise ValueError("Iterations exceded \n")
break
itr+=1
self._jac = self._eval_jac_eq()
def _factorize_jacobian(self, jacobian):
lu, p = sc.linalg.lu_factor(jacobian)
return lu, p
|
py | 7df775eaf9932eaf1b3e12a2a9314a928c0bdfa8 | # The MIT License (MIT)
# Copyright (c) 2021 Tom J. Sun
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import ast
import sys
import astor
source_ast = ast.parse(open(sys.argv[1], 'r').read())
for node in ast.walk(source_ast):
if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef, ast.Module)):
continue
if ast.get_docstring(node) is not None:
node.body = node.body[1:]
if len(node.body) == 0:
node.body.append(ast.Pass())
with open(sys.argv[1], 'w') as outfile:
outfile.write(astor.to_source(source_ast))
|
py | 7df77600a64a7f7706965fc1563c4ac2ba7cdc1e | # Generated by Django 2.2.1 on 2019-05-27 10:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('employees', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='employee',
name='web',
field=models.CharField(max_length=100),
),
]
|
py | 7df7769795dfb0e5971ea828821c13a54f50504d | import argparse
from argparse import Namespace
from corc.defaults import (
ANSIBLE,
CLUSTER,
CONFIG,
INSTANCE,
JOB,
OCI,
STORAGE,
STORAGE_S3,
VCN,
)
from corc.cli.configurer.ansible import valid_ansible_group
from corc.cli.parsers.cluster.cluster import valid_cluster_group
from corc.cli.parsers.config.config import valid_config_group
from corc.cli.parsers.instance.instance import valid_instance_group
from corc.cli.parsers.job.job import valid_job_group
from corc.cli.parsers.network.vcn import valid_vcn_group
from corc.cli.parsers.providers.oci.profile import valid_oci_group
from corc.cli.parsers.storage.storage import valid_storage_group
from corc.cli.parsers.storage.s3 import valid_s3_group
def strip_argument_prefix(arguments, prefix=""):
return {k.replace(prefix, ""): v for k, v in arguments.items()}
def _get_arguments(arguments, startswith=""):
return {k: v for k, v in arguments.items() if k.startswith(startswith)}
argument_groups = {
ANSIBLE: valid_ansible_group,
CLUSTER: valid_cluster_group,
INSTANCE: valid_instance_group,
CONFIG: valid_config_group,
JOB: valid_job_group,
OCI: valid_oci_group,
STORAGE: valid_storage_group,
STORAGE_S3: valid_s3_group,
VCN: valid_vcn_group,
}
def extract_arguments(arguments, argument_types, strip_group_prefix=True):
stripped_args = {}
for argument_group in argument_types:
arguments_dict = vars(arguments)
group_args = _get_arguments(arguments_dict, argument_group.lower())
if strip_group_prefix:
group_args = strip_argument_prefix(group_args, argument_group.lower() + "_")
stripped_args.update(group_args)
return Namespace(**stripped_args)
def wrap_extract_arguments(arguments, argument_types, strip_group_prefix=True):
wrapped_arguments = {}
for argument_group in argument_types:
lower_argument_group = argument_group.lower()
arguments_dict = vars(arguments)
group_args = _get_arguments(arguments_dict, lower_argument_group)
if strip_group_prefix:
group_args = strip_argument_prefix(group_args, lower_argument_group + "_")
wrapped_arguments[lower_argument_group] = group_args
return wrapped_arguments
def get_arguments(argument_types, strip_group_prefix=True, parser=None):
if not parser:
parser = argparse.ArgumentParser()
for argument_group in argument_types:
if argument_group in argument_groups:
argument_groups[argument_group](parser)
args, unknown = parser.parse_known_intermixed_args()
if strip_group_prefix:
stripped_args = {}
for argument_group in argument_types:
group_args = _get_arguments(vars(args), argument_group.lower())
group_args = strip_argument_prefix(group_args, argument_group.lower() + "_")
stripped_args.update(group_args)
return Namespace(**stripped_args)
return args
|
py | 7df777617924abd00a039f9b96571b4d0d6283af | from common.execution.fill import Fill
from common.execution.position import Position
class Portfolio:
def __init__(self, name: str):
self.name = name
self.positions = {}
    def add_position(self, *positions: Position):
        for position in positions:
            # Skip instruments that already have a position; keep processing the rest.
            if position.instrument in self.positions:
                continue
            self.positions[position.instrument] = position
def add_fills(self, fills: Fill):
if hasattr(fills, '__iter__'):
for fill in fills:
self.__get_position__(fill).add_fill(fill)
else:
fill = fills
self.__get_position__(fill).add_fill(fill)
def __get_position__(self, fill):
pos = self.positions.get(fill.instrument)
if pos is None:
pos = Position(fill.instrument)
self.positions[fill.instrument] = pos
return pos
def make_copy(self):
strategy = Portfolio(self.name)
for position in self.positions.values():
strategy.add_position(Position(position.instrument, position.quantity, position.price, position.fixed_pnl))
return strategy
def fixed_pnl(self):
pnl = 0
for position in self.positions.values():
pnl += position.fixed_pnl
return pnl
def is_closed(self):
for position in self.positions.values():
if position.quantity != 0:
return False
return True
|
py | 7df778262def66c96ca7bdd28187a2cc7abc7153 | import cv2
import numpy as np
import argparse, sys, os
from GUIdriver import *
import os.path
from os import path
def endprogram():
print ("\nProgram terminated!")
sys.exit()
#Reading the image by parsing the argument
text = str(ImageFile)
imgid=text
print ("\n*********************\nImage : " + ImageFile + "\n*********************")
img = cv2.imread(text)
img = cv2.resize(img ,((int)(img.shape[1]/5),(int)(img.shape[0]/5)))
original = img.copy()
neworiginal = img.copy()
cv2.imshow('original',img)
# Count pixels in shades of white (p). If they exceed a fixed percentage, they are excluded, so that a white background or white patches caused by camera flash can be separated from the leaf.
p = 0
for i in range(img.shape[0]):
for j in range(img.shape[1]):
B = img[i][j][0]
G = img[i][j][1]
R = img[i][j][2]
if (B > 110 and G > 110 and R > 110):
p += 1
#finding the % of pixels in shade of white
totalpixels = img.shape[0]*img.shape[1]
per_white = 100 * p/totalpixels
'''
print 'percentage of white: ' + str(per_white) + '\n'
print 'total: ' + str(totalpixels) + '\n'
print 'white: ' + str(p) + '\n'
'''
#excluding all the pixels with colour close to white if they are more than 10% in the image
if per_white > 10:
img[i][j] = [200,200,200]
cv2.imshow('color change', img)
#Guassian blur
blur1 = cv2.GaussianBlur(img,(3,3),1)
#mean-shift algo
newimg = np.zeros((img.shape[0], img.shape[1],3),np.uint8)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER , 10 ,1.0)
img = cv2.pyrMeanShiftFiltering(blur1, 20, 30, newimg, 0, criteria)
cv2.imshow('means shift image',img)
#Guassian blur
blur = cv2.GaussianBlur(img,(11,11),1)
#Canny-edge detection
canny = cv2.Canny(blur, 160, 290)
canny = cv2.cvtColor(canny,cv2.COLOR_GRAY2BGR)
#creating border around image to close any open curve cut by the image border
#bordered = cv2.copyMakeBorder(canny,10,10,10,10, cv2.BORDER_CONSTANT, (255,255,255)) #function not working(not making white coloured border)
#bordered = cv2.rectangle(canny,(-2,-2),(275,183),(255,255,255),3)
#cv2.imshow('Canny on meanshift bordered image',bordered)
#contour to find leafs
bordered = cv2.cvtColor(canny,cv2.COLOR_BGR2GRAY)
contours,hierarchy = cv2.findContours(bordered, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
maxC = 0
for x in range(len(contours)): #if take max or one less than max then will not work in
if len(contours[x]) > maxC: # pictures with zoomed leaf images
maxC = len(contours[x])
maxid = x
perimeter = cv2.arcLength(contours[maxid],True)
#print perimeter
Tarea = cv2.contourArea(contours[maxid])
cv2.drawContours(neworiginal,contours[maxid],-1,(0,0,255))
cv2.imshow('Contour',neworiginal)
#cv2.imwrite('Contour complete leaf.jpg',neworiginal)
#Creating rectangular roi around contour
height, width, _ = canny.shape
min_x, min_y = width, height
max_x = max_y = 0
frame = canny.copy()
# computes the bounding box for the contour, and draws it on the frame,
for contour, hier in zip(contours, hierarchy):
(x,y,w,h) = cv2.boundingRect(contours[maxid])
min_x, max_x = min(x, min_x), max(x+w, max_x)
min_y, max_y = min(y, min_y), max(y+h, max_y)
if w > 80 and h > 80:
#cv2.rectangle(frame, (x,y), (x+w,y+h), (255, 0, 0), 2) #we do not draw the rectangle as it interferes with contour later on
roi = img[y:y+h , x:x+w]
originalroi = original[y:y+h , x:x+w]
if (max_x - min_x > 0 and max_y - min_y > 0):
roi = img[min_y:max_y , min_x:max_x]
originalroi = original[min_y:max_y , min_x:max_x]
#cv2.rectangle(frame, (min_x, min_y), (max_x, max_y), (255, 0, 0), 2) #we do not draw the rectangle as it interferes with contour
cv2.imshow('ROI', frame)
cv2.imshow('rectangle ROI', roi)
img = roi
#Changing colour-space
#imghsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
imghls = cv2.cvtColor(roi, cv2.COLOR_BGR2HLS)
cv2.imshow('HLS', imghls)
imghls[np.where((imghls==[30,200,2]).all(axis=2))] = [0,200,0]
cv2.imshow('new HLS', imghls)
#Only hue channel
huehls = imghls[:,:,0]
cv2.imshow('img_hue hls',huehls)
#ret, huehls = cv2.threshold(huehls,2,255,cv2.THRESH_BINARY)
huehls[np.where(huehls==[0])] = [35]
cv2.imshow('img_hue with my mask',huehls)
#Thresholding on hue image
ret, thresh = cv2.threshold(huehls,28,255,cv2.THRESH_BINARY_INV)
cv2.imshow('thresh', thresh)
#Masking thresholded image from original image
mask = cv2.bitwise_and(originalroi,originalroi,mask = thresh)
cv2.imshow('masked out img',mask)
#Finding contours for all infected regions
contours,heirarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
Infarea = 0
for x in range(len(contours)):
cv2.drawContours(originalroi,contours[x],-1,(0,0,255))
cv2.imshow('Contour masked',originalroi)
#Calculating area of infected region
Infarea += cv2.contourArea(contours[x])
if Infarea > Tarea:
Tarea = img.shape[0]*img.shape[1]
print ('_________________________________________\n Perimeter: %.2f' %(perimeter)
+ '\n_________________________________________')
print ('_________________________________________\n Total area: %.2f' %(Tarea)
+ '\n_________________________________________')
#Finding the percentage of infection in the leaf
print ('_________________________________________\n Infected area: %.2f' %(Infarea)
+ '\n_________________________________________')
try:
per = 100 * Infarea/Tarea
except ZeroDivisionError:
per = 0
print ('_________________________________________\n Percentage of infection region: %.2f' %(per)
+ '\n_________________________________________')
cv2.imshow('orig',original)
"""****************************************update dataset*******************************************"""
#Updating a dataset file to maintain log of the leaf images identified.
print("\nDo you want to run the classifier(Y/N):")
n = cv2.waitKey(0) & 0xFF
#import csv file library
import csv
filename = 'Datasetunlabelledlog.csv'
fieldnames = ['fortnum', 'imgid', 'feature1', 'feature2', 'feature3']
import inspect
if (n == ord('y') or n== ord('Y')):
print ('Appending to ' + str(filename)+ '...')
print ('\nFile ' + str(filename)+ ' updated!' )
try:
results = []
with open(os.path.join('datasetlog',filename)) as File:
reader = csv.DictReader(File)
for rows in reader:
results.append(rows)
print(results)
try:
#first character(fortnum) of previously appended line
preflod = int(results[len(results)-1]['fortnum'])
#if new file
except IndexError:
preflod = -1
if preflod < 9:
fortnum = preflod + 1
elif preflod > 9:
fortnum = 0
File.close()
L = {'fortnum': str(fortnum), 'imgid': os.path.basename(imgid), 'feature1': str(Tarea), 'feature2': str(Infarea), 'feature3': str(perimeter)}
with open(os.path.join('datasetlog',filename),'a+') as File:
writer = csv.DictWriter(File, fieldnames = fieldnames)
writer.writerow(L)
File.close()
except IOError:
if(path.exists('datasetlog')== False) :
os.mkdir('datasetlog')
fortnum = 0
L = {'fortnum': str(fortnum), 'imgid': os.path.basename(imgid), 'feature1': str(Tarea), 'feature2': str(Infarea), 'feature3': str(perimeter)}
print("HI")
with open(os.path.join('datasetlog',filename),'w+') as File:
writer = csv.DictWriter(File, fieldnames = fieldnames)
writer.writeheader()
writer.writerow(L)
File.close()
finally:
import classifier
elif (n == ord('n') or n == ord('N')) :
print ('File not updated! \nSuccessfully terminated!')
else:
print ('invalid input!')
endprogram()
|
py | 7df77a3b922eb18e7050ff6012f2d44cac8af909 | import math, logging
from typing import Union, Optional
import pydub
import simpleaudio as sa
from klgists.common.operators import approxeq
class BadVolumeException(Exception): pass
class BadAudioLengthException(Exception): pass
class AudioInfo:
"""All information necessary to play an audio file.
    Most importantly, contains a static build() method that adjusts the volume and extends or
    truncates an audio segment as needed for a specified length.
"""
def __init__(self, name: str, wave_obj: sa.WaveObject, duration_ms: Optional[float], intensity: float):
self.name = name
self.wave_obj = wave_obj
self.duration_ms = duration_ms
self.intensity = intensity
def __str__(self):
return "AudioInfo({}ms@{}dB)".format(self.duration_ms, round(self.intensity, 5))
@staticmethod
def build(
name: str, song: pydub.AudioSegment,
applied_length: Optional[int]=None, volume: int=255, volume_floor: int = -50,
bytes_per_sample: int=2, sample_rate: int=44100
):
if applied_length is not None and applied_length < 0:
raise BadAudioLengthException("The length is {} but cannot be negative".format(applied_length))
if volume < 0 or volume > 255:
raise BadVolumeException("The volume is {} but must be 0–255".format(volume))
if applied_length is None:
resized = song
else:
n_repeats = math.ceil(applied_length / len(song))
resized = (song * n_repeats)[0:applied_length]
if volume == 0 or applied_length == 0:
final = pydub.AudioSegment.silent(duration=0.5)
else:
final = resized + (volume * (volume_floor / 255) - volume_floor)
if applied_length is not None:
assert len(resized) << approxeq >> applied_length or applied_length == 1,\
"The actual audio stimulus length is {}, but the length in stimulus_frames is {}".format(len(resized), applied_length)
play_obj = sa.WaveObject(final.raw_data, 1, bytes_per_sample, sample_rate)
return AudioInfo(name, play_obj, applied_length, volume)
__all__ = ['AudioInfo']
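# Minimal usage sketch; 'tone.wav' and the parameter values below are hypothetical
# placeholders, not values required by this module. Builds a 500 ms stimulus at a
# chosen volume setting and plays it through simpleaudio.
if __name__ == '__main__':
    segment = pydub.AudioSegment.from_wav('tone.wav')  # hypothetical mono, 44.1 kHz input
    info = AudioInfo.build('tone', segment, applied_length=500, volume=200)
    info.wave_obj.play().wait_done()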
|
py | 7df77ade2d964f4ef6ac177f12f5f54142f0c727 | """
MIT License
Copyright (c) 2020 Dominik Kopczynski - dominik.kopczynski {at} isas.de
Nils Hoffmann - nils.hoffmann {at} isas.de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
import os
import csv
import pygoslin
from pygoslin.parser.Parser import *
from pygoslin.parser.ParserCommon import *
from pygoslin.domain.LipidLevel import *
from pygoslin.domain.Element import compute_sum_formula
class LipidMapsTest(unittest.TestCase):
def test_parser(self):
lipid_data = []
file_name = os.path.join("pygoslin", "data", "goslin", "testfiles", "fatty-acids-test.csv")
with open(file_name, mode = "rt") as infile:
lipidreader = csv.reader(infile, delimiter=',', quotechar='"')
for row in lipidreader:
lipid_data.append(row)
parser = FattyAcidParser()
shorthand_parser = ShorthandParser()
formula_parser = SumFormulaParser()
for lipid_row in lipid_data:
lmid, lipid_name, formula, expected_lipid_name = lipid_row
formula = compute_sum_formula(formula_parser.parse(formula))
lipid = parser.parse(lipid_name)
self.assertEqual(expected_lipid_name, lipid.get_lipid_string(), "%s: %s != %s (computed, fa)" % (lmid, expected_lipid_name, lipid.get_lipid_string()))
lipid_formula = lipid.get_sum_formula()
self.assertEqual(formula, lipid_formula, "formula %s: %s != %s (computed, fa)" % (lmid, formula, lipid_formula))
if lipid_name.lower().find("cyano") >= 0: continue
lipid2 = shorthand_parser.parse(lipid.get_lipid_string())
lipid_formula = lipid2.get_sum_formula()
self.assertEqual(formula, lipid_formula, "lipid %s: %s != %s (computed, shorthand)" % (lmid, formula, lipid_formula))
lipid2 = shorthand_parser.parse(lipid.get_lipid_string(LipidLevel.MOLECULAR_SPECIES))
lipid_formula = lipid2.get_sum_formula()
self.assertEqual(formula, lipid_formula, "molecular lipid '%s': %s != %s (computed, shorthand)" % (lmid, formula, lipid_formula))
lipid2 = shorthand_parser.parse(lipid.get_lipid_string(LipidLevel.SPECIES))
lipid_formula = lipid2.get_sum_formula()
self.assertEqual(formula, lipid_formula, "species lipid '%s': %s != %s (computed, shorthand)" % (lmid, formula, lipid_formula))
|
py | 7df77b9559be16be81213688e21ae0839b72e3e8 | from sqlalchemy import text
from analogues.conf import cdsdb
from grib import GribFile
import numpy as np
from analogues.fingerprint import FingerPrint
class FieldSQL:
def __init__(self):
self._fingerprint_table = None
self._file_table = None
self._sql_dates = None
self._minimum = None
self._maximum = None
self._mean = None
self._stddev = None
self._smoothness1_maximum = None
self._smoothness1_average = None
self._smoothness1_average_no_constants = None
self._smoothness2_maximum = None
        self._smoothness2_average = None
        self._smoothness2_average_no_constants = None
        self._max_fingerprint_distance = None
self.update_missing_sql_fields()
self._SELECT_SAMPLE = None
self._SELECT_FIRST_SAMPLE = None
def seed(self, valid_dates):
insert = text("""
INSERT INTO {table} (valid_date) VALUES (:valid_date)
ON CONFLICT DO NOTHING;
""".format(table=self.fingerprint_table))
with cdsdb.begin() as connection:
for valid_date in valid_dates:
connection.execute(insert, valid_date=valid_date)
@property
def fingerprint_table(self):
if self._fingerprint_table is None:
self._fingerprint_table = "fingerprint_{param}_{domain}_{dataset}".format(param=self.param,
domain=self.domain,
dataset=self.dataset)
STMT = text("""
CREATE TABLE IF NOT EXISTS {table} (
valid_date TIMESTAMP NOT NULL UNIQUE,
-- Fingerprint
fingerprint_s INTEGER , -- should be smallint, but smallint is signed
fingerprint_r REAL , -- mean
field_min REAL,
field_max REAL,
-- FILE
file_id INTEGER, -- REFERENCES files(id),
position BIGINT,
-- Updated
updated TIMESTAMP NOT NULL DEFAULT ({now})
);
""".format(table=self._fingerprint_table,
now=cdsdb.sql_now))
with cdsdb.begin() as connection:
connection.execute(STMT)
# for col in ('field_min', 'field_max'):
# try:
# with cdsdb.begin() as connection:
# alter = "alter table {table} add column {col} real".format(table=self._fingerprint_table, col=col)
# connection.execute(text(alter))
# except Exception as e:
# print(e)
# pass
return self._fingerprint_table
@property
def file_table(self):
if self._file_table is None:
self._file_table = "file_{param}_{domain}_{dataset}".format(param=self.param,
domain=self.domain,
dataset=self.dataset)
STMT = text("""
CREATE TABLE IF NOT EXISTS {table} (
id {increment},
path TEXT UNIQUE NOT NULL --CHECK (path <> '')
);
""".format(table=self._file_table,
increment=cdsdb.sql_autoincrement))
with cdsdb.begin() as connection:
connection.execute(STMT)
return self._file_table
def fingerprints(self):
STMT = text("""
SELECT valid_date, fingerprint_r, fingerprint_s FROM {table}
WHERE fingerprint_r IS NOT NULL
AND fingerprint_s IS NOT NULL
AND file_id IS NOT NULL
""".format(table=self.fingerprint_table))
with cdsdb.begin() as connection:
result = connection.execute(STMT)
return dict((cdsdb.sql_to_datetime(d[0]), (d[1], d[2])) for d in result)
@property
def SELECT_SAMPLE(self):
if self._SELECT_SAMPLE is None:
self._SELECT_SAMPLE = text("""
SELECT path, position FROM {file_table}, {fingerprint_table}
WHERE {file_table}.id = {fingerprint_table}.file_id
AND valid_date=:valid_date
""".format(file_table=self.file_table, fingerprint_table=self.fingerprint_table))
return self._SELECT_SAMPLE
@property
def SELECT_FIRST_SAMPLE(self):
if self._SELECT_FIRST_SAMPLE is None:
self._SELECT_FIRST_SAMPLE = text("""
SELECT path, position FROM {file_table}, {fingerprint_table}
WHERE {file_table}.id = {fingerprint_table}.file_id AND fingerprint_r = (
SELECT MAX(fingerprint_r) FROM {fingerprint_table}
WHERE file_id IS NOT NULL)
""".format(file_table=self.file_table, fingerprint_table=self.fingerprint_table))
return self._SELECT_FIRST_SAMPLE
@property
def max_fingerprint_distance(self):
if self._max_fingerprint_distance is None:
GET_ALPHA = text("SELECT max_fingerprint_distance FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._max_fingerprint_distance = connection.execute(GET_ALPHA,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._max_fingerprint_distance is None:
self._max_fingerprint_distance = 0.0
return self._max_fingerprint_distance
@max_fingerprint_distance.setter
def max_fingerprint_distance(self, max_fingerprint_distance):
self._max_fingerprint_distance = max_fingerprint_distance
SET_ALPHA = text("""
INSERT INTO alpha (param, domain, dataset, max_fingerprint_distance)
VALUES (:param, :domain, :dataset, :max_fingerprint_distance)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET max_fingerprint_distance=:max_fingerprint_distance
""")
with cdsdb.begin() as connection:
connection.execute(SET_ALPHA,
param=self.param,
domain=self.domain,
dataset=self.dataset,
max_fingerprint_distance=max_fingerprint_distance)
return self._max_fingerprint_distance
########################################################################
@property
def smoothness1_maximum(self):
if self._smoothness1_maximum is None:
GET_MINIMUM = text("SELECT smoothness1_maximum FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._smoothness1_maximum = connection.execute(GET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._smoothness1_maximum is None:
self._smoothness1_maximum = 0.0
return self._smoothness1_maximum
@smoothness1_maximum.setter
def smoothness1_maximum(self, smoothness1_maximum):
self._smoothness1_maximum = smoothness1_maximum
SET_MINIMUM = text("""
INSERT INTO alpha (param, domain, dataset, smoothness1_maximum)
VALUES (:param, :domain, :dataset, :smoothness1_maximum)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET smoothness1_maximum=:smoothness1_maximum
""")
with cdsdb.begin() as connection:
connection.execute(SET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset,
smoothness1_maximum=smoothness1_maximum)
return self._smoothness1_maximum
@property
def smoothness1_average(self):
if self._smoothness1_average is None:
GET_MINIMUM = text("SELECT smoothness1_average FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._smoothness1_average = connection.execute(GET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._smoothness1_average is None:
self._smoothness1_average = 0.0
return self._smoothness1_average
@smoothness1_average.setter
def smoothness1_average(self, smoothness1_average):
self._smoothness1_average = smoothness1_average
SET_MINIMUM = text("""
INSERT INTO alpha (param, domain, dataset, smoothness1_average)
VALUES (:param, :domain, :dataset, :smoothness1_average)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET smoothness1_average=:smoothness1_average
""")
with cdsdb.begin() as connection:
connection.execute(SET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset,
smoothness1_average=smoothness1_average)
return self._smoothness1_average
@property
def smoothness1_average_no_constants(self):
if self._smoothness1_average_no_constants is None:
GET_MINIMUM = text("SELECT smoothness1_average_no_constants FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._smoothness1_average_no_constants = connection.execute(GET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._smoothness1_average_no_constants is None:
self._smoothness1_average_no_constants = 0.0
return self._smoothness1_average_no_constants
@smoothness1_average_no_constants.setter
def smoothness1_average_no_constants(self, smoothness1_average_no_constants):
self._smoothness1_average_no_constants = smoothness1_average_no_constants
SET_MINIMUM = text("""
INSERT INTO alpha (param, domain, dataset, smoothness1_average_no_constants)
VALUES (:param, :domain, :dataset, :smoothness1_average_no_constants)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET smoothness1_average_no_constants=:smoothness1_average_no_constants
""")
with cdsdb.begin() as connection:
connection.execute(SET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset,
smoothness1_average_no_constants=smoothness1_average_no_constants)
return self._smoothness1_average_no_constants
########################################################################
@property
def smoothness2_maximum(self):
if self._smoothness2_maximum is None:
GET_MINIMUM = text("SELECT smoothness2_maximum FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._smoothness2_maximum = connection.execute(GET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._smoothness2_maximum is None:
self._smoothness2_maximum = 0.0
return self._smoothness2_maximum
@smoothness2_maximum.setter
def smoothness2_maximum(self, smoothness2_maximum):
self._smoothness2_maximum = smoothness2_maximum
SET_MINIMUM = text("""
INSERT INTO alpha (param, domain, dataset, smoothness2_maximum)
VALUES (:param, :domain, :dataset, :smoothness2_maximum)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET smoothness2_maximum=:smoothness2_maximum
""")
with cdsdb.begin() as connection:
connection.execute(SET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset,
smoothness2_maximum=smoothness2_maximum)
return self._smoothness2_maximum
@property
def smoothness2_average(self):
if self._smoothness2_average is None:
GET_MINIMUM = text("SELECT smoothness2_average FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._smoothness2_average = connection.execute(GET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._smoothness2_average is None:
self._smoothness2_average = 0.0
return self._smoothness2_average
@smoothness2_average.setter
def smoothness2_average(self, smoothness2_average):
self._smoothness2_average = smoothness2_average
SET_MINIMUM = text("""
INSERT INTO alpha (param, domain, dataset, smoothness2_average)
VALUES (:param, :domain, :dataset, :smoothness2_average)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET smoothness2_average=:smoothness2_average
""")
with cdsdb.begin() as connection:
connection.execute(SET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset,
smoothness2_average=smoothness2_average)
return self._smoothness2_average
@property
def smoothness2_average_no_constants(self):
if self._smoothness2_average_no_constants is None:
GET_MINIMUM = text("SELECT smoothness2_average_no_constants FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._smoothness2_average_no_constants = connection.execute(GET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._smoothness2_average_no_constants is None:
self._smoothness2_average_no_constants = 0.0
return self._smoothness2_average_no_constants
@smoothness2_average_no_constants.setter
def smoothness2_average_no_constants(self, smoothness2_average_no_constants):
self._smoothness2_average_no_constants = smoothness2_average_no_constants
SET_MINIMUM = text("""
INSERT INTO alpha (param, domain, dataset, smoothness2_average_no_constants)
VALUES (:param, :domain, :dataset, :smoothness2_average_no_constants)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET smoothness2_average_no_constants=:smoothness2_average_no_constants
""")
with cdsdb.begin() as connection:
connection.execute(SET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset,
smoothness2_average_no_constants=smoothness2_average_no_constants)
return self._smoothness2_average_no_constants
########################################################################
@property
def minimum(self):
if self._minimum is None:
GET_MINIMUM = text("SELECT minimum FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._minimum = connection.execute(GET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._minimum is None:
self._minimum = 0.0
return self._minimum
@minimum.setter
def minimum(self, minimum):
self._minimum = minimum
SET_MINIMUM = text("""
INSERT INTO alpha (param, domain, dataset, minimum)
VALUES (:param, :domain, :dataset, :minimum)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET minimum=:minimum
""")
with cdsdb.begin() as connection:
connection.execute(SET_MINIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset,
minimum=minimum)
return self._minimum
@property
def maximum(self):
if self._maximum is None:
GET_MAXIMUM = text("SELECT maximum FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._maximum = connection.execute(GET_MAXIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._maximum is None:
self._maximum = 0.0
return self._maximum
@maximum.setter
def maximum(self, maximum):
self._maximum = maximum
SET_MAXIMUM = text("""
INSERT INTO alpha (param, domain, dataset, maximum)
VALUES (:param, :domain, :dataset, :maximum)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET maximum=:maximum
""")
with cdsdb.begin() as connection:
connection.execute(SET_MAXIMUM,
param=self.param,
domain=self.domain,
dataset=self.dataset,
maximum=maximum)
return self._maximum
@property
def mean(self):
if self._mean is None:
GET_MEAN = text("SELECT mean FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._mean = connection.execute(GET_MEAN,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._mean is None:
self._mean = 0.0
return self._mean
@mean.setter
def mean(self, mean):
self._mean = mean
SET_MEAN = text("""
INSERT INTO alpha (param, domain, dataset, mean)
VALUES (:param, :domain, :dataset, :mean)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET mean=:mean
""")
with cdsdb.begin() as connection:
connection.execute(SET_MEAN,
param=self.param,
domain=self.domain,
dataset=self.dataset,
mean=mean)
return self._mean
@property
def stddev(self):
if self._stddev is None:
GET_STDDEV = text("SELECT stddev FROM alpha where param=:param and domain=:domain and dataset=:dataset")
with cdsdb.begin() as connection:
self._stddev = connection.execute(GET_STDDEV,
param=self.param,
domain=self.domain,
dataset=self.dataset).scalar()
if self._stddev is None:
self._stddev = 0.0
return self._stddev
@stddev.setter
def stddev(self, stddev):
self._stddev = stddev
SET_STDDEV = text("""
INSERT INTO alpha (param, domain, dataset, stddev)
VALUES (:param, :domain, :dataset, :stddev)
ON CONFLICT (param, domain, dataset)
DO UPDATE SET stddev=:stddev
""")
with cdsdb.begin() as connection:
connection.execute(SET_STDDEV,
param=self.param,
domain=self.domain,
dataset=self.dataset,
stddev=stddev)
return self._stddev
def update_missing_sql_fields(self):
select = text("""
SELECT valid_date FROM {table}
WHERE file_id IS NOT NULL
AND (field_max IS NULL OR field_min IS NULL)
""".format(table=self.fingerprint_table))
update = text("""
UPDATE {table}
SET field_max = :field_max,
field_min = :field_min
WHERE valid_date = :valid_date
""".format(table=self.fingerprint_table))
with cdsdb.begin() as connection:
result = connection.execute(select)
dates = [cdsdb.sql_to_datetime(x[0]) for x in result]
count = 0
for d in dates:
count += 1
values = self.array(d)
# print('Update', d)
with cdsdb.begin() as connection:
connection.execute(update, field_max=np.amax(values), field_min=np.amin(values), valid_date=d)
if count:
print('update_missing_sql_fields', count)
def index_grib_file(self, target):
insert_files = text("""
INSERT INTO {table} (path) VALUES (:path)
--ON CONFLICT (path) DO NOTHING -- 9.5
""".format(table=self.file_table))
select_file_id = text("""
SELECT id FROM {table} WHERE path=:path
""".format(table=self.file_table))
# query_7 = text("""
# update {table}
# set file_id = :file_id,
# position = :position,
# fingerprint_r = :fingerprint_r,
# fingerprint_s = :fingerprint_s
# where valid_date = :valid_date
# """.format(table=self.fingerprint_table))
query_7 = text("""
INSERT INTO {table} (file_id,
position,
fingerprint_r,
fingerprint_s,
field_max,
field_min,
valid_date)
VALUES(:file_id, :position, :fingerprint_r, :fingerprint_s, :field_max, :field_min, :valid_date)
ON CONFLICT(valid_date) DO UPDATE
SET file_id = :file_id,
position = :position,
fingerprint_r = :fingerprint_r,
fingerprint_s = :fingerprint_s,
field_max = :field_max,
field_min = :field_min
""".format(table=self.fingerprint_table))
n = 0
with cdsdb.begin() as connection:
connection.execute(insert_files, path=target)
fileid = connection.execute(select_file_id,
path=target).scalar()
assert fileid is not None
for g in GribFile(target):
d = dict(file_id=fileid,
valid_date=g.valid_date,
position=int(g.offset))
finger = FingerPrint(g.array,
depth=3)
finger.to_db(d)
# print(query_7)
d['field_max'] = np.amax(g.array)
d['field_min'] = np.amin(g.array)
# print(d)
connection.execute(query_7, **d)
n += 1
print(self, 'added', n, 'field(s)')
@property
def sql_dates(self):
if self._sql_dates is None:
STMT = text("""
SELECT valid_date FROM {table}
WHERE file_id IS NOT NULL
ORDER BY valid_date
""".format(table=self.fingerprint_table))
with cdsdb.begin() as connection:
result = connection.execute(STMT)
self._sql_dates = [cdsdb.sql_to_datetime(x[0]) for x in result]
return self._sql_dates
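# A minimal subclass sketch (not part of the original module). FieldSQL appears to
# expect concrete subclasses to provide `param`, `domain` and `dataset` attributes
# plus an `array(valid_date)` method returning the gridded values; the names below
# ("2t", "europe", "era5") and the GRIB path in the comments are assumptions.
class Temperature2mSQL(FieldSQL):
    param = "2t"
    domain = "europe"
    dataset = "era5"
    def array(self, valid_date):
        # a real subclass would load the field values for `valid_date` from local
        # storage; the lookup depends on the surrounding project, so it is stubbed
        raise NotImplementedError
# Intended usage (needs a configured cdsdb connection and a real array()), e.g.:
# field = Temperature2mSQL()                   # creates the SQL tables if needed
# field.index_grib_file("/data/era5/2t.grib")  # fingerprints every GRIB message
# print(len(field.sql_dates), "dates indexed")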
|
py | 7df77e32d966a288590a0b2f98f336d40c74c007 | AccessTypeDict = {
"@graph": [
{
"@id": "dandi:AccessType",
"@type": "rdfs:Class",
"rdfs:comment": "An enumeration of access status options",
"rdfs:label": "Access status type",
"rdfs:subClassOf": {"@id": "schema:Enumeration"},
},
{
"@id": "dandi:OpenAccess",
"@type": "dandi:AccessType",
"rdfs:comment": "The dandiset is openly accessible",
"rdfs:label": "Open access",
},
{
"@id": "dandi:EmbargoedAccess",
"@type": "dandi:AccessType",
"rdfs:comment": "The dandiset is embargoed",
"rdfs:label": "Embargoed",
},
]
}
"""
Uncomment and add to accesstype dict when restricted access is implemented
{
"@id": "dandi:RestrictedAccess",
"@type": "dandi:AccessType",
"rdfs:comment": "The dandiset is restricted",
"rdfs:label": "Restricted",
},
"""
DigestTypeDict = {
"@graph": [
{
"@id": "dandi:DigestType",
"@type": "rdfs:Class",
"rdfs:comment": "An enumeration of checksum types",
"rdfs:label": "Checksum Type",
"rdfs:subClassOf": {"@id": "schema:Enumeration"},
},
{
"@id": "dandi:md5",
"@type": "dandi:DigestType",
"rdfs:comment": "MD5 checksum",
"rdfs:label": "MD5",
},
{
"@id": "dandi:sha1",
"@type": "dandi:DigestType",
"rdfs:comment": "SHA1 checksum",
"rdfs:label": "SHA1",
},
{
"@id": "dandi:sha2-256",
"@type": "dandi:DigestType",
"rdfs:comment": "SHA2-256 checksum",
"rdfs:label": "SHA2-256",
},
{
"@id": "dandi:sha3-256",
"@type": "dandi:DigestType",
"rdfs:comment": "SHA3-256 checksum",
"rdfs:label": "SHA3-256",
},
{
"@id": "dandi:blake2b-256",
"@type": "dandi:DigestType",
"rdfs:comment": "BLAKE2B-256 checksum",
"rdfs:label": "BLAKE2B-256",
},
{
"@id": "dandi:blake3",
"@type": "dandi:DigestType",
"rdfs:comment": "BLAKE3-256 checksum",
"rdfs:label": "BLAKE3-256",
},
{
"@id": "dandi:dandi-etag",
"@type": "dandi:DigestType",
"rdfs:comment": "S3-style ETag",
"rdfs:label": "DANDI ETag",
},
{
"@id": "dandi:dandi-zarr-checksum",
"@type": "dandi:DigestType",
"rdfs:comment": "DANDI Zarr checksum",
"rdfs:label": "DANDI Zarr",
},
]
}
IdentifierTypeDict = {
"@graph": [
{
"@id": "dandi:IdentifierType",
"@type": "rdfs:Class",
"rdfs:comment": "An enumeration of identifiers",
"rdfs:label": "License type",
"rdfs:subClassOf": {"@id": "schema:Enumeration"},
},
{
"@id": "dandi:doi",
"sameAs": "idorg:doi",
"@type": "dandi:IdentifierType",
"rdfs:label": "DOI",
},
{
"@id": "dandi:orcid",
"sameAs": "idorg:orcid",
"@type": "dandi:IdentifierType",
"rdfs:label": "ORCID",
},
{
"@id": "dandi:ror",
"sameAs": "https://ror.org/",
"@type": "dandi:IdentifierType",
"rdfs:label": "ROR",
},
{
"@id": "dandi:dandi",
"sameAs": "idorg:dandi",
"@type": "dandi:IdentifierType",
"rdfs:label": "DANDI",
},
{
"@id": "dandi:rrid",
"sameAs": "idorg:rrid",
"@type": "dandi:IdentifierType",
"rdfs:label": "RRID",
},
]
}
LicenseTypeDict = {
"@graph": [
{
"@id": "dandi:LicenseType",
"@type": "rdfs:Class",
"rdfs:comment": "An enumeration of supported licenses",
"rdfs:label": "License type",
"rdfs:subClassOf": {"@id": "schema:Enumeration"},
},
{
"@id": "spdx:CC0-1.0",
"rdfs:seeAlso": "https://creativecommons.org/publicdomain/zero/1.0/legalcode",
"@type": "dandi:LicenseType",
"rdfs:label": "CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
},
{
"@id": "spdx:CC-BY-4.0",
"rdfs:seeAlso": "https://creativecommons.org/licenses/by/4.0/legalcode",
"@type": "dandi:LicenseType",
"rdfs:label": "Attribution 4.0 International (CC BY 4.0)",
},
]
}
RelationTypeDict = {
"@graph": [
{
"@id": "dandi:RelationType",
"@type": "rdfs:Class",
"rdfs:comment": "An enumeration of resource relations",
"rdfs:label": "Resource relation type",
"rdfs:subClassOf": {"@id": "schema:Enumeration"},
"prov:wasDerivedFrom": (
"https://schema.datacite.org/meta/"
"kernel-4.2/doc/DataCite-MetadataKernel_v4.2.pdf"
),
},
{
"@id": "dcite:IsCitedBy",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates that B includes A in a citation",
"rdfs:label": "IsCitedBy",
},
{
"@id": "dcite:Cites",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates that A includes B in a citation",
"rdfs:label": "Cites",
},
{
"@id": "dcite:IsSupplementTo",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates that A is a supplement to B",
"rdfs:label": "IsSupplementTo",
},
{
"@id": "dcite:IsSupplementedBy",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates that B is a supplement to A",
"rdfs:label": "IsSupplementedBy",
},
{
"@id": "dcite:IsContinuedBy",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is continued by the work B",
"rdfs:label": "IsContinuedBy",
},
{
"@id": "dcite:Continues",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is a continuation of the work B",
"rdfs:label": "Continues",
},
{
"@id": "dcite:Describes",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A describes B",
"rdfs:label": "Describes",
},
{
"@id": "dcite:IsDescribedBy",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is described by B",
"rdfs:label": "IsDescribedBy",
},
{
"@id": "dcite:HasMetadata",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates resource A has additional metadata B",
"rdfs:label": "HasMetadata",
},
{
"@id": "dcite:IsMetadataFor",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates additional metadata A for a resource B",
"rdfs:label": "IsMetadataFor",
},
{
"@id": "dcite:HasVersion",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A has a version (B)",
"rdfs:label": "HasVersion",
},
{
"@id": "dcite:IsVersionOf",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is a version of B",
"rdfs:label": "IsVersionOf",
},
{
"@id": "dcite:IsNewVersionOf",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is a new edition of B",
"rdfs:label": "IsNewVersionOf",
},
{
"@id": "dcite:IsPreviousVersionOf",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is a previous edition of B",
"rdfs:label": "IsPreviousVersionOf",
},
{
"@id": "dcite:IsPartOf",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is a portion of B",
"rdfs:label": "IsPartOf",
},
{
"@id": "dcite:HasPart",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A includes the part B",
"rdfs:label": "HasPart",
},
{
"@id": "dcite:IsReferencedBy",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is used as a source of information by B",
"rdfs:label": "IsReferencedBy",
},
{
"@id": "dcite:References",
"@type": "dcite:RelationType",
"rdfs:comment": "Indicates B is used as a source of information for A",
"rdfs:label": "References",
},
{
"@id": "dcite:IsDocumentedBy",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates B is documentation about/explaining A",
"rdfs:label": "IsDocumentedBy",
},
{
"@id": "dcite:Documents",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is documentation about B",
"rdfs:label": "Documents",
},
{
"@id": "dcite:IsCompiledBy",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates B is used to compile or create A",
"rdfs:label": "IsCompiledBy",
},
{
"@id": "dcite:Compiles",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates B is the result of a compile or creation event using A",
"rdfs:label": "Compiles",
},
{
"@id": "dcite:IsVariantFormOf",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is a variant or different form of B",
"rdfs:label": "IsVariantFormOf",
},
{
"@id": "dcite:IsOriginalFormOf",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is the original form of B",
"rdfs:label": "IsOriginalFormOf",
},
{
"@id": "dcite:IsIdenticalTo",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates that A is identical to B",
"rdfs:label": "IsIdenticalTo",
},
{
"@id": "dcite:IsReviewedBy",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates that A is reviewed by B",
"rdfs:label": "IsReviewedBy",
},
{
"@id": "dcite:Reviews",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates that A is a review of B",
"rdfs:label": "Reviews",
},
{
"@id": "dcite:IsDerivedFrom",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates B is a source upon which A is based",
"rdfs:label": "IsDerivedFrom",
},
{
"@id": "dcite:IsSourceOf",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is a source upon which B is based",
"rdfs:label": "IsSourceOf",
},
{
"@id": "dcite:IsRequiredBy",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is required by B",
"rdfs:label": "IsRequiredBy",
},
{
"@id": "dcite:Requires",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A requires B",
"rdfs:label": "Requires",
},
{
"@id": "dcite:Obsoletes",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A replaces B",
"rdfs:label": "Obsoletes",
},
{
"@id": "dcite:IsObsoletedBy",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is replaced by B",
"rdfs:label": "IsObsoletedBy",
},
{
"@id": "dcite:IsPublishedIn",
"@type": "dandi:RelationType",
"rdfs:comment": "Indicates A is published in B",
"rdfs:label": "IsPublishedIn",
},
]
}
ParticipantRelationTypeDict = {
"@graph": [
{
"@id": "dandi:ParticipantRelationType",
"@type": "rdfs:Class",
"rdfs:comment": "An enumeration of participant relations",
"rdfs:label": "Participant relation type",
"rdfs:subClassOf": {"@id": "schema:Enumeration"},
"prov:wasDerivedFrom": (
"https://www.ebi.ac.uk/biosamples/docs/guides/relationships"
),
},
{
"@id": "dandi:isChildOf",
"@type": "dandi:ParticipantRelationType",
"rdfs:comment": "Indicates that A is a child of B",
"rdfs:label": "Child of",
},
{
"@id": "dandi:isParentOf",
"@type": "dandi:ParticipantRelationType",
"rdfs:comment": "Indicates that A is a parent of B",
"rdfs:label": "Parent of",
},
{
"@id": "dandi:isSiblingOf",
"@type": "dandi:ParticipantRelationType",
"rdfs:comment": "Indicates that A is a sibling of B",
"rdfs:label": "Sibling of",
},
{
"@id": "dandi:isMonozygoticTwinOf",
"@type": "dandi:ParticipantRelationType",
"rdfs:comment": "Indicates that A is a monozygotic twin of B",
"rdfs:label": "Monozygotic twin of",
},
{
"@id": "dandi:isDizygoticTwinOf",
"@type": "dandi:ParticipantRelationType",
"rdfs:comment": "Indicates that A is a dizygotic twin of B",
"rdfs:label": "Dizygotic twin of",
},
]
}
RoleTypeDict = {
"@graph": [
{
"@id": "dandi:RoleType",
"@type": "rdfs:Class",
"rdfs:comment": "An enumeration of roles",
"rdfs:label": "Role Type",
"rdfs:subClassOf": {"@id": "schema:Enumeration"},
},
{
"@id": "dcite:Author",
"@type": "dandi:RoleType",
"rdfs:comment": "Author",
"rdfs:label": "Author",
},
{
"@id": "dcite:Conceptualization",
"@type": "dandi:RoleType",
"rdfs:comment": "Conceptualization",
"rdfs:label": "Conceptualization",
},
{
"@id": "dcite:ContactPerson",
"@type": "dandi:RoleType",
"rdfs:comment": "Contact Person",
"rdfs:label": "Contact Person",
},
{
"@id": "dcite:DataCollector",
"@type": "dandi:RoleType",
"rdfs:comment": "Data Collector",
"rdfs:label": "Data Collector",
},
{
"@id": "dcite:DataCurator",
"@type": "dandi:RoleType",
"rdfs:comment": "Data Curator",
"rdfs:label": "Data Curator",
},
{
"@id": "dcite:DataManager",
"@type": "dandi:RoleType",
"rdfs:comment": "Data Manager",
"rdfs:label": "Data Manager",
},
{
"@id": "dcite:FormalAnalysis",
"@type": "dandi:RoleType",
"rdfs:comment": "Formal Analysis",
"rdfs:label": "Formal Analysis",
},
{
"@id": "dcite:FundingAcquisition",
"@type": "dandi:RoleType",
"rdfs:comment": "Funding Acquisition",
"rdfs:label": "Funding Acquisition",
},
{
"@id": "dcite:Investigation",
"@type": "dandi:RoleType",
"rdfs:comment": "Investigation",
"rdfs:label": "Investigation",
},
{
"@id": "dcite:Maintainer",
"@type": "dandi:RoleType",
"rdfs:comment": "Maintainer",
"rdfs:label": "Maintainer",
},
{
"@id": "dcite:Methodology",
"@type": "dandi:RoleType",
"rdfs:comment": "Methodology",
"rdfs:label": "Methodology",
},
{
"@id": "dcite:Producer",
"@type": "dandi:RoleType",
"rdfs:comment": "Producer",
"rdfs:label": "Producer",
},
{
"@id": "dcite:ProjectLeader",
"@type": "dandi:RoleType",
"rdfs:comment": "Project Leader",
"rdfs:label": "Project Leader",
},
{
"@id": "dcite:ProjectManager",
"@type": "dandi:RoleType",
"rdfs:comment": "Project Manager",
"rdfs:label": "Project Manager",
},
{
"@id": "dcite:ProjectMember",
"@type": "dandi:RoleType",
"rdfs:comment": "Project Member",
"rdfs:label": "Project Member",
},
{
"@id": "dcite:ProjectAdministration",
"@type": "dandi:RoleType",
"rdfs:comment": "Project Administration",
"rdfs:label": "Project Administration",
},
{
"@id": "dcite:Researcher",
"@type": "dandi:RoleType",
"rdfs:comment": "Researcher",
"rdfs:label": "Researcher",
},
{
"@id": "dcite:Resources",
"@type": "dandi:RoleType",
"rdfs:comment": "Resources",
"rdfs:label": "Resources",
},
{
"@id": "dcite:Software",
"@type": "dandi:RoleType",
"rdfs:comment": "Software",
"rdfs:label": "Software",
},
{
"@id": "dcite:Supervision",
"@type": "dandi:RoleType",
"rdfs:comment": "Supervision",
"rdfs:label": "Supervision",
},
{
"@id": "dcite:Validation",
"@type": "dandi:RoleType",
"rdfs:comment": "Validation",
"rdfs:label": "Validation",
},
{
"@id": "dcite:Visualization",
"@type": "dandi:RoleType",
"rdfs:comment": "Visualization",
"rdfs:label": "Visualization",
},
{
"@id": "dcite:Funder",
"@type": "dandi:RoleType",
"rdfs:comment": "Funder",
"rdfs:label": "Funder",
},
{
"@id": "dcite:Sponsor",
"@type": "dandi:RoleType",
"rdfs:comment": "Sponsor",
"rdfs:label": "Sponsor",
},
{
"@id": "dcite:StudyParticipant",
"@type": "dandi:RoleType",
"rdfs:comment": "Participant in a study",
"rdfs:label": "Study participant",
},
{
"@id": "dcite:Affiliation",
"@type": "dandi:RoleType",
"rdfs:comment": "Affiliated with an entity",
"rdfs:label": "Affiliation",
},
{
"@id": "dcite:EthicsApproval",
"@type": "dandi:RoleType",
"rdfs:comment": "Approved ethics protocol",
"rdfs:label": "Ethics approval",
},
{
"@id": "dcite:Other",
"@type": "dandi:RoleType",
"rdfs:comment": "Other",
"rdfs:label": "Other",
},
]
}
AgeReferenceTypeDict = {
"@graph": [
{
"@id": "dandi:AgeReferenceType",
"@type": "rdfs:Class",
"rdfs:comment": "An enumeration of age reference",
"rdfs:label": "Age reference type",
"rdfs:subClassOf": {"@id": "schema:Enumeration"},
},
{
"@id": "dandi:BirthReference",
"@type": "dandi:AgeReferenceType",
"rdfs:comment": "Age since Birth",
"rdfs:label": "BirthReference",
},
{
"@id": "dandi:GestationalReference",
"@type": "dandi:AgeReferenceType",
"rdfs:comment": "Age of a pregnancy (https://en.wikipedia.org/wiki/Gestational_age)",
"rdfs:label": "GestationalReference",
},
]
}
|
py | 7df77e50c7d48cee179fcc25c900ece847fc9427 |
from sys import maxsize
class Group:
def __init__(self, name=None,header=None,footer=None, id= None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return "%s:%s%s:%s" % (self.id, self.name,self.header,self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize |
py | 7df780114f105c36918f83f06d8341bac0cf235d | # -*- coding: utf-8 -*-
from pip_services3_commons.refer import References, Descriptor
from service_beacons_python.logic import BeaconsController
from service_beacons_python.persistence import BeaconsMemoryPersistence
from src.version1.BeaconsDirectClientV1 import BeaconsDirectClientV1
from .BeaconsClientV1Fixture import BeaconsClientV1Fixture
class TestBeaconsDirectClientV1():
@classmethod
def setup_class(cls):
cls.controller = BeaconsController()
cls.persistence = BeaconsMemoryPersistence()
cls.client = BeaconsDirectClientV1()
cls.references = References.from_tuples(
Descriptor('beacons', 'persistence', 'memory', 'default', '1.0'), cls.persistence,
Descriptor('beacons', 'controller', 'default', 'default', '1.0'), cls.controller,
Descriptor('beacons', 'client', 'http', 'default', '1.0'), cls.client
)
cls.controller.set_references(cls.references)
cls.client.set_references(cls.references)
cls.fixture = BeaconsClientV1Fixture(cls.client)
cls.persistence.open(None)
def teardown_method(self, method):
self.persistence.close(None)
def test_crud_operations(self):
self.fixture.test_crud_operations()
def test_calculate_position(self):
self.fixture.test_calculate_position()
|